/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2016 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   integer_valued_real_p
   integer_pow2p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
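/* For instance, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) expands to the
   operator lists XFLOORF, XFLOOR and XFLOORL, each listing the int,
   long and long long variants (BUILT_IN_IFLOORF, BUILT_IN_LFLOORF and
   BUILT_IN_LLFLOORF in the float case).  */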

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
    { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    (if (TYPE_UNSIGNED (type)
         || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))
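/* Illustrative example: (X / 4) / 8 is combined into X / 32; if
   multiplying the two constants overflows, an unsigned division
   folds to 0 instead.  */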

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C)  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && wi::add (@2, @1) == 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0)))
   (rshift (convert @0) { build_int_cst (integer_type_node,
                                         wi::exact_log2 (@2)); }))))
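/* Illustrative example: with A == 8, (X & -8) / 8 becomes X >> 3,
   since the bit_and clears exactly the bits the shift discards.  */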

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))
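/* Illustrative example: with -freciprocal-math, x / 5.0 becomes
   x * 0.2; without it only exactly representable reciprocals are
   used, e.g. x / 4.0 becomes x * 0.25 via exact_inverse.  */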

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (@1)
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
    (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if (wi::bit_and (@1, 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
   (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if (wi::bit_and (@1, 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
   (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
    (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
  (if (! FLOAT_TYPE_P (type)
       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
   (lshift @0 @2)))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
  (if (flag_associative_math
       && single_use (@3))
   (with
    { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
    (if (tem)
     (rdiv { tem; } @1)))))

/* Convert C1/(X*C2) into (C1/C2)/X  */
(simplify
 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
  (if (flag_reciprocal_math)
   (with
    { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
    (if (tem)
     (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

/* PR35691: Transform
   (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
   (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
(for bitop (bit_and bit_ior)
     cmp (eq ne)
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
  (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (wi::bit_not (@2) == @1)
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
  (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) | (~X & Y) -> X ^ Y.  */
(simplify
 (bit_ior (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
(simplify
 (bit_ior:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
 (if (wi::bit_not (@2) == @1)
  (bit_xor @0 @1)))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
  (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && wi::bit_and (@1, @3) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:c @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

(simplify
 (abs (abs@1 @0))
 @1)
(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_WRAPS (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ??? This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constant (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
  /* (x | y) & x -> x */
  /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
  (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bitop @0 (bitop @1 @2))))

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
  (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* If arg1 and arg2 are booleans (or any single bit type)
   then try to simplify:

   (~X & Y) -> X < Y
   (X & ~Y) -> Y < X
   (~X | Y) -> X <= Y
   (X | ~Y) -> Y <= X

   But only do this if our result feeds into a comparison as
   this transformation is not always a win, particularly on
   targets with and-not instructions.
   -> simplify_bitwise_binary_boolean */
(simplify
 (ne (bit_and:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
   (lt @0 @1)
   (gt @0 @1))))
(simplify
 (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
   (le @0 @1)
   (ge @0 @1))))

/* ~~x -> x */
(simplify
  (bit_not (bit_not @0))
  @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))



/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
   where zero_mask has 1's for all bits that are sure to be 0 in X
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (@0, @1);
        cst_emit = wi::bit_or (C0, @1);
      }
    else
      {
        C0 = @0;
        cst_emit = wi::bit_xor (@0, @1);
      }
  }
  (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
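/* Illustrative example: if value ranges prove the low byte of X is
   always zero, (X | 0x12) ^ 0x34 satisfies the first condition and is
   emitted as X ^ (0x12 ^ 0x34), i.e. X ^ 0x26.  */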

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
  (pointer_plus (pointer_plus:s @0 @1) @3)
  (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
  (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
  /* Conditionally look through a sign-changing conversion.  */
  (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
       && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
           || (GENERIC && type == TREE_TYPE (@1))))
   @1))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
  (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
  (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
   (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { HOST_WIDE_INT diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { HOST_WIDE_INT diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
    { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
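/* Illustrative example: if get_pointer_alignment_1 proves PTR is
   16-byte aligned with no misalignment offset, (uintptr_t) PTR & 7
   folds to 0, because the low bits of the address are known from the
   alignment.  */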


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c (convert1? @0) (convert2? (negate @1)))
  /* Apply STRIP_NOPS on @0 and the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (minus (convert @0) (convert @1))))
 /* A - (-B) -> A + B */
 (simplify
  (minus (convert1? @0) (convert2? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (plus (convert @0) (convert @1))))
 /* -(-A) -> A */
 (simplify
  (negate (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (convert @1)))

 /* We can't reassociate floating-point unless -fassociative-math
    or fixed-point plus or minus because of saturation to +-Inf.  */
 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
      && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  */
  /* (A +- B) - A       ->  +- B */
  /* (A +- B) -+ B      ->  A */
  /* A - (A +- B)       -> -+ B */
  /* A +- (B -+ A)      ->  +- B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  (simplify
   (minus @0 (minus @0 @1))
   @1)

  /* (A +- CST) +- CST -> A + CST  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
    (simplify
     (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
     /* If the constant operation overflows we cannot do the transform
        as we would introduce undefined overflow, for example
        with (a - 1) + INT_MIN.  */
     (with { tree cst = const_binop (outer_op == inner_op
                                     ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
      (if (cst && !TREE_OVERFLOW (cst))
       (inner_op @0 { cst; } ))))))

  /* (CST - A) +- CST -> CST - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = const_binop (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert? (negate @0)) integer_each_onep)
   (if (!TYPE_OVERFLOW_TRAPS (type)
        && tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (bit_not (convert @0))))

  /* -1 - A -> ~A */
  (simplify
   (minus integer_all_onesp @0)
   (bit_not @0))

  /* (T)(P + A) - (T)P -> (T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @@0 @1))
     (convert @0))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (convert @1))))

  /* (T)P - (T)(P + A) -> -(T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert @0)
     (convert (add @@0 @1)))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (negate (convert @1)))))

  /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @@0 @1))
     (convert (add @0 @2)))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0
             && TREE_CODE (@2) == INTEGER_CST
             && tree_int_cst_sign_bit (@2) == 0))
     (minus (convert @1) (convert @2)))))))


/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */

(for minmax (min max FMIN FMAX)
 (simplify
  (minmax @0 @0)
  @0))
/* min(max(x,y),y) -> y.  */
(simplify
 (min:c (max:c @0 @1) @1)
 @1)
/* max(min(x,y),y) -> y.  */
(simplify
 (max:c (min:c @0 @1) @1)
 @1)
/* max(a,-a) -> abs(a).  */
(simplify
 (max:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (abs @0)))
/* min(a,-a) -> -abs(a).  */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))

(for minmax (FMIN FMAX)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.  */
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
      && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN @0 @1)
  (min @0 @1))
 (simplify
  (FMAX @0 @1)
  (max @0 @1)))
/* min (-A, -B) -> -max (A, B)  */
(for minmax (min max FMIN FMAX)
     maxmin (max min FMAX FMIN)
 (simplify
  (minmax (negate:s@2 @0) (negate:s@3 @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
           && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
   MAX (~X, ~Y) -> ~MIN (X, Y)  */
(for minmax (min max)
     maxmin (max min)
 (simplify
  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
  (bit_not (maxmin @0 @1))))

/* MIN (X, Y) == X -> X <= Y  */
(for minmax (min min max max)
     cmp    (eq  ne  eq  ne )
     out    (le  gt  ge  lt )
 (simplify
  (cmp:c (minmax:c @0 @1) @0)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
   MIN (X, 5) == 7 -> false  */
(for cmp (eq ne)
 (simplify
  (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
(for cmp (eq ne)
 (simplify
  (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2  */
(for minmax (min     min     max     max     min     min     max     max    )
     cmp    (lt      le      gt      ge      gt      ge      lt      le     )
     comb   (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
 (simplify
  (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
  (comb (cmp @0 @2) (cmp @1 @2))))
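/* Illustrative example: MIN (X, 9) < 5 becomes (X < 5) | (9 < 5), and
   the constant comparison then folds away, leaving X < 5.  */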

/* Simplifications of shift and rotates.  */

(for rotate (lrotate rrotate)
 (simplify
  (rotate integer_all_onesp@0 @1)
  @0))

/* Optimize -1 >> x for arithmetic right shifts.  */
(simplify
 (rshift integer_all_onesp@0 @1)
 (if (!TYPE_UNSIGNED (type)
      && tree_expr_nonnegative_p (@1))
  @0))

/* Optimize (x >> c) << c into x & (-1<<c).  */
(simplify
 (lshift (rshift @0 INTEGER_CST@1) @1)
 (if (wi::ltu_p (@1, element_precision (type)))
  (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))

/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
   types.  */
(simplify
 (rshift (lshift @0 INTEGER_CST@1) @1)
 (if (TYPE_UNSIGNED (type)
      && (wi::ltu_p (@1, element_precision (type))))
  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
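/* Illustrative examples: with c == 2, (x >> 2) << 2 becomes x & -4,
   and for unsigned x, (x << 2) >> 2 becomes x & (~0U >> 2).  */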

(for shiftrotate (lrotate rrotate lshift rshift)
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
                            build_int_cst (TREE_TYPE (@1),
                                           element_precision (type)), @1); }))

/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
(for op (lrotate rrotate rshift lshift)
 (simplify
  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
  (with { unsigned int prec = element_precision (type); }
   (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
        && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
    (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
        being well defined.  */
     (if (low >= prec)
      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
        { build_zero_cst (type); }
        (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
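/* Illustrative examples: (x >> 3) >> 5 becomes x >> 8; on a 32-bit
   type (x << 20) << 20 shifts by 40 >= 32 and folds to 0, while
   rotates reduce the combined count modulo the precision.  */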


/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
     icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))

/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
   if CST2 != 0.  */
(for cmp (ne eq)
 (simplify
  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
  (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
   (if (cand < 0
        || (!integer_zerop (@2)
            && wi::ne_p (wi::lshift (@0, cand), @2)))
    { constant_boolean_node (cmp == NE_EXPR, type); }
    (if (!integer_zerop (@2)
         && wi::eq_p (wi::lshift (@0, cand), @2))
     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
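/* Illustrative example: (4 << A) == 32 gives cand = ctz (32) - ctz (4)
   = 3 and 4 << 3 == 32, so it becomes A == 3, while (4 << A) == 6 can
   never hold and folds to false.  */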
f2e609c3 1545
1ffbaa3f
RB
1546/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1547 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1548 if the new mask might be further optimized. */
1549(for shift (lshift rshift)
1550 (simplify
44fc0a51
RB
1551 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1552 INTEGER_CST@2)
1ffbaa3f
RB
1553 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1554 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1555 && tree_fits_uhwi_p (@1)
1556 && tree_to_uhwi (@1) > 0
1557 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1558 (with
1559 {
1560 unsigned int shiftc = tree_to_uhwi (@1);
1561 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1562 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1563 tree shift_type = TREE_TYPE (@3);
1564 unsigned int prec;
1565
1566 if (shift == LSHIFT_EXPR)
fecfbfa4 1567 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f
RB
1568 else if (shift == RSHIFT_EXPR
1569 && (TYPE_PRECISION (shift_type)
1570 == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
1571 {
1572 prec = TYPE_PRECISION (TREE_TYPE (@3));
1573 tree arg00 = @0;
1574 /* See if more bits can be proven as zero because of
1575 zero extension. */
1576 if (@3 != @0
1577 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1578 {
1579 tree inner_type = TREE_TYPE (@0);
1580 if ((TYPE_PRECISION (inner_type)
1581 == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
1582 && TYPE_PRECISION (inner_type) < prec)
1583 {
1584 prec = TYPE_PRECISION (inner_type);
1585 /* See if we can shorten the right shift. */
1586 if (shiftc < prec)
1587 shift_type = inner_type;
1588 /* Otherwise X >> C1 is all zeros, so we'll optimize
1589 it into (X, 0) later on by making sure zerobits
1590 is all ones. */
1591 }
1592 }
dd4786fe 1593 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
1594 if (shiftc < prec)
1595 {
1596 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1597 zerobits <<= prec - shiftc;
1598 }
1599 /* For arithmetic shift if sign bit could be set, zerobits
1600 can actually contain sign bits, so no transformation is
1601 possible, unless MASK masks them all away. In that
1602 case the shift needs to be converted into logical shift. */
1603 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1604 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1605 {
1606 if ((mask & zerobits) == 0)
1607 shift_type = unsigned_type_for (TREE_TYPE (@3));
1608 else
1609 zerobits = 0;
1610 }
1611 }
1612 }
1613 /* ((X << 16) & 0xff00) is (X, 0). */
1614 (if ((mask & zerobits) == mask)
8fdc6c67
RB
1615 { build_int_cst (type, 0); }
1616 (with { newmask = mask | zerobits; }
1617 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1618 (with
1619 {
1620 /* Only do the transformation if NEWMASK is some integer
1621 mode's mask. */
1622 for (prec = BITS_PER_UNIT;
1623 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 1624 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
1625 break;
1626 }
1627 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 1628 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
1629 (with
1630 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1631 (if (!tree_int_cst_equal (newmaskt, @2))
1632 (if (shift_type != TREE_TYPE (@3))
1633 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1634 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 1635
84ff66b8
AV
1636/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1637 (X {&,^,|} C2) >> C1 into (X >> C1) {&,^,|} (C2 >> C1). */
98e30e51 1638(for shift (lshift rshift)
84ff66b8
AV
1639 (for bit_op (bit_and bit_xor bit_ior)
1640 (simplify
1641 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
1642 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1643 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
1644 (bit_op (shift (convert @0) @1) { mask; }))))))
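/* For example, with no intervening conversion, (x & 0xf0) << 4 becomes
   (x << 4) & 0xf00: the constant is shifted at compile time and the
   mask is applied after the shift instead of before it.  */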
98e30e51 1645
ad1d92ab
MM
1646/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
1647(simplify
1648 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
1649 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
1650 && (element_precision (TREE_TYPE (@0))
1651 <= element_precision (TREE_TYPE (@1))
1652 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
1653 (with
1654 { tree shift_type = TREE_TYPE (@0); }
1655 (convert (rshift (convert:shift_type @1) @2)))))
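/* Worked example for the arithmetic shift above, with 32-bit signed x:
   x = -8, y = 1 gives ~(-8) = 7, 7 >> 1 = 3, ~3 = -4, which equals
   -8 >> 1 = -4, so the two bit_not operations cancel.  */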
1656
1657/* ~(~X >>r Y) -> X >>r Y
1658 ~(~X <<r Y) -> X <<r Y */
1659(for rotate (lrotate rrotate)
1660 (simplify
1661 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
1662 (if ((element_precision (TREE_TYPE (@0))
1663 <= element_precision (TREE_TYPE (@1))
1664 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
1665 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
1666 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
1667 (with
1668 { tree rotate_type = TREE_TYPE (@0); }
1669 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 1670
d4573ffe
RB
1671/* Simplifications of conversions. */
1672
1673/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 1674(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
1675 (simplify
1676 (cvt @0)
1677 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
1678 || (GENERIC && type == TREE_TYPE (@0)))
1679 @0)))
1680
1681/* Contract view-conversions. */
1682(simplify
1683 (view_convert (view_convert @0))
1684 (view_convert @0))
1685
1686/* For integral conversions with the same precision or pointer
1687 conversions use a NOP_EXPR instead. */
1688(simplify
1689 (view_convert @0)
1690 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
1691 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1692 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
1693 (convert @0)))
1694
1695/* Strip inner integral conversions that do not change precision or size. */
1696(simplify
1697 (view_convert (convert@0 @1))
1698 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1699 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
1700 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1701 && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
1702 (view_convert @1)))
1703
1704/* Re-association barriers around constants and other re-association
1705 barriers can be removed. */
1706(simplify
1707 (paren CONSTANT_CLASS_P@0)
1708 @0)
1709(simplify
1710 (paren (paren@1 @0))
1711 @1)
1e51d0a2
RB
1712
1713/* Handle cases of two conversions in a row. */
1714(for ocvt (convert float fix_trunc)
1715 (for icvt (convert float)
1716 (simplify
1717 (ocvt (icvt@1 @0))
1718 (with
1719 {
1720 tree inside_type = TREE_TYPE (@0);
1721 tree inter_type = TREE_TYPE (@1);
1722 int inside_int = INTEGRAL_TYPE_P (inside_type);
1723 int inside_ptr = POINTER_TYPE_P (inside_type);
1724 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 1725 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
1726 unsigned int inside_prec = TYPE_PRECISION (inside_type);
1727 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
1728 int inter_int = INTEGRAL_TYPE_P (inter_type);
1729 int inter_ptr = POINTER_TYPE_P (inter_type);
1730 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 1731 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
1732 unsigned int inter_prec = TYPE_PRECISION (inter_type);
1733 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
1734 int final_int = INTEGRAL_TYPE_P (type);
1735 int final_ptr = POINTER_TYPE_P (type);
1736 int final_float = FLOAT_TYPE_P (type);
09240451 1737 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
1738 unsigned int final_prec = TYPE_PRECISION (type);
1739 int final_unsignedp = TYPE_UNSIGNED (type);
1740 }
64d3a1f0
RB
1741 (switch
1742 /* In addition to the cases of two conversions in a row
1743 handled below, if we are converting something to its own
1744 type via an object of identical or wider precision, neither
1745 conversion is needed. */
1746 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
1747 || (GENERIC
1748 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
1749 && (((inter_int || inter_ptr) && final_int)
1750 || (inter_float && final_float))
1751 && inter_prec >= final_prec)
1752 (ocvt @0))
1753
1754 /* Likewise, if the intermediate and initial types are either both
1755 float or both integer, we don't need the middle conversion if the
1756 former is wider than the latter and doesn't change the signedness
1757 (for integers). Avoid this if the final type is a pointer since
36088299 1758 then we sometimes need the middle conversion. */
64d3a1f0
RB
1759 (if (((inter_int && inside_int) || (inter_float && inside_float))
1760 && (final_int || final_float)
1761 && inter_prec >= inside_prec
36088299 1762 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
1763 (ocvt @0))
1764
1765 /* If we have a sign-extension of a zero-extended value, we can
1766 replace that by a single zero-extension. Likewise if the
1767 final conversion does not change precision we can drop the
1768 intermediate conversion. */
1769 (if (inside_int && inter_int && final_int
1770 && ((inside_prec < inter_prec && inter_prec < final_prec
1771 && inside_unsignedp && !inter_unsignedp)
1772 || final_prec == inter_prec))
1773 (ocvt @0))
1774
1775 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
1776 - some conversion is floating-point (overstrict for now), or
1777 - some conversion is a vector (overstrict for now), or
1778 - the intermediate type is narrower than both initial and
1779 final, or
1780 - the intermediate type and innermost type differ in signedness,
1781 and the outermost type is wider than the intermediate, or
1782 - the initial type is a pointer type and the precisions of the
1783 intermediate and final types differ, or
1784 - the final type is a pointer type and the precisions of the
1785 initial and intermediate types differ. */
64d3a1f0
RB
1786 (if (! inside_float && ! inter_float && ! final_float
1787 && ! inside_vec && ! inter_vec && ! final_vec
1788 && (inter_prec >= inside_prec || inter_prec >= final_prec)
1789 && ! (inside_int && inter_int
1790 && inter_unsignedp != inside_unsignedp
1791 && inter_prec < final_prec)
1792 && ((inter_unsignedp && inter_prec > inside_prec)
1793 == (final_unsignedp && final_prec > inter_prec))
1794 && ! (inside_ptr && inter_prec != final_prec)
36088299 1795 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
1796 (ocvt @0))
1797
1798 /* A truncation to an unsigned type (a zero-extension) should be
1799 canonicalized as bitwise and of a mask. */
1d510e04
JJ
1800 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
1801 && final_int && inter_int && inside_int
64d3a1f0
RB
1802 && final_prec == inside_prec
1803 && final_prec > inter_prec
1804 && inter_unsignedp)
1805 (convert (bit_and @0 { wide_int_to_tree
1806 (inside_type,
1807 wi::mask (inter_prec, false,
1808 TYPE_PRECISION (inside_type))); })))
1809
1810 /* If we are converting an integer to a floating-point that can
1811 represent it exactly and back to an integer, we can skip the
1812 floating-point conversion. */
1813 (if (GIMPLE /* PR66211 */
1814 && inside_int && inter_float && final_int &&
1815 (unsigned) significand_size (TYPE_MODE (inter_type))
1816 >= inside_prec - !inside_unsignedp)
1817 (convert @0)))))))
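/* As a concrete case of the truncation rule above, assuming 32-bit int
   and 16-bit unsigned short, on GIMPLE (int)(unsigned short) x with
   int x is canonicalized to x & 0xffff: the zero-extension is expressed
   as a mask instead of a pair of conversions.  */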
ea2042ba
RB
1818
1819/* If we have a narrowing conversion to an integral type that is fed by a
1820 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
1821 masks off bits outside the final type (and nothing else). */
1822(simplify
1823 (convert (bit_and @0 INTEGER_CST@1))
1824 (if (INTEGRAL_TYPE_P (type)
1825 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1826 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
1827 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
1828 TYPE_PRECISION (type)), 0))
1829 (convert @0)))
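/* For example, for unsigned int x, (unsigned char)(x & 0xff) becomes
   (unsigned char) x: the mask only clears bits that the narrowing
   conversion discards anyway.  */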
a25454ea
RB
1830
1831
1832/* (X /[ex] A) * A -> X. */
1833(simplify
2eef1fc1
RB
1834 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
1835 (convert @0))
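/* EXACT_DIV_EXPR guarantees a zero remainder, so e.g. (n /[ex] 4) * 4
   is simply n; the pattern never sees a division with a remainder,
   because such a division would not have been emitted as /[ex].  */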
eaeba53a 1836
a7f24614
RB
1837/* Canonicalization of binary operations. */
1838
1839/* Convert X + -C into X - C. */
1840(simplify
1841 (plus @0 REAL_CST@1)
1842 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 1843 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
1844 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1845 (minus @0 { tem; })))))
1846
6b6aa8d3 1847/* Convert x+x into x*2. */
a7f24614
RB
1848(simplify
1849 (plus @0 @0)
1850 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
1851 (mult @0 { build_real (type, dconst2); })
1852 (if (INTEGRAL_TYPE_P (type))
1853 (mult @0 { build_int_cst (type, 2); }))))
a7f24614
RB
1854
1855(simplify
1856 (minus integer_zerop @1)
1857 (negate @1))
1858
1859/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
1860 ARG0 is zero and X + ARG0 reduces to X, since that would mean
1861 (-ARG1 + ARG0) reduces to -ARG1. */
1862(simplify
1863 (minus real_zerop@0 @1)
1864 (if (fold_real_zero_addition_p (type, @0, 0))
1865 (negate @1)))
1866
1867/* Transform x * -1 into -x. */
1868(simplify
1869 (mult @0 integer_minus_onep)
1870 (negate @0))
eaeba53a 1871
96285749
RS
1872/* True if we can easily extract the real and imaginary parts of a complex
1873 number. */
1874(match compositional_complex
1875 (convert? (complex @0 @1)))
1876
eaeba53a
RB
1877/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
1878(simplify
1879 (complex (realpart @0) (imagpart @0))
1880 @0)
1881(simplify
1882 (realpart (complex @0 @1))
1883 @0)
1884(simplify
1885 (imagpart (complex @0 @1))
1886 @1)
83633539 1887
77c028c5
MG
1888/* Sometimes we only care about half of a complex expression. */
1889(simplify
1890 (realpart (convert?:s (conj:s @0)))
1891 (convert (realpart @0)))
1892(simplify
1893 (imagpart (convert?:s (conj:s @0)))
1894 (convert (negate (imagpart @0))))
1895(for part (realpart imagpart)
1896 (for op (plus minus)
1897 (simplify
1898 (part (convert?:s@2 (op:s @0 @1)))
1899 (convert (op (part @0) (part @1))))))
1900(simplify
1901 (realpart (convert?:s (CEXPI:s @0)))
1902 (convert (COS @0)))
1903(simplify
1904 (imagpart (convert?:s (CEXPI:s @0)))
1905 (convert (SIN @0)))
1906
1907/* conj(conj(x)) -> x */
1908(simplify
1909 (conj (convert? (conj @0)))
1910 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
1911 (convert @0)))
1912
1913/* conj({x,y}) -> {x,-y} */
1914(simplify
1915 (conj (convert?:s (complex:s @0 @1)))
1916 (with { tree itype = TREE_TYPE (type); }
1917 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
1918
1919/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
1920(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
1921 (simplify
1922 (bswap (bswap @0))
1923 @0)
1924 (simplify
1925 (bswap (bit_not (bswap @0)))
1926 (bit_not @0))
1927 (for bitop (bit_xor bit_ior bit_and)
1928 (simplify
1929 (bswap (bitop:c (bswap @0) @1))
1930 (bitop @0 (bswap @1)))))
96994de0
RB
1931
1932
1933/* Combine COND_EXPRs and VEC_COND_EXPRs. */
1934
1935/* Simplify constant conditions.
1936 Only optimize constant conditions when the selected branch
1937 has the same type as the COND_EXPR. This avoids optimizing
1938 away "c ? x : throw", where the throw has a void type.
1939 Note that we cannot throw away the fold-const.c variant nor
1940 this one as we depend on doing this transform before possibly
1941 A ? B : B -> B triggers and the fold-const.c one can optimize
1942 0 ? A : B to B even if A has side-effects. Something
1943 genmatch cannot handle. */
1944(simplify
1945 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
1946 (if (integer_zerop (@0))
1947 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
1948 @2)
1949 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
1950 @1)))
96994de0
RB
1951(simplify
1952 (vec_cond VECTOR_CST@0 @1 @2)
1953 (if (integer_all_onesp (@0))
8fdc6c67
RB
1954 @1
1955 (if (integer_zerop (@0))
1956 @2)))
96994de0
RB
1957
1958(for cnd (cond vec_cond)
1959 /* A ? B : (A ? X : C) -> A ? B : C. */
1960 (simplify
1961 (cnd @0 (cnd @0 @1 @2) @3)
1962 (cnd @0 @1 @3))
1963 (simplify
1964 (cnd @0 @1 (cnd @0 @2 @3))
1965 (cnd @0 @1 @3))
24a179f8
RB
1966 /* A ? B : (!A ? C : X) -> A ? B : C. */
1967 /* ??? This matches embedded conditions open-coded because genmatch
1968 would generate matching code for conditions in separate stmts only.
1969 The following is still important to merge then and else arm cases
1970 from if-conversion. */
1971 (simplify
1972 (cnd @0 @1 (cnd @2 @3 @4))
1973 (if (COMPARISON_CLASS_P (@0)
1974 && COMPARISON_CLASS_P (@2)
1975 && invert_tree_comparison
1976 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
1977 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
1978 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
1979 (cnd @0 @1 @3)))
1980 (simplify
1981 (cnd @0 (cnd @1 @2 @3) @4)
1982 (if (COMPARISON_CLASS_P (@0)
1983 && COMPARISON_CLASS_P (@1)
1984 && invert_tree_comparison
1985 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
1986 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
1987 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
1988 (cnd @0 @3 @4)))
96994de0
RB
1989
1990 /* A ? B : B -> B. */
1991 (simplify
1992 (cnd @0 @1 @1)
09240451 1993 @1)
96994de0 1994
09240451
MG
1995 /* !A ? B : C -> A ? C : B. */
1996 (simplify
1997 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
1998 (cnd @0 @2 @1)))
f84e7fd6 1999
a3ca1bc5
RB
2000/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2001 return all -1 or all 0 results. */
f43d102e
RS
2002/* ??? We could instead convert all instances of the vec_cond to negate,
2003 but that isn't necessarily a win on its own. */
2004(simplify
a3ca1bc5 2005 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 2006 (if (VECTOR_TYPE_P (type)
4d8989d5 2007 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 2008 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 2009 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 2010 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 2011
a3ca1bc5 2012/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 2013(simplify
a3ca1bc5 2014 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 2015 (if (VECTOR_TYPE_P (type)
4d8989d5 2016 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 2017 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 2018 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 2019 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 2020
2ee05f1e 2021
f84e7fd6
RB
2022/* Simplifications of comparisons. */
2023
24f1db9c
RB
2024/* See if we can reduce the magnitude of a constant involved in a
2025 comparison by changing the comparison code. This is a canonicalization
2026 formerly done by maybe_canonicalize_comparison_1. */
2027(for cmp (le gt)
2028 acmp (lt ge)
2029 (simplify
2030 (cmp @0 INTEGER_CST@1)
2031 (if (tree_int_cst_sgn (@1) == -1)
2032 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2033(for cmp (ge lt)
2034 acmp (gt le)
2035 (simplify
2036 (cmp @0 INTEGER_CST@1)
2037 (if (tree_int_cst_sgn (@1) == 1)
2038 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
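/* For example, x <= -5 becomes x < -4 and x >= 7 becomes x > 6: the
   constant moves one step closer to zero while the comparison stays
   equivalent on integers.  */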
2039
2040
f84e7fd6
RB
2041/* We can simplify a logical negation of a comparison to the
2042 inverted comparison. As we cannot compute an expression
2043 operator using invert_tree_comparison we have to simulate
2044 that with expression code iteration. */
2045(for cmp (tcc_comparison)
2046 icmp (inverted_tcc_comparison)
2047 ncmp (inverted_tcc_comparison_with_nans)
2048 /* Ideally we'd like to combine the following two patterns
2049 and handle some more cases by using
2050 (logical_inverted_value (cmp @0 @1))
2051 here but for that genmatch would need to "inline" that.
2052 For now implement what forward_propagate_comparison did. */
2053 (simplify
2054 (bit_not (cmp @0 @1))
2055 (if (VECTOR_TYPE_P (type)
2056 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2057 /* Comparison inversion may be impossible for trapping math,
2058 invert_tree_comparison will tell us. But we can't use
2059 a computed operator in the replacement tree thus we have
2060 to play the trick below. */
2061 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 2062 (cmp, HONOR_NANS (@0)); }
f84e7fd6 2063 (if (ic == icmp)
8fdc6c67
RB
2064 (icmp @0 @1)
2065 (if (ic == ncmp)
2066 (ncmp @0 @1))))))
f84e7fd6 2067 (simplify
09240451
MG
2068 (bit_xor (cmp @0 @1) integer_truep)
2069 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 2070 (cmp, HONOR_NANS (@0)); }
09240451 2071 (if (ic == icmp)
8fdc6c67
RB
2072 (icmp @0 @1)
2073 (if (ic == ncmp)
2074 (ncmp @0 @1))))))
e18c1d66 2075
2ee05f1e
RB
2076/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2077 ??? The transformation is valid for the other operators if overflow
2078 is undefined for the type, but performing it here badly interacts
2079 with the transformation in fold_cond_expr_with_comparison which
2080 attempts to synthesize ABS_EXPR. */
2081(for cmp (eq ne)
2082 (simplify
d9ba1961
RB
2083 (cmp (minus@2 @0 @1) integer_zerop)
2084 (if (single_use (@2))
2085 (cmp @0 @1))))
2ee05f1e
RB
2086
2087/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2088 signed arithmetic case. That form is created by the compiler
2089 often enough for folding it to be of value. One example is in
2090 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
2091(for cmp (simple_comparison)
2092 scmp (swapped_simple_comparison)
2ee05f1e 2093 (simplify
bc6e9db4 2094 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
2095 /* Handle unfolded multiplication by zero. */
2096 (if (integer_zerop (@1))
8fdc6c67
RB
2097 (cmp @1 @2)
2098 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
2099 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2100 && single_use (@3))
8fdc6c67
RB
2101 /* If @1 is negative we swap the sense of the comparison. */
2102 (if (tree_int_cst_sgn (@1) < 0)
2103 (scmp @0 @2)
2104 (cmp @0 @2))))))
2ee05f1e
RB
2105
2106/* Simplify comparison of something with itself. For IEEE
2107 floating-point, we can only do some of these simplifications. */
287f8f17 2108(for cmp (eq ge le)
2ee05f1e
RB
2109 (simplify
2110 (cmp @0 @0)
287f8f17 2111 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2112 || ! HONOR_NANS (@0))
287f8f17
RB
2113 { constant_boolean_node (true, type); }
2114 (if (cmp != EQ_EXPR)
2115 (eq @0 @0)))))
2ee05f1e
RB
2116(for cmp (ne gt lt)
2117 (simplify
2118 (cmp @0 @0)
2119 (if (cmp != NE_EXPR
2120 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2121 || ! HONOR_NANS (@0))
2ee05f1e 2122 { constant_boolean_node (false, type); })))
b5d3d787
RB
2123(for cmp (unle unge uneq)
2124 (simplify
2125 (cmp @0 @0)
2126 { constant_boolean_node (true, type); }))
dd53d197
MG
2127(for cmp (unlt ungt)
2128 (simplify
2129 (cmp @0 @0)
2130 (unordered @0 @0)))
b5d3d787
RB
2131(simplify
2132 (ltgt @0 @0)
2133 (if (!flag_trapping_math)
2134 { constant_boolean_node (false, type); }))
2ee05f1e
RB
2135
2136/* Fold ~X op ~Y as Y op X. */
07cdc2b8 2137(for cmp (simple_comparison)
2ee05f1e 2138 (simplify
7fe996ba
RB
2139 (cmp (bit_not@2 @0) (bit_not@3 @1))
2140 (if (single_use (@2) && single_use (@3))
2141 (cmp @1 @0))))
2ee05f1e
RB
2142
2143/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
2144(for cmp (simple_comparison)
2145 scmp (swapped_simple_comparison)
2ee05f1e 2146 (simplify
7fe996ba
RB
2147 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2148 (if (single_use (@2)
2149 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
2150 (scmp @0 (bit_not @1)))))
2151
07cdc2b8
RB
2152(for cmp (simple_comparison)
2153 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2154 (simplify
2155 (cmp (convert@2 @0) (convert? @1))
2156 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2157 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2158 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2159 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2160 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2161 (with
2162 {
2163 tree type1 = TREE_TYPE (@1);
2164 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2165 {
2166 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2167 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2168 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2169 type1 = float_type_node;
2170 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2171 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2172 type1 = double_type_node;
2173 }
2174 tree newtype
2175 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2176 ? TREE_TYPE (@0) : type1);
2177 }
2178 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2179 (cmp (convert:newtype @0) (convert:newtype @1))))))
2180
2181 (simplify
2182 (cmp @0 REAL_CST@1)
2183 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
2184 (switch
2185 /* a CMP (-0) -> a CMP 0 */
2186 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2187 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2188 /* x != NaN is always true, other ops are always false. */
2189 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2190 && ! HONOR_SNANS (@1))
2191 { constant_boolean_node (cmp == NE_EXPR, type); })
2192 /* Fold comparisons against infinity. */
2193 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2194 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2195 (with
2196 {
2197 REAL_VALUE_TYPE max;
2198 enum tree_code code = cmp;
2199 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2200 if (neg)
2201 code = swap_tree_comparison (code);
2202 }
2203 (switch
2204 /* x > +Inf is always false, if we ignore sNaNs. */
2205 (if (code == GT_EXPR
2206 && ! HONOR_SNANS (@0))
2207 { constant_boolean_node (false, type); })
2208 (if (code == LE_EXPR)
2209 /* x <= +Inf is always true, if we don't care about NaNs. */
2210 (if (! HONOR_NANS (@0))
2211 { constant_boolean_node (true, type); }
b0eb889b 2212 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
64d3a1f0
RB
2213 (eq @0 @0)))
2214 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2215 (if (code == EQ_EXPR || code == GE_EXPR)
2216 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2217 (if (neg)
2218 (lt @0 { build_real (TREE_TYPE (@0), max); })
2219 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2220 /* x < +Inf is always equal to x <= DBL_MAX. */
2221 (if (code == LT_EXPR)
2222 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2223 (if (neg)
2224 (ge @0 { build_real (TREE_TYPE (@0), max); })
2225 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2226 /* x != +Inf is always equal to !(x > DBL_MAX). */
2227 (if (code == NE_EXPR)
2228 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2229 (if (! HONOR_NANS (@0))
2230 (if (neg)
2231 (ge @0 { build_real (TREE_TYPE (@0), max); })
2232 (le @0 { build_real (TREE_TYPE (@0), max); }))
2233 (if (neg)
2234 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2235 { build_one_cst (type); })
2236 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2237 { build_one_cst (type); }))))))))))
07cdc2b8
RB
2238
2239 /* If this is a comparison of a real constant with a PLUS_EXPR
2240 or a MINUS_EXPR of a real constant, we can convert it into a
2241 comparison with a revised real constant as long as no overflow
2242 occurs when unsafe_math_optimizations are enabled. */
2243 (if (flag_unsafe_math_optimizations)
2244 (for op (plus minus)
2245 (simplify
2246 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2247 (with
2248 {
2249 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2250 TREE_TYPE (@1), @2, @1);
2251 }
f980c9a2 2252 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2253 (cmp @0 { tem; }))))))
2254
2255 /* Likewise, we can simplify a comparison of a real constant with
2256 a MINUS_EXPR whose first operand is also a real constant, i.e.
2257 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2258 floating-point types only if -fassociative-math is set. */
2259 (if (flag_associative_math)
2260 (simplify
0409237b 2261 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 2262 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 2263 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2264 (cmp { tem; } @1)))))
2265
2266 /* Fold comparisons against built-in math functions. */
2267 (if (flag_unsafe_math_optimizations
2268 && ! flag_errno_math)
2269 (for sq (SQRT)
2270 (simplify
2271 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
2272 (switch
2273 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2274 (switch
2275 /* sqrt(x) < y is always false, if y is negative. */
2276 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 2277 { constant_boolean_node (false, type); })
64d3a1f0
RB
2278 /* sqrt(x) > y is always true, if y is negative and we
2279 don't care about NaNs, i.e. negative values of x. */
2280 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2281 { constant_boolean_node (true, type); })
2282 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2283 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
2284 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2285 (switch
2286 /* sqrt(x) < 0 is always false. */
2287 (if (cmp == LT_EXPR)
2288 { constant_boolean_node (false, type); })
2289 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2290 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2291 { constant_boolean_node (true, type); })
2292 /* sqrt(x) <= 0 -> x == 0. */
2293 (if (cmp == LE_EXPR)
2294 (eq @0 @1))
2295 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2296 == or !=. In the last case:
2297
2298 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2299
2300 if x is negative or NaN. Due to -funsafe-math-optimizations,
2301 the results for other x follow from natural arithmetic. */
2302 (cmp @0 @1)))
64d3a1f0
RB
2303 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2304 (with
2305 {
2306 REAL_VALUE_TYPE c2;
5c88ea94
RS
2307 real_arithmetic (&c2, MULT_EXPR,
2308 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2309 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2310 }
2311 (if (REAL_VALUE_ISINF (c2))
2312 /* sqrt(x) > y is x == +Inf, when y is very large. */
2313 (if (HONOR_INFINITIES (@0))
2314 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2315 { constant_boolean_node (false, type); })
2316 /* sqrt(x) > c is the same as x > c*c. */
2317 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2318 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2319 (with
2320 {
2321 REAL_VALUE_TYPE c2;
5c88ea94
RS
2322 real_arithmetic (&c2, MULT_EXPR,
2323 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2324 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2325 }
2326 (if (REAL_VALUE_ISINF (c2))
2327 (switch
2328 /* sqrt(x) < y is always true, when y is a very large
2329 value and we don't care about NaNs or Infinities. */
2330 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2331 { constant_boolean_node (true, type); })
2332 /* sqrt(x) < y is x != +Inf when y is very large and we
2333 don't care about NaNs. */
2334 (if (! HONOR_NANS (@0))
2335 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2336 /* sqrt(x) < y is x >= 0 when y is very large and we
2337 don't care about Infinities. */
2338 (if (! HONOR_INFINITIES (@0))
2339 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2340 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2341 (if (GENERIC)
2342 (truth_andif
2343 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2344 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2345 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2346 (if (! HONOR_NANS (@0))
2347 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2348 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2349 (if (GENERIC)
2350 (truth_andif
2351 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2352 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
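/* For example, under -funsafe-math-optimizations and -fno-math-errno,
   sqrt (x) > 3.0 is rewritten as x > 9.0, sqrt (x) <= 0.0 as x == 0.0,
   and sqrt (x) < -1.0 folds to false.  */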
2ee05f1e 2353
cfdc4f33
MG
2354/* Unordered tests if either argument is a NaN. */
2355(simplify
2356 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 2357 (if (types_match (@0, @1))
cfdc4f33 2358 (unordered @0 @1)))
257b01ba
MG
2359(simplify
2360 (bit_and (ordered @0 @0) (ordered @1 @1))
2361 (if (types_match (@0, @1))
2362 (ordered @0 @1)))
cfdc4f33
MG
2363(simplify
2364 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
2365 @2)
257b01ba
MG
2366(simplify
2367 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
2368 @2)
e18c1d66 2369
90c6f26c
RB
2370/* Simple range test simplifications. */
2371/* A < B || A >= B -> true. */
5d30c58d
RB
2372(for test1 (lt le le le ne ge)
2373 test2 (ge gt ge ne eq ne)
90c6f26c
RB
2374 (simplify
2375 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
2376 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2377 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2378 { constant_boolean_node (true, type); })))
2379/* A < B && A >= B -> false. */
2380(for test1 (lt lt lt le ne eq)
2381 test2 (ge gt eq gt eq gt)
2382 (simplify
2383 (bit_and:c (test1 @0 @1) (test2 @0 @1))
2384 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2385 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2386 { constant_boolean_node (false, type); })))
2387
534bd33b
MG
2388/* -A CMP -B -> B CMP A. */
2389(for cmp (tcc_comparison)
2390 scmp (swapped_tcc_comparison)
2391 (simplify
2392 (cmp (negate @0) (negate @1))
2393 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2394 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2395 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2396 (scmp @0 @1)))
2397 (simplify
2398 (cmp (negate @0) CONSTANT_CLASS_P@1)
2399 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2400 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2401 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 2402 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
2403 (if (tem && !TREE_OVERFLOW (tem))
2404 (scmp @0 { tem; }))))))
2405
b0eb889b
MG
2406/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
2407(for op (eq ne)
2408 (simplify
2409 (op (abs @0) zerop@1)
2410 (op @0 @1)))
2411
79d4f7c6
RB
2412/* From fold_sign_changed_comparison and fold_widened_comparison. */
2413(for cmp (simple_comparison)
2414 (simplify
2415 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 2416 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
2417 /* Disable this optimization if we're casting a function pointer
2418 type on targets that require function pointer canonicalization. */
2419 && !(targetm.have_canonicalize_funcptr_for_compare ()
2420 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2fde61e3
RB
2421 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
2422 && single_use (@0))
79d4f7c6
RB
2423 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
2424 && (TREE_CODE (@10) == INTEGER_CST
2425 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
2426 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
2427 || cmp == NE_EXPR
2428 || cmp == EQ_EXPR)
2429 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
2430 /* ??? The special-casing of INTEGER_CST conversion was in the original
2431 code and here to avoid a spurious overflow flag on the resulting
2432 constant which fold_convert produces. */
2433 (if (TREE_CODE (@1) == INTEGER_CST)
2434 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
2435 TREE_OVERFLOW (@1)); })
2436 (cmp @00 (convert @1)))
2437
2438 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
2439 /* If possible, express the comparison in the shorter mode. */
2440 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
2441 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
2442 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
2443 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
2444 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
2445 || ((TYPE_PRECISION (TREE_TYPE (@00))
2446 >= TYPE_PRECISION (TREE_TYPE (@10)))
2447 && (TYPE_UNSIGNED (TREE_TYPE (@00))
2448 == TYPE_UNSIGNED (TREE_TYPE (@10))))
2449 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 2450 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
2451 && int_fits_type_p (@10, TREE_TYPE (@00)))))
2452 (cmp @00 (convert @10))
2453 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 2454 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
2455 && !int_fits_type_p (@10, TREE_TYPE (@00)))
2456 (with
2457 {
2458 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2459 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2460 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
2461 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
2462 }
2463 (if (above || below)
2464 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2465 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
2466 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2467 { constant_boolean_node (above ? true : false, type); }
2468 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2469 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 2470
96a111a3
RB
2471(for cmp (eq ne)
2472 /* A local variable can never be pointed to by
2473 the default SSA name of an incoming parameter.
2474 SSA names are canonicalized to 2nd place. */
2475 (simplify
2476 (cmp addr@0 SSA_NAME@1)
2477 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
2478 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
2479 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
2480 (if (TREE_CODE (base) == VAR_DECL
2481 && auto_var_in_fn_p (base, current_function_decl))
2482 (if (cmp == NE_EXPR)
2483 { constant_boolean_node (true, type); }
2484 { constant_boolean_node (false, type); }))))))
2485
66e1cacf
RB
2486/* Equality compare simplifications from fold_binary */
2487(for cmp (eq ne)
2488
2489 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
2490 Similarly for NE_EXPR. */
2491 (simplify
2492 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
2493 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2494 && wi::bit_and_not (@1, @2) != 0)
2495 { constant_boolean_node (cmp == NE_EXPR, type); }))
2496
2497 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
2498 (simplify
2499 (cmp (bit_xor @0 @1) integer_zerop)
2500 (cmp @0 @1))
2501
2502 /* (X ^ Y) == Y becomes X == 0.
2503 Likewise (X ^ Y) == X becomes Y == 0. */
2504 (simplify
99e943a2 2505 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
2506 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
2507
2508 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
2509 (simplify
2510 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
2511 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 2512 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
2513
2514 (simplify
2515 (cmp (convert? addr@0) integer_zerop)
2516 (if (tree_single_nonzero_warnv_p (@0, NULL))
2517 { constant_boolean_node (cmp == NE_EXPR, type); })))
2518
b0eb889b
MG
2519/* If we have (A & C) == C where C is a power of 2, convert this into
2520 (A & C) != 0. Similarly for NE_EXPR. */
2521(for cmp (eq ne)
2522 icmp (ne eq)
2523 (simplify
2524 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
2525 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
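/* For example, (x & 8) == 8 becomes (x & 8) != 0 and (x & 8) != 8
   becomes (x & 8) == 0: a single-bit mask can only produce 0 or the
   bit itself, so comparing against the bit is just a zero test.  */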
2526
2527/* If we have (A & C) != 0 where C is the sign bit of A, convert
2528 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
2529(for cmp (eq ne)
2530 ncmp (ge lt)
2531 (simplify
2532 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
2533 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2534 && (TYPE_PRECISION (TREE_TYPE (@0))
2535 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2536 && element_precision (@2) >= element_precision (@0)
2537 && wi::only_sign_bit_p (@1, element_precision (@0)))
2538 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2539 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
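/* For example, assuming a 32-bit int x, (x & 0x80000000u) != 0 becomes
   x < 0 and (x & 0x80000000u) == 0 becomes x >= 0, replacing the
   sign-bit mask with a signed comparison against zero.  */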
2540
68aba1f6
RB
2541/* When the addresses are not directly of decls compare base and offset.
2542 This implements some remaining parts of fold_comparison address
2543 comparisons but still no complete part of it. Still it is good
2544 enough to make fold_stmt not regress when not dispatching to fold_binary. */
2545(for cmp (simple_comparison)
2546 (simplify
f501d5cd 2547 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
2548 (with
2549 {
2550 HOST_WIDE_INT off0, off1;
2551 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
2552 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
2553 if (base0 && TREE_CODE (base0) == MEM_REF)
2554 {
2555 off0 += mem_ref_offset (base0).to_short_addr ();
2556 base0 = TREE_OPERAND (base0, 0);
2557 }
2558 if (base1 && TREE_CODE (base1) == MEM_REF)
2559 {
2560 off1 += mem_ref_offset (base1).to_short_addr ();
2561 base1 = TREE_OPERAND (base1, 0);
2562 }
2563 }
da571fda
RB
2564 (if (base0 && base1)
2565 (with
2566 {
aad88aed 2567 int equal = 2;
70f40fea
JJ
2568 /* Punt in GENERIC on variables with value expressions;
2569 the value expressions might point to fields/elements
2570 of other vars etc. */
2571 if (GENERIC
2572 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
2573 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
2574 ;
2575 else if (decl_in_symtab_p (base0)
2576 && decl_in_symtab_p (base1))
da571fda
RB
2577 equal = symtab_node::get_create (base0)
2578 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
2579 else if ((DECL_P (base0)
2580 || TREE_CODE (base0) == SSA_NAME
2581 || TREE_CODE (base0) == STRING_CST)
2582 && (DECL_P (base1)
2583 || TREE_CODE (base1) == SSA_NAME
2584 || TREE_CODE (base1) == STRING_CST))
aad88aed 2585 equal = (base0 == base1);
da571fda
RB
2586 }
2587 (if (equal == 1
2588 && (cmp == EQ_EXPR || cmp == NE_EXPR
2589 /* If the offsets are equal we can ignore overflow. */
2590 || off0 == off1
2591 || POINTER_TYPE_OVERFLOW_UNDEFINED
c3bea076 2592 /* Or if we compare using pointers to decls or strings. */
da571fda 2593 || (POINTER_TYPE_P (TREE_TYPE (@2))
c3bea076 2594 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda
RB
2595 (switch
2596 (if (cmp == EQ_EXPR)
2597 { constant_boolean_node (off0 == off1, type); })
2598 (if (cmp == NE_EXPR)
2599 { constant_boolean_node (off0 != off1, type); })
2600 (if (cmp == LT_EXPR)
2601 { constant_boolean_node (off0 < off1, type); })
2602 (if (cmp == LE_EXPR)
2603 { constant_boolean_node (off0 <= off1, type); })
2604 (if (cmp == GE_EXPR)
2605 { constant_boolean_node (off0 >= off1, type); })
2606 (if (cmp == GT_EXPR)
2607 { constant_boolean_node (off0 > off1, type); }))
2608 (if (equal == 0
2609 && DECL_P (base0) && DECL_P (base1)
2610 /* If we compare this as integers require equal offset. */
2611 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
2612 || off0 == off1))
2613 (switch
2614 (if (cmp == EQ_EXPR)
2615 { constant_boolean_node (false, type); })
2616 (if (cmp == NE_EXPR)
2617 { constant_boolean_node (true, type); })))))))))
66e1cacf 2618
98998245
RB
2619/* Simplify pointer equality compares using PTA. */
2620(for neeq (ne eq)
2621 (simplify
2622 (neeq @0 @1)
2623 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2624 && ptrs_compare_unequal (@0, @1))
2625 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
2626
8f63caf6 2627/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
2628 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
2629 Disable the transform if either operand is pointer to function.
2630 This broke pr22051-2.c for arm where function pointer
2631 canonicalization is not wanted. */
1c0a8806 2632
8f63caf6
RB
2633(for cmp (ne eq)
2634 (simplify
2635 (cmp (convert @0) INTEGER_CST@1)
467719fb
PK
2636 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
2637 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2638 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
2639 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
8f63caf6
RB
2640 (cmp @0 (convert @1)))))
2641
21aacde4
RB
2642/* Non-equality compare simplifications from fold_binary */
2643(for cmp (lt gt le ge)
2644 /* Comparisons with the highest or lowest possible integer of
2645 the specified precision will have known values. */
2646 (simplify
2647 (cmp (convert?@2 @0) INTEGER_CST@1)
2648 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2649 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
2650 (with
2651 {
2652 tree arg1_type = TREE_TYPE (@1);
2653 unsigned int prec = TYPE_PRECISION (arg1_type);
2654 wide_int max = wi::max_value (arg1_type);
2655 wide_int signed_max = wi::max_value (prec, SIGNED);
2656 wide_int min = wi::min_value (arg1_type);
2657 }
2658 (switch
2659 (if (wi::eq_p (@1, max))
2660 (switch
2661 (if (cmp == GT_EXPR)
2662 { constant_boolean_node (false, type); })
2663 (if (cmp == GE_EXPR)
2664 (eq @2 @1))
2665 (if (cmp == LE_EXPR)
2666 { constant_boolean_node (true, type); })
2667 (if (cmp == LT_EXPR)
2668 (ne @2 @1))))
21aacde4
RB
2669 (if (wi::eq_p (@1, min))
2670 (switch
2671 (if (cmp == LT_EXPR)
2672 { constant_boolean_node (false, type); })
2673 (if (cmp == LE_EXPR)
2674 (eq @2 @1))
2675 (if (cmp == GE_EXPR)
2676 { constant_boolean_node (true, type); })
2677 (if (cmp == GT_EXPR)
2678 (ne @2 @1))))
9bc22d19
RB
2679 (if (wi::eq_p (@1, max - 1))
2680 (switch
2681 (if (cmp == GT_EXPR)
2682 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
2683 (if (cmp == LE_EXPR)
2684 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
21aacde4
RB
2685 (if (wi::eq_p (@1, min + 1))
2686 (switch
2687 (if (cmp == GE_EXPR)
2688 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
2689 (if (cmp == LT_EXPR)
2690 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2691 (if (wi::eq_p (@1, signed_max)
2692 && TYPE_UNSIGNED (arg1_type)
2693 /* We will flip the signedness of the comparison operator
2694 associated with the mode of @1, so the sign bit is
2695 specified by this mode. Check that @1 is the signed
2696 max associated with this sign bit. */
2697 && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
2698 /* signed_type does not work on pointer types. */
2699 && INTEGRAL_TYPE_P (arg1_type))
2700 /* The following case also applies to X < signed_max+1
2701 and X >= signed_max+1 because of previous transformations. */
2702 (if (cmp == LE_EXPR || cmp == GT_EXPR)
2703 (with { tree st = signed_type_for (arg1_type); }
2704 (if (cmp == LE_EXPR)
2705 (ge (convert:st @0) { build_zero_cst (st); })
2706 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
2707
b5d3d787
RB
2708(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
2709 /* If the second operand is NaN, the result is constant. */
2710 (simplify
2711 (cmp @0 REAL_CST@1)
2712 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2713 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 2714 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 2715 ? false : true, type); })))
21aacde4 2716
55cf3946
RB
2717/* bool_var != 0 becomes bool_var. */
2718(simplify
b5d3d787 2719 (ne @0 integer_zerop)
55cf3946
RB
2720 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
2721 && types_match (type, TREE_TYPE (@0)))
2722 (non_lvalue @0)))
2723/* bool_var == 1 becomes bool_var. */
2724(simplify
b5d3d787 2725 (eq @0 integer_onep)
55cf3946
RB
2726 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
2727 && types_match (type, TREE_TYPE (@0)))
2728 (non_lvalue @0)))
b5d3d787
RB
2729/* Do not handle
2730 bool_var == 0 becomes !bool_var or
2731 bool_var != 1 becomes !bool_var
2732 here because that is only good in assignment context as long
2733 as we require a tcc_comparison in GIMPLE_CONDs where we'd
2734 replace if (x == 0) with tem = ~x; if (tem != 0) which is
2735 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 2736
ca1206be
MG
2737/* When one argument is a constant, overflow detection can be simplified.
2738 Currently restricted to single use so as not to interfere too much with
2739 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
2740 A + CST CMP A -> A CMP' CST' */
2741(for cmp (lt le ge gt)
2742 out (gt gt le le)
2743 (simplify
a8e9f9a3 2744 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
2745 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2746 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
2747 && wi::ne_p (@1, 0)
2748 && single_use (@2))
2749 (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
2750 (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
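/* For example, for 32-bit unsigned x, the overflow test x + 10 < x is
   rewritten as x > 4294967285 (UINT_MAX - 10), so the addition is no
   longer needed to detect the wrap-around.  */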
2751
3563f78f
MG
2752/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
2753 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
2754 expects the long form, so we restrict the transformation for now. */
2755(for cmp (gt le)
2756 (simplify
a8e9f9a3 2757 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
2758 (if (single_use (@2)
2759 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2760 && TYPE_UNSIGNED (TREE_TYPE (@0))
2761 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2762 (cmp @1 @0))))
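/* For example, for unsigned a and b, a - b > a becomes b > a: the
   subtraction wraps exactly when b is larger, as in a = 3, b = 5,
   where 3 - 5 yields UINT_MAX - 1, which is indeed > 3.  */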
3563f78f
MG
2763
2764/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
2765/* A - B > A */
2766(for cmp (gt le)
2767 out (ne eq)
2768 (simplify
a8e9f9a3 2769 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
2770 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2771 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
2772 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
2773/* A + B < A */
2774(for cmp (lt ge)
2775 out (ne eq)
2776 (simplify
a8e9f9a3 2777 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
2778 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2779 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
2780 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
2781
603aeb87 2782/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 2783 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
2784(for cmp (lt ge)
2785 out (ne eq)
2786 (simplify
603aeb87 2787 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
2788 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
2789 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
2790 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
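/* For example, for unsigned a and nonzero b, the hand-written check
   ~0u / b < a (i.e. a * b would wrap) is rewritten to test the overflow
   flag (the imagpart) of a single IFN_MUL_OVERFLOW of a and b,
   eliminating the division.  */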
55cf3946 2791
53f3cd25
RS
2792/* Simplification of math builtins. These rules must all be optimizations
2793 as well as IL simplifications. If there is a possibility that the new
2794 form could be a pessimization, the rule should go in the canonicalization
2795 section that follows this one.
e18c1d66 2796
53f3cd25
RS
2797 Rules can generally go in this section if they satisfy one of
2798 the following:
2799
2800 - the rule describes an identity
2801
2802 - the rule replaces calls with something as simple as addition or
2803 multiplication
2804
2805 - the rule contains unary calls only and simplifies the surrounding
2806 arithmetic. (The idea here is to exclude non-unary calls in which
2807 one operand is constant and in which the call is known to be cheap
2808 when the operand has that value.) */
52c6378a 2809
53f3cd25 2810(if (flag_unsafe_math_optimizations)
52c6378a
N
2811 /* Simplify sqrt(x) * sqrt(x) -> x. */
2812 (simplify
2813 (mult (SQRT@1 @0) @1)
2814 (if (!HONOR_SNANS (type))
2815 @0))
2816
35401640
N
2817 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
2818 (for root (SQRT CBRT)
2819 (simplify
2820 (mult (root:s @0) (root:s @1))
2821 (root (mult @0 @1))))
2822
35401640
N
2823 /* Simplify expN(x) * expN(y) -> expN(x+y). */
2824 (for exps (EXP EXP2 EXP10 POW10)
2825 (simplify
2826 (mult (exps:s @0) (exps:s @1))
2827 (exps (plus @0 @1))))
2828
52c6378a 2829 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
2830 (for root (SQRT CBRT)
2831 (simplify
2832 (rdiv @0 (root:s (rdiv:s @1 @2)))
2833 (mult @0 (root (rdiv @2 @1)))))
2834
2835 /* Simplify x/expN(y) into x*expN(-y). */
2836 (for exps (EXP EXP2 EXP10 POW10)
2837 (simplify
2838 (rdiv @0 (exps:s @1))
2839 (mult @0 (exps (negate @1)))))
52c6378a 2840
eee7b6c4
RB
2841 (for logs (LOG LOG2 LOG10 LOG10)
2842 exps (EXP EXP2 EXP10 POW10)
8acda9b2 2843 /* logN(expN(x)) -> x. */
e18c1d66
RB
2844 (simplify
2845 (logs (exps @0))
8acda9b2
RS
2846 @0)
2847 /* expN(logN(x)) -> x. */
2848 (simplify
2849 (exps (logs @0))
2850 @0))
53f3cd25 2851
e18c1d66
RB
2852 /* Optimize logN(func()) for various exponential functions. We
2853 want to determine the value "x" and the power "exponent" in
2854 order to transform logN(x**exponent) into exponent*logN(x). */
eee7b6c4
RB
2855 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
2856 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
2857 (simplify
2858 (logs (exps @0))
c9e926ce
RS
2859 (if (SCALAR_FLOAT_TYPE_P (type))
2860 (with {
2861 tree x;
2862 switch (exps)
2863 {
2864 CASE_CFN_EXP:
2865 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
2866 x = build_real_truncate (type, dconst_e ());
2867 break;
2868 CASE_CFN_EXP2:
2869 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
2870 x = build_real (type, dconst2);
2871 break;
2872 CASE_CFN_EXP10:
2873 CASE_CFN_POW10:
2874 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
2875 {
2876 REAL_VALUE_TYPE dconst10;
2877 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
2878 x = build_real (type, dconst10);
2879 }
2880 break;
2881 default:
2882 gcc_unreachable ();
2883 }
2884 }
2885 (mult (logs { x; }) @0)))))
53f3cd25 2886
e18c1d66
RB
2887 (for logs (LOG LOG
2888 LOG2 LOG2
2889 LOG10 LOG10)
2890 exps (SQRT CBRT)
2891 (simplify
2892 (logs (exps @0))
c9e926ce
RS
2893 (if (SCALAR_FLOAT_TYPE_P (type))
2894 (with {
2895 tree x;
2896 switch (exps)
2897 {
2898 CASE_CFN_SQRT:
2899 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
2900 x = build_real (type, dconsthalf);
2901 break;
2902 CASE_CFN_CBRT:
2903 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
2904 x = build_real_truncate (type, dconst_third ());
2905 break;
2906 default:
2907 gcc_unreachable ();
2908 }
2909 }
2910 (mult { x; } (logs @0))))))
53f3cd25
RS
2911
2912 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
2913 (for logs (LOG LOG2 LOG10)
2914 pows (POW)
2915 (simplify
2916 (logs (pows @0 @1))
53f3cd25
RS
2917 (mult @1 (logs @0))))
2918
2919 (for sqrts (SQRT)
2920 cbrts (CBRT)
b4838d77 2921 pows (POW)
53f3cd25
RS
2922 exps (EXP EXP2 EXP10 POW10)
2923 /* sqrt(expN(x)) -> expN(x*0.5). */
2924 (simplify
2925 (sqrts (exps @0))
2926 (exps (mult @0 { build_real (type, dconsthalf); })))
2927 /* cbrt(expN(x)) -> expN(x/3). */
2928 (simplify
2929 (cbrts (exps @0))
b4838d77
RS
2930 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
2931 /* pow(expN(x), y) -> expN(x*y). */
2932 (simplify
2933 (pows (exps @0) @1)
2934 (exps (mult @0 @1))))
cfed37a0
RS
2935
2936 /* tan(atan(x)) -> x. */
2937 (for tans (TAN)
2938 atans (ATAN)
2939 (simplify
2940 (tans (atans @0))
2941 @0)))
53f3cd25 2942
abcc43f5
RS
2943/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
2944(simplify
e04d2a35 2945 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
2946 (abs @0))
2947
67dbe582
RS
2948/* trunc(trunc(x)) -> trunc(x), etc. */
2949(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
2950 (simplify
2951 (fns (fns @0))
2952 (fns @0)))
2953/* f(x) -> x if x is integer valued and f does nothing for such values. */
afeb246c 2954(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
67dbe582
RS
2955 (simplify
2956 (fns integer_valued_real_p@0)
2957 @0))
67dbe582 2958
4d7836c4
RS
2959/* hypot(x,0) and hypot(0,x) -> abs(x). */
2960(simplify
c9e926ce 2961 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
2962 (abs @0))
2963
b4838d77
RS
2964/* pow(1,x) -> 1. */
2965(simplify
2966 (POW real_onep@0 @1)
2967 @0)
2968
461e4145
RS
2969(simplify
2970 /* copysign(x,x) -> x. */
2971 (COPYSIGN @0 @0)
2972 @0)
2973
2974(simplify
2975 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
2976 (COPYSIGN @0 tree_expr_nonnegative_p@1)
2977 (abs @0))
2978
86c0733f
RS
2979(for scale (LDEXP SCALBN SCALBLN)
2980 /* ldexp(0, x) -> 0. */
2981 (simplify
2982 (scale real_zerop@0 @1)
2983 @0)
2984 /* ldexp(x, 0) -> x. */
2985 (simplify
2986 (scale @0 integer_zerop@1)
2987 @0)
2988 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
2989 (simplify
2990 (scale REAL_CST@0 @1)
2991 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
2992 @0)))
2993
53f3cd25
RS
2994/* Canonicalization of sequences of math builtins. These rules represent
2995 IL simplifications but are not necessarily optimizations.
2996
2997 The sincos pass is responsible for picking "optimal" implementations
2998 of math builtins, which may be more complicated and can sometimes go
2999 the other way, e.g. converting pow into a sequence of sqrts.
3000 We only want to do these canonicalizations before the pass has run. */
3001
(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x). */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1). */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x). */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x). */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
  (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
  (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
  (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1). */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4). */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6). */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6). */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5). */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))

 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2). */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y). */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))

(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative. */
 (for floors (FLOOR)
      truncs (TRUNC)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
            BUILT_IN_FLOORL
            BUILT_IN_CEILL
            BUILT_IN_ROUNDL
            BUILT_IN_NEARBYINTL
            BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
          BUILT_IN_FLOOR
          BUILT_IN_CEIL
          BUILT_IN_ROUND
          BUILT_IN_NEARBYINT
          BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))
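
/* An illustrative C-level sketch of the source form this matches (an
   illustration, not a pattern; it assumes optimization is enabled and that
   d really has type double):

     #include <math.h>

     long double f (double d)
     {
       return truncl ((long double) d);   // becomes (long double) trunc (d)
     }
*/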

(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
            BUILT_IN_FLOORL BUILT_IN_FLOOR
            BUILT_IN_CEILL BUILT_IN_CEIL
            BUILT_IN_ROUNDL BUILT_IN_ROUND
            BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
            BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
          BUILT_IN_FLOORF BUILT_IN_FLOORF
          BUILT_IN_CEILF BUILT_IN_CEILF
          BUILT_IN_ROUNDF BUILT_IN_ROUNDF
          BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
          BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float. */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
            XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
 (for fns (IFLOOR LFLOOR LLFLOOR
           ICEIL LCEIL LLCEIL
           IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long). */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long). */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))
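
/* A hedged C illustration of the intended effect (not a pattern; the exact
   outcome depends on the target's integer type precisions):

     #include <math.h>

     long long f (double x)
     {
       // On LP64 targets, where long and long long have the same precision,
       // this llround call is canonicalized to lround.
       return llround (x);
     }
*/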

/* cproj(x) -> x if we're ignoring infinities. */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i). */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1. */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x. */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x. */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x). */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x). */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
             real_equal (value, &tmp)))
     (cbrts @0))))))
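
/* Illustrative C sketch of the source forms these constant-exponent cases
   cover (an illustration, not a pattern; the sqrt and cbrt cases additionally
   assume -funsafe-math-optimizations):

     #include <math.h>

     double f (double x)
     {
       double a = pow (x, 1.0);    // folds to x
       double b = pow (x, -1.0);   // folds to 1.0 / x
       double c = pow (x, 0.5);    // canonicalized to sqrt (x)
       return a + b + c;
     }
*/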

/* powi(1,x) -> 1. */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1. */
  (if (wi::eq_p (@1, 0))
   { build_real (type, dconst1); })
  /* powi(x,1) -> x. */
  (if (wi::eq_p (@1, 1))
   @0)
  /* powi(x,-1) -> 1/x. */
  (if (wi::eq_p (@1, -1))
   (rdiv { build_real (type, dconst1); } @0))))

/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here.  */

/* If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion, then convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behavior), perform the
   operation and convert the result to the desired type.  */
(for op (plus minus)
  (simplify
    (convert (op:s (convert@2 @0) (convert?@3 @1)))
    (if (INTEGRAL_TYPE_P (type)
         /* We check for type compatibility between @0 and @1 below,
            so there's no need to check that @1/@3 are integral types.  */
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && INTEGRAL_TYPE_P (TREE_TYPE (@2))
         /* The precision of the type of each operand must match the
            precision of the mode of each operand, similarly for the
            result.  */
         && (TYPE_PRECISION (TREE_TYPE (@0))
             == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
         && (TYPE_PRECISION (TREE_TYPE (@1))
             == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
         && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
         /* The inner conversion must be a widening conversion.  */
         && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
         && types_match (@0, type)
         && (types_match (@0, @1)
             /* Or the second operand is const integer or converted const
                integer from valueize.  */
             || TREE_CODE (@1) == INTEGER_CST))
      (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
        (op @0 (convert @1))
        (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
          (convert (op (convert:utype @0)
                       (convert:utype @1))))))))
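
/* A minimal C sketch of the narrowing above (illustration only; it assumes
   16-bit short and 32-bit int, so the usual arithmetic conversions widen
   the operands to int before the addition):

     short f (short a, short b)
     {
       return a + b;   // seen as (short) ((int) a + (int) b)
     }

   Since signed short does not wrap on overflow, the addition is instead
   carried out in unsigned short and the result converted back to short.  */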

/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation.  */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result.  */
       && (TYPE_PRECISION (TREE_TYPE (@0))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
       && (TYPE_PRECISION (TREE_TYPE (@1))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
       && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
           <= TYPE_PRECISION (TREE_TYPE (@0)))
       && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
                                      true, TYPE_PRECISION (type))) == 0))
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
               (convert:utype @4))))))))
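
/* Illustrative C sketch of the masked form handled above (an assumption
   about a typical source shape, not a pattern; the unsigned char operands
   are promoted to int before the subtraction):

     unsigned char f (unsigned char a, unsigned char b)
     {
       return (a - b) & 0x7f;   // mask fits in unsigned char precision
     }

   The subtraction and mask can then be performed directly in unsigned char
   instead of int.  */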

/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max.  */
(for op (lt le gt ge)
     ext (min min max max)
 (simplify
  (bit_and (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))
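
/* A C-level illustration of the min/max rewrite (a sketch, assuming the
   && of the two comparisons has already been lowered to a single bit_and,
   as happens when both operands are simple):

     int f (int x, int a, int b)
     {
       return x < a && x < b;   // becomes x < MIN (a, b), a MIN_EXPR
     }
*/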

(simplify
 /* signbit(x) -> 0 if x is nonnegative. */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
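
/* Illustrative C sketch (not a pattern; the rewrite assumes signed zeros
   are not honored, e.g. with -fno-signed-zeros):

     #include <math.h>

     int f (double x)
     {
       return signbit (x);   // becomes x < 0.0 when signed zeros are ignored
     }
*/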

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
        && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
        && !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
        fold_overflow_warning (("assuming signed overflow does not occur "
                                "when simplifying conditional to constant"),
                               WARN_STRICT_OVERFLOW_CONDITIONAL);
        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
        /* wi::ges_p (@2, 0) should be sufficient for a signed type.  */
        bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
                        != (op == MINUS_EXPR);
        constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
        {
          fold_overflow_warning (("assuming signed overflow does not occur "
                                  "when changing X +- C1 cmp C2 to "
                                  "X cmp C2 -+ C1"),
                                 WARN_STRICT_OVERFLOW_COMPARISON);
        }
        (cmp @0 { res; })))))))))
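
/* Worked C examples of the rewrite above (illustration only; the relational
   cases assume signed overflow is undefined, i.e. no -fwrapv):

     int f (int x)
     {
       return x + 10 == 20;   // becomes x == 10
     }

     int g (int x)
     {
       return x - 5 < 100;    // becomes x < 105
     }
*/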

/* Canonicalizations of BIT_FIELD_REFs.  */

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that referenced the full argument can be stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
            && integer_zerop (@2))
           /* Low-parts can be reduced to integral conversions.
              ??? The following doesn't work for PDP endian.  */
           || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
               /* Don't even think about BITS_BIG_ENDIAN.  */
               && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
               && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
               && compare_tree_int (@2, (BYTES_BIG_ENDIAN
                                         ? (TYPE_PRECISION (TREE_TYPE (@0))
                                            - TYPE_PRECISION (type))
                                         : 0)) == 0)))
   (convert @0))))

/* Simplify vector extracts.  */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
          || (VECTOR_TYPE_P (type)
              && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
                  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   (if (n != 0
        && (idx % width) == 0
        && (n % width) == 0
        && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       unsigned HOST_WIDE_INT k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
         {
           tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
           if (TREE_CODE (cons_elem) == VECTOR_TYPE)
             k = TYPE_VECTOR_SUBPARTS (cons_elem);
         }
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if ((idx % k) == 0 && (n % k) == 0)
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
        { build_constructor (type, NULL); }
        (with
         {
           idx /= k;
           n /= k;
         }
         (if (n == 1)
          (if (idx < CONSTRUCTOR_NELTS (ctor))
           { CONSTRUCTOR_ELT (ctor, idx)->value; }
           { build_zero_cst (type); })
          {
            vec<constructor_elt, va_gc> *vals;
            vec_alloc (vals, n);
            for (unsigned i = 0;
                 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
              CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
                                      CONSTRUCTOR_ELT (ctor, idx + i)->value);
            build_constructor (type, vals);
          }))))
      /* The bitfield references a single constructor element.  */
      (if (idx + n <= (idx / k + 1) * k)
       (switch
        (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
         { build_zero_cst (type); })
        (if (n == k)
         { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
        (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
                       @1 { bitsize_int ((idx % k) * width); })))))))))