/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep
   real_zerop real_onep real_minus_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt   le   eq ne ge   gt   unordered ordered   unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge   gt   ne eq lt   le   ordered   unordered ge   gt   le   lt   ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered   unordered ge   gt   le   lt   ltgt uneq)


/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
 (simplify
  (op @0 integer_zerop)
  (non_lvalue @0)))
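
/* Illustrative example (not itself a pattern): on GIMPLE, "a + 0",
   "a - 0", "a | 0" and "a ^ 0" all simplify to a itself, and a
   pointer_plus with zero offset yields the pointer unchanged.  */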

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it gives the wrong result for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
  { build_zero_cst (type); }))
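
/* Illustrative example (not itself a pattern): "i - i" folds to 0 for
   integer i, but "d - d" for a double is left alone when NaNs are
   honored, since d - d is NaN rather than 0.0 when d is a NaN or an
   infinity.  */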

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Make sure to preserve divisions by zero.  This is the reason why
   we don't simplify x / x to 1 or 0 / x to 0.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (op @0 integer_onep)
  (non_lvalue @0)))

/* Same applies to modulo operations, but fold is inconsistent here
   and simplifies 0 % x to 0, only preserving literal 0 % 0.  */
(for op (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (op integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (op @0 integer_onep)
  { build_zero_cst (type); }))
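
/* Illustrative example (not itself a pattern): "0 % n" folds to 0 unless
   n is the literal constant 0, so "0 % 0" survives to trigger the usual
   division-by-zero diagnostics, while "n % 1" always folds to 0.  */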

/* x | ~0 -> ~0 */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x & 0 -> 0 */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x, x | x -> x */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))
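
/* Illustrative summary (not itself a pattern) of the identities above:
   "x | ~0" -> ~0, "x & 0" -> 0, "x ^ x" -> 0, "x ^ ~0" -> ~x,
   "x & ~0" -> x, "x & x" -> x and "x | x" -> x.  */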

(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)


/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   combining successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || (GIMPLE && types_compatible_p (TREE_TYPE (@0), TREE_TYPE (@1)))
        || (GENERIC && TREE_TYPE (@0) == TREE_TYPE (@1)))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constant (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
   (convert (bitop @0 (convert @1))))))
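
/* Illustrative example (not itself a pattern): with 32-bit int and
   unsigned char c, "(int) c | 2" can be rewritten as "(int) (c | 2)",
   hoisting the widening conversion past the narrower bitwise operation;
   the BIT_AND_EXPR case is restricted to GIMPLE as noted above.  */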

/* Simplify (A & B) OP0 (C & B) to (A OP0 C) & B.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bit_and:c @0 @1) (bit_and @2 @1))
  (bit_and (bitop @0 @2) @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bitop @0 (bitop @1 @2))))
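
/* Illustrative example (not itself a pattern): "(x | 3) & 5" becomes
   "(x & 5) | 1" since 3 & 5 == 1, and "(x & 12) & 10" becomes "x & 8"
   since 12 & 10 == 8.  */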

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_onep)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_onep)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
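
/* Illustrative examples (not themselves patterns): a 1-bit bitfield or
   _Bool value b is truth_valued_p, as is any comparison "a < b";
   "~b", "b == 0", "b != 1" and "b ^ 1" all match
   (logical_inverted_value b).  */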

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))
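
/* These are the absorption laws of Boolean algebra: e.g. (x | y) & x == x
   because a bit can only be set in the result where x has it set and every
   bit of x survives, while (~x | y) & x == x & y since the ~x half
   contributes nothing where x has bits set.  */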

/* If arg1 and arg2 are booleans (or any single bit type)
   then try to simplify:

   (~X & Y) -> X < Y
   (X & ~Y) -> Y < X
   (~X | Y) -> X <= Y
   (X | ~Y) -> Y <= X

   But only do this if our result feeds into a comparison as
   this transformation is not always a win, particularly on
   targets with and-not instructions.
   -> simplify_bitwise_binary_boolean */
(simplify
 (ne (bit_and:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (lt @0 @1)))
(simplify
 (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (le @0 @1)))
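
/* Illustrative example (not itself a pattern): for _Bool a and b,
   "(!a & b) != 0" becomes "a < b", which is true exactly when a == 0
   and b == 1; likewise "(!a | b) != 0" becomes "a <= b".  */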

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)


/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler form, easier to analyze with respect to alignment:
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
  (bit_and @0 { algn; })))
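
/* Illustrative example (not itself a pattern): at the C level this turns
   the round-down-to-alignment idiom
     q = p + (-((sizetype) p & (align - 1)));
   into
     q = p & ~(align - 1);
   where @1 above is the constant mask align - 1.  */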


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c (convert1? @0) (convert2? (negate @1)))
  /* Apply STRIP_NOPS on @0 and the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (minus (convert @0) (convert @1))))
 /* A - (-B) -> A + B */
 (simplify
  (minus (convert1? @0) (convert2? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (plus (convert @0) (convert @1))))
 /* -(-A) -> A */
 (simplify
  (negate (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (convert @1)))

 /* We can't reassociate floating-point or fixed-point plus or minus
    because of saturation to +-Inf.  */
 (if (!FLOAT_TYPE_P (type) && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  */
  /* (A +- B) - A -> +- B */
  /* (A +- B) -+ B -> A */
  /* A - (A +- B) -> -+ B */
  /* A +- (B -+ A) -> +- B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  (simplify
   (minus @0 (minus @0 @1))
   @1)

  /* (A +- CST) +- CST -> A + CST  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
    (simplify
     (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
     /* If the constant operation overflows we cannot do the transform
        as we would introduce undefined overflow, for example
        with (a - 1) + INT_MIN.  */
     (with { tree cst = fold_binary (outer_op == inner_op
                                     ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
      (if (cst && !TREE_OVERFLOW (cst))
       (inner_op @0 { cst; } ))))))
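
  /* Illustrative example (not itself a pattern): "(a + 5) - 2" folds to
     "a + 3", but for signed a "(a - 1) + INT_MIN" is left alone: the
     combined constant 1 - INT_MIN overflows, so TREE_OVERFLOW is set on
     the folded constant.  */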

  /* (CST - A) +- CST -> CST - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = fold_binary (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (bit_not @0) integer_each_onep)
   (negate @0))

  /* (T)(P + A) - (T)P -> (T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @0 @1))
     (convert @0))
    (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)4294967295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (convert @1))))))
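
/* Illustrative example (not itself a pattern): "(size_t)(p + 4) - (size_t)p"
   folds to "(size_t) 4" for a pointer p, since the constant offset has the
   sign bit clear; signed integer cases are covered by the
   TYPE_OVERFLOW_UNDEFINED test instead.  */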


/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
 (view_convert (view_convert @0))
 (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
 (view_convert @0)
 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
  (convert @0)))

/* Strip inner integral conversions that do not change precision or size.  */
(simplify
 (view_convert (convert@0 @1))
 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
      && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
      && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
  (view_convert @1)))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
 (paren CONSTANT_CLASS_P@0)
 @0)
(simplify
 (paren (paren@1 @0))
 @1)

/* Handle cases of two conversions in a row.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = TREE_CODE (inside_type) == VECTOR_TYPE;
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = TREE_CODE (inter_type) == VECTOR_TYPE;
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = TREE_CODE (type) == VECTOR_TYPE;
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   /* In addition to the cases of two conversions in a row
      handled below, if we are converting something to its own
      type via an object of identical or wider precision, neither
      conversion is needed.  */
   (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
         || (GENERIC
             && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
        && (((inter_int || inter_ptr) && final_int)
            || (inter_float && final_float))
        && inter_prec >= final_prec)
    (ocvt @0))

   /* Likewise, if the intermediate and initial types are either both
      float or both integer, we don't need the middle conversion if the
      former is wider than the latter and doesn't change the signedness
      (for integers).  Avoid this if the final type is a pointer since
      then we sometimes need the middle conversion.  Likewise if the
      final type has a precision not equal to the size of its mode.  */
   (if (((inter_int && inside_int)
         || (inter_float && inside_float)
         || (inter_vec && inside_vec))
        && inter_prec >= inside_prec
        && (inter_float || inter_vec
            || inter_unsignedp == inside_unsignedp)
        && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
              && TYPE_MODE (type) == TYPE_MODE (inter_type))
        && ! final_ptr
        && (! final_vec || inter_prec == inside_prec))
    (ocvt @0))

   /* If we have a sign-extension of a zero-extended value, we can
      replace that by a single zero-extension.  Likewise if the
      final conversion does not change precision we can drop the
      intermediate conversion.  */
   (if (inside_int && inter_int && final_int
        && ((inside_prec < inter_prec && inter_prec < final_prec
             && inside_unsignedp && !inter_unsignedp)
            || final_prec == inter_prec))
    (ocvt @0))

   /* Two conversions in a row are not needed unless:
        - some conversion is floating-point (overstrict for now), or
        - some conversion is a vector (overstrict for now), or
        - the intermediate type is narrower than both initial and
          final, or
        - the intermediate type and innermost type differ in signedness,
          and the outermost type is wider than the intermediate, or
        - the initial type is a pointer type and the precisions of the
          intermediate and final types differ, or
        - the final type is a pointer type and the precisions of the
          initial and intermediate types differ.  */
   (if (! inside_float && ! inter_float && ! final_float
        && ! inside_vec && ! inter_vec && ! final_vec
        && (inter_prec >= inside_prec || inter_prec >= final_prec)
        && ! (inside_int && inter_int
              && inter_unsignedp != inside_unsignedp
              && inter_prec < final_prec)
        && ((inter_unsignedp && inter_prec > inside_prec)
            == (final_unsignedp && final_prec > inter_prec))
        && ! (inside_ptr && inter_prec != final_prec)
        && ! (final_ptr && inside_prec != inter_prec)
        && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
              && TYPE_MODE (type) == TYPE_MODE (inter_type)))
    (ocvt @0))

   /* A truncation to an unsigned type (a zero-extension) should be
      canonicalized as bitwise and of a mask.  */
   (if (final_int && inter_int && inside_int
        && final_prec == inside_prec
        && final_prec > inter_prec
        && inter_unsignedp)
    (convert (bit_and @0 { wide_int_to_tree
                             (inside_type,
                              wi::mask (inter_prec, false,
                                        TYPE_PRECISION (inside_type))); })))

   /* If we are converting an integer to a floating-point type that can
      represent it exactly and back to an integer, we can skip the
      floating-point conversion.  */
   (if (inside_int && inter_float && final_int &&
        (unsigned) significand_size (TYPE_MODE (inter_type))
        >= inside_prec - !inside_unsignedp)
    (convert @0))))))
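
/* Illustrative examples (not themselves patterns, assuming 16-bit short
   and 32-bit int): for unsigned char c, "(int)(unsigned short) c" needs
   no intermediate conversion, while for int x, "(int)(unsigned short) x"
   is canonicalized to "x & 0xffff", the mask form of the zero-extension.  */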

/* If we have a narrowing conversion to an integral type that is fed by a
   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
   masks off bits outside the final type (and nothing else).  */
(simplify
 (convert (bit_and @0 INTEGER_CST@1))
 (if (INTEGRAL_TYPE_P (type)
      && INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
      && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
                                                   TYPE_PRECISION (type)), 0))
  (convert @0)))


/* (X /[ex] A) * A -> X.  */
(simplify
 (mult (convert? (exact_div @0 @1)) @1)
 /* Look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
  (convert @0)))
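
/* Illustrative note: EXACT_DIV_EXPR arises e.g. when pointer subtraction
   is lowered to a division of the byte difference by the element size, so
   multiplying such a division back by the same operand folds to the
   original value; a plain trunc_div must not be folded this way since it
   rounds.  */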


/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
(simplify
 (complex (realpart @0) (imagpart @0))
 @0)
(simplify
 (realpart (complex @0 @1))
 @0)
(simplify
 (imagpart (complex @0 @1))
 @1)


/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.  */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
 (simplify
  (bswap (bswap @0))
  @0)
 (simplify
  (bswap (bit_not (bswap @0)))
  (bit_not @0))
 (for bitop (bit_xor bit_ior bit_and)
  (simplify
   (bswap (bitop:c (bswap @0) @1))
   (bitop @0 (bswap @1)))))
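
/* Illustrative example (not itself a pattern):
   "__builtin_bswap32 (__builtin_bswap32 (x))" folds to x, and
   "__builtin_bswap32 (__builtin_bswap32 (x) & y)" becomes
   "x & __builtin_bswap32 (y)", since bswap distributes over
   bitwise operations.  */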


/* Combine COND_EXPRs and VEC_COND_EXPRs.  */

/* Simplify constant conditions.
   Only optimize constant conditions when the selected branch
   has the same type as the COND_EXPR.  This avoids optimizing
   away "c ? x : throw", where the throw has a void type.
   Note that we cannot throw away the fold-const.c variant nor
   this one as we depend on doing this transform before possibly
   A ? B : B -> B triggers and the fold-const.c one can optimize
   0 ? A : B to B even if A has side-effects, something
   genmatch cannot handle.  */
(simplify
 (cond INTEGER_CST@0 @1 @2)
 (if (integer_zerop (@0)
      && (!VOID_TYPE_P (TREE_TYPE (@2))
          || VOID_TYPE_P (type)))
  @2)
 (if (!integer_zerop (@0)
      && (!VOID_TYPE_P (TREE_TYPE (@1))
          || VOID_TYPE_P (type)))
  @1))
(simplify
 (vec_cond VECTOR_CST@0 @1 @2)
 (if (integer_all_onesp (@0))
  @1)
 (if (integer_zerop (@0))
  @2))

(for cnd (cond vec_cond)
 /* A ? B : (A ? X : C) -> A ? B : C.  */
 (simplify
  (cnd @0 (cnd @0 @1 @2) @3)
  (cnd @0 @1 @3))
 (simplify
  (cnd @0 @1 (cnd @0 @2 @3))
  (cnd @0 @1 @3))

 /* A ? B : B -> B.  */
 (simplify
  (cnd @0 @1 @1)
  @1))

/* !A ? B : C -> A ? C : B.  */
(simplify
 (cond (logical_inverted_value truth_valued_p@0) @1 @2)
 (cond @0 @2 @1))


/* Simplifications of comparisons.  */

/* We can simplify a logical negation of a comparison to the
   inverted comparison.  As we cannot compute an expression
   operator using invert_tree_comparison we have to simulate
   that with expression code iteration.  */
(for cmp (tcc_comparison)
     icmp (inverted_tcc_comparison)
     ncmp (inverted_tcc_comparison_with_nans)
 /* Ideally we'd like to combine the following two patterns
    and handle some more cases by using
      (logical_inverted_value (cmp @0 @1))
    here but for that genmatch would need to "inline" that.
    For now implement what forward_propagate_comparison did.  */
 (simplify
  (bit_not (cmp @0 @1))
  (if (VECTOR_TYPE_P (type)
       || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
   /* Comparison inversion may be impossible for trapping math,
      invert_tree_comparison will tell us.  But we can't use
      a computed operator in the replacement tree thus we have
      to play the trick below.  */
   (with { enum tree_code ic = invert_tree_comparison
             (cmp, HONOR_NANS (TYPE_MODE (TREE_TYPE (@0)))); }
    (if (ic == icmp)
     (icmp @0 @1))
    (if (ic == ncmp)
     (ncmp @0 @1)))))
 (simplify
  (bit_xor (cmp @0 @1) integer_onep)
  (if (INTEGRAL_TYPE_P (type))
   (with { enum tree_code ic = invert_tree_comparison
             (cmp, HONOR_NANS (TYPE_MODE (TREE_TYPE (@0)))); }
    (if (ic == icmp)
     (icmp @0 @1))
    (if (ic == ncmp)
     (ncmp @0 @1))))))
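
/* Illustrative example (not itself a pattern): for integer a and b,
   "!(a < b)" folds to "a >= b" via the icmp list; when NaNs are honored
   invert_tree_comparison returns the unordered variant instead, so
   "!(a < b)" on doubles folds to UNGE_EXPR via the ncmp list.  */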