/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2020 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   initializer_each_zero_or_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS
   uniform_vector_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
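
/* For example, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above defines the
   operator lists XFLOORF, XFLOOR and XFLOORL; XFLOOR in particular
   expands to BUILT_IN_IFLOOR BUILT_IN_LFLOOR BUILT_IN_LLFLOOR, i.e. the
   int, long and long long flavours of the double rounding function.  */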
80 | ||
81 | /* Binary operations and their associated IFN_COND_* function. */ | |
82 | (define_operator_list UNCOND_BINARY | |
83 | plus minus | |
84 | mult trunc_div trunc_mod rdiv | |
85 | min max | |
86 | bit_and bit_ior bit_xor | |
87 | lshift rshift) | |
88 | (define_operator_list COND_BINARY | |
89 | IFN_COND_ADD IFN_COND_SUB | |
90 | IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV | |
91 | IFN_COND_MIN IFN_COND_MAX | |
92 | IFN_COND_AND IFN_COND_IOR IFN_COND_XOR | |
93 | IFN_COND_SHL IFN_COND_SHR) | |
94 | ||
95 | /* Same for ternary operations. */ | |
96 | (define_operator_list UNCOND_TERNARY | |
97 | IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS) | |
98 | (define_operator_list COND_TERNARY | |
99 | IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS) | |
100 | ||
/* With nop_convert? combine convert? and view_convert? in one pattern
   and conditionalize on tree_nop_conversion_p conversions.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))

/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand and the
   operand of the ABSU_EXPR will have the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))

#if GIMPLE
/* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X).  */
(simplify
 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
  (abs @0)))
#endif
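
/* The matched form is the classic branchless abs idiom.  As a C sketch
   for a 32-bit int (assuming an arithmetic right shift):

     int m = x >> 31;         m is 0 when x >= 0 and -1 when x < 0
     int a = (x + m) ^ m;     a == abs (x)

   For negative x, adding m subtracts one and the xor then complements,
   which together compute -x; nonnegative x passes through unchanged.  */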
132 | ||
133 | /* Simplifications of operations with one constant operand and | |
134 | simplifications to constants or single values. */ | |
135 | ||
136 | (for op (plus pointer_plus minus bit_ior bit_xor) | |
137 | (simplify | |
138 | (op @0 integer_zerop) | |
139 | (non_lvalue @0))) | |
140 | ||
141 | /* 0 +p index -> (type)index */ | |
142 | (simplify | |
143 | (pointer_plus integer_zerop @1) | |
144 | (non_lvalue (convert @1))) | |
145 | ||
146 | /* ptr - 0 -> (type)ptr */ | |
147 | (simplify | |
148 | (pointer_diff @0 integer_zerop) | |
149 | (convert @0)) | |
150 | ||
151 | /* See if ARG1 is zero and X + ARG1 reduces to X. | |
152 | Likewise if the operands are reversed. */ | |
153 | (simplify | |
154 | (plus:c @0 real_zerop@1) | |
155 | (if (fold_real_zero_addition_p (type, @1, 0)) | |
156 | (non_lvalue @0))) | |
157 | ||
158 | /* See if ARG1 is zero and X - ARG1 reduces to X. */ | |
159 | (simplify | |
160 | (minus @0 real_zerop@1) | |
161 | (if (fold_real_zero_addition_p (type, @1, 1)) | |
162 | (non_lvalue @0))) | |
163 | ||
/* Even if fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into a qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
        && real_zerop (@2)
        && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
                               ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
            bool outer_plus
              = ((outer_op == PLUS_EXPR)
                 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
    (if (outer_plus && !inner_plus)
     (outer_op @0 @2)
     @3))))))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }.  */
(simplify
 (mult SSA_NAME@1 SSA_NAME@2)
 (if (INTEGRAL_TYPE_P (type)
      && get_nonzero_bits (@1) == 1
      && get_nonzero_bits (@2) == 1)
  (bit_and @1 @2)))

/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ... },
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
        && (!VECTOR_MODE_P (TYPE_MODE (type))
            || (VECTOR_MODE_P (TYPE_MODE (itype))
                && optab_handler (and_optab,
                                  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
                                 (ne @1 { build_zero_cst (type); })))))))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
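
/* Concretely, when NaNs and signed zeros can be ignored, a C expression
   such as

     double s = x > 0.0 ? 1.0 : -1.0;

   is rewritten to __builtin_copysign (1.0, x), and the x < 0.0 variant
   to __builtin_copysign (1.0, -x), matching the outp/outn columns of
   the iterator above.  */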
286 | ||
287 | /* Transform X * copysign (1.0, X) into abs(X). */ | |
288 | (simplify | |
289 | (mult:c @0 (COPYSIGN_ALL real_onep @0)) | |
290 | (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) | |
291 | (abs @0))) | |
292 | ||
293 | /* Transform X * copysign (1.0, -X) into -abs(X). */ | |
294 | (simplify | |
295 | (mult:c @0 (COPYSIGN_ALL real_onep (negate @0))) | |
296 | (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) | |
297 | (negate (abs @0)))) | |
298 | ||
299 | /* Transform copysign (CST, X) into copysign (ABS(CST), X). */ | |
300 | (simplify | |
301 | (COPYSIGN_ALL REAL_CST@0 @1) | |
302 | (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0))) | |
303 | (COPYSIGN_ALL (negate @0) @1))) | |
304 | ||
305 | /* X * 1, X / 1 -> X. */ | |
306 | (for op (mult trunc_div ceil_div floor_div round_div exact_div) | |
307 | (simplify | |
308 | (op @0 integer_onep) | |
309 | (non_lvalue @0))) | |
310 | ||
/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   Also handle widening conversions, like:
   (A / (unsigned long long) (1U << B)) -> (A >> B)
   or
   (A / (unsigned long long) (1 << B)) -> (A >> B).
   If the left shift is signed, it can be done only if the upper bits
   of A starting from shift's type sign bit are zero, as
   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
   so it is valid only if A >> 31 is zero.  */
(simplify
 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
      && (useless_type_conversion_p (type, TREE_TYPE (@1))
          || (element_precision (type) >= element_precision (TREE_TYPE (@1))
              && (TYPE_UNSIGNED (TREE_TYPE (@1))
                  || (element_precision (type)
                      == element_precision (TREE_TYPE (@1)))
                  || (INTEGRAL_TYPE_P (type)
                      && (tree_nonzero_bits (@0)
                          & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
                                      true,
                                      element_precision (type))) == 0)))))
  (rshift @0 @2)))
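
/* For instance, with unsigned a this turns

     unsigned q = a / (1u << b);    into    unsigned q = a >> b;

   and the widening forms above do the same across a cast to a wider
   unsigned type, subject to the sign-bit condition just described.  */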
340 | ||
341 | /* Preserve explicit divisions by 0: the C++ front-end wants to detect | |
342 | undefined behavior in constexpr evaluation, and assuming that the division | |
343 | traps enables better optimizations than these anyway. */ | |
344 | (for div (trunc_div ceil_div floor_div round_div exact_div) | |
345 | /* 0 / X is always zero. */ | |
346 | (simplify | |
347 | (div integer_zerop@0 @1) | |
348 | /* But not for 0 / 0 so that we can get the proper warnings and errors. */ | |
349 | (if (!integer_zerop (@1)) | |
350 | @0)) | |
351 | /* X / -1 is -X. */ | |
352 | (simplify | |
353 | (div @0 integer_minus_onep@1) | |
354 | (if (!TYPE_UNSIGNED (type)) | |
355 | (negate @0))) | |
356 | /* X / X is one. */ | |
357 | (simplify | |
358 | (div @0 @0) | |
359 | /* But not for 0 / 0 so that we can get the proper warnings and errors. | |
360 | And not for _Fract types where we can't build 1. */ | |
361 | (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type))) | |
362 | { build_one_cst (type); })) | |
363 | /* X / abs (X) is X < 0 ? -1 : 1. */ | |
364 | (simplify | |
365 | (div:C @0 (abs @0)) | |
366 | (if (INTEGRAL_TYPE_P (type) | |
367 | && TYPE_OVERFLOW_UNDEFINED (type)) | |
368 | (cond (lt @0 { build_zero_cst (type); }) | |
369 | { build_minus_one_cst (type); } { build_one_cst (type); }))) | |
370 | /* X / -X is -1. */ | |
371 | (simplify | |
372 | (div:C @0 (negate @0)) | |
373 | (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) | |
374 | && TYPE_OVERFLOW_UNDEFINED (type)) | |
375 | { build_minus_one_cst (type); }))) | |
376 | ||
377 | /* For unsigned integral types, FLOOR_DIV_EXPR is the same as | |
378 | TRUNC_DIV_EXPR. Rewrite into the latter in this case. */ | |
379 | (simplify | |
380 | (floor_div @0 @1) | |
381 | (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) | |
382 | && TYPE_UNSIGNED (type)) | |
383 | (trunc_div @0 @1))) | |
384 | ||
385 | /* Combine two successive divisions. Note that combining ceil_div | |
386 | and floor_div is trickier and combining round_div even more so. */ | |
387 | (for div (trunc_div exact_div) | |
388 | (simplify | |
389 | (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2) | |
390 | (with { | |
391 | wi::overflow_type overflow; | |
392 | wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), | |
393 | TYPE_SIGN (type), &overflow); | |
394 | } | |
395 | (if (div == EXACT_DIV_EXPR | |
396 | || optimize_successive_divisions_p (@2, @3)) | |
397 | (if (!overflow) | |
398 | (div @0 { wide_int_to_tree (type, mul); }) | |
399 | (if (TYPE_UNSIGNED (type) | |
400 | || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)) | |
401 | { build_zero_cst (type); })))))) | |
402 | ||
/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C). */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
   (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
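
/* E.g. for signed int x, x % 4 == 0 becomes (unsigned) x % 4u == 0,
   which the power-of-two modulo pattern further below can then turn
   into ((unsigned) x & 3u) == 0.  */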
574 | ||
575 | /* X % -C is the same as X % C. */ | |
576 | (simplify | |
577 | (trunc_mod @0 INTEGER_CST@1) | |
578 | (if (TYPE_SIGN (type) == SIGNED | |
579 | && !TREE_OVERFLOW (@1) | |
580 | && wi::neg_p (wi::to_wide (@1)) | |
581 | && !TYPE_OVERFLOW_TRAPS (type) | |
582 | /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ | |
583 | && !sign_bit_p (@1, @1)) | |
584 | (trunc_mod @0 (negate @1)))) | |
585 | ||
586 | /* X % -Y is the same as X % Y. */ | |
587 | (simplify | |
588 | (trunc_mod @0 (convert? (negate @1))) | |
589 | (if (INTEGRAL_TYPE_P (type) | |
590 | && !TYPE_UNSIGNED (type) | |
591 | && !TYPE_OVERFLOW_TRAPS (type) | |
592 | && tree_nop_conversion_p (type, TREE_TYPE (@1)) | |
593 | /* Avoid this transformation if X might be INT_MIN or | |
594 | Y might be -1, because we would then change valid | |
595 | INT_MIN % -(-1) into invalid INT_MIN % -1. */ | |
596 | && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type))) | |
597 | || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION | |
598 | (TREE_TYPE (@1)))))) | |
599 | (trunc_mod @0 (convert @1)))) | |
600 | ||
601 | /* X - (X / Y) * Y is the same as X % Y. */ | |
602 | (simplify | |
603 | (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1))) | |
604 | (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) | |
605 | (convert (trunc_mod @0 @1)))) | |
606 | ||
/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
        && tree_nop_conversion_p (type, TREE_TYPE (@3))
        && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
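
/* E.g. for unsigned x this rewrites x % 8 as x & 7, and
   x % (4u << n) as x & ((4u << n) - 1).  */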
623 | ||
624 | /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */ | |
625 | (simplify | |
626 | (trunc_div (mult @0 integer_pow2p@1) @1) | |
627 | (if (TYPE_UNSIGNED (TREE_TYPE (@0))) | |
628 | (bit_and @0 { wide_int_to_tree | |
629 | (type, wi::mask (TYPE_PRECISION (type) | |
630 | - wi::exact_log2 (wi::to_wide (@1)), | |
631 | false, TYPE_PRECISION (type))); }))) | |
632 | ||
633 | /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */ | |
634 | (simplify | |
635 | (mult (trunc_div @0 integer_pow2p@1) @1) | |
636 | (if (TYPE_UNSIGNED (TREE_TYPE (@0))) | |
637 | (bit_and @0 (negate @1)))) | |
638 | ||
/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))
679 | ||
680 | /* abs(x)*abs(x) -> x*x. Should be valid for all types. */ | |
681 | (simplify | |
682 | (mult (abs@1 @0) @1) | |
683 | (mult @0 @0)) | |
684 | ||
685 | /* Convert absu(x)*absu(x) -> x*x. */ | |
686 | (simplify | |
687 | (mult (absu@1 @0) @1) | |
688 | (mult (convert@2 @0) @2)) | |
689 | ||
690 | /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */ | |
691 | (for coss (COS COSH) | |
692 | copysigns (COPYSIGN) | |
693 | (simplify | |
694 | (coss (copysigns @0 @1)) | |
695 | (coss @0))) | |
696 | ||
697 | /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */ | |
698 | (for pows (POW) | |
699 | copysigns (COPYSIGN) | |
700 | (simplify | |
701 | (pows (copysigns @0 @2) REAL_CST@1) | |
702 | (with { HOST_WIDE_INT n; } | |
703 | (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0) | |
704 | (pows @0 @1))))) | |
705 | /* Likewise for powi. */ | |
706 | (for pows (POWI) | |
707 | copysigns (COPYSIGN) | |
708 | (simplify | |
709 | (pows (copysigns @0 @2) INTEGER_CST@1) | |
710 | (if ((wi::to_wide (@1) & 1) == 0) | |
711 | (pows @0 @1)))) | |
712 | ||
713 | (for hypots (HYPOT) | |
714 | copysigns (COPYSIGN) | |
715 | /* hypot(copysign(x, y), z) -> hypot(x, z). */ | |
716 | (simplify | |
717 | (hypots (copysigns @0 @1) @2) | |
718 | (hypots @0 @2)) | |
719 | /* hypot(x, copysign(y, z)) -> hypot(x, y). */ | |
720 | (simplify | |
721 | (hypots @0 (copysigns @1 @2)) | |
722 | (hypots @0 @1))) | |
723 | ||
/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))
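
/* E.g. for 32-bit unsigned x, 1u << (31 - x) becomes 0x80000000u >> x,
   saving the subtraction.  */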
776 | ||
777 | /* Fold (C1/X)*C2 into (C1*C2)/X. */ | |
778 | (simplify | |
779 | (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2) | |
780 | (if (flag_associative_math | |
781 | && single_use (@3)) | |
782 | (with | |
783 | { tree tem = const_binop (MULT_EXPR, type, @0, @2); } | |
784 | (if (tem) | |
785 | (rdiv { tem; } @1))))) | |
786 | ||
787 | /* Simplify ~X & X as zero. */ | |
788 | (simplify | |
789 | (bit_and:c (convert? @0) (convert? (bit_not @0))) | |
790 | { build_zero_cst (type); }) | |
791 | ||
792 | /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */ | |
793 | (simplify | |
794 | (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep)) | |
795 | (if (TYPE_UNSIGNED (type)) | |
796 | (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1))))) | |
797 | ||
(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))
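
/* So a C condition like

     if (x == 0 && y == 0) ...

   is evaluated with a single comparison, if ((x | (typeof (x)) y) == 0),
   when both operands have the same integer precision.  */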
818 | ||
819 | /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */ | |
820 | (simplify | |
821 | (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1)) | |
822 | (minus (bit_xor @0 @1) @1)) | |
823 | (simplify | |
824 | (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1)) | |
825 | (if (~wi::to_wide (@2) == wi::to_wide (@1)) | |
826 | (minus (bit_xor @0 @1) @1))) | |
827 | ||
828 | /* Fold (A & B) - (A & ~B) into B - (A ^ B). */ | |
829 | (simplify | |
830 | (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1))) | |
831 | (minus @1 (bit_xor @0 @1))) | |
832 | ||
833 | /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */ | |
834 | (for op (bit_ior bit_xor plus) | |
835 | (simplify | |
836 | (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1)) | |
837 | (bit_xor @0 @1)) | |
838 | (simplify | |
839 | (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1)) | |
840 | (if (~wi::to_wide (@2) == wi::to_wide (@1)) | |
841 | (bit_xor @0 @1)))) | |
842 | ||
843 | /* PR53979: Transform ((a ^ b) | a) -> (a | b) */ | |
844 | (simplify | |
845 | (bit_ior:c (bit_xor:c @0 @1) @0) | |
846 | (bit_ior @0 @1)) | |
847 | ||
848 | /* (a & ~b) | (a ^ b) --> a ^ b */ | |
849 | (simplify | |
850 | (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1)) | |
851 | @2) | |
852 | ||
853 | /* (a & ~b) ^ ~a --> ~(a & b) */ | |
854 | (simplify | |
855 | (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0)) | |
856 | (bit_not (bit_and @0 @1))) | |
857 | ||
858 | /* (~a & b) ^ a --> (a | b) */ | |
859 | (simplify | |
860 | (bit_xor:c (bit_and:cs (bit_not @0) @1) @0) | |
861 | (bit_ior @0 @1)) | |
862 | ||
863 | /* (a | b) & ~(a ^ b) --> a & b */ | |
864 | (simplify | |
865 | (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1))) | |
866 | (bit_and @0 @1)) | |
867 | ||
868 | /* a | ~(a ^ b) --> a | ~b */ | |
869 | (simplify | |
870 | (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1))) | |
871 | (bit_ior @0 (bit_not @1))) | |
872 | ||
873 | /* (a | b) | (a &^ b) --> a | b */ | |
874 | (for op (bit_and bit_xor) | |
875 | (simplify | |
876 | (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1)) | |
877 | @2)) | |
878 | ||
879 | /* (a & b) | ~(a ^ b) --> ~(a ^ b) */ | |
880 | (simplify | |
881 | (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1))) | |
882 | @2) | |
883 | ||
884 | /* ~(~a & b) --> a | ~b */ | |
885 | (simplify | |
886 | (bit_not (bit_and:cs (bit_not @0) @1)) | |
887 | (bit_ior @0 (bit_not @1))) | |
888 | ||
889 | /* ~(~a | b) --> a & ~b */ | |
890 | (simplify | |
891 | (bit_not (bit_ior:cs (bit_not @0) @1)) | |
892 | (bit_and @0 (bit_not @1))) | |
893 | ||
894 | /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */ | |
895 | #if GIMPLE | |
896 | (simplify | |
897 | (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1) | |
898 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
899 | && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) | |
900 | (bit_xor @0 @1))) | |
901 | #endif | |
902 | ||
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                      @3, @4, @1, ERROR_MARK, NULL_TREE,
                                      NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, bitop, @3,
                                      @4, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (op (convert:utype { pmop[0]; })
                          (convert:utype { pmop[1]; }))
                      (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                     bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (negate (convert:utype { pmop[0]; }))
                      (convert:utype @1)))))))

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y.  */
(simplify
 (bit_not (minus (bit_not @0) @1))
 (plus @0 @1))
(simplify
 (bit_not (plus:c (bit_not @0) @1))
 (minus @0 @1))

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))
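
/* Both sides round x up to an even value: odd x becomes x + 1 and even
   x is unchanged; the masked form is the canonical one.  */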
1034 | ||
1035 | /* x & ~(x & y) -> x & ~y */ | |
1036 | /* x | ~(x | y) -> x | ~y */ | |
1037 | (for bitop (bit_and bit_ior) | |
1038 | (simplify | |
1039 | (bitop:c @0 (bit_not (bitop:cs @0 @1))) | |
1040 | (bitop @0 (bit_not @1)))) | |
1041 | ||
1042 | /* (~x & y) | ~(x | y) -> ~x */ | |
1043 | (simplify | |
1044 | (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1))) | |
1045 | @2) | |
1046 | ||
1047 | /* (x | y) ^ (x | ~y) -> ~x */ | |
1048 | (simplify | |
1049 | (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1))) | |
1050 | (bit_not @0)) | |
1051 | ||
1052 | /* (x & y) | ~(x | y) -> ~(x ^ y) */ | |
1053 | (simplify | |
1054 | (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) | |
1055 | (bit_not (bit_xor @0 @1))) | |
1056 | ||
1057 | /* (~x | y) ^ (x ^ y) -> x | ~y */ | |
1058 | (simplify | |
1059 | (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1)) | |
1060 | (bit_ior @0 (bit_not @1))) | |
1061 | ||
1062 | /* (x ^ y) | ~(x | y) -> ~(x & y) */ | |
1063 | (simplify | |
1064 | (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) | |
1065 | (bit_not (bit_and @0 @1))) | |
1066 | ||
1067 | /* (x | y) & ~x -> y & ~x */ | |
1068 | /* (x & y) | ~x -> y | ~x */ | |
1069 | (for bitop (bit_and bit_ior) | |
1070 | rbitop (bit_ior bit_and) | |
1071 | (simplify | |
1072 | (bitop:c (rbitop:c @0 @1) (bit_not@2 @0)) | |
1073 | (bitop @1 @2))) | |
1074 | ||
1075 | /* (x & y) ^ (x | y) -> x ^ y */ | |
1076 | (simplify | |
1077 | (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1)) | |
1078 | (bit_xor @0 @1)) | |
1079 | ||
1080 | /* (x ^ y) ^ (x | y) -> x & y */ | |
1081 | (simplify | |
1082 | (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1)) | |
1083 | (bit_and @0 @1)) | |
1084 | ||
1085 | /* (x & y) + (x ^ y) -> x | y */ | |
1086 | /* (x & y) | (x ^ y) -> x | y */ | |
1087 | /* (x & y) ^ (x ^ y) -> x | y */ | |
1088 | (for op (plus bit_ior bit_xor) | |
1089 | (simplify | |
1090 | (op:c (bit_and @0 @1) (bit_xor @0 @1)) | |
1091 | (bit_ior @0 @1))) | |
1092 | ||
1093 | /* (x & y) + (x | y) -> x + y */ | |
1094 | (simplify | |
1095 | (plus:c (bit_and @0 @1) (bit_ior @0 @1)) | |
1096 | (plus @0 @1)) | |
1097 | ||
1098 | /* (x + y) - (x | y) -> x & y */ | |
1099 | (simplify | |
1100 | (minus (plus @0 @1) (bit_ior @0 @1)) | |
1101 | (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) | |
1102 | && !TYPE_SATURATING (type)) | |
1103 | (bit_and @0 @1))) | |
1104 | ||
1105 | /* (x + y) - (x & y) -> x | y */ | |
1106 | (simplify | |
1107 | (minus (plus @0 @1) (bit_and @0 @1)) | |
1108 | (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) | |
1109 | && !TYPE_SATURATING (type)) | |
1110 | (bit_ior @0 @1))) | |
1111 | ||
1112 | /* (x | y) - (x ^ y) -> x & y */ | |
1113 | (simplify | |
1114 | (minus (bit_ior @0 @1) (bit_xor @0 @1)) | |
1115 | (bit_and @0 @1)) | |
1116 | ||
1117 | /* (x | y) - (x & y) -> x ^ y */ | |
1118 | (simplify | |
1119 | (minus (bit_ior @0 @1) (bit_and @0 @1)) | |
1120 | (bit_xor @0 @1)) | |
1121 | ||
1122 | /* (x | y) & ~(x & y) -> x ^ y */ | |
1123 | (simplify | |
1124 | (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1))) | |
1125 | (bit_xor @0 @1)) | |
1126 | ||
1127 | /* (x | y) & (~x ^ y) -> x & y */ | |
1128 | (simplify | |
1129 | (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0))) | |
1130 | (bit_and @0 @1)) | |
1131 | ||
1132 | /* (~x | y) & (x | ~y) -> ~(x ^ y) */ | |
1133 | (simplify | |
1134 | (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1))) | |
1135 | (bit_not (bit_xor @0 @1))) | |
1136 | ||
1137 | /* (~x | y) ^ (x | ~y) -> x ^ y */ | |
1138 | (simplify | |
1139 | (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1))) | |
1140 | (bit_xor @0 @1)) | |
1141 | ||
1142 | /* ~x & ~y -> ~(x | y) | |
1143 | ~x | ~y -> ~(x & y) */ | |
1144 | (for op (bit_and bit_ior) | |
1145 | rop (bit_ior bit_and) | |
1146 | (simplify | |
1147 | (op (convert1? (bit_not @0)) (convert2? (bit_not @1))) | |
1148 | (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) | |
1149 | && element_precision (type) <= element_precision (TREE_TYPE (@1))) | |
1150 | (bit_not (rop (convert @0) (convert @1)))))) | |
1151 | ||
1152 | /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing | |
1153 | with a constant, and the two constants have no bits in common, | |
1154 | we should treat this as a BIT_IOR_EXPR since this may produce more | |
1155 | simplifications. */ | |
1156 | (for op (bit_xor plus) | |
1157 | (simplify | |
1158 | (op (convert1? (bit_and@4 @0 INTEGER_CST@1)) | |
1159 | (convert2? (bit_and@5 @2 INTEGER_CST@3))) | |
1160 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) | |
1161 | && tree_nop_conversion_p (type, TREE_TYPE (@2)) | |
1162 | && (wi::to_wide (@1) & wi::to_wide (@3)) == 0) | |
1163 | (bit_ior (convert @4) (convert @5))))) | |
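
/* E.g. (x & 0xf0) + (y & 0x0f) is treated as (x & 0xf0) | (y & 0x0f):
   because the masks 0xf0 and 0x0f share no bits, no carries can cross
   between the two operands, so plus, xor and ior all coincide.  */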
1164 | ||
1165 | /* (X | Y) ^ X -> Y & ~ X*/ | |
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

/* Convert abs (abs (X)) into abs (X).
   Also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X) where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert@2 @0) (convert?@3 @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)
           /* In GIMPLE, getting rid of 2 conversions for one new results
              in smaller IL.  */
           || (GIMPLE
               && TREE_CODE (@1) != INTEGER_CST
               && tree_nop_conversion_p (type, TREE_TYPE (@0))
               && single_use (@2)
               && single_use (@3))))
   (convert (bitop @0 (convert @1)))))
 /* In GIMPLE, getting rid of 2 conversions for one new results
    in smaller IL.  */
 (simplify
  (convert (bitop:cs@2 (nop_convert:s @0) @1))
  (if (GIMPLE
       && TREE_CODE (@1) != INTEGER_CST
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && types_match (type, @0))
   (bitop @0 (convert @1)))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
1436 | /* X | !X and X ^ !X -> 1, if X is truth-valued. */ | |
1437 | (for op (bit_ior bit_xor) | |
1438 | (simplify | |
1439 | (op:c truth_valued_p@0 (logical_inverted_value @0)) | |
1440 | { constant_boolean_node (true, type); })) | |
1441 | /* X ==/!= !X is false/true. */ | |
1442 | (for op (eq ne) | |
1443 | (simplify | |
1444 | (op:c truth_valued_p@0 (logical_inverted_value @0)) | |
1445 | { constant_boolean_node (op == NE_EXPR ? true : false, type); })) | |
1446 | ||
1447 | /* ~~x -> x */ | |
1448 | (simplify | |
1449 | (bit_not (bit_not @0)) | |
1450 | @0) | |
1451 | ||
1452 | /* Convert ~ (-A) to A - 1. */ | |
1453 | (simplify | |
1454 | (bit_not (convert? (negate @0))) | |
1455 | (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) | |
1456 | || !TYPE_UNSIGNED (TREE_TYPE (@0))) | |
1457 | (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) | |
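/* In two's complement -a == ~a + 1, hence ~(-a) == a - 1; e.g.
   (hypothetical) for int a == 7, ~(-a) yields 6.  */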
1458 | ||
1459 | /* Convert - (~A) to A + 1. */ | |
1460 | (simplify | |
1461 | (negate (nop_convert? (bit_not @0))) | |
1462 | (plus (view_convert @0) { build_each_one_cst (type); })) | |
1463 | ||
1464 | /* Convert ~ (A - 1) or ~ (A + -1) to -A. */ | |
1465 | (simplify | |
1466 | (bit_not (convert? (minus @0 integer_each_onep))) | |
1467 | (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) | |
1468 | || !TYPE_UNSIGNED (TREE_TYPE (@0))) | |
1469 | (convert (negate @0)))) | |
1470 | (simplify | |
1471 | (bit_not (convert? (plus @0 integer_all_onesp))) | |
1472 | (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) | |
1473 | || !TYPE_UNSIGNED (TREE_TYPE (@0))) | |
1474 | (convert (negate @0)))) | |
1475 | ||
1476 | /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ | |
1477 | (simplify | |
1478 | (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) | |
1479 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1480 | (convert (bit_xor @0 (bit_not @1))))) | |
1481 | (simplify | |
1482 | (bit_not (convert? (bit_xor:c (bit_not @0) @1))) | |
1483 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1484 | (convert (bit_xor @0 @1)))) | |
1485 | ||
1486 | /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ | |
1487 | (simplify | |
1488 | (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1) | |
1489 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1490 | (bit_not (bit_xor (view_convert @0) @1)))) | |
1491 | ||
1492 | /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ | |
1493 | (simplify | |
1494 | (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) | |
1495 | (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) | |
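/* This is the classic branch-free bit merge; a sketch (hypothetical):

     unsigned merge (unsigned x, unsigned y, unsigned m)
     {
       return (x & ~m) | (y & m);
     }

   selects y where m has 1-bits and x elsewhere, and is rewritten to
   ((x ^ y) & m) ^ x, trading four bit operations for three.  */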
1496 | ||
1497 | /* Fold A - (A & B) into ~B & A. */ | |
1498 | (simplify | |
1499 | (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) | |
1500 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) | |
1501 | && tree_nop_conversion_p (type, TREE_TYPE (@1))) | |
1502 | (convert (bit_and (bit_not @1) @0)))) | |
1503 | ||
1504 | /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */ | |
1505 | (for cmp (gt lt ge le) | |
1506 | (simplify | |
1507 | (mult (convert (cmp @0 @1)) @2) | |
1508 | (if (GIMPLE || !TREE_SIDE_EFFECTS (@2)) | |
1509 | (cond (cmp @0 @1) @2 { build_zero_cst (type); })))) | |
1510 | ||
1511 | /* For integral types with undefined overflow and C != 0 fold | |
1512 | x * C EQ/NE y * C into x EQ/NE y. */ | |
1513 | (for cmp (eq ne) | |
1514 | (simplify | |
1515 | (cmp (mult:c @0 @1) (mult:c @2 @1)) | |
1516 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
1517 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1518 | && tree_expr_nonzero_p (@1)) | |
1519 | (cmp @0 @2)))) | |
1520 | ||
1521 | /* For integral types with wrapping overflow and C odd fold | |
1522 | x * C EQ/NE y * C into x EQ/NE y. */ | |
1523 | (for cmp (eq ne) | |
1524 | (simplify | |
1525 | (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) | |
1526 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
1527 | && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) | |
1528 | && (TREE_INT_CST_LOW (@1) & 1) != 0) | |
1529 | (cmp @0 @2)))) | |
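/* An odd C is invertible modulo 2**prec, so multiplying by it is a
   bijection on a wrapping type and can be cancelled on both sides of
   ==/!=; e.g. (hypothetical) for unsigned x and y, x * 3 == y * 3
   holds exactly when x == y.  */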
1530 | ||
1531 | /* For integral types with undefined overflow and C != 0 fold | |
1532 | x * C RELOP y * C into: | |
1533 | ||
1534 | x RELOP y for nonnegative C | |
1535 | y RELOP x for negative C */ | |
1536 | (for cmp (lt gt le ge) | |
1537 | (simplify | |
1538 | (cmp (mult:c @0 @1) (mult:c @2 @1)) | |
1539 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
1540 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
1541 | (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) | |
1542 | (cmp @0 @2) | |
1543 | (if (TREE_CODE (@1) == INTEGER_CST | |
1544 | && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) | |
1545 | (cmp @2 @0)))))) | |
1546 | ||
1547 | /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */ | |
1548 | (for cmp (le gt) | |
1549 | icmp (gt le) | |
1550 | (simplify | |
1551 | (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) | |
1552 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1553 | && TYPE_UNSIGNED (TREE_TYPE (@0)) | |
1554 | && TYPE_PRECISION (TREE_TYPE (@0)) > 1 | |
1555 | && (wi::to_wide (@2) | |
1556 | == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) | |
1557 | (with { tree stype = signed_type_for (TREE_TYPE (@0)); } | |
1558 | (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) | |
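/* E.g. (hypothetical) for unsigned x, x - 1 <= INT_MAX - 1 holds
   exactly for x in [1, INT_MAX] (x == 0 wraps to UINT_MAX), which is
   precisely the range where (int) x > 0.  */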
1559 | ||
1560 | /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ | |
1561 | (for cmp (simple_comparison) | |
1562 | (simplify | |
1563 | (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2))) | |
1564 | (if (element_precision (@3) >= element_precision (@0) | |
1565 | && types_match (@0, @1)) | |
1566 | (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) | |
1567 | (if (!TYPE_UNSIGNED (TREE_TYPE (@3))) | |
1568 | (cmp @1 @0) | |
1569 | (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1)) | |
1570 | (with | |
1571 | { | |
1572 | tree utype = unsigned_type_for (TREE_TYPE (@0)); | |
1573 | } | |
1574 | (cmp (convert:utype @1) (convert:utype @0))))) | |
1575 | (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2)))) | |
1576 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3))) | |
1577 | (cmp @0 @1) | |
1578 | (with | |
1579 | { | |
1580 | tree utype = unsigned_type_for (TREE_TYPE (@0)); | |
1581 | } | |
1582 | (cmp (convert:utype @0) (convert:utype @1))))))))) | |
1583 | ||
1584 | /* X / C1 op C2 into a simple range test. */ | |
1585 | (for cmp (simple_comparison) | |
1586 | (simplify | |
1587 | (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) | |
1588 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1589 | && integer_nonzerop (@1) | |
1590 | && !TREE_OVERFLOW (@1) | |
1591 | && !TREE_OVERFLOW (@2)) | |
1592 | (with { tree lo, hi; bool neg_overflow; | |
1593 | enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, | |
1594 | &neg_overflow); } | |
1595 | (switch | |
1596 | (if (code == LT_EXPR || code == GE_EXPR) | |
1597 | (if (TREE_OVERFLOW (lo)) | |
1598 | { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } | |
1599 | (if (code == LT_EXPR) | |
1600 | (lt @0 { lo; }) | |
1601 | (ge @0 { lo; })))) | |
1602 | (if (code == LE_EXPR || code == GT_EXPR) | |
1603 | (if (TREE_OVERFLOW (hi)) | |
1604 | { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } | |
1605 | (if (code == LE_EXPR) | |
1606 | (le @0 { hi; }) | |
1607 | (gt @0 { hi; })))) | |
1608 | (if (!lo && !hi) | |
1609 | { build_int_cst (type, code == NE_EXPR); }) | |
1610 | (if (code == EQ_EXPR && !hi) | |
1611 | (ge @0 { lo; })) | |
1612 | (if (code == EQ_EXPR && !lo) | |
1613 | (le @0 { hi; })) | |
1614 | (if (code == NE_EXPR && !hi) | |
1615 | (lt @0 { lo; })) | |
1616 | (if (code == NE_EXPR && !lo) | |
1617 | (gt @0 { hi; })) | |
1618 | (if (GENERIC) | |
1619 | { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, | |
1620 | lo, hi); }) | |
1621 | (with | |
1622 | { | |
1623 | tree etype = range_check_type (TREE_TYPE (@0)); | |
1624 | if (etype) | |
1625 | { | |
1626 | hi = fold_convert (etype, hi); | |
1627 | lo = fold_convert (etype, lo); | |
1628 | hi = const_binop (MINUS_EXPR, etype, hi, lo); | |
1629 | } | |
1630 | } | |
1631 | (if (etype && hi && !TREE_OVERFLOW (hi)) | |
1632 | (if (code == EQ_EXPR) | |
1633 | (le (minus (convert:etype @0) { lo; }) { hi; }) | |
1634 | (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) | |
1635 | ||
1636 | /* X + Z < Y + Z is the same as X < Y when there is no overflow. */ | |
1637 | (for op (lt le ge gt) | |
1638 | (simplify | |
1639 | (op (plus:c @0 @2) (plus:c @1 @2)) | |
1640 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1641 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
1642 | (op @0 @1)))) | |
1643 | /* For equality and subtraction, this is also true with wrapping overflow. */ | |
1644 | (for op (eq ne minus) | |
1645 | (simplify | |
1646 | (op (plus:c @0 @2) (plus:c @1 @2)) | |
1647 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1648 | && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1649 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) | |
1650 | (op @0 @1)))) | |
1651 | ||
1652 | /* X - Z < Y - Z is the same as X < Y when there is no overflow. */ | |
1653 | (for op (lt le ge gt) | |
1654 | (simplify | |
1655 | (op (minus @0 @2) (minus @1 @2)) | |
1656 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1657 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
1658 | (op @0 @1)))) | |
1659 | /* For equality and subtraction, this is also true with wrapping overflow. */ | |
1660 | (for op (eq ne minus) | |
1661 | (simplify | |
1662 | (op (minus @0 @2) (minus @1 @2)) | |
1663 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1664 | && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1665 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) | |
1666 | (op @0 @1)))) | |
1667 | /* And for pointers... */ | |
1668 | (for op (simple_comparison) | |
1669 | (simplify | |
1670 | (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) | |
1671 | (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) | |
1672 | (op @0 @1)))) | |
1673 | (simplify | |
1674 | (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) | |
1675 | (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) | |
1676 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) | |
1677 | (pointer_diff @0 @1))) | |
1678 | ||
1679 | /* Z - X < Z - Y is the same as Y < X when there is no overflow. */ | |
1680 | (for op (lt le ge gt) | |
1681 | (simplify | |
1682 | (op (minus @2 @0) (minus @2 @1)) | |
1683 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1684 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
1685 | (op @1 @0)))) | |
1686 | /* For equality and subtraction, this is also true with wrapping overflow. */ | |
1687 | (for op (eq ne minus) | |
1688 | (simplify | |
1689 | (op (minus @2 @0) (minus @2 @1)) | |
1690 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1691 | && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1692 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) | |
1693 | (op @1 @0)))) | |
1694 | /* And for pointers... */ | |
1695 | (for op (simple_comparison) | |
1696 | (simplify | |
1697 | (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) | |
1698 | (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) | |
1699 | (op @1 @0)))) | |
1700 | (simplify | |
1701 | (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) | |
1702 | (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) | |
1703 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) | |
1704 | (pointer_diff @1 @0))) | |
1705 | ||
1706 | /* X + Y < Y is the same as X < 0 when there is no overflow. */ | |
1707 | (for op (lt le gt ge) | |
1708 | (simplify | |
1709 | (op:c (plus:c@2 @0 @1) @1) | |
1710 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1711 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1712 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) | |
1713 | && (CONSTANT_CLASS_P (@0) || single_use (@2))) | |
1714 | (op @0 { build_zero_cst (TREE_TYPE (@0)); })))) | |
1715 | /* For equality, this is also true with wrapping overflow. */ | |
1716 | (for op (eq ne) | |
1717 | (simplify | |
1718 | (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1)) | |
1719 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1720 | && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1721 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) | |
1722 | && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3))) | |
1723 | && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2)) | |
1724 | && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) | |
1725 | (op @0 { build_zero_cst (TREE_TYPE (@0)); }))) | |
1726 | (simplify | |
1727 | (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0)) | |
1728 | (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) | |
1729 | && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) | |
1730 | && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3)))) | |
1731 | (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) | |
1732 | ||
1733 | /* X - Y < X is the same as Y > 0 when there is no overflow. | |
1734 | For equality, this is also true with wrapping overflow. */ | |
1735 | (for op (simple_comparison) | |
1736 | (simplify | |
1737 | (op:c @0 (minus@2 @0 @1)) | |
1738 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
1739 | && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
1740 | || ((op == EQ_EXPR || op == NE_EXPR) | |
1741 | && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) | |
1742 | && (CONSTANT_CLASS_P (@1) || single_use (@2))) | |
1743 | (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) | |
1744 | ||
1745 | /* Transform: | |
1746 | (X / Y) == 0 -> X < Y if X, Y are unsigned. | |
1747 | (X / Y) != 0 -> X >= Y if X, Y are unsigned. */ | |
1748 | (for cmp (eq ne) | |
1749 | ocmp (lt ge) | |
1750 | (simplify | |
1751 | (cmp (trunc_div @0 @1) integer_zerop) | |
1752 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) | |
1753 | /* Complex ==/!= is allowed, but not </>=. */ | |
1754 | && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE | |
1755 | && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0)))) | |
1756 | (ocmp @0 @1)))) | |
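/* E.g. (hypothetical): for unsigned operands, a guard like
   if (len / size == 0) becomes if (len < size), saving the division.  */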
1757 | ||
1758 | /* X == C - X can never be true if C is odd. */ | |
1759 | (for cmp (eq ne) | |
1760 | (simplify | |
1761 | (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0)))) | |
1762 | (if (TREE_INT_CST_LOW (@1) & 1) | |
1763 | { constant_boolean_node (cmp == NE_EXPR, type); }))) | |
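/* If x == c - x then 2 * x == c (mod 2**prec), but 2 * x is always
   even, so the equality is unsatisfiable for odd c; e.g. (hypothetical)
   x == 5 - x folds to false outright.  */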
1764 | ||
1765 | /* Arguments on which one can call get_nonzero_bits to get the bits | |
1766 | possibly set. */ | |
1767 | (match with_possible_nonzero_bits | |
1768 | INTEGER_CST@0) | |
1769 | (match with_possible_nonzero_bits | |
1770 | SSA_NAME@0 | |
1771 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))))) | |
1772 | /* Slightly extended version, do not make it recursive to keep it cheap. */ | |
1773 | (match (with_possible_nonzero_bits2 @0) | |
1774 | with_possible_nonzero_bits@0) | |
1775 | (match (with_possible_nonzero_bits2 @0) | |
1776 | (bit_and:c with_possible_nonzero_bits@0 @2)) | |
1777 | ||
1778 | /* Same for bits that are known to be set, but we do not have | |
1779 | an equivalent to get_nonzero_bits yet. */ | |
1780 | (match (with_certain_nonzero_bits2 @0) | |
1781 | INTEGER_CST@0) | |
1782 | (match (with_certain_nonzero_bits2 @0) | |
1783 | (bit_ior @1 INTEGER_CST@0)) | |
1784 | ||
1785 | /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */ | |
1786 | (for cmp (eq ne) | |
1787 | (simplify | |
1788 | (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1)) | |
1789 | (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0) | |
1790 | { constant_boolean_node (cmp == NE_EXPR, type); }))) | |
1791 | ||
1792 | /* ((X inner_op C0) outer_op C1) | |
1793 | With X being a tree where value_range has reasoned certain bits to always be | |
1794 | zero throughout its computed value range, | |
1795 | inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op, | |
1796 | and where zero_mask has 1's for all bits that are sure to be 0 in X | |
1797 | and 0's otherwise: | |
1798 | if (inner_op == '^') C0 &= ~C1; | |
1799 | if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)); | |
1800 | if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)). | |
1801 | */ | |
1802 | (for inner_op (bit_ior bit_xor) | |
1803 | outer_op (bit_xor bit_ior) | |
1804 | (simplify | |
1805 | (outer_op | |
1806 | (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1) | |
1807 | (with | |
1808 | { | |
1809 | bool fail = false; | |
1810 | wide_int zero_mask_not; | |
1811 | wide_int C0; | |
1812 | wide_int cst_emit; | |
1813 | ||
1814 | if (TREE_CODE (@2) == SSA_NAME) | |
1815 | zero_mask_not = get_nonzero_bits (@2); | |
1816 | else | |
1817 | fail = true; | |
1818 | ||
1819 | if (inner_op == BIT_XOR_EXPR) | |
1820 | { | |
1821 | C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1)); | |
1822 | cst_emit = C0 | wi::to_wide (@1); | |
1823 | } | |
1824 | else | |
1825 | { | |
1826 | C0 = wi::to_wide (@0); | |
1827 | cst_emit = C0 ^ wi::to_wide (@1); | |
1828 | } | |
1829 | } | |
1830 | (if (!fail && (C0 & zero_mask_not) == 0) | |
1831 | (outer_op @2 { wide_int_to_tree (type, cst_emit); }) | |
1832 | (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0) | |
1833 | (inner_op @2 { wide_int_to_tree (type, cst_emit); })))))) | |
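/* E.g. (hypothetical): if value_range proves x <= 0xFF, then in
   (x | 0x100) ^ 0x1FF the inner OR only sets a known-zero bit, so it
   acts like XOR and the expression re-associates to x ^ (0x100 ^ 0x1FF),
   i.e. x ^ 0xFF.  */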
1834 | ||
1835 | /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */ | |
1836 | (simplify | |
1837 | (pointer_plus (pointer_plus:s @0 @1) @3) | |
1838 | (pointer_plus @0 (plus @1 @3))) | |
1839 | ||
1840 | /* Pattern match | |
1841 | tem1 = (long) ptr1; | |
1842 | tem2 = (long) ptr2; | |
1843 | tem3 = tem2 - tem1; | |
1844 | tem4 = (unsigned long) tem3; | |
1845 | tem5 = ptr1 + tem4; | |
1846 | and produce | |
1847 | tem5 = ptr2; */ | |
1848 | (simplify | |
1849 | (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0)))) | |
1850 | /* Conditionally look through a sign-changing conversion. */ | |
1851 | (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3)) | |
1852 | && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1))) | |
1853 | || (GENERIC && type == TREE_TYPE (@1)))) | |
1854 | @1)) | |
1855 | (simplify | |
1856 | (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0))) | |
1857 | (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3))) | |
1858 | (convert @1))) | |
1859 | ||
1860 | /* Pattern match | |
1861 | tem = (sizetype) ptr; | |
1862 | tem = tem & algn; | |
1863 | tem = -tem; | |
1864 | ... = ptr p+ tem; | |
1865 | and produce the simpler form, which is easier to analyze with respect | |
1866 | to alignment: ... = ptr & ~algn; */ | |
1867 | (simplify | |
1868 | (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1))) | |
1869 | (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); } | |
1870 | (bit_and @0 { algn; }))) | |
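/* This matches the usual align-down idiom, e.g. (hypothetical)
     tem = (sizetype) ptr & 15;
     ptr2 = ptr p+ -tem;
   which rounds ptr down to a 16-byte boundary and becomes the single
   masking operation ptr & ~15.  */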
1871 | ||
1872 | /* Try folding difference of addresses. */ | |
1873 | (simplify | |
1874 | (minus (convert ADDR_EXPR@0) (convert @1)) | |
1875 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1876 | (with { poly_int64 diff; } | |
1877 | (if (ptr_difference_const (@0, @1, &diff)) | |
1878 | { build_int_cst_type (type, diff); })))) | |
1879 | (simplify | |
1880 | (minus (convert @0) (convert ADDR_EXPR@1)) | |
1881 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1882 | (with { poly_int64 diff; } | |
1883 | (if (ptr_difference_const (@0, @1, &diff)) | |
1884 | { build_int_cst_type (type, diff); })))) | |
1885 | (simplify | |
1886 | (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1)) | |
1887 | (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) | |
1888 | && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) | |
1889 | (with { poly_int64 diff; } | |
1890 | (if (ptr_difference_const (@0, @1, &diff)) | |
1891 | { build_int_cst_type (type, diff); })))) | |
1892 | (simplify | |
1893 | (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1)) | |
1894 | (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) | |
1895 | && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) | |
1896 | (with { poly_int64 diff; } | |
1897 | (if (ptr_difference_const (@0, @1, &diff)) | |
1898 | { build_int_cst_type (type, diff); })))) | |
1899 | ||
1900 | /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */ | |
1901 | (simplify | |
1902 | (convert (pointer_diff @0 INTEGER_CST@1)) | |
1903 | (if (POINTER_TYPE_P (type)) | |
1904 | { build_fold_addr_expr_with_type | |
1905 | (build2 (MEM_REF, char_type_node, @0, | |
1906 | wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))), | |
1907 | type); })) | |
1908 | ||
1909 | /* If arg0 is derived from the address of an object or function, we may | |
1910 | be able to fold this expression using the object or function's | |
1911 | alignment. */ | |
1912 | (simplify | |
1913 | (bit_and (convert? @0) INTEGER_CST@1) | |
1914 | (if (POINTER_TYPE_P (TREE_TYPE (@0)) | |
1915 | && tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
1916 | (with | |
1917 | { | |
1918 | unsigned int align; | |
1919 | unsigned HOST_WIDE_INT bitpos; | |
1920 | get_pointer_alignment_1 (@0, &align, &bitpos); | |
1921 | } | |
1922 | (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT)) | |
1923 | { wide_int_to_tree (type, (wi::to_wide (@1) | |
1924 | & (bitpos / BITS_PER_UNIT))); })))) | |
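/* E.g. (hypothetical): if alignment analysis shows p is 8-byte
   aligned with bit offset 0, then (sizetype) p & 7 folds to constant 0,
   because every bit below the known alignment is known to be zero.  */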
1925 | ||
1926 | (match min_value | |
1927 | INTEGER_CST | |
1928 | (if (INTEGRAL_TYPE_P (type) | |
1929 | && wi::eq_p (wi::to_wide (t), wi::min_value (type))))) | |
1930 | ||
1931 | (match max_value | |
1932 | INTEGER_CST | |
1933 | (if (INTEGRAL_TYPE_P (type) | |
1934 | && wi::eq_p (wi::to_wide (t), wi::max_value (type))))) | |
1935 | ||
1936 | /* x > y && x != XXX_MIN --> x > y | |
1937 | x > y && x == XXX_MIN --> false. */ | |
1938 | (for eqne (eq ne) | |
1939 | (simplify | |
1940 | (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value)) | |
1941 | (switch | |
1942 | (if (eqne == EQ_EXPR) | |
1943 | { constant_boolean_node (false, type); }) | |
1944 | (if (eqne == NE_EXPR) | |
1945 | @2) | |
1946 | ))) | |
1947 | ||
1948 | /* x < y && x != XXX_MAX --> x < y | |
1949 | x < y && x == XXX_MAX --> false. */ | |
1950 | (for eqne (eq ne) | |
1951 | (simplify | |
1952 | (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value)) | |
1953 | (switch | |
1954 | (if (eqne == EQ_EXPR) | |
1955 | { constant_boolean_node (false, type); }) | |
1956 | (if (eqne == NE_EXPR) | |
1957 | @2) | |
1958 | ))) | |
1959 | ||
1960 | /* x <= y && x == XXX_MIN --> x == XXX_MIN. */ | |
1961 | (simplify | |
1962 | (bit_and:c (le:c @0 @1) (eq@2 @0 min_value)) | |
1963 | @2) | |
1964 | ||
1965 | /* x >= y && x == XXX_MAX --> x == XXX_MAX. */ | |
1966 | (simplify | |
1967 | (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value)) | |
1968 | @2) | |
1969 | ||
1970 | /* x > y || x != XXX_MIN --> x != XXX_MIN. */ | |
1971 | (simplify | |
1972 | (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value)) | |
1973 | @2) | |
1974 | ||
1975 | /* x <= y || x != XXX_MIN --> true. */ | |
1976 | (simplify | |
1977 | (bit_ior:c (le:c @0 @1) (ne @0 min_value)) | |
1978 | { constant_boolean_node (true, type); }) | |
1979 | ||
1980 | /* x <= y || x == XXX_MIN --> x <= y. */ | |
1981 | (simplify | |
1982 | (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value)) | |
1983 | @2) | |
1984 | ||
1985 | /* x < y || x != XXX_MAX --> x != XXX_MAX. */ | |
1986 | (simplify | |
1987 | (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value)) | |
1988 | @2) | |
1989 | ||
1990 | /* x >= y || x != XXX_MAX --> true | |
1991 | x >= y || x == XXX_MAX --> x >= y. */ | |
1992 | (for eqne (eq ne) | |
1993 | (simplify | |
1994 | (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value)) | |
1995 | (switch | |
1996 | (if (eqne == EQ_EXPR) | |
1997 | @2) | |
1998 | (if (eqne == NE_EXPR) | |
1999 | { constant_boolean_node (true, type); })))) | |
2000 | ||
2001 | /* Convert (X == CST1) && (X OP2 CST2) to a known value | |
2002 | based on CST1 OP2 CST2. Similarly for (X != CST1). */ | |
2003 | ||
2004 | (for code1 (eq ne) | |
2005 | (for code2 (eq ne lt gt le ge) | |
2006 | (simplify | |
2007 | (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) | |
2008 | (with | |
2009 | { | |
2010 | int cmp = tree_int_cst_compare (@1, @2); | |
2011 | bool val; | |
2012 | switch (code2) | |
2013 | { | |
2014 | case EQ_EXPR: val = (cmp == 0); break; | |
2015 | case NE_EXPR: val = (cmp != 0); break; | |
2016 | case LT_EXPR: val = (cmp < 0); break; | |
2017 | case GT_EXPR: val = (cmp > 0); break; | |
2018 | case LE_EXPR: val = (cmp <= 0); break; | |
2019 | case GE_EXPR: val = (cmp >= 0); break; | |
2020 | default: gcc_unreachable (); | |
2021 | } | |
2022 | } | |
2023 | (switch | |
2024 | (if (code1 == EQ_EXPR && val) @3) | |
2025 | (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); }) | |
2026 | (if (code1 == NE_EXPR && !val) @4)))))) | |
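/* E.g. (hypothetical): x == 3 && x > 2 keeps just x == 3 (3 > 2 makes
   the second test redundant), while x == 3 && x > 4 folds to false.  */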
2027 | ||
2028 | /* Convert (X OP1 CST1) && (X OP2 CST2). */ | |
2029 | ||
2030 | (for code1 (lt le gt ge) | |
2031 | (for code2 (lt le gt ge) | |
2032 | (simplify | |
2033 | (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2)) | |
2034 | (with | |
2035 | { | |
2036 | int cmp = tree_int_cst_compare (@1, @2); | |
2037 | } | |
2038 | (switch | |
2039 | /* Choose the more restrictive of two < or <= comparisons. */ | |
2040 | (if ((code1 == LT_EXPR || code1 == LE_EXPR) | |
2041 | && (code2 == LT_EXPR || code2 == LE_EXPR)) | |
2042 | (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR)) | |
2043 | @3 | |
2044 | @4)) | |
2045 | /* Likewise choose the more restrictive of two > or >= comparisons. */ | |
2046 | (if ((code1 == GT_EXPR || code1 == GE_EXPR) | |
2047 | && (code2 == GT_EXPR || code2 == GE_EXPR)) | |
2048 | (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR)) | |
2049 | @3 | |
2050 | @4)) | |
2051 | /* Check for singleton ranges. */ | |
2052 | (if (cmp == 0 | |
2053 | && ((code1 == LE_EXPR && code2 == GE_EXPR) | |
2054 | || (code1 == GE_EXPR && code2 == LE_EXPR))) | |
2055 | (eq @0 @1)) | |
2056 | /* Check for disjoint ranges. */ | |
2057 | (if (cmp <= 0 | |
2058 | && (code1 == LT_EXPR || code1 == LE_EXPR) | |
2059 | && (code2 == GT_EXPR || code2 == GE_EXPR)) | |
2060 | { constant_boolean_node (false, type); }) | |
2061 | (if (cmp >= 0 | |
2062 | && (code1 == GT_EXPR || code1 == GE_EXPR) | |
2063 | && (code2 == LT_EXPR || code2 == LE_EXPR)) | |
2064 | { constant_boolean_node (false, type); }) | |
2065 | ))))) | |
2066 | ||
2067 | /* Convert (X == CST1) || (X OP2 CST2) to a known value | |
2068 | based on CST1 OP2 CST2. Similarly for (X != CST1). */ | |
2069 | ||
2070 | (for code1 (eq ne) | |
2071 | (for code2 (eq ne lt gt le ge) | |
2072 | (simplify | |
2073 | (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) | |
2074 | (with | |
2075 | { | |
2076 | int cmp = tree_int_cst_compare (@1, @2); | |
2077 | bool val; | |
2078 | switch (code2) | |
2079 | { | |
2080 | case EQ_EXPR: val = (cmp == 0); break; | |
2081 | case NE_EXPR: val = (cmp != 0); break; | |
2082 | case LT_EXPR: val = (cmp < 0); break; | |
2083 | case GT_EXPR: val = (cmp > 0); break; | |
2084 | case LE_EXPR: val = (cmp <= 0); break; | |
2085 | case GE_EXPR: val = (cmp >= 0); break; | |
2086 | default: gcc_unreachable (); | |
2087 | } | |
2088 | } | |
2089 | (switch | |
2090 | (if (code1 == EQ_EXPR && val) @4) | |
2091 | (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); }) | |
2092 | (if (code1 == NE_EXPR && !val) @3)))))) | |
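/* E.g. (hypothetical): x == 3 || x < 4 simplifies to x < 4, and
   x != 3 || x < 4 is always true.  */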
2093 | ||
2094 | /* Convert (X OP1 CST1) || (X OP2 CST2). */ | |
2095 | ||
2096 | (for code1 (lt le gt ge) | |
2097 | (for code2 (lt le gt ge) | |
2098 | (simplify | |
2099 | (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2)) | |
2100 | (with | |
2101 | { | |
2102 | int cmp = tree_int_cst_compare (@1, @2); | |
2103 | } | |
2104 | (switch | |
2105 | /* Choose the more restrictive of two < or <= comparisons. */ | |
2106 | (if ((code1 == LT_EXPR || code1 == LE_EXPR) | |
2107 | && (code2 == LT_EXPR || code2 == LE_EXPR)) | |
2108 | (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR)) | |
2109 | @4 | |
2110 | @3)) | |
2111 | /* Likewise choose the more restrictive of two > or >= comparisons. */ | |
2112 | (if ((code1 == GT_EXPR || code1 == GE_EXPR) | |
2113 | && (code2 == GT_EXPR || code2 == GE_EXPR)) | |
2114 | (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR)) | |
2115 | @4 | |
2116 | @3)) | |
2117 | /* Check for singleton ranges. */ | |
2118 | (if (cmp == 0 | |
2119 | && ((code1 == LT_EXPR && code2 == GT_EXPR) | |
2120 | || (code1 == GT_EXPR && code2 == LT_EXPR))) | |
2121 | (ne @0 @2)) | |
2122 | /* Check for disjoint ranges. */ | |
2123 | (if (cmp >= 0 | |
2124 | && (code1 == LT_EXPR || code1 == LE_EXPR) | |
2125 | && (code2 == GT_EXPR || code2 == GE_EXPR)) | |
2126 | { constant_boolean_node (true, type); }) | |
2127 | (if (cmp <= 0 | |
2128 | && (code1 == GT_EXPR || code1 == GE_EXPR) | |
2129 | && (code2 == LT_EXPR || code2 == LE_EXPR)) | |
2130 | { constant_boolean_node (true, type); }) | |
2131 | ))))) | |
2132 | ||
2133 | /* We can't reassociate at all for saturating types. */ | |
2134 | (if (!TYPE_SATURATING (type)) | |
2135 | ||
2136 | /* Contract negates. */ | |
2137 | /* A + (-B) -> A - B */ | |
2138 | (simplify | |
2139 | (plus:c @0 (convert? (negate @1))) | |
2140 | /* Apply STRIP_NOPS on the negate. */ | |
2141 | (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) | |
2142 | && !TYPE_OVERFLOW_SANITIZED (type)) | |
2143 | (with | |
2144 | { | |
2145 | tree t1 = type; | |
2146 | if (INTEGRAL_TYPE_P (type) | |
2147 | && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) | |
2148 | t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); | |
2149 | } | |
2150 | (convert (minus (convert:t1 @0) (convert:t1 @1)))))) | |
2151 | /* A - (-B) -> A + B */ | |
2152 | (simplify | |
2153 | (minus @0 (convert? (negate @1))) | |
2154 | (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) | |
2155 | && !TYPE_OVERFLOW_SANITIZED (type)) | |
2156 | (with | |
2157 | { | |
2158 | tree t1 = type; | |
2159 | if (INTEGRAL_TYPE_P (type) | |
2160 | && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) | |
2161 | t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); | |
2162 | } | |
2163 | (convert (plus (convert:t1 @0) (convert:t1 @1)))))) | |
2164 | /* -(T)(-A) -> (T)A | |
2165 | Sign-extension is ok except for INT_MIN, which thankfully cannot | |
2166 | happen without overflow. */ | |
2167 | (simplify | |
2168 | (negate (convert (negate @1))) | |
2169 | (if (INTEGRAL_TYPE_P (type) | |
2170 | && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) | |
2171 | || (!TYPE_UNSIGNED (TREE_TYPE (@1)) | |
2172 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) | |
2173 | && !TYPE_OVERFLOW_SANITIZED (type) | |
2174 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) | |
2175 | (convert @1))) | |
2176 | (simplify | |
2177 | (negate (convert negate_expr_p@1)) | |
2178 | (if (SCALAR_FLOAT_TYPE_P (type) | |
2179 | && ((DECIMAL_FLOAT_TYPE_P (type) | |
2180 | == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)) | |
2181 | && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1))) | |
2182 | || !HONOR_SIGN_DEPENDENT_ROUNDING (type))) | |
2183 | (convert (negate @1)))) | |
2184 | (simplify | |
2185 | (negate (nop_convert? (negate @1))) | |
2186 | (if (!TYPE_OVERFLOW_SANITIZED (type) | |
2187 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) | |
2188 | (view_convert @1))) | |
2189 | ||
2190 | /* We can't reassociate floating-point unless -fassociative-math is set, | |
2191 | nor fixed-point plus or minus because of saturation to +-Inf. */ | |
2192 | (if ((!FLOAT_TYPE_P (type) || flag_associative_math) | |
2193 | && !FIXED_POINT_TYPE_P (type)) | |
2194 | ||
2195 | /* Match patterns that allow contracting a plus-minus pair | |
2196 | irrespective of overflow issues. */ | |
2197 | /* (A +- B) - A -> +- B */ | |
2198 | /* (A +- B) -+ B -> A */ | |
2199 | /* A - (A +- B) -> -+ B */ | |
2200 | /* A +- (B -+ A) -> +- B */ | |
2201 | (simplify | |
2202 | (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0) | |
2203 | (view_convert @1)) | |
2204 | (simplify | |
2205 | (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0) | |
2206 | (if (!ANY_INTEGRAL_TYPE_P (type) | |
2207 | || TYPE_OVERFLOW_WRAPS (type)) | |
2208 | (negate (view_convert @1)) | |
2209 | (view_convert (negate @1)))) | |
2210 | (simplify | |
2211 | (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1) | |
2212 | (view_convert @0)) | |
2213 | (simplify | |
2214 | (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1))) | |
2215 | (if (!ANY_INTEGRAL_TYPE_P (type) | |
2216 | || TYPE_OVERFLOW_WRAPS (type)) | |
2217 | (negate (view_convert @1)) | |
2218 | (view_convert (negate @1)))) | |
2219 | (simplify | |
2220 | (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1))) | |
2221 | (view_convert @1)) | |
2222 | /* (A +- B) + (C - A) -> C +- B */ | |
2223 | /* (A + B) - (A - C) -> B + C */ | |
2224 | /* More cases are handled with comparisons. */ | |
2225 | (simplify | |
2226 | (plus:c (plus:c @0 @1) (minus @2 @0)) | |
2227 | (plus @2 @1)) | |
2228 | (simplify | |
2229 | (plus:c (minus @0 @1) (minus @2 @0)) | |
2230 | (minus @2 @1)) | |
2231 | (simplify | |
2232 | (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0)) | |
2233 | (if (TYPE_OVERFLOW_UNDEFINED (type) | |
2234 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))) | |
2235 | (pointer_diff @2 @1))) | |
2236 | (simplify | |
2237 | (minus (plus:c @0 @1) (minus @0 @2)) | |
2238 | (plus @1 @2)) | |
2239 | ||
2240 | /* (A +- CST1) +- CST2 -> A + CST3 | |
2241 | Use view_convert because it is safe for vectors and equivalent for | |
2242 | scalars. */ | |
2243 | (for outer_op (plus minus) | |
2244 | (for inner_op (plus minus) | |
2245 | neg_inner_op (minus plus) | |
2246 | (simplify | |
2247 | (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1)) | |
2248 | CONSTANT_CLASS_P@2) | |
2249 | /* If one of the types wraps, use that one. */ | |
2250 | (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type)) | |
2251 | /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse | |
2252 | forever if something doesn't simplify into a constant. */ | |
2253 | (if (!CONSTANT_CLASS_P (@0)) | |
2254 | (if (outer_op == PLUS_EXPR) | |
2255 | (plus (view_convert @0) (inner_op @2 (view_convert @1))) | |
2256 | (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))) | |
2257 | (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2258 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) | |
2259 | (if (outer_op == PLUS_EXPR) | |
2260 | (view_convert (plus @0 (inner_op (view_convert @2) @1))) | |
2261 | (view_convert (minus @0 (neg_inner_op (view_convert @2) @1)))) | |
2262 | /* If the constant operation overflows we cannot do the transform | |
2263 | directly as we would introduce undefined overflow, for example | |
2264 | with (a - 1) + INT_MIN. */ | |
2265 | (if (types_match (type, @0)) | |
2266 | (with { tree cst = const_binop (outer_op == inner_op | |
2267 | ? PLUS_EXPR : MINUS_EXPR, | |
2268 | type, @1, @2); } | |
2269 | (if (cst && !TREE_OVERFLOW (cst)) | |
2270 | (inner_op @0 { cst; } ) | |
2271 | /* X+INT_MAX+1 is X-INT_MIN. */ | |
2272 | (if (INTEGRAL_TYPE_P (type) && cst | |
2273 | && wi::to_wide (cst) == wi::min_value (type)) | |
2274 | (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); }) | |
2275 | /* Last resort, use some unsigned type. */ | |
2276 | (with { tree utype = unsigned_type_for (type); } | |
2277 | (if (utype) | |
2278 | (view_convert (inner_op | |
2279 | (view_convert:utype @0) | |
2280 | (view_convert:utype | |
2281 | { drop_tree_overflow (cst); })))))))))))))) | |
2282 | ||
2283 | /* (CST1 - A) +- CST2 -> CST3 - A */ | |
2284 | (for outer_op (plus minus) | |
2285 | (simplify | |
2286 | (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2) | |
2287 | /* If one of the types wraps, use that one. */ | |
2288 | (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type)) | |
2289 | /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse | |
2290 | forever if something doesn't simplify into a constant. */ | |
2291 | (if (!CONSTANT_CLASS_P (@0)) | |
2292 | (minus (outer_op (view_convert @1) @2) (view_convert @0))) | |
2293 | (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2294 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) | |
2295 | (view_convert (minus (outer_op @1 (view_convert @2)) @0)) | |
2296 | (if (types_match (type, @0)) | |
2297 | (with { tree cst = const_binop (outer_op, type, @1, @2); } | |
2298 | (if (cst && !TREE_OVERFLOW (cst)) | |
2299 | (minus { cst; } @0)))))))) | |
2300 | ||
2301 | /* CST1 - (CST2 - A) -> CST3 + A | |
2302 | Use view_convert because it is safe for vectors and equivalent for | |
2303 | scalars. */ | |
2304 | (simplify | |
2305 | (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0))) | |
2306 | /* If one of the types wraps, use that one. */ | |
2307 | (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type)) | |
2308 | /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse | |
2309 | forever if something doesn't simplify into a constant. */ | |
2310 | (if (!CONSTANT_CLASS_P (@0)) | |
2311 | (plus (view_convert @0) (minus @1 (view_convert @2)))) | |
2312 | (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2313 | || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) | |
2314 | (view_convert (plus @0 (minus (view_convert @1) @2))) | |
2315 | (if (types_match (type, @0)) | |
2316 | (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); } | |
2317 | (if (cst && !TREE_OVERFLOW (cst)) | |
2318 | (plus { cst; } @0))))))) | |
2319 | ||
2320 | /* ((T)(A)) + CST -> (T)(A + CST) */ | |
2321 | #if GIMPLE | |
2322 | (simplify | |
2323 | (plus (convert SSA_NAME@0) INTEGER_CST@1) | |
2324 | (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE | |
2325 | && TREE_CODE (type) == INTEGER_TYPE | |
2326 | && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0)) | |
2327 | && int_fits_type_p (@1, TREE_TYPE (@0))) | |
2328 | /* Perform binary operation inside the cast if the constant fits | |
2329 | and (A + CST)'s range does not overflow. */ | |
2330 | (with | |
2331 | { | |
2332 | wi::overflow_type min_ovf = wi::OVF_OVERFLOW, | |
2333 | max_ovf = wi::OVF_OVERFLOW; | |
2334 | tree inner_type = TREE_TYPE (@0); | |
2335 | ||
2336 | wide_int w1 | |
2337 | = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type), | |
2338 | TYPE_SIGN (inner_type)); | |
2339 | ||
2340 | wide_int wmin0, wmax0; | |
2341 | if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE) | |
2342 | { | |
2343 | wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf); | |
2344 | wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf); | |
2345 | } | |
2346 | } | |
2347 | (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE) | |
2348 | (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } ))) | |
2349 | ))) | |
2350 | #endif | |
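/* E.g. (hypothetical): with int i known from range info to lie in
   [0, 100], (long) i + 1 becomes (long) (i + 1), since i + 1 provably
   cannot overflow in the narrower type.  */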
2351 | ||
2352 | /* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */ | |
2353 | #if GIMPLE | |
2354 | (for op (plus minus) | |
2355 | (simplify | |
2356 | (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2) | |
2357 | (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE | |
2358 | && TREE_CODE (type) == INTEGER_TYPE | |
2359 | && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0)) | |
2360 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
2361 | && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) | |
2362 | && TYPE_OVERFLOW_WRAPS (type)) | |
2363 | (plus (convert @0) (op @2 (convert @1)))))) | |
2364 | #endif | |
2365 | ||
2366 | /* ~A + A -> -1 */ | |
2367 | (simplify | |
2368 | (plus:c (bit_not @0) @0) | |
2369 | (if (!TYPE_OVERFLOW_TRAPS (type)) | |
2370 | { build_all_ones_cst (type); })) | |
2371 | ||
2372 | /* ~A + 1 -> -A */ | |
2373 | (simplify | |
2374 | (plus (convert? (bit_not @0)) integer_each_onep) | |
2375 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
2376 | (negate (convert @0)))) | |
2377 | ||
2378 | /* -A - 1 -> ~A */ | |
2379 | (simplify | |
2380 | (minus (convert? (negate @0)) integer_each_onep) | |
2381 | (if (!TYPE_OVERFLOW_TRAPS (type) | |
2382 | && tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
2383 | (bit_not (convert @0)))) | |
2384 | ||
2385 | /* -1 - A -> ~A */ | |
2386 | (simplify | |
2387 | (minus integer_all_onesp @0) | |
2388 | (bit_not @0)) | |
2389 | ||
2390 | /* (T)(P + A) - (T)P -> (T) A */ | |
2391 | (simplify | |
2392 | (minus (convert (plus:c @@0 @1)) | |
2393 | (convert? @0)) | |
2394 | (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2395 | /* For integer types, if A has a smaller type | |
2396 | than T the result depends on the possible | |
2397 | overflow in P + A. | |
2398 | E.g. T=size_t, A=(unsigned)4294967295, P>0. | |
2399 | However, if an overflow in P + A would cause | |
2400 | undefined behavior, we can assume that there | |
2401 | is no overflow. */ | |
2402 | || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
2403 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) | |
2404 | (convert @1))) | |
2405 | (simplify | |
2406 | (minus (convert (pointer_plus @@0 @1)) | |
2407 | (convert @0)) | |
2408 | (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2409 | /* For pointer types, if the conversion of A to the | |
2410 | final type requires a sign- or zero-extension, | |
2411 | then we have to punt - it is not defined which | |
2412 | one is correct. */ | |
2413 | || (POINTER_TYPE_P (TREE_TYPE (@0)) | |
2414 | && TREE_CODE (@1) == INTEGER_CST | |
2415 | && tree_int_cst_sign_bit (@1) == 0)) | |
2416 | (convert @1))) | |
2417 | (simplify | |
2418 | (pointer_diff (pointer_plus @@0 @1) @0) | |
2419 | /* The second argument of pointer_plus must be interpreted as signed, and | |
2420 | thus sign-extended if necessary. */ | |
2421 | (with { tree stype = signed_type_for (TREE_TYPE (@1)); } | |
2422 | /* Use view_convert instead of convert here: the POINTER_PLUS_EXPR | |
2423 | second arg is unsigned even when we need to consider it as signed; | |
2424 | we don't want to diagnose overflow here. */ | |
2425 | (convert (view_convert:stype @1)))) | |
2426 | ||
2427 | /* (T)P - (T)(P + A) -> -(T) A */ | |
2428 | (simplify | |
2429 | (minus (convert? @0) | |
2430 | (convert (plus:c @@0 @1))) | |
2431 | (if (INTEGRAL_TYPE_P (type) | |
2432 | && TYPE_OVERFLOW_UNDEFINED (type) | |
2433 | && element_precision (type) <= element_precision (TREE_TYPE (@1))) | |
2434 | (with { tree utype = unsigned_type_for (type); } | |
2435 | (convert (negate (convert:utype @1)))) | |
2436 | (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2437 | /* For integer types, if A has a smaller type | |
2438 | than T the result depends on the possible | |
2439 | overflow in P + A. | |
2440 | E.g. T=size_t, A=(unsigned)4294967295, P>0. | |
2441 | However, if an overflow in P + A would cause | |
2442 | undefined behavior, we can assume that there | |
2443 | is no overflow. */ | |
2444 | || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
2445 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) | |
2446 | (negate (convert @1))))) | |
2447 | (simplify | |
2448 | (minus (convert @0) | |
2449 | (convert (pointer_plus @@0 @1))) | |
2450 | (if (INTEGRAL_TYPE_P (type) | |
2451 | && TYPE_OVERFLOW_UNDEFINED (type) | |
2452 | && element_precision (type) <= element_precision (TREE_TYPE (@1))) | |
2453 | (with { tree utype = unsigned_type_for (type); } | |
2454 | (convert (negate (convert:utype @1)))) | |
2455 | (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2456 | /* For pointer types, if the conversion of A to the | |
2457 | final type requires a sign- or zero-extension, | |
2458 | then we have to punt - it is not defined which | |
2459 | one is correct. */ | |
2460 | || (POINTER_TYPE_P (TREE_TYPE (@0)) | |
2461 | && TREE_CODE (@1) == INTEGER_CST | |
2462 | && tree_int_cst_sign_bit (@1) == 0)) | |
2463 | (negate (convert @1))))) | |
2464 | (simplify | |
2465 | (pointer_diff @0 (pointer_plus @@0 @1)) | |
2466 | /* The second argument of pointer_plus must be interpreted as signed, and | |
2467 | thus sign-extended if necessary. */ | |
2468 | (with { tree stype = signed_type_for (TREE_TYPE (@1)); } | |
2469 | /* Use view_convert instead of convert here: the POINTER_PLUS_EXPR | |
2470 | second arg is unsigned even when we need to consider it as signed; | |
2471 | we don't want to diagnose overflow here. */ | |
2472 | (negate (convert (view_convert:stype @1))))) | |
2473 | ||
2474 | /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */ | |
2475 | (simplify | |
2476 | (minus (convert (plus:c @@0 @1)) | |
2477 | (convert (plus:c @0 @2))) | |
2478 | (if (INTEGRAL_TYPE_P (type) | |
2479 | && TYPE_OVERFLOW_UNDEFINED (type) | |
2480 | && element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2481 | && element_precision (type) <= element_precision (TREE_TYPE (@2))) | |
2482 | (with { tree utype = unsigned_type_for (type); } | |
2483 | (convert (minus (convert:utype @1) (convert:utype @2)))) | |
2484 | (if (((element_precision (type) <= element_precision (TREE_TYPE (@1))) | |
2485 | == (element_precision (type) <= element_precision (TREE_TYPE (@2)))) | |
2486 | && (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2487 | /* For integer types, if A has a smaller type | |
2488 | than T the result depends on the possible | |
2489 | overflow in P + A. | |
2490 | E.g. T=size_t, A=(unsigned)4294967295, P>0. | |
2491 | However, if an overflow in P + A would cause | |
2492 | undefined behavior, we can assume that there | |
2493 | is no overflow. */ | |
2494 | || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
2495 | && INTEGRAL_TYPE_P (TREE_TYPE (@2)) | |
2496 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)) | |
2497 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2))))) | |
2498 | (minus (convert @1) (convert @2))))) | |
2499 | (simplify | |
2500 | (minus (convert (pointer_plus @@0 @1)) | |
2501 | (convert (pointer_plus @0 @2))) | |
2502 | (if (INTEGRAL_TYPE_P (type) | |
2503 | && TYPE_OVERFLOW_UNDEFINED (type) | |
2504 | && element_precision (type) <= element_precision (TREE_TYPE (@1))) | |
2505 | (with { tree utype = unsigned_type_for (type); } | |
2506 | (convert (minus (convert:utype @1) (convert:utype @2)))) | |
2507 | (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) | |
2508 | /* For pointer types, if the conversion of A to the | |
2509 | final type requires a sign- or zero-extension, | |
2510 | then we have to punt - it is not defined which | |
2511 | one is correct. */ | |
2512 | || (POINTER_TYPE_P (TREE_TYPE (@0)) | |
2513 | && TREE_CODE (@1) == INTEGER_CST | |
2514 | && tree_int_cst_sign_bit (@1) == 0 | |
2515 | && TREE_CODE (@2) == INTEGER_CST | |
2516 | && tree_int_cst_sign_bit (@2) == 0)) | |
2517 | (minus (convert @1) (convert @2))))) | |
2518 | (simplify | |
2519 | (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2)) | |
2520 | /* The second argument of pointer_plus must be interpreted as signed, and | |
2521 | thus sign-extended if necessary. */ | |
2522 | (with { tree stype = signed_type_for (TREE_TYPE (@1)); } | |
2523 | /* Use view_convert instead of convert here: the POINTER_PLUS_EXPR | |
2524 | second arg is unsigned even when we need to consider it as signed; | |
2525 | we don't want to diagnose overflow here. */ | |
2526 | (minus (convert (view_convert:stype @1)) | |
2527 | (convert (view_convert:stype @2))))))) | |
2528 | ||
2529 | /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1). | |
2530 | Modeled after fold_plusminus_mult_expr. */ | |
2531 | (if (!TYPE_SATURATING (type) | |
2532 | && (!FLOAT_TYPE_P (type) || flag_associative_math)) | |
2533 | (for plusminus (plus minus) | |
2534 | (simplify | |
2535 | (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2)) | |
2536 | (if ((!ANY_INTEGRAL_TYPE_P (type) | |
2537 | || TYPE_OVERFLOW_WRAPS (type) | |
2538 | || (INTEGRAL_TYPE_P (type) | |
2539 | && tree_expr_nonzero_p (@0) | |
2540 | && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type))))) | |
2541 | /* If @1 +- @2 is constant require a hard single-use on either | |
2542 | original operand (but not on both). */ | |
2543 | && (single_use (@3) || single_use (@4))) | |
2544 | (mult (plusminus @1 @2) @0))) | |
2545 | /* We cannot generate constant 1 for fract. */ | |
2546 | (if (!ALL_FRACT_MODE_P (TYPE_MODE (type))) | |
2547 | (simplify | |
2548 | (plusminus @0 (mult:c@3 @0 @2)) | |
2549 | (if ((!ANY_INTEGRAL_TYPE_P (type) | |
2550 | || TYPE_OVERFLOW_WRAPS (type) | |
2551 | /* For @0 + @0*@2 this transformation would introduce UB | |
2552 | (where there was none before) for @0 in [-1,0] and @2 max. | |
2553 | For @0 - @0*@2 this transformation would introduce UB | |
2554 | for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */ | |
2555 | || (INTEGRAL_TYPE_P (type) | |
2556 | && ((tree_expr_nonzero_p (@0) | |
2557 | && expr_not_equal_to (@0, | |
2558 | wi::minus_one (TYPE_PRECISION (type)))) | |
2559 | || (plusminus == PLUS_EXPR | |
2560 | ? expr_not_equal_to (@2, | |
2561 | wi::max_value (TYPE_PRECISION (type), SIGNED)) | |
2562 | /* Let's ignore the @0 -1 and @2 min case. */ | |
2563 | : (expr_not_equal_to (@2, | |
2564 | wi::min_value (TYPE_PRECISION (type), SIGNED)) | |
2565 | && expr_not_equal_to (@2, | |
2566 | wi::min_value (TYPE_PRECISION (type), SIGNED) | |
2567 | + 1)))))) | |
2568 | && single_use (@3)) | |
2569 | (mult (plusminus { build_one_cst (type); } @2) @0))) | |
2570 | (simplify | |
2571 | (plusminus (mult:c@3 @0 @2) @0) | |
2572 | (if ((!ANY_INTEGRAL_TYPE_P (type) | |
2573 | || TYPE_OVERFLOW_WRAPS (type) | |
2574 | /* For @0*@2 + @0 this transformation would introduce UB | |
2575 | (where there was none before) for @0 in [-1,0] and @2 max. | |
2576 | For @0*@2 - @0 this transformation would introduce UB | |
2577 | for @0 0 and @2 min. */ | |
2578 | || (INTEGRAL_TYPE_P (type) | |
2579 | && ((tree_expr_nonzero_p (@0) | |
2580 | && (plusminus == MINUS_EXPR | |
2581 | || expr_not_equal_to (@0, | |
2582 | wi::minus_one (TYPE_PRECISION (type))))) | |
2583 | || expr_not_equal_to (@2, | |
2584 | (plusminus == PLUS_EXPR | |
2585 | ? wi::max_value (TYPE_PRECISION (type), SIGNED) | |
2586 | : wi::min_value (TYPE_PRECISION (type), SIGNED)))))) | |
2587 | && single_use (@3)) | |
2588 | (mult (plusminus @2 { build_one_cst (type); }) @0)))))) | |
2589 | ||
2590 | #if GIMPLE | |
2591 | /* Canonicalize X + (X << C) into X * (1 + (1 << C)) and | |
2592 | (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */ | |
2593 | (simplify | |
2594 | (plus:c @0 (lshift:s @0 INTEGER_CST@1)) | |
2595 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2596 | && tree_fits_uhwi_p (@1) | |
2597 | && tree_to_uhwi (@1) < element_precision (type)) | |
2598 | (with { tree t = type; | |
2599 | if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t); | |
2600 | wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), | |
2601 | element_precision (type)); | |
2602 | w += 1; | |
2603 | tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t) | |
2604 | : t, w); | |
2605 | cst = build_uniform_cst (t, cst); } | |
2606 | (convert (mult (convert:t @0) { cst; }))))) | |
2607 | (simplify | |
2608 | (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2)) | |
2609 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2610 | && tree_fits_uhwi_p (@1) | |
2611 | && tree_to_uhwi (@1) < element_precision (type) | |
2612 | && tree_fits_uhwi_p (@2) | |
2613 | && tree_to_uhwi (@2) < element_precision (type)) | |
2614 | (with { tree t = type; | |
2615 | if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t); | |
2616 | unsigned int prec = element_precision (type); | |
2617 | wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec); | |
2618 | w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec); | |
2619 | tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t) | |
2620 | : t, w); | |
2621 | cst = build_uniform_cst (t, cst); } | |
2622 | (convert (mult (convert:t @0) { cst; }))))) | |
2623 | #endif | |
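/* E.g. (hypothetical): x + (x << 3) becomes x * 9 and
   (x << 1) + (x << 2) becomes x * 6, exposing a single multiplication
   to later passes such as strength reduction.  */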
2624 | ||
2625 | /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */ | |
2626 | ||
2627 | (for minmax (min max FMIN_ALL FMAX_ALL) | |
2628 | (simplify | |
2629 | (minmax @0 @0) | |
2630 | @0)) | |
2631 | /* min(max(x,y),y) -> y. */ | |
2632 | (simplify | |
2633 | (min:c (max:c @0 @1) @1) | |
2634 | @1) | |
2635 | /* max(min(x,y),y) -> y. */ | |
2636 | (simplify | |
2637 | (max:c (min:c @0 @1) @1) | |
2638 | @1) | |
2639 | /* max(a,-a) -> abs(a). */ | |
2640 | (simplify | |
2641 | (max:c @0 (negate @0)) | |
2642 | (if (TREE_CODE (type) != COMPLEX_TYPE | |
2643 | && (! ANY_INTEGRAL_TYPE_P (type) | |
2644 | || TYPE_OVERFLOW_UNDEFINED (type))) | |
2645 | (abs @0))) | |
2646 | /* min(a,-a) -> -abs(a). */ | |
2647 | (simplify | |
2648 | (min:c @0 (negate @0)) | |
2649 | (if (TREE_CODE (type) != COMPLEX_TYPE | |
2650 | && (! ANY_INTEGRAL_TYPE_P (type) | |
2651 | || TYPE_OVERFLOW_UNDEFINED (type))) | |
2652 | (negate (abs @0)))) | |
2653 | (simplify | |
2654 | (min @0 @1) | |
2655 | (switch | |
2656 | (if (INTEGRAL_TYPE_P (type) | |
2657 | && TYPE_MIN_VALUE (type) | |
2658 | && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) | |
2659 | @1) | |
2660 | (if (INTEGRAL_TYPE_P (type) | |
2661 | && TYPE_MAX_VALUE (type) | |
2662 | && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) | |
2663 | @0))) | |
2664 | (simplify | |
2665 | (max @0 @1) | |
2666 | (switch | |
2667 | (if (INTEGRAL_TYPE_P (type) | |
2668 | && TYPE_MAX_VALUE (type) | |
2669 | && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) | |
2670 | @1) | |
2671 | (if (INTEGRAL_TYPE_P (type) | |
2672 | && TYPE_MIN_VALUE (type) | |
2673 | && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) | |
2674 | @0))) | |
2675 | ||
2676 | /* max (a, a + CST) -> a + CST where CST is positive. */ | |
2677 | /* max (a, a + CST) -> a where CST is negative. */ | |
2678 | (simplify | |
2679 | (max:c @0 (plus@2 @0 INTEGER_CST@1)) | |
2680 | (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
2681 | (if (tree_int_cst_sgn (@1) > 0) | |
2682 | @2 | |
2683 | @0))) | |
2684 | ||
2685 | /* min (a, a + CST) -> a where CST is positive. */ | |
2686 | /* min (a, a + CST) -> a + CST where CST is negative. */ | |
2687 | (simplify | |
2688 | (min:c @0 (plus@2 @0 INTEGER_CST@1)) | |
2689 | (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) | |
2690 | (if (tree_int_cst_sgn (@1) > 0) | |
2691 | @0 | |
2692 | @2))) | |
2693 | ||
2694 | /* (convert (minmax (convert x) c)) -> minmax (x, c) if x is promoted | |
2695 | and the outer convert demotes the expression back to x's type. */ | |
2696 | (for minmax (min max) | |
2697 | (simplify | |
2698 | (convert (minmax@0 (convert @1) INTEGER_CST@2)) | |
2699 | (if (INTEGRAL_TYPE_P (type) | |
2700 | && types_match (@1, type) && int_fits_type_p (@2, type) | |
2701 | && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type) | |
2702 | && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)) | |
2703 | (minmax @1 (convert @2))))) | |
2704 | ||
2705 | (for minmax (FMIN_ALL FMAX_ALL) | |
2706 | /* If either argument is NaN, return the other one. Avoid the | |
2707 | transformation if we get (and honor) a signalling NaN. */ | |
2708 | (simplify | |
2709 | (minmax:c @0 REAL_CST@1) | |
2710 | (if (real_isnan (TREE_REAL_CST_PTR (@1)) | |
2711 | && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling)) | |
2712 | @0))) | |
2713 | /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these | |
2714 | functions to return the numeric arg if the other one is NaN. | |
2715 | MIN and MAX don't honor that, so only transform if -ffinite-math-only | |
2716 | is set. C99 doesn't require -0.0 to be handled, so we don't have to | |
2717 | worry about it either. */ | |
2718 | (if (flag_finite_math_only) | |
2719 | (simplify | |
2720 | (FMIN_ALL @0 @1) | |
2721 | (min @0 @1)) | |
2722 | (simplify | |
2723 | (FMAX_ALL @0 @1) | |
2724 | (max @0 @1))) | |
2725 | /* min (-A, -B) -> -max (A, B) */ | |
2726 | (for minmax (min max FMIN_ALL FMAX_ALL) | |
2727 | maxmin (max min FMAX_ALL FMIN_ALL) | |
2728 | (simplify | |
2729 | (minmax (negate:s@2 @0) (negate:s@3 @1)) | |
2730 | (if (FLOAT_TYPE_P (TREE_TYPE (@0)) | |
2731 | || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2732 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) | |
2733 | (negate (maxmin @0 @1))))) | |
2734 | /* MIN (~X, ~Y) -> ~MAX (X, Y) | |
2735 | MAX (~X, ~Y) -> ~MIN (X, Y) */ | |
2736 | (for minmax (min max) | |
2737 | maxmin (max min) | |
2738 | (simplify | |
2739 | (minmax (bit_not:s@2 @0) (bit_not:s@3 @1)) | |
2740 | (bit_not (maxmin @0 @1)))) | |
2741 | ||
2742 | /* MIN (X, Y) == X -> X <= Y */ | |
2743 | (for minmax (min min max max) | |
2744 | cmp (eq ne eq ne ) | |
2745 | out (le gt ge lt ) | |
2746 | (simplify | |
2747 | (cmp:c (minmax:c @0 @1) @0) | |
2748 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))) | |
2749 | (out @0 @1)))) | |
2750 | /* MIN (X, 5) == 0 -> X == 0 | |
2751 | MIN (X, 5) == 7 -> false */ | |
2752 | (for cmp (eq ne) | |
2753 | (simplify | |
2754 | (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2) | |
2755 | (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), | |
2756 | TYPE_SIGN (TREE_TYPE (@0)))) | |
2757 | { constant_boolean_node (cmp == NE_EXPR, type); } | |
2758 | (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), | |
2759 | TYPE_SIGN (TREE_TYPE (@0)))) | |
2760 | (cmp @0 @2))))) | |
2761 | (for cmp (eq ne) | |
2762 | (simplify | |
2763 | (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2) | |
2764 | (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), | |
2765 | TYPE_SIGN (TREE_TYPE (@0)))) | |
2766 | { constant_boolean_node (cmp == NE_EXPR, type); } | |
2767 | (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), | |
2768 | TYPE_SIGN (TREE_TYPE (@0)))) | |
2769 | (cmp @0 @2))))) | |
2770 | /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */ | |
2771 | (for minmax (min min max max min min max max ) | |
2772 | cmp (lt le gt ge gt ge lt le ) | |
2773 | comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and) | |
2774 | (simplify | |
2775 | (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2) | |
2776 | (comb (cmp @0 @2) (cmp @1 @2)))) | |
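
/* For example, MIN (x, 3) < 5 tests x < 5 || 3 < 5 (always true, so it
   folds to 1), while MAX (x, 3) < 5 tests x < 5 && 3 < 5.  A sketch:

     int f (int x) { return (x < 3 ? x : 3) < 5; }   // -> 1
*/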
2777 | ||
2778 | /* Undo fancy way of writing max/min or other ?: expressions, | |
2779 | like a - ((a - b) & -(a < b)), in this case into (a < b) ? b : a. | |
2780 | People normally use ?: and that is what we actually try to optimize. */ | |
2781 | (for cmp (simple_comparison) | |
2782 | (simplify | |
2783 | (minus @0 (bit_and:c (minus @0 @1) | |
2784 | (convert? (negate@4 (convert? (cmp@5 @2 @3)))))) | |
2785 | (if (INTEGRAL_TYPE_P (type) | |
2786 | && INTEGRAL_TYPE_P (TREE_TYPE (@4)) | |
2787 | && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE | |
2788 | && INTEGRAL_TYPE_P (TREE_TYPE (@5)) | |
2789 | && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type) | |
2790 | || !TYPE_UNSIGNED (TREE_TYPE (@4))) | |
2791 | && (GIMPLE || !TREE_SIDE_EFFECTS (@1))) | |
2792 | (cond (cmp @2 @3) @1 @0))) | |
2793 | (simplify | |
2794 | (plus:c @0 (bit_and:c (minus @1 @0) | |
2795 | (convert? (negate@4 (convert? (cmp@5 @2 @3)))))) | |
2796 | (if (INTEGRAL_TYPE_P (type) | |
2797 | && INTEGRAL_TYPE_P (TREE_TYPE (@4)) | |
2798 | && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE | |
2799 | && INTEGRAL_TYPE_P (TREE_TYPE (@5)) | |
2800 | && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type) | |
2801 | || !TYPE_UNSIGNED (TREE_TYPE (@4))) | |
2802 | && (GIMPLE || !TREE_SIDE_EFFECTS (@1))) | |
2803 | (cond (cmp @2 @3) @1 @0))) | |
2804 | /* Similarly with ^ instead of - though in that case with :c. */ | |
2805 | (simplify | |
2806 | (bit_xor:c @0 (bit_and:c (bit_xor:c @0 @1) | |
2807 | (convert? (negate@4 (convert? (cmp@5 @2 @3)))))) | |
2808 | (if (INTEGRAL_TYPE_P (type) | |
2809 | && INTEGRAL_TYPE_P (TREE_TYPE (@4)) | |
2810 | && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE | |
2811 | && INTEGRAL_TYPE_P (TREE_TYPE (@5)) | |
2812 | && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type) | |
2813 | || !TYPE_UNSIGNED (TREE_TYPE (@4))) | |
2814 | && (GIMPLE || !TREE_SIDE_EFFECTS (@1))) | |
2815 | (cond (cmp @2 @3) @1 @0)))) | |
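
/* The idiom being undone is the classic branchless select; a sketch in
   C, assuming two's complement int:

     int sel_max (int a, int b)
     {
       return a - ((a - b) & -(a < b));   // == (a < b ? b : a)
     }

   -(a < b) is all-ones when a < b and zero otherwise, so the AND either
   keeps or drops the difference; the patterns above rewrite this back
   into a COND_EXPR, which later phases can recognize as MAX_EXPR.  */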
2816 | ||
2817 | /* Simplifications of shift and rotates. */ | |
2818 | ||
2819 | (for rotate (lrotate rrotate) | |
2820 | (simplify | |
2821 | (rotate integer_all_onesp@0 @1) | |
2822 | @0)) | |
2823 | ||
2824 | /* Optimize -1 >> x for arithmetic right shifts. */ | |
2825 | (simplify | |
2826 | (rshift integer_all_onesp@0 @1) | |
2827 | (if (!TYPE_UNSIGNED (type) | |
2828 | && tree_expr_nonnegative_p (@1)) | |
2829 | @0)) | |
2830 | ||
2831 | /* Optimize (x >> c) << c into x & (-1<<c). */ | |
2832 | (simplify | |
2833 | (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1) | |
2834 | (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))) | |
2835 | /* It doesn't matter if the right shift is arithmetic or logical. */ | |
2836 | (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1)))) | |
2837 | ||
2838 | (simplify | |
2839 | (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1) | |
2840 | (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)) | |
2841 | /* Allow intermediate conversion to integral type with whatever sign, as | |
2842 | long as the low TYPE_PRECISION (type) | |
2843 | - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */ | |
2844 | && INTEGRAL_TYPE_P (type) | |
2845 | && INTEGRAL_TYPE_P (TREE_TYPE (@2)) | |
2846 | && INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
2847 | && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)) | |
2848 | && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type) | |
2849 | || wi::geu_p (wi::to_wide (@1), | |
2850 | TYPE_PRECISION (type) | |
2851 | - TYPE_PRECISION (TREE_TYPE (@2))))) | |
2852 | (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1)))) | |
2853 | ||
2854 | /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned | |
2855 | types. */ | |
2856 | (simplify | |
2857 | (rshift (lshift @0 INTEGER_CST@1) @1) | |
2858 | (if (TYPE_UNSIGNED (type) | |
2859 | && (wi::ltu_p (wi::to_wide (@1), element_precision (type)))) | |
2860 | (bit_and @0 (rshift { build_minus_one_cst (type); } @1)))) | |
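
/* Concretely (a sketch, not itself a pattern):

     unsigned down_up (unsigned x) { return (x >> 3) << 3; }  // -> x & ~7u
     unsigned up_down (unsigned x) { return (x << 3) >> 3; }  // -> x & (~0u >> 3)

   Both become a mask of the bits that survive the round trip; the
   second form is restricted to unsigned types because an arithmetic
   right shift would replicate the shifted-in sign bit instead.  */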
2861 | ||
2862 | (for shiftrotate (lrotate rrotate lshift rshift) | |
2863 | (simplify | |
2864 | (shiftrotate @0 integer_zerop) | |
2865 | (non_lvalue @0)) | |
2866 | (simplify | |
2867 | (shiftrotate integer_zerop@0 @1) | |
2868 | @0) | |
2869 | /* Prefer vector1 << scalar to vector1 << vector2 | |
2870 | if vector2 is uniform. */ | |
2871 | (for vec (VECTOR_CST CONSTRUCTOR) | |
2872 | (simplify | |
2873 | (shiftrotate @0 vec@1) | |
2874 | (with { tree tem = uniform_vector_p (@1); } | |
2875 | (if (tem) | |
2876 | (shiftrotate @0 { tem; })))))) | |
2877 | ||
2878 | /* Simplify X << Y to X when Y's low "width" bits are known to be 0, | |
2879 | since the only valid such Y is 0.  Similarly for X >> Y. */ | |
2880 | #if GIMPLE | |
2881 | (for shift (lshift rshift) | |
2882 | (simplify | |
2883 | (shift @0 SSA_NAME@1) | |
2884 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))) | |
2885 | (with { | |
2886 | int width = ceil_log2 (element_precision (TREE_TYPE (@0))); | |
2887 | int prec = TYPE_PRECISION (TREE_TYPE (@1)); | |
2888 | } | |
2889 | (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0) | |
2890 | @0))))) | |
2891 | #endif | |
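
/* For 32-bit int the shift count has width == ceil_log2 (32) == 5, so
   once nonzero-bits information proves the low 5 bits of the count
   zero, the only in-range count left is 0.  A sketch:

     int f (int x, int y) { return x << (y & 0x20); }   // GIMPLE: -> x

   Any nonzero count of the form y & 0x20 would be 32 and thus already
   undefined, which is why the shift can be dropped.  */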
2892 | ||
2893 | /* Rewrite an LROTATE_EXPR by a constant into an | |
2894 | RROTATE_EXPR by a new constant. */ | |
2895 | (simplify | |
2896 | (lrotate @0 INTEGER_CST@1) | |
2897 | (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1), | |
2898 | build_int_cst (TREE_TYPE (@1), | |
2899 | element_precision (type)), @1); })) | |
2900 | ||
2901 | /* Turn (a OP c1) OP c2 into a OP (c1+c2). */ | |
2902 | (for op (lrotate rrotate rshift lshift) | |
2903 | (simplify | |
2904 | (op (op @0 INTEGER_CST@1) INTEGER_CST@2) | |
2905 | (with { unsigned int prec = element_precision (type); } | |
2906 | (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))) | |
2907 | && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1))) | |
2908 | && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))) | |
2909 | && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2)))) | |
2910 | (with { unsigned int low = (tree_to_uhwi (@1) | |
2911 | + tree_to_uhwi (@2)); } | |
2912 | /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2 | |
2913 | being well defined. */ | |
2914 | (if (low >= prec) | |
2915 | (if (op == LROTATE_EXPR || op == RROTATE_EXPR) | |
2916 | (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); }) | |
2917 | (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR) | |
2918 | { build_zero_cst (type); } | |
2919 | (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); }))) | |
2920 | (op @0 { build_int_cst (TREE_TYPE (@1), low); }))))))) | |
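
/* For example, with 32-bit unsigned (a sketch):

     unsigned f (unsigned x) { return (x << 20) << 20; }   // -> 0

   The combined count 40 is >= the precision, so a logical shift yields
   zero; rotates instead combine modulo 32, and a signed right shift is
   clamped to a count of 31.  */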
2921 | ||
2922 | ||
2923 | /* ((1 << A) & 1) != 0 -> A == 0 | |
2924 | ((1 << A) & 1) == 0 -> A != 0 */ | |
2925 | (for cmp (ne eq) | |
2926 | icmp (eq ne) | |
2927 | (simplify | |
2928 | (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop) | |
2929 | (icmp @0 { build_zero_cst (TREE_TYPE (@0)); }))) | |
2930 | ||
2931 | /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1) | |
2932 | (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1) | |
2933 | if CST2 != 0. */ | |
2934 | (for cmp (ne eq) | |
2935 | (simplify | |
2936 | (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2) | |
2937 | (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); } | |
2938 | (if (cand < 0 | |
2939 | || (!integer_zerop (@2) | |
2940 | && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2))) | |
2941 | { constant_boolean_node (cmp == NE_EXPR, type); } | |
2942 | (if (!integer_zerop (@2) | |
2943 | && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2)) | |
2944 | (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); })))))) | |
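
/* For instance, (4 << a) == 32 becomes a == 3, since
   ctz (32) - ctz (4) == 5 - 2 == 3 and 4 << 3 == 32; when no such count
   exists, as in (4 << a) == 33, the comparison folds to false.  A
   sketch:

     int f (int a) { return (4 << a) == 32; }   // -> a == 3
*/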
2945 | ||
2946 | /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1)) | |
2947 | (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1)) | |
2948 | if the new mask might be further optimized. */ | |
2949 | (for shift (lshift rshift) | |
2950 | (simplify | |
2951 | (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1)) | |
2952 | INTEGER_CST@2) | |
2953 | (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5)) | |
2954 | && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT | |
2955 | && tree_fits_uhwi_p (@1) | |
2956 | && tree_to_uhwi (@1) > 0 | |
2957 | && tree_to_uhwi (@1) < TYPE_PRECISION (type)) | |
2958 | (with | |
2959 | { | |
2960 | unsigned int shiftc = tree_to_uhwi (@1); | |
2961 | unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2); | |
2962 | unsigned HOST_WIDE_INT newmask, zerobits = 0; | |
2963 | tree shift_type = TREE_TYPE (@3); | |
2964 | unsigned int prec; | |
2965 | ||
2966 | if (shift == LSHIFT_EXPR) | |
2967 | zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1); | |
2968 | else if (shift == RSHIFT_EXPR | |
2969 | && type_has_mode_precision_p (shift_type)) | |
2970 | { | |
2971 | prec = TYPE_PRECISION (TREE_TYPE (@3)); | |
2972 | tree arg00 = @0; | |
2973 | /* See if more bits can be proven as zero because of | |
2974 | zero extension. */ | |
2975 | if (@3 != @0 | |
2976 | && TYPE_UNSIGNED (TREE_TYPE (@0))) | |
2977 | { | |
2978 | tree inner_type = TREE_TYPE (@0); | |
2979 | if (type_has_mode_precision_p (inner_type) | |
2980 | && TYPE_PRECISION (inner_type) < prec) | |
2981 | { | |
2982 | prec = TYPE_PRECISION (inner_type); | |
2983 | /* See if we can shorten the right shift. */ | |
2984 | if (shiftc < prec) | |
2985 | shift_type = inner_type; | |
2986 | /* Otherwise X >> C1 is all zeros, so we'll optimize | |
2987 | it into (X, 0) later on by making sure zerobits | |
2988 | is all ones. */ | |
2989 | } | |
2990 | } | |
2991 | zerobits = HOST_WIDE_INT_M1U; | |
2992 | if (shiftc < prec) | |
2993 | { | |
2994 | zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc; | |
2995 | zerobits <<= prec - shiftc; | |
2996 | } | |
2997 | /* For an arithmetic shift, if the sign bit could be set, the bits | |
2998 | in zerobits may actually be sign bits, so no transformation is | |
2999 | possible unless MASK masks them all away.  In that case the | |
3000 | shift needs to be converted into a logical shift. */ | |
3001 | if (!TYPE_UNSIGNED (TREE_TYPE (@3)) | |
3002 | && prec == TYPE_PRECISION (TREE_TYPE (@3))) | |
3003 | { | |
3004 | if ((mask & zerobits) == 0) | |
3005 | shift_type = unsigned_type_for (TREE_TYPE (@3)); | |
3006 | else | |
3007 | zerobits = 0; | |
3008 | } | |
3009 | } | |
3010 | } | |
3011 | /* ((X << 16) & 0xff00) is (X, 0). */ | |
3012 | (if ((mask & zerobits) == mask) | |
3013 | { build_int_cst (type, 0); } | |
3014 | (with { newmask = mask | zerobits; } | |
3015 | (if (newmask != mask && (newmask & (newmask + 1)) == 0) | |
3016 | (with | |
3017 | { | |
3018 | /* Only do the transformation if NEWMASK is some integer | |
3019 | mode's mask. */ | |
3020 | for (prec = BITS_PER_UNIT; | |
3021 | prec < HOST_BITS_PER_WIDE_INT; prec <<= 1) | |
3022 | if (newmask == (HOST_WIDE_INT_1U << prec) - 1) | |
3023 | break; | |
3024 | } | |
3025 | (if (prec < HOST_BITS_PER_WIDE_INT | |
3026 | || newmask == HOST_WIDE_INT_M1U) | |
3027 | (with | |
3028 | { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); } | |
3029 | (if (!tree_int_cst_equal (newmaskt, @2)) | |
3030 | (if (shift_type != TREE_TYPE (@3)) | |
3031 | (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; }) | |
3032 | (bit_and @4 { newmaskt; }))))))))))))) | |
3033 | ||
3034 | /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1) | |
3035 | (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */ | |
3036 | (for shift (lshift rshift) | |
3037 | (for bit_op (bit_and bit_xor bit_ior) | |
3038 | (simplify | |
3039 | (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1) | |
3040 | (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) | |
3041 | (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); } | |
3042 | (bit_op (shift (convert @0) @1) { mask; })))))) | |
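
/* E.g. masking before or after the shift is equivalent once the mask
   is shifted along (a sketch):

     unsigned f (unsigned x) { return (x & 0xf0u) >> 4; }  // -> (x >> 4) & 0xfu

   which frequently exposes further mask simplifications.  */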
3043 | ||
3044 | /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */ | |
3045 | (simplify | |
3046 | (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2))) | |
3047 | (if (!TYPE_UNSIGNED (TREE_TYPE (@0)) | |
3048 | && (element_precision (TREE_TYPE (@0)) | |
3049 | <= element_precision (TREE_TYPE (@1)) | |
3050 | || !TYPE_UNSIGNED (TREE_TYPE (@1)))) | |
3051 | (with | |
3052 | { tree shift_type = TREE_TYPE (@0); } | |
3053 | (convert (rshift (convert:shift_type @1) @2))))) | |
3054 | ||
3055 | /* ~(~X >>r Y) -> X >>r Y | |
3056 | ~(~X <<r Y) -> X <<r Y */ | |
3057 | (for rotate (lrotate rrotate) | |
3058 | (simplify | |
3059 | (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2))) | |
3060 | (if ((element_precision (TREE_TYPE (@0)) | |
3061 | <= element_precision (TREE_TYPE (@1)) | |
3062 | || !TYPE_UNSIGNED (TREE_TYPE (@1))) | |
3063 | && (element_precision (type) <= element_precision (TREE_TYPE (@0)) | |
3064 | || !TYPE_UNSIGNED (TREE_TYPE (@0)))) | |
3065 | (with | |
3066 | { tree rotate_type = TREE_TYPE (@0); } | |
3067 | (convert (rotate (convert:rotate_type @1) @2)))))) | |
3068 | ||
3069 | /* Simplifications of conversions. */ | |
3070 | ||
3071 | /* Basic strip-useless-type-conversions / strip_nops. */ | |
3072 | (for cvt (convert view_convert float fix_trunc) | |
3073 | (simplify | |
3074 | (cvt @0) | |
3075 | (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0))) | |
3076 | || (GENERIC && type == TREE_TYPE (@0))) | |
3077 | @0))) | |
3078 | ||
3079 | /* Contract view-conversions. */ | |
3080 | (simplify | |
3081 | (view_convert (view_convert @0)) | |
3082 | (view_convert @0)) | |
3083 | ||
3084 | /* For integral conversions with the same precision or pointer | |
3085 | conversions use a NOP_EXPR instead. */ | |
3086 | (simplify | |
3087 | (view_convert @0) | |
3088 | (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)) | |
3089 | && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))) | |
3090 | && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))) | |
3091 | (convert @0))) | |
3092 | ||
3093 | /* Strip inner integral conversions that do not change precision or size, or | |
3094 | zero-extend while keeping the same size (for bool-to-char). */ | |
3095 | (simplify | |
3096 | (view_convert (convert@0 @1)) | |
3097 | (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))) | |
3098 | && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1))) | |
3099 | && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1)) | |
3100 | && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)) | |
3101 | || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1)) | |
3102 | && TYPE_UNSIGNED (TREE_TYPE (@1))))) | |
3103 | (view_convert @1))) | |
3104 | ||
3105 | /* Simplify a view-converted empty constructor. */ | |
3106 | (simplify | |
3107 | (view_convert CONSTRUCTOR@0) | |
3108 | (if (TREE_CODE (@0) != SSA_NAME | |
3109 | && CONSTRUCTOR_NELTS (@0) == 0) | |
3110 | { build_zero_cst (type); })) | |
3111 | ||
3112 | /* Re-association barriers around constants and other re-association | |
3113 | barriers can be removed. */ | |
3114 | (simplify | |
3115 | (paren CONSTANT_CLASS_P@0) | |
3116 | @0) | |
3117 | (simplify | |
3118 | (paren (paren@1 @0)) | |
3119 | @1) | |
3120 | ||
3121 | /* Handle cases of two conversions in a row. */ | |
3122 | (for ocvt (convert float fix_trunc) | |
3123 | (for icvt (convert float) | |
3124 | (simplify | |
3125 | (ocvt (icvt@1 @0)) | |
3126 | (with | |
3127 | { | |
3128 | tree inside_type = TREE_TYPE (@0); | |
3129 | tree inter_type = TREE_TYPE (@1); | |
3130 | int inside_int = INTEGRAL_TYPE_P (inside_type); | |
3131 | int inside_ptr = POINTER_TYPE_P (inside_type); | |
3132 | int inside_float = FLOAT_TYPE_P (inside_type); | |
3133 | int inside_vec = VECTOR_TYPE_P (inside_type); | |
3134 | unsigned int inside_prec = TYPE_PRECISION (inside_type); | |
3135 | int inside_unsignedp = TYPE_UNSIGNED (inside_type); | |
3136 | int inter_int = INTEGRAL_TYPE_P (inter_type); | |
3137 | int inter_ptr = POINTER_TYPE_P (inter_type); | |
3138 | int inter_float = FLOAT_TYPE_P (inter_type); | |
3139 | int inter_vec = VECTOR_TYPE_P (inter_type); | |
3140 | unsigned int inter_prec = TYPE_PRECISION (inter_type); | |
3141 | int inter_unsignedp = TYPE_UNSIGNED (inter_type); | |
3142 | int final_int = INTEGRAL_TYPE_P (type); | |
3143 | int final_ptr = POINTER_TYPE_P (type); | |
3144 | int final_float = FLOAT_TYPE_P (type); | |
3145 | int final_vec = VECTOR_TYPE_P (type); | |
3146 | unsigned int final_prec = TYPE_PRECISION (type); | |
3147 | int final_unsignedp = TYPE_UNSIGNED (type); | |
3148 | } | |
3149 | (switch | |
3150 | /* In addition to the cases of two conversions in a row | |
3151 | handled below, if we are converting something to its own | |
3152 | type via an object of identical or wider precision, neither | |
3153 | conversion is needed. */ | |
3154 | (if (((GIMPLE && useless_type_conversion_p (type, inside_type)) | |
3155 | || (GENERIC | |
3156 | && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type))) | |
3157 | && (((inter_int || inter_ptr) && final_int) | |
3158 | || (inter_float && final_float)) | |
3159 | && inter_prec >= final_prec) | |
3160 | (ocvt @0)) | |
3161 | ||
3162 | /* Likewise, if the intermediate and initial types are either both | |
3163 | float or both integer, we don't need the middle conversion if the | |
3164 | former is wider than the latter and doesn't change the signedness | |
3165 | (for integers). Avoid this if the final type is a pointer since | |
3166 | then we sometimes need the middle conversion. */ | |
3167 | (if (((inter_int && inside_int) || (inter_float && inside_float)) | |
3168 | && (final_int || final_float) | |
3169 | && inter_prec >= inside_prec | |
3170 | && (inter_float || inter_unsignedp == inside_unsignedp)) | |
3171 | (ocvt @0)) | |
3172 | ||
3173 | /* If we have a sign-extension of a zero-extended value, we can | |
3174 | replace that by a single zero-extension. Likewise if the | |
3175 | final conversion does not change precision we can drop the | |
3176 | intermediate conversion. */ | |
3177 | (if (inside_int && inter_int && final_int | |
3178 | && ((inside_prec < inter_prec && inter_prec < final_prec | |
3179 | && inside_unsignedp && !inter_unsignedp) | |
3180 | || final_prec == inter_prec)) | |
3181 | (ocvt @0)) | |
3182 | ||
3183 | /* Two conversions in a row are not needed unless: | |
3184 | - some conversion is floating-point (overly strict for now), or | |
3185 | - some conversion is a vector (overly strict for now), or | |
3186 | - the intermediate type is narrower than both initial and | |
3187 | final, or | |
3188 | - the intermediate type and innermost type differ in signedness, | |
3189 | and the outermost type is wider than the intermediate, or | |
3190 | - the initial type is a pointer type and the precisions of the | |
3191 | intermediate and final types differ, or | |
3192 | - the final type is a pointer type and the precisions of the | |
3193 | initial and intermediate types differ. */ | |
3194 | (if (! inside_float && ! inter_float && ! final_float | |
3195 | && ! inside_vec && ! inter_vec && ! final_vec | |
3196 | && (inter_prec >= inside_prec || inter_prec >= final_prec) | |
3197 | && ! (inside_int && inter_int | |
3198 | && inter_unsignedp != inside_unsignedp | |
3199 | && inter_prec < final_prec) | |
3200 | && ((inter_unsignedp && inter_prec > inside_prec) | |
3201 | == (final_unsignedp && final_prec > inter_prec)) | |
3202 | && ! (inside_ptr && inter_prec != final_prec) | |
3203 | && ! (final_ptr && inside_prec != inter_prec)) | |
3204 | (ocvt @0)) | |
3205 | ||
3206 | /* A truncation to an unsigned type (a zero-extension) should be | |
3207 | canonicalized as bitwise and of a mask. */ | |
3208 | (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */ | |
3209 | && final_int && inter_int && inside_int | |
3210 | && final_prec == inside_prec | |
3211 | && final_prec > inter_prec | |
3212 | && inter_unsignedp) | |
3213 | (convert (bit_and @0 { wide_int_to_tree | |
3214 | (inside_type, | |
3215 | wi::mask (inter_prec, false, | |
3216 | TYPE_PRECISION (inside_type))); }))) | |
3217 | ||
3218 | /* If we are converting an integer to a floating-point that can | |
3219 | represent it exactly and back to an integer, we can skip the | |
3220 | floating-point conversion. */ | |
3221 | (if (GIMPLE /* PR66211 */ | |
3222 | && inside_int && inter_float && final_int && | |
3223 | (unsigned) significand_size (TYPE_MODE (inter_type)) | |
3224 | >= inside_prec - !inside_unsignedp) | |
3225 | (convert @0))))))) | |
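
/* The final case covers round trips such as int -> double -> int: a
   double carries a 53-bit significand, so every 32-bit int is exactly
   representable and the conversion pair is the identity (a sketch;
   GIMPLE only, see PR66211):

     int roundtrip (int x) { return (int) (double) x; }   // -> x
*/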
3226 | ||
3227 | /* If we have a narrowing conversion to an integral type that is fed by a | |
3228 | BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely | |
3229 | masks off bits outside the final type (and nothing else). */ | |
3230 | (simplify | |
3231 | (convert (bit_and @0 INTEGER_CST@1)) | |
3232 | (if (INTEGRAL_TYPE_P (type) | |
3233 | && INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
3234 | && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)) | |
3235 | && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1), | |
3236 | TYPE_PRECISION (type)), 0)) | |
3237 | (convert @0))) | |
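
/* For instance, the mask is redundant when it only clears bits that
   the narrowing conversion would discard anyway (a sketch):

     unsigned char low (unsigned x) { return (unsigned char) (x & 0xff); }
     // -> (unsigned char) x
*/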
3238 | ||
3239 | ||
3240 | /* (X /[ex] A) * A -> X. */ | |
3241 | (simplify | |
3242 | (mult (convert1? (exact_div @0 @@1)) (convert2? @1)) | |
3243 | (convert @0)) | |
3244 | ||
3245 | /* Simplify (A / B) * B + (A % B) -> A. */ | |
3246 | (for div (trunc_div ceil_div floor_div round_div) | |
3247 | mod (trunc_mod ceil_mod floor_mod round_mod) | |
3248 | (simplify | |
3249 | (plus:c (mult:c (div @0 @1) @1) (mod @0 @1)) | |
3250 | @0)) | |
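
/* This is the division algorithm read backwards: a == (a / b) * b
   + a % b holds for each matching div/mod rounding pair, e.g. in C
   (truncating division, a sketch):

     int f (int a, int b) { return a / b * b + a % b; }   // -> a
*/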
3251 | ||
3252 | /* ((X /[ex] A) +- B) * A --> X +- A * B. */ | |
3253 | (for op (plus minus) | |
3254 | (simplify | |
3255 | (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1) | |
3256 | (if (tree_nop_conversion_p (type, TREE_TYPE (@2)) | |
3257 | && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))) | |
3258 | (with | |
3259 | { | |
3260 | wi::overflow_type overflow; | |
3261 | wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), | |
3262 | TYPE_SIGN (type), &overflow); | |
3263 | } | |
3264 | (if (types_match (type, TREE_TYPE (@2)) | |
3265 | && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow) | |
3266 | (op @0 { wide_int_to_tree (type, mul); }) | |
3267 | (with { tree utype = unsigned_type_for (type); } | |
3268 | (convert (op (convert:utype @0) | |
3269 | (mult (convert:utype @1) (convert:utype @2)))))))))) | |
3270 | ||
3271 | /* Canonicalization of binary operations. */ | |
3272 | ||
3273 | /* Convert X + -C into X - C. */ | |
3274 | (simplify | |
3275 | (plus @0 REAL_CST@1) | |
3276 | (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) | |
3277 | (with { tree tem = const_unop (NEGATE_EXPR, type, @1); } | |
3278 | (if (!TREE_OVERFLOW (tem) || !flag_trapping_math) | |
3279 | (minus @0 { tem; }))))) | |
3280 | ||
3281 | /* Convert x+x into x*2. */ | |
3282 | (simplify | |
3283 | (plus @0 @0) | |
3284 | (if (SCALAR_FLOAT_TYPE_P (type)) | |
3285 | (mult @0 { build_real (type, dconst2); }) | |
3286 | (if (INTEGRAL_TYPE_P (type)) | |
3287 | (mult @0 { build_int_cst (type, 2); })))) | |
3288 | ||
3289 | /* 0 - X -> -X. */ | |
3290 | (simplify | |
3291 | (minus integer_zerop @1) | |
3292 | (negate @1)) | |
3293 | (simplify | |
3294 | (pointer_diff integer_zerop @1) | |
3295 | (negate (convert @1))) | |
3296 | ||
3297 | /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether | |
3298 | ARG0 is zero and X + ARG0 reduces to X, since that would mean | |
3299 | (-ARG1 + ARG0) reduces to -ARG1. */ | |
3300 | (simplify | |
3301 | (minus real_zerop@0 @1) | |
3302 | (if (fold_real_zero_addition_p (type, @0, 0)) | |
3303 | (negate @1))) | |
3304 | ||
3305 | /* Transform x * -1 into -x. */ | |
3306 | (simplify | |
3307 | (mult @0 integer_minus_onep) | |
3308 | (negate @0)) | |
3309 | ||
3310 | /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce | |
3311 | signed overflow for CST != 0 && CST != -1. */ | |
3312 | (simplify | |
3313 | (mult:c (mult:s@3 @0 INTEGER_CST@1) @2) | |
3314 | (if (TREE_CODE (@2) != INTEGER_CST | |
3315 | && single_use (@3) | |
3316 | && !integer_zerop (@1) && !integer_minus_onep (@1)) | |
3317 | (mult (mult @0 @2) @1))) | |
3318 | ||
3319 | /* True if we can easily extract the real and imaginary parts of a complex | |
3320 | number. */ | |
3321 | (match compositional_complex | |
3322 | (convert? (complex @0 @1))) | |
3323 | ||
3324 | /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */ | |
3325 | (simplify | |
3326 | (complex (realpart @0) (imagpart @0)) | |
3327 | @0) | |
3328 | (simplify | |
3329 | (realpart (complex @0 @1)) | |
3330 | @0) | |
3331 | (simplify | |
3332 | (imagpart (complex @0 @1)) | |
3333 | @1) | |
3334 | ||
3335 | /* Sometimes we only care about half of a complex expression. */ | |
3336 | (simplify | |
3337 | (realpart (convert?:s (conj:s @0))) | |
3338 | (convert (realpart @0))) | |
3339 | (simplify | |
3340 | (imagpart (convert?:s (conj:s @0))) | |
3341 | (convert (negate (imagpart @0)))) | |
3342 | (for part (realpart imagpart) | |
3343 | (for op (plus minus) | |
3344 | (simplify | |
3345 | (part (convert?:s@2 (op:s @0 @1))) | |
3346 | (convert (op (part @0) (part @1)))))) | |
3347 | (simplify | |
3348 | (realpart (convert?:s (CEXPI:s @0))) | |
3349 | (convert (COS @0))) | |
3350 | (simplify | |
3351 | (imagpart (convert?:s (CEXPI:s @0))) | |
3352 | (convert (SIN @0))) | |
3353 | ||
3354 | /* conj(conj(x)) -> x */ | |
3355 | (simplify | |
3356 | (conj (convert? (conj @0))) | |
3357 | (if (tree_nop_conversion_p (TREE_TYPE (@0), type)) | |
3358 | (convert @0))) | |
3359 | ||
3360 | /* conj({x,y}) -> {x,-y} */ | |
3361 | (simplify | |
3362 | (conj (convert?:s (complex:s @0 @1))) | |
3363 | (with { tree itype = TREE_TYPE (type); } | |
3364 | (complex (convert:itype @0) (negate (convert:itype @1))))) | |
3365 | ||
3366 | /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */ | |
3367 | (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64) | |
3368 | (simplify | |
3369 | (bswap (bswap @0)) | |
3370 | @0) | |
3371 | (simplify | |
3372 | (bswap (bit_not (bswap @0))) | |
3373 | (bit_not @0)) | |
3374 | (for bitop (bit_xor bit_ior bit_and) | |
3375 | (simplify | |
3376 | (bswap (bitop:c (bswap @0) @1)) | |
3377 | (bitop @0 (bswap @1))))) | |
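
/* Byte swapping is an involution and commutes with bitwise operations,
   e.g. (a sketch):

     unsigned f (unsigned x) { return __builtin_bswap32 (__builtin_bswap32 (x)); }
     // -> x
     unsigned g (unsigned x, unsigned y)
     { return __builtin_bswap32 (__builtin_bswap32 (x) ^ y); }
     // -> x ^ __builtin_bswap32 (y)
*/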
3378 | ||
3379 | ||
3380 | /* Combine COND_EXPRs and VEC_COND_EXPRs. */ | |
3381 | ||
3382 | /* Simplify constant conditions. | |
3383 | Only optimize constant conditions when the selected branch | |
3384 | has the same type as the COND_EXPR. This avoids optimizing | |
3385 | away "c ? x : throw", where the throw has a void type. | |
3386 | Note that we cannot throw away the fold-const.c variant nor | |
3387 | this one, as we depend on doing this transform before | |
3388 | A ? B : B -> B can trigger, and the fold-const.c variant can | |
3389 | optimize 0 ? A : B to B even if A has side effects, something | |
3390 | genmatch cannot handle. */ | |
3391 | (simplify | |
3392 | (cond INTEGER_CST@0 @1 @2) | |
3393 | (if (integer_zerop (@0)) | |
3394 | (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type)) | |
3395 | @2) | |
3396 | (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type)) | |
3397 | @1))) | |
3398 | (simplify | |
3399 | (vec_cond VECTOR_CST@0 @1 @2) | |
3400 | (if (integer_all_onesp (@0)) | |
3401 | @1 | |
3402 | (if (integer_zerop (@0)) | |
3403 | @2))) | |
3404 | ||
3405 | /* Sink unary operations into the constant branches, but only if both | |
3406 | branches do fold to constants. */ | |
3407 | (for op (negate bit_not abs absu) | |
3408 | (simplify | |
3409 | (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2)) | |
3410 | (with | |
3411 | { | |
3412 | tree cst1, cst2; | |
3413 | cst1 = const_unop (op, type, @1); | |
3414 | if (cst1) | |
3415 | cst2 = const_unop (op, type, @2); | |
3416 | } | |
3417 | (if (cst1 && cst2) | |
3418 | (vec_cond @0 { cst1; } { cst2; }))))) | |
3419 | ||
3420 | /* Simplification moved from fold_cond_expr_with_comparison. It may also | |
3421 | be extended. */ | |
3422 | /* This pattern implements two kinds of simplification: | |
3423 | ||
3424 | Case 1) | |
3425 | (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if: | |
3426 | 1) Conversions are type-widening from a smaller type. | |
3427 | 2) Const c1 equals c2 after canonicalizing the comparison. | |
3428 | 3) Comparison has tree code LT, LE, GT or GE. | |
3429 | This specific pattern is needed when (cmp (convert x) c) may not | |
3430 | be simplified by comparison patterns because of multiple uses of | |
3431 | x.  It also makes sense here because simplifying across multiply- | |
3432 | referenced variables is always beneficial in complicated cases. | |
3433 | ||
3434 | Case 2) | |
3435 | (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */ | |
3436 | (for cmp (lt le gt ge eq) | |
3437 | (simplify | |
3438 | (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2) | |
3439 | (with | |
3440 | { | |
3441 | tree from_type = TREE_TYPE (@1); | |
3442 | tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2); | |
3443 | enum tree_code code = ERROR_MARK; | |
3444 | ||
3445 | if (INTEGRAL_TYPE_P (from_type) | |
3446 | && int_fits_type_p (@2, from_type) | |
3447 | && (types_match (c1_type, from_type) | |
3448 | || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type) | |
3449 | && (TYPE_UNSIGNED (from_type) | |
3450 | || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type)))) | |
3451 | && (types_match (c2_type, from_type) | |
3452 | || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type) | |
3453 | && (TYPE_UNSIGNED (from_type) | |
3454 | || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type))))) | |
3455 | { | |
3456 | if (cmp != EQ_EXPR) | |
3457 | { | |
3458 | if (wi::to_widest (@3) == (wi::to_widest (@2) - 1)) | |
3459 | { | |
3460 | /* X <= Y - 1 is equivalent to X < Y. */ | |
3461 | if (cmp == LE_EXPR) | |
3462 | code = LT_EXPR; | |
3463 | /* X > Y - 1 is equivalent to X >= Y. */ | |
3464 | if (cmp == GT_EXPR) | |
3465 | code = GE_EXPR; | |
3466 | } | |
3467 | if (wi::to_widest (@3) == (wi::to_widest (@2) + 1)) | |
3468 | { | |
3469 | /* X < Y + 1 is equivalent to X <= Y. */ | |
3470 | if (cmp == LT_EXPR) | |
3471 | code = LE_EXPR; | |
3472 | /* X >= Y + 1 is equivalent to X > Y. */ | |
3473 | if (cmp == GE_EXPR) | |
3474 | code = GT_EXPR; | |
3475 | } | |
3476 | if (code != ERROR_MARK | |
3477 | || wi::to_widest (@2) == wi::to_widest (@3)) | |
3478 | { | |
3479 | if (cmp == LT_EXPR || cmp == LE_EXPR) | |
3480 | code = MIN_EXPR; | |
3481 | if (cmp == GT_EXPR || cmp == GE_EXPR) | |
3482 | code = MAX_EXPR; | |
3483 | } | |
3484 | } | |
3485 | /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */ | |
3486 | else if (int_fits_type_p (@3, from_type)) | |
3487 | code = EQ_EXPR; | |
3488 | } | |
3489 | } | |
3490 | (if (code == MAX_EXPR) | |
3491 | (convert (max @1 (convert @2))) | |
3492 | (if (code == MIN_EXPR) | |
3493 | (convert (min @1 (convert @2))) | |
3494 | (if (code == EQ_EXPR) | |
3495 | (convert (cond (eq @1 (convert @3)) | |
3496 | (convert:from_type @3) (convert:from_type @2))))))))) | |
3497 | ||
3498 | /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if: | |
3499 | ||
3500 | 1) OP is PLUS or MINUS. | |
3501 | 2) CMP is LT, LE, GT or GE. | |
3502 | 3) C3 == (C1 op C2), and computation doesn't have undefined behavior. | |
3503 | ||
3504 | This pattern also handles special cases like: | |
3505 | ||
3506 | A) Operand x is an unsigned-to-signed type conversion and c1 is | |
3507 | integer zero. In this case, | |
3508 | (signed type)x < 0 <=> x > MAX_VAL(signed type) | |
3509 | (signed type)x >= 0 <=> x <= MAX_VAL(signed type) | |
3510 | B) Const c1 may not equal (C3 op' C2).  In this case we also | |
3511 | check equality against (c1+1) and (c1-1) by adjusting the | |
3512 | comparison code. | |
3513 | ||
3514 | TODO: Though signed types are handled by this pattern, they cannot | |
3515 | be simplified at the moment because the C standard requires | |
3516 | additional type promotion.  To match & simplify them here, the IR | |
3517 | needs to be cleaned up first by other optimizers, e.g. VRP. */ | |
3518 | (for op (plus minus) | |
3519 | (for cmp (lt le gt ge) | |
3520 | (simplify | |
3521 | (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3) | |
3522 | (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); } | |
3523 | (if (types_match (from_type, to_type) | |
3524 | /* Check if it is special case A). */ | |
3525 | || (TYPE_UNSIGNED (from_type) | |
3526 | && !TYPE_UNSIGNED (to_type) | |
3527 | && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type) | |
3528 | && integer_zerop (@1) | |
3529 | && (cmp == LT_EXPR || cmp == GE_EXPR))) | |
3530 | (with | |
3531 | { | |
3532 | wi::overflow_type overflow = wi::OVF_NONE; | |
3533 | enum tree_code code, cmp_code = cmp; | |
3534 | wide_int real_c1; | |
3535 | wide_int c1 = wi::to_wide (@1); | |
3536 | wide_int c2 = wi::to_wide (@2); | |
3537 | wide_int c3 = wi::to_wide (@3); | |
3538 | signop sgn = TYPE_SIGN (from_type); | |
3539 | ||
3540 | /* Handle special case A), given x of unsigned type: | |
3541 | ((signed type)x < 0) <=> (x > MAX_VAL(signed type)) | |
3542 | ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */ | |
3543 | if (!types_match (from_type, to_type)) | |
3544 | { | |
3545 | if (cmp_code == LT_EXPR) | |
3546 | cmp_code = GT_EXPR; | |
3547 | if (cmp_code == GE_EXPR) | |
3548 | cmp_code = LE_EXPR; | |
3549 | c1 = wi::max_value (to_type); | |
3550 | } | |
3551 | /* To simplify this pattern, we require c3 = (c1 op c2).  Here we | |
3552 | compute (c3 op' c2) and check whether it equals c1, with op' being | |
3553 | the inverse operator of op.  Make sure no overflow happens when | |
3554 | overflow is undefined for the type. */ | |
3555 | if (op == PLUS_EXPR) | |
3556 | real_c1 = wi::sub (c3, c2, sgn, &overflow); | |
3557 | else | |
3558 | real_c1 = wi::add (c3, c2, sgn, &overflow); | |
3559 | ||
3560 | code = cmp_code; | |
3561 | if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type)) | |
3562 | { | |
3563 | /* Check whether c1 equals real_c1.  The boundary condition is | |
3564 | handled by adjusting the comparison operation if necessary. */ | |
3565 | if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn) | |
3566 | && !overflow) | |
3567 | { | |
3568 | /* X <= Y - 1 is equivalent to X < Y. */ | |
3569 | if (cmp_code == LE_EXPR) | |
3570 | code = LT_EXPR; | |
3571 | /* X > Y - 1 is equivalent to X >= Y. */ | |
3572 | if (cmp_code == GT_EXPR) | |
3573 | code = GE_EXPR; | |
3574 | } | |
3575 | if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn) | |
3576 | && !overflow) | |
3577 | { | |
3578 | /* X < Y + 1 is equivalent to X <= Y. */ | |
3579 | if (cmp_code == LT_EXPR) | |
3580 | code = LE_EXPR; | |
3581 | /* X >= Y + 1 is equivalent to X > Y. */ | |
3582 | if (cmp_code == GE_EXPR) | |
3583 | code = GT_EXPR; | |
3584 | } | |
3585 | if (code != cmp_code || !wi::cmp (real_c1, c1, sgn)) | |
3586 | { | |
3587 | if (cmp_code == LT_EXPR || cmp_code == LE_EXPR) | |
3588 | code = MIN_EXPR; | |
3589 | if (cmp_code == GT_EXPR || cmp_code == GE_EXPR) | |
3590 | code = MAX_EXPR; | |
3591 | } | |
3592 | } | |
3593 | } | |
3594 | (if (code == MAX_EXPR) | |
3595 | (op (max @X { wide_int_to_tree (from_type, real_c1); }) | |
3596 | { wide_int_to_tree (from_type, c2); }) | |
3597 | (if (code == MIN_EXPR) | |
3598 | (op (min @X { wide_int_to_tree (from_type, real_c1); }) | |
3599 | { wide_int_to_tree (from_type, c2); }))))))))) | |
3600 | ||
3601 | (for cnd (cond vec_cond) | |
3602 | /* A ? B : (A ? X : C) -> A ? B : C. */ | |
3603 | (simplify | |
3604 | (cnd @0 (cnd @0 @1 @2) @3) | |
3605 | (cnd @0 @1 @3)) | |
3606 | (simplify | |
3607 | (cnd @0 @1 (cnd @0 @2 @3)) | |
3608 | (cnd @0 @1 @3)) | |
3609 | /* A ? B : (!A ? C : X) -> A ? B : C. */ | |
3610 | /* ??? This matches embedded conditions open-coded because genmatch | |
3611 | would generate matching code for conditions in separate stmts only. | |
3612 | The following is still important to merge then and else arm cases | |
3613 | from if-conversion. */ | |
3614 | (simplify | |
3615 | (cnd @0 @1 (cnd @2 @3 @4)) | |
3616 | (if (inverse_conditions_p (@0, @2)) | |
3617 | (cnd @0 @1 @3))) | |
3618 | (simplify | |
3619 | (cnd @0 (cnd @1 @2 @3) @4) | |
3620 | (if (inverse_conditions_p (@0, @1)) | |
3621 | (cnd @0 @3 @4))) | |
3622 | ||
3623 | /* A ? B : B -> B. */ | |
3624 | (simplify | |
3625 | (cnd @0 @1 @1) | |
3626 | @1) | |
3627 | ||
3628 | /* !A ? B : C -> A ? C : B. */ | |
3629 | (simplify | |
3630 | (cnd (logical_inverted_value truth_valued_p@0) @1 @2) | |
3631 | (cnd @0 @2 @1))) | |
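
/* For example, a nested select on the same condition loses its
   unreachable arm (a sketch):

     int f (int c, int b, int x, int d)
     { return c ? b : (c ? x : d); }   // -> c ? b : d
*/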
3632 | ||
3633 | /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons | |
3634 | return all -1 or all 0 results. */ | |
3635 | /* ??? We could instead convert all instances of the vec_cond to negate, | |
3636 | but that isn't necessarily a win on its own. */ | |
3637 | (simplify | |
3638 | (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2))) | |
3639 | (if (VECTOR_TYPE_P (type) | |
3640 | && known_eq (TYPE_VECTOR_SUBPARTS (type), | |
3641 | TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))) | |
3642 | && (TYPE_MODE (TREE_TYPE (type)) | |
3643 | == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1))))) | |
3644 | (minus @3 (view_convert (vec_cond @0 (negate @1) @2))))) | |
3645 | ||
3646 | /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */ | |
3647 | (simplify | |
3648 | (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2))) | |
3649 | (if (VECTOR_TYPE_P (type) | |
3650 | && known_eq (TYPE_VECTOR_SUBPARTS (type), | |
3651 | TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))) | |
3652 | && (TYPE_MODE (TREE_TYPE (type)) | |
3653 | == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1))))) | |
3654 | (plus @3 (view_convert (vec_cond @0 (negate @1) @2))))) | |
3655 | ||
3656 | ||
3657 | /* Simplifications of comparisons. */ | |
3658 | ||
3659 | /* See if we can reduce the magnitude of a constant involved in a | |
3660 | comparison by changing the comparison code. This is a canonicalization | |
3661 | formerly done by maybe_canonicalize_comparison_1. */ | |
3662 | (for cmp (le gt) | |
3663 | acmp (lt ge) | |
3664 | (simplify | |
3665 | (cmp @0 uniform_integer_cst_p@1) | |
3666 | (with { tree cst = uniform_integer_cst_p (@1); } | |
3667 | (if (tree_int_cst_sgn (cst) == -1) | |
3668 | (acmp @0 { build_uniform_cst (TREE_TYPE (@1), | |
3669 | wide_int_to_tree (TREE_TYPE (cst), | |
3670 | wi::to_wide (cst) | |
3671 | + 1)); }))))) | |
3672 | (for cmp (ge lt) | |
3673 | acmp (gt le) | |
3674 | (simplify | |
3675 | (cmp @0 uniform_integer_cst_p@1) | |
3676 | (with { tree cst = uniform_integer_cst_p (@1); } | |
3677 | (if (tree_int_cst_sgn (cst) == 1) | |
3678 | (acmp @0 { build_uniform_cst (TREE_TYPE (@1), | |
3679 | wide_int_to_tree (TREE_TYPE (cst), | |
3680 | wi::to_wide (cst) - 1)); }))))) | |
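
/* E.g. x <= -1 canonicalizes to x < 0 and x >= 1 to x > 0, shrinking
   the magnitude of the constant (a sketch):

     int f (int x) { return x <= -1; }   // -> x < 0
     int g (int x) { return x >= 1; }    // -> x > 0
*/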
3681 | ||
3682 | /* We can simplify a logical negation of a comparison to the | |
3683 | inverted comparison. As we cannot compute an expression | |
3684 | operator using invert_tree_comparison we have to simulate | |
3685 | that with expression code iteration. */ | |
3686 | (for cmp (tcc_comparison) | |
3687 | icmp (inverted_tcc_comparison) | |
3688 | ncmp (inverted_tcc_comparison_with_nans) | |
3689 | /* Ideally we'd like to combine the following two patterns | |
3690 | and handle some more cases by using | |
3691 | (logical_inverted_value (cmp @0 @1)) | |
3692 | here but for that genmatch would need to "inline" that. | |
3693 | For now implement what forward_propagate_comparison did. */ | |
3694 | (simplify | |
3695 | (bit_not (cmp @0 @1)) | |
3696 | (if (VECTOR_TYPE_P (type) | |
3697 | || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)) | |
3698 | /* Comparison inversion may be impossible for trapping math, | |
3699 | invert_tree_comparison will tell us. But we can't use | |
3700 | a computed operator in the replacement tree thus we have | |
3701 | to play the trick below. */ | |
3702 | (with { enum tree_code ic = invert_tree_comparison | |
3703 | (cmp, HONOR_NANS (@0)); } | |
3704 | (if (ic == icmp) | |
3705 | (icmp @0 @1) | |
3706 | (if (ic == ncmp) | |
3707 | (ncmp @0 @1)))))) | |
3708 | (simplify | |
3709 | (bit_xor (cmp @0 @1) integer_truep) | |
3710 | (with { enum tree_code ic = invert_tree_comparison | |
3711 | (cmp, HONOR_NANS (@0)); } | |
3712 | (if (ic == icmp) | |
3713 | (icmp @0 @1) | |
3714 | (if (ic == ncmp) | |
3715 | (ncmp @0 @1)))))) | |
3716 | ||
3717 | /* Transform comparisons of the form X - Y CMP 0 to X CMP Y. | |
3718 | ??? The transformation is valid for the other operators if overflow | |
3719 | is undefined for the type, but performing it here badly interacts | |
3720 | with the transformation in fold_cond_expr_with_comparison which | |
3721 | attempts to synthesize ABS_EXPR. */ | |
3722 | (for cmp (eq ne) | |
3723 | (for sub (minus pointer_diff) | |
3724 | (simplify | |
3725 | (cmp (sub@2 @0 @1) integer_zerop) | |
3726 | (if (single_use (@2)) | |
3727 | (cmp @0 @1))))) | |
3728 | ||
3729 | /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the | |
3730 | signed arithmetic case. That form is created by the compiler | |
3731 | often enough for folding it to be of value. One example is in | |
3732 | computing loop trip counts after Operator Strength Reduction. */ | |
3733 | (for cmp (simple_comparison) | |
3734 | scmp (swapped_simple_comparison) | |
3735 | (simplify | |
3736 | (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2) | |
3737 | /* Handle unfolded multiplication by zero. */ | |
3738 | (if (integer_zerop (@1)) | |
3739 | (cmp @1 @2) | |
3740 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
3741 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
3742 | && single_use (@3)) | |
3743 | /* If @1 is negative we swap the sense of the comparison. */ | |
3744 | (if (tree_int_cst_sgn (@1) < 0) | |
3745 | (scmp @0 @2) | |
3746 | (cmp @0 @2)))))) | |
3747 | ||
3748 | /* Simplify comparison of something with itself. For IEEE | |
3749 | floating-point, we can only do some of these simplifications. */ | |
3750 | (for cmp (eq ge le) | |
3751 | (simplify | |
3752 | (cmp @0 @0) | |
3753 | (if (! FLOAT_TYPE_P (TREE_TYPE (@0)) | |
3754 | || ! HONOR_NANS (@0)) | |
3755 | { constant_boolean_node (true, type); } | |
3756 | (if (cmp != EQ_EXPR) | |
3757 | (eq @0 @0))))) | |
3758 | (for cmp (ne gt lt) | |
3759 | (simplify | |
3760 | (cmp @0 @0) | |
3761 | (if (cmp != NE_EXPR | |
3762 | || ! FLOAT_TYPE_P (TREE_TYPE (@0)) | |
3763 | || ! HONOR_NANS (@0)) | |
3764 | { constant_boolean_node (false, type); }))) | |
3765 | (for cmp (unle unge uneq) | |
3766 | (simplify | |
3767 | (cmp @0 @0) | |
3768 | { constant_boolean_node (true, type); })) | |
3769 | (for cmp (unlt ungt) | |
3770 | (simplify | |
3771 | (cmp @0 @0) | |
3772 | (unordered @0 @0))) | |
3773 | (simplify | |
3774 | (ltgt @0 @0) | |
3775 | (if (!flag_trapping_math) | |
3776 | { constant_boolean_node (false, type); })) | |
3777 | ||
3778 | /* Fold ~X op ~Y as Y op X. */ | |
3779 | (for cmp (simple_comparison) | |
3780 | (simplify | |
3781 | (cmp (bit_not@2 @0) (bit_not@3 @1)) | |
3782 | (if (single_use (@2) && single_use (@3)) | |
3783 | (cmp @1 @0)))) | |
3784 | ||
3785 | /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */ | |
3786 | (for cmp (simple_comparison) | |
3787 | scmp (swapped_simple_comparison) | |
3788 | (simplify | |
3789 | (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1) | |
3790 | (if (single_use (@2) | |
3791 | && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST)) | |
3792 | (scmp @0 (bit_not @1))))) | |
3793 | ||
3794 | (for cmp (simple_comparison) | |
3795 | /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */ | |
3796 | (simplify | |
3797 | (cmp (convert@2 @0) (convert? @1)) | |
3798 | (if (FLOAT_TYPE_P (TREE_TYPE (@0)) | |
3799 | && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2)) | |
3800 | == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))) | |
3801 | && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2)) | |
3802 | == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))) | |
3803 | (with | |
3804 | { | |
3805 | tree type1 = TREE_TYPE (@1); | |
3806 | if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1)) | |
3807 | { | |
3808 | REAL_VALUE_TYPE orig = TREE_REAL_CST (@1); | |
3809 | if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node) | |
3810 | && exact_real_truncate (TYPE_MODE (float_type_node), &orig)) | |
3811 | type1 = float_type_node; | |
3812 | if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node) | |
3813 | && exact_real_truncate (TYPE_MODE (double_type_node), &orig)) | |
3814 | type1 = double_type_node; | |
3815 | } | |
3816 | tree newtype | |
3817 | = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1) | |
3818 | ? TREE_TYPE (@0) : type1); | |
3819 | } | |
3820 | (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype)) | |
3821 | (cmp (convert:newtype @0) (convert:newtype @1)))))) | |
3822 | ||
3823 | (simplify | |
3824 | (cmp @0 REAL_CST@1) | |
3825 | /* IEEE doesn't distinguish +0 and -0 in comparisons. */ | |
3826 | (switch | |
3827 | /* a CMP (-0) -> a CMP 0 */ | |
3828 | (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1))) | |
3829 | (cmp @0 { build_real (TREE_TYPE (@1), dconst0); })) | |
3830 | /* x != NaN is always true, other ops are always false. */ | |
3831 | (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) | |
3832 | && ! HONOR_SNANS (@1)) | |
3833 | { constant_boolean_node (cmp == NE_EXPR, type); }) | |
3834 | /* Fold comparisons against infinity. */ | |
3835 | (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1)) | |
3836 | && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1)))) | |
3837 | (with | |
3838 | { | |
3839 | REAL_VALUE_TYPE max; | |
3840 | enum tree_code code = cmp; | |
3841 | bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)); | |
3842 | if (neg) | |
3843 | code = swap_tree_comparison (code); | |
3844 | } | |
3845 | (switch | |
3846 | /* x > +Inf is always false, if we ignore NaNs or exceptions. */ | |
3847 | (if (code == GT_EXPR | |
3848 | && !(HONOR_NANS (@0) && flag_trapping_math)) | |
3849 | { constant_boolean_node (false, type); }) | |
3850 | (if (code == LE_EXPR) | |
3851 | /* x <= +Inf is always true, if we don't care about NaNs. */ | |
3852 | (if (! HONOR_NANS (@0)) | |
3853 | { constant_boolean_node (true, type); } | |
3854 | /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses | |
3855 | an "invalid" exception. */ | |
3856 | (if (!flag_trapping_math) | |
3857 | (eq @0 @0)))) | |
3858 | /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but | |
3859 | for == this introduces an exception for x a NaN. */ | |
3860 | (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math)) | |
3861 | || code == GE_EXPR) | |
3862 | (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } | |
3863 | (if (neg) | |
3864 | (lt @0 { build_real (TREE_TYPE (@0), max); }) | |
3865 | (gt @0 { build_real (TREE_TYPE (@0), max); })))) | |
3866 | /* x < +Inf is always equal to x <= DBL_MAX. */ | |
3867 | (if (code == LT_EXPR) | |
3868 | (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } | |
3869 | (if (neg) | |
3870 | (ge @0 { build_real (TREE_TYPE (@0), max); }) | |
3871 | (le @0 { build_real (TREE_TYPE (@0), max); })))) | |
3872 | /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces | |
3873 | an exception for x a NaN so use an unordered comparison. */ | |
3874 | (if (code == NE_EXPR) | |
3875 | (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } | |
3876 | (if (! HONOR_NANS (@0)) | |
3877 | (if (neg) | |
3878 | (ge @0 { build_real (TREE_TYPE (@0), max); }) | |
3879 | (le @0 { build_real (TREE_TYPE (@0), max); })) | |
3880 | (if (neg) | |
3881 | (unge @0 { build_real (TREE_TYPE (@0), max); }) | |
3882 | (unle @0 { build_real (TREE_TYPE (@0), max); })))))))))) | |
3883 | ||
3884 | /* If this is a comparison of a real constant with a PLUS_EXPR | |
3885 | or a MINUS_EXPR of a real constant, we can convert it into a | |
3886 | comparison with a revised real constant as long as no overflow | |
3887 | occurs when unsafe_math_optimizations are enabled. */ | |
3888 | (if (flag_unsafe_math_optimizations) | |
3889 | (for op (plus minus) | |
3890 | (simplify | |
3891 | (cmp (op @0 REAL_CST@1) REAL_CST@2) | |
3892 | (with | |
3893 | { | |
3894 | tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR, | |
3895 | TREE_TYPE (@1), @2, @1); | |
3896 | } | |
3897 | (if (tem && !TREE_OVERFLOW (tem)) | |
3898 | (cmp @0 { tem; })))))) | |
3899 | ||
3900 | /* Likewise, we can simplify a comparison of a real constant with | |
3901 | a MINUS_EXPR whose first operand is also a real constant, i.e. | |
3902 | (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on | |
3903 | floating-point types only if -fassociative-math is set. */ | |
3904 | (if (flag_associative_math) | |
3905 | (simplify | |
3906 | (cmp (minus REAL_CST@0 @1) REAL_CST@2) | |
3907 | (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); } | |
3908 | (if (tem && !TREE_OVERFLOW (tem)) | |
3909 | (cmp { tem; } @1))))) | |
3910 | ||
3911 | /* Fold comparisons against built-in math functions. */ | |
3912 | (if (flag_unsafe_math_optimizations && ! flag_errno_math) | |
3913 | (for sq (SQRT) | |
3914 | (simplify | |
3915 | (cmp (sq @0) REAL_CST@1) | |
3916 | (switch | |
3917 | (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) | |
3918 | (switch | |
3919 | /* sqrt(x) ==, < or <= y is always false, if y is negative. */ | |
3920 | (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR) | |
3921 | { constant_boolean_node (false, type); }) | |
3922 | /* sqrt(x) > y is always true, if y is negative and we | |
3923 | don't care about NaNs, i.e. negative values of x. */ | |
3924 | (if (cmp == NE_EXPR || !HONOR_NANS (@0)) | |
3925 | { constant_boolean_node (true, type); }) | |
3926 | /* sqrt(x) > y is the same as x >= 0, if y is negative. */ | |
3927 | (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))) | |
3928 | (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0)) | |
3929 | (switch | |
3930 | /* sqrt(x) < 0 is always false. */ | |
3931 | (if (cmp == LT_EXPR) | |
3932 | { constant_boolean_node (false, type); }) | |
3933 | /* sqrt(x) >= 0 is always true if we don't care about NaNs. */ | |
3934 | (if (cmp == GE_EXPR && !HONOR_NANS (@0)) | |
3935 | { constant_boolean_node (true, type); }) | |
3936 | /* sqrt(x) <= 0 -> x == 0. */ | |
3937 | (if (cmp == LE_EXPR) | |
3938 | (eq @0 @1)) | |
3939 | /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >, | |
3940 | == or !=. In the last case: | |
3941 | ||
3942 | (sqrt(x) != 0) == (NaN != 0) == true == (x != 0) | |
3943 | ||
3944 | if x is negative or NaN. Due to -funsafe-math-optimizations, | |
3945 | the results for other x follow from natural arithmetic. */ | |
3946 | (cmp @0 @1))) | |
3947 | (if ((cmp == LT_EXPR | |
3948 | || cmp == LE_EXPR | |
3949 | || cmp == GT_EXPR | |
3950 | || cmp == GE_EXPR) | |
3951 | && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) | |
3952 | /* Give up for -frounding-math. */ | |
3953 | && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0))) | |
3954 | (with | |
3955 | { | |
3956 | REAL_VALUE_TYPE c2; | |
3957 | enum tree_code ncmp = cmp; | |
3958 | const real_format *fmt | |
3959 | = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))); | |
3960 | real_arithmetic (&c2, MULT_EXPR, | |
3961 | &TREE_REAL_CST (@1), &TREE_REAL_CST (@1)); | |
3962 | real_convert (&c2, fmt, &c2); | |
3963 | /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c), | |
3964 | then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */ | |
3965 | if (!REAL_VALUE_ISINF (c2)) | |
3966 | { | |
3967 | tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0), | |
3968 | build_real (TREE_TYPE (@0), c2)); | |
3969 | if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST) | |
3970 | ncmp = ERROR_MARK; | |
3971 | else if ((cmp == LT_EXPR || cmp == GE_EXPR) | |
3972 | && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1))) | |
3973 | ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR; | |
3974 | else if ((cmp == LE_EXPR || cmp == GT_EXPR) | |
3975 | && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3))) | |
3976 | ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR; | |
3977 | else | |
3978 | { | |
3979 | /* With rounding to even, sqrt of up to 3 different values | |
3980 | gives the same normal result, so in some cases c2 needs | |
3981 | to be adjusted. */ | |
3982 | REAL_VALUE_TYPE c2alt, tow; | |
3983 | if (cmp == LT_EXPR || cmp == GE_EXPR) | |
3984 | tow = dconst0; | |
3985 | else | |
3986 | real_inf (&tow); | |
3987 | real_nextafter (&c2alt, fmt, &c2, &tow); | |
3988 | real_convert (&c2alt, fmt, &c2alt); | |
3989 | if (REAL_VALUE_ISINF (c2alt)) | |
3990 | ncmp = ERROR_MARK; | |
3991 | else | |
3992 | { | |
3993 | c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0), | |
3994 | build_real (TREE_TYPE (@0), c2alt)); | |
3995 | if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST) | |
3996 | ncmp = ERROR_MARK; | |
3997 | else if (real_equal (&TREE_REAL_CST (c3), | |
3998 | &TREE_REAL_CST (@1))) | |
3999 | c2 = c2alt; | |
4000 | } | |
4001 | } | |
4002 | } | |
4003 | } | |
4004 | (if (cmp == GT_EXPR || cmp == GE_EXPR) | |
4005 | (if (REAL_VALUE_ISINF (c2)) | |
4006 | /* sqrt(x) > y is x == +Inf, when y is very large. */ | |
4007 | (if (HONOR_INFINITIES (@0)) | |
4008 | (eq @0 { build_real (TREE_TYPE (@0), c2); }) | |
4009 | { constant_boolean_node (false, type); }) | |
4010 | /* sqrt(x) > c is the same as x > c*c. */ | |
4011 | (if (ncmp != ERROR_MARK) | |
4012 | (if (ncmp == GE_EXPR) | |
4013 | (ge @0 { build_real (TREE_TYPE (@0), c2); }) | |
4014 | (gt @0 { build_real (TREE_TYPE (@0), c2); })))) | |
4015 | /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */ | |
4016 | (if (REAL_VALUE_ISINF (c2)) | |
4017 | (switch | |
4018 | /* sqrt(x) < y is always true, when y is a very large | |
4019 | value and we don't care about NaNs or Infinities. */ | |
4020 | (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0)) | |
4021 | { constant_boolean_node (true, type); }) | |
4022 | /* sqrt(x) < y is x != +Inf when y is very large and we | |
4023 | don't care about NaNs. */ | |
4024 | (if (! HONOR_NANS (@0)) | |
4025 | (ne @0 { build_real (TREE_TYPE (@0), c2); })) | |
4026 | /* sqrt(x) < y is x >= 0 when y is very large and we | |
4027 | don't care about Infinities. */ | |
4028 | (if (! HONOR_INFINITIES (@0)) | |
4029 | (ge @0 { build_real (TREE_TYPE (@0), dconst0); })) | |
4030 | /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */ | |
4031 | (if (GENERIC) | |
4032 | (truth_andif | |
4033 | (ge @0 { build_real (TREE_TYPE (@0), dconst0); }) | |
4034 | (ne @0 { build_real (TREE_TYPE (@0), c2); })))) | |
4035 | /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */ | |
4036 | (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0)) | |
4037 | (if (ncmp == LT_EXPR) | |
4038 | (lt @0 { build_real (TREE_TYPE (@0), c2); }) | |
4039 | (le @0 { build_real (TREE_TYPE (@0), c2); })) | |
4040 | /* sqrt(x) < c is the same as x >= 0 && x < c*c. */ | |
4041 | (if (ncmp != ERROR_MARK && GENERIC) | |
4042 | (if (ncmp == LT_EXPR) | |
4043 | (truth_andif | |
4044 | (ge @0 { build_real (TREE_TYPE (@0), dconst0); }) | |
4045 | (lt @0 { build_real (TREE_TYPE (@0), c2); })) | |
4046 | (truth_andif | |
4047 | (ge @0 { build_real (TREE_TYPE (@0), dconst0); }) | |
4048 | (le @0 { build_real (TREE_TYPE (@0), c2); }))))))))))) | |
4049 | /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */ | |
4050 | (simplify | |
4051 | (cmp (sq @0) (sq @1)) | |
4052 | (if (! HONOR_NANS (@0)) | |
4053 | (cmp @0 @1)))))) | |
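
/* E.g. under -funsafe-math-optimizations with -fno-math-errno,
   ignoring NaNs and sign-dependent rounding, a comparison against sqrt
   becomes a comparison of the radicand (a sketch; needs <math.h>):

     int f (double x) { return sqrt (x) < 2.0; }   // -> x < 4.0

   With NaNs honored, the guarded forms above add an x >= 0 test (or
   give up) instead.  */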
4054 | ||
4055 | /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */ | |
4056 | (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt) | |
4057 | icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne) | |
4058 | (simplify | |
4059 | (cmp (float@0 @1) (float @2)) | |
4060 | (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0)) | |
4061 | && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))) | |
4062 | (with | |
4063 | { | |
4064 | format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)))); | |
4065 | tree type1 = TREE_TYPE (@1); | |
4066 | bool type1_signed_p = TYPE_SIGN (type1) == SIGNED; | |
4067 | tree type2 = TREE_TYPE (@2); | |
4068 | bool type2_signed_p = TYPE_SIGN (type2) == SIGNED; | |
4069 | } | |
4070 | (if (fmt.can_represent_integral_type_p (type1) | |
4071 | && fmt.can_represent_integral_type_p (type2)) | |
4072 | (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR) | |
4073 | { constant_boolean_node (cmp == ORDERED_EXPR, type); } | |
4074 | (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2) | |
4075 | && type1_signed_p >= type2_signed_p) | |
4076 | (icmp @1 (convert @2)) | |
4077 | (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2) | |
4078 | && type1_signed_p <= type2_signed_p) | |
4079 | (icmp (convert:type2 @1) @2) | |
4080 | (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2) | |
4081 | && type1_signed_p == type2_signed_p) | |
4082 | (icmp @1 @2)))))))))) | |
4083 | ||
4084 | /* Optimize various special cases of (FTYPE) N CMP CST. */ | |
4085 | (for cmp (lt le eq ne ge gt) | |
4086 | icmp (le le eq ne ge ge) | |
4087 | (simplify | |
4088 | (cmp (float @0) REAL_CST@1) | |
4089 | (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1)) | |
4090 | && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))) | |
4091 | (with | |
4092 | { | |
4093 | tree itype = TREE_TYPE (@0); | |
4094 | format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1)))); | |
4095 | const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1); | |
4096 | /* Be careful to preserve any potential exceptions due to | |
4097 | NaNs. qNaNs are ok in == or != context. | |
4098 | TODO: relax under -fno-trapping-math or | |
4099 | -fno-signaling-nans. */ | |
4100 | bool exception_p | |
4101 | = real_isnan (cst) && (cst->signalling | |
4102 | || (cmp != EQ_EXPR && cmp != NE_EXPR)); | |
4103 | } | |
4104 | /* TODO: allow non-fitting itype and SNaNs when | |
4105 | -fno-trapping-math. */ | |
4106 | (if (fmt.can_represent_integral_type_p (itype) && ! exception_p) | |
4107 | (with | |
4108 | { | |
4109 | signop isign = TYPE_SIGN (itype); | |
4110 | REAL_VALUE_TYPE imin, imax; | |
4111 | real_from_integer (&imin, fmt, wi::min_value (itype), isign); | |
4112 | real_from_integer (&imax, fmt, wi::max_value (itype), isign); | |
4113 | ||
4114 | REAL_VALUE_TYPE icst; | |
4115 | if (cmp == GT_EXPR || cmp == GE_EXPR) | |
4116 | real_ceil (&icst, fmt, cst); | |
4117 | else if (cmp == LT_EXPR || cmp == LE_EXPR) | |
4118 | real_floor (&icst, fmt, cst); | |
4119 | else | |
4120 | real_trunc (&icst, fmt, cst); | |
4121 | ||
4122 | bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst); | |
4123 | ||
4124 | bool overflow_p = false; | |
4125 | wide_int icst_val | |
4126 | = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype)); | |
4127 | } | |
4128 | (switch | |
4129 | /* Optimize cases when CST is outside of ITYPE's range. */ | |
4130 | (if (real_compare (LT_EXPR, cst, &imin)) | |
4131 | { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR, | |
4132 | type); }) | |
4133 | (if (real_compare (GT_EXPR, cst, &imax)) | |
4134 | { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR, | |
4135 | type); }) | |
4136 | /* Remove cast if CST is an integer representable by ITYPE. */ | |
4137 | (if (cst_int_p) | |
4138 | (cmp @0 { gcc_assert (!overflow_p); | |
4139 | wide_int_to_tree (itype, icst_val); }) | |
4140 | ) | |
4141 | /* When CST is fractional, optimize | |
4142 | (FTYPE) N == CST -> 0 | |
4143 | (FTYPE) N != CST -> 1. */ | |
4144 | (if (cmp == EQ_EXPR || cmp == NE_EXPR) | |
4145 | { constant_boolean_node (cmp == NE_EXPR, type); }) | |
4146 | /* Otherwise replace with a sensible integer constant. */ | |
4147 | (with | |
4148 | { | |
4149 | gcc_checking_assert (!overflow_p); | |
4150 | } | |
4151 | (icmp @0 { wide_int_to_tree (itype, icst_val); }))))))))) | |
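
/* Illustration only, not a pattern: for a 32-bit int i compared in
   double (which represents every such i exactly), the rules above
   give for example

     (double) i == 0.5   ->  0
     (double) i < 3.5    ->  i <= 3
     (double) i >= -2.0  ->  i >= -2

   assuming the constant is not a NaN that must raise an exception.  */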
4152 | ||
4153 | /* Fold A /[ex] B CMP C to A CMP B * C. */ | |
4154 | (for cmp (eq ne) | |
4155 | (simplify | |
4156 | (cmp (exact_div @0 @1) INTEGER_CST@2) | |
4157 | (if (!integer_zerop (@1)) | |
4158 | (if (wi::to_wide (@2) == 0) | |
4159 | (cmp @0 @2) | |
4160 | (if (TREE_CODE (@1) == INTEGER_CST) | |
4161 | (with | |
4162 | { | |
4163 | wi::overflow_type ovf; | |
4164 | wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1), | |
4165 | TYPE_SIGN (TREE_TYPE (@1)), &ovf); | |
4166 | } | |
4167 | (if (ovf) | |
4168 | { constant_boolean_node (cmp == NE_EXPR, type); } | |
4169 | (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); })))))))) | |
4170 | (for cmp (lt le gt ge) | |
4171 | (simplify | |
4172 | (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2) | |
4173 | (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))) | |
4174 | (with | |
4175 | { | |
4176 | wi::overflow_type ovf; | |
4177 | wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1), | |
4178 | TYPE_SIGN (TREE_TYPE (@1)), &ovf); | |
4179 | } | |
4180 | (if (ovf) | |
4181 | { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0, | |
4182 | TYPE_SIGN (TREE_TYPE (@2))) | |
4183 | != (cmp == LT_EXPR || cmp == LE_EXPR), type); } | |
4184 | (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); })))))) | |
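
/* Illustration only, not a pattern: exact divisions arise e.g. from
   pointer subtraction, where the byte difference is divided by the
   element size.  Assuming 4-byte int, the rules above let

     int f (int *p, int *q) { return p - q == 2; }

   compare the byte difference against 8 directly, and fold the
   comparison to a constant when the product B * C overflows.  */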
4185 | ||
4186 | /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0. | |
4187 | ||
4188 | For small C (less than max/B), this is (size_t)A CMP (size_t)B * C. | |
4189 | For large C (more than min/B+2^size), this is also true, with the | |
4190 | multiplication computed modulo 2^size. | |
4191 | For intermediate C, this just tests the sign of A. */ | |
4192 | (for cmp (lt le gt ge) | |
4193 | cmp2 (ge ge lt lt) | |
4194 | (simplify | |
4195 | (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2) | |
4196 | (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)) | |
4197 | && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4198 | && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))) | |
4199 | (with | |
4200 | { | |
4201 | tree utype = TREE_TYPE (@2); | |
4202 | wide_int denom = wi::to_wide (@1); | |
4203 | wide_int right = wi::to_wide (@2); | |
4204 | wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom); | |
4205 | wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom); | |
4206 | bool small = wi::leu_p (right, smax); | |
4207 | bool large = wi::geu_p (right, smin); | |
4208 | } | |
4209 | (if (small || large) | |
4210 | (cmp (convert:utype @0) (mult @2 (convert @1))) | |
4211 | (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })))))) | |
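
/* Illustration only, not a pattern: assuming 4-byte int and 64-bit
   size_t, @1 == 4 gives smax == (2^63 - 1) / 4, so in

     int f (int *p, int *q) { return (size_t) (p - q) < 100; }

   the constant 100 is "small" and the compare becomes
   (size_t) bytediff < 400, while an intermediate constant would
   instead reduce to a sign test of the byte difference.  */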
4212 | ||
4213 | /* Unordered tests if either argument is a NaN. */ | |
4214 | (simplify | |
4215 | (bit_ior (unordered @0 @0) (unordered @1 @1)) | |
4216 | (if (types_match (@0, @1)) | |
4217 | (unordered @0 @1))) | |
4218 | (simplify | |
4219 | (bit_and (ordered @0 @0) (ordered @1 @1)) | |
4220 | (if (types_match (@0, @1)) | |
4221 | (ordered @0 @1))) | |
4222 | (simplify | |
4223 | (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1)) | |
4224 | @2) | |
4225 | (simplify | |
4226 | (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1)) | |
4227 | @2) | |
4228 | ||
4229 | /* Simple range test simplifications. */ | |
4230 | /* A < B || A >= B -> true. */ | |
4231 | (for test1 (lt le le le ne ge) | |
4232 | test2 (ge gt ge ne eq ne) | |
4233 | (simplify | |
4234 | (bit_ior:c (test1 @0 @1) (test2 @0 @1)) | |
4235 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4236 | || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))) | |
4237 | { constant_boolean_node (true, type); }))) | |
4238 | /* A < B && A >= B -> false. */ | |
4239 | (for test1 (lt lt lt le ne eq) | |
4240 | test2 (ge gt eq gt eq gt) | |
4241 | (simplify | |
4242 | (bit_and:c (test1 @0 @1) (test2 @0 @1)) | |
4243 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4244 | || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))) | |
4245 | { constant_boolean_node (false, type); }))) | |
4246 | ||
4247 | /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0 | |
4248 | A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0 | |
4249 | ||
4250 | Note that comparisons | |
4251 | A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0 | |
4252 | A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0 | |
4253 | will be canonicalized to the above, so there's no need to | |
4254 | consider them here. | |
4255 | */ | |
4256 | ||
4257 | (for cmp (le gt) | |
4258 | eqcmp (eq ne) | |
4259 | (simplify | |
4260 | (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3) | |
4261 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))) | |
4262 | (with | |
4263 | { | |
4264 | tree ty = TREE_TYPE (@0); | |
4265 | unsigned prec = TYPE_PRECISION (ty); | |
4266 | wide_int mask = wi::to_wide (@2, prec); | |
4267 | wide_int rhs = wi::to_wide (@3, prec); | |
4268 | signop sgn = TYPE_SIGN (ty); | |
4269 | } | |
4270 | (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn) | |
4271 | && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn)) | |
4272 | (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); }) | |
4273 | { build_zero_cst (ty); })))))) | |
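
/* Illustration only, not a pattern: with N == 4 and K == 2, i.e.
   mask == 15 and rhs == 3, the rule above gives

     (a & 15) <= 3  ->  (a & 12) == 0
     (a & 15) > 3   ->  (a & 12) != 0

   since mask - rhs == 12 selects exactly the bits that must be clear.  */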
4274 | ||
4275 | /* -A CMP -B -> B CMP A. */ | |
4276 | (for cmp (tcc_comparison) | |
4277 | scmp (swapped_tcc_comparison) | |
4278 | (simplify | |
4279 | (cmp (negate @0) (negate @1)) | |
4280 | (if (FLOAT_TYPE_P (TREE_TYPE (@0)) | |
4281 | || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4282 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) | |
4283 | (scmp @0 @1))) | |
4284 | (simplify | |
4285 | (cmp (negate @0) CONSTANT_CLASS_P@1) | |
4286 | (if (FLOAT_TYPE_P (TREE_TYPE (@0)) | |
4287 | || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4288 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) | |
4289 | (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); } | |
4290 | (if (tem && !TREE_OVERFLOW (tem)) | |
4291 | (scmp @0 { tem; })))))) | |
4292 | ||
4293 | /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */ | |
4294 | (for op (eq ne) | |
4295 | (simplify | |
4296 | (op (abs @0) zerop@1) | |
4297 | (op @0 @1))) | |
4298 | ||
4299 | /* From fold_sign_changed_comparison and fold_widened_comparison. | |
4300 | FIXME: the lack of symmetry is disturbing. */ | |
4301 | (for cmp (simple_comparison) | |
4302 | (simplify | |
4303 | (cmp (convert@0 @00) (convert?@1 @10)) | |
4304 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4305 | /* Disable this optimization if we're casting a function pointer | |
4306 | type on targets that require function pointer canonicalization. */ | |
4307 | && !(targetm.have_canonicalize_funcptr_for_compare () | |
4308 | && ((POINTER_TYPE_P (TREE_TYPE (@00)) | |
4309 | && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00)))) | |
4310 | || (POINTER_TYPE_P (TREE_TYPE (@10)) | |
4311 | && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10)))))) | |
4312 | && single_use (@0)) | |
4313 | (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0)) | |
4314 | && (TREE_CODE (@10) == INTEGER_CST | |
4315 | || @1 != @10) | |
4316 | && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4317 | || cmp == NE_EXPR | |
4318 | || cmp == EQ_EXPR) | |
4319 | && !POINTER_TYPE_P (TREE_TYPE (@00))) | |
4320 | /* ??? The special-casing of INTEGER_CST conversion was in the original | |
4321 | code and here to avoid a spurious overflow flag on the resulting | |
4322 | constant which fold_convert produces. */ | |
4323 | (if (TREE_CODE (@1) == INTEGER_CST) | |
4324 | (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0, | |
4325 | TREE_OVERFLOW (@1)); }) | |
4326 | (cmp @00 (convert @1))) | |
4327 | ||
4328 | (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00))) | |
4329 | /* If possible, express the comparison in the shorter mode. */ | |
4330 | (if ((cmp == EQ_EXPR || cmp == NE_EXPR | |
4331 | || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00)) | |
4332 | || (!TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4333 | && TYPE_UNSIGNED (TREE_TYPE (@00)))) | |
4334 | && (types_match (TREE_TYPE (@10), TREE_TYPE (@00)) | |
4335 | || ((TYPE_PRECISION (TREE_TYPE (@00)) | |
4336 | >= TYPE_PRECISION (TREE_TYPE (@10))) | |
4337 | && (TYPE_UNSIGNED (TREE_TYPE (@00)) | |
4338 | == TYPE_UNSIGNED (TREE_TYPE (@10)))) | |
4339 | || (TREE_CODE (@10) == INTEGER_CST | |
4340 | && INTEGRAL_TYPE_P (TREE_TYPE (@00)) | |
4341 | && int_fits_type_p (@10, TREE_TYPE (@00))))) | |
4342 | (cmp @00 (convert @10)) | |
4343 | (if (TREE_CODE (@10) == INTEGER_CST | |
4344 | && INTEGRAL_TYPE_P (TREE_TYPE (@00)) | |
4345 | && !int_fits_type_p (@10, TREE_TYPE (@00))) | |
4346 | (with | |
4347 | { | |
4348 | tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00)); | |
4349 | tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00)); | |
4350 | bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10)); | |
4351 | bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min)); | |
4352 | } | |
4353 | (if (above || below) | |
4354 | (if (cmp == EQ_EXPR || cmp == NE_EXPR) | |
4355 | { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); } | |
4356 | (if (cmp == LT_EXPR || cmp == LE_EXPR) | |
4357 | { constant_boolean_node (above ? true : false, type); } | |
4358 | (if (cmp == GT_EXPR || cmp == GE_EXPR) | |
4359 | { constant_boolean_node (above ? false : true, type); })))))))))))) | |
4360 | ||
4361 | (for cmp (eq ne) | |
4362 | (simplify | |
4363 | /* SSA names are canonicalized to 2nd place. */ | |
4364 | (cmp addr@0 SSA_NAME@1) | |
4365 | (with | |
4366 | { poly_int64 off; tree base; } | |
4367 | /* A local variable can never be pointed to by | |
4368 | the default SSA name of an incoming parameter. */ | |
4369 | (if (SSA_NAME_IS_DEFAULT_DEF (@1) | |
4370 | && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL | |
4371 | && (base = get_base_address (TREE_OPERAND (@0, 0))) | |
4372 | && TREE_CODE (base) == VAR_DECL | |
4373 | && auto_var_in_fn_p (base, current_function_decl)) | |
4374 | (if (cmp == NE_EXPR) | |
4375 | { constant_boolean_node (true, type); } | |
4376 | { constant_boolean_node (false, type); }) | |
4377 | /* If the address is based on @1 decide using the offset. */ | |
4378 | (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off)) | |
4379 | && TREE_CODE (base) == MEM_REF | |
4380 | && TREE_OPERAND (base, 0) == @1) | |
4381 | (with { off += mem_ref_offset (base).force_shwi (); } | |
4382 | (if (known_ne (off, 0)) | |
4383 | { constant_boolean_node (cmp == NE_EXPR, type); } | |
4384 | (if (known_eq (off, 0)) | |
4385 | { constant_boolean_node (cmp == EQ_EXPR, type); })))))))) | |
4386 | ||
4387 | /* Equality compare simplifications from fold_binary */ | |
4388 | (for cmp (eq ne) | |
4389 | ||
4390 | /* If we have (A | C) == D where C & ~D != 0, convert this into 0. | |
4391 | Similarly for NE_EXPR. */ | |
4392 | (simplify | |
4393 | (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2) | |
4394 | (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) | |
4395 | && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0) | |
4396 | { constant_boolean_node (cmp == NE_EXPR, type); })) | |
4397 | ||
4398 | /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */ | |
4399 | (simplify | |
4400 | (cmp (bit_xor @0 @1) integer_zerop) | |
4401 | (cmp @0 @1)) | |
4402 | ||
4403 | /* (X ^ Y) == Y becomes X == 0. | |
4404 | Likewise (X ^ Y) == X becomes Y == 0. */ | |
4405 | (simplify | |
4406 | (cmp:c (bit_xor:c @0 @1) @0) | |
4407 | (cmp @1 { build_zero_cst (TREE_TYPE (@1)); })) | |
4408 | ||
4409 | /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */ | |
4410 | (simplify | |
4411 | (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2) | |
4412 | (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))) | |
4413 | (cmp @0 (bit_xor @1 (convert @2))))) | |
4414 | ||
4415 | (simplify | |
4416 | (cmp (convert? addr@0) integer_zerop) | |
4417 | (if (tree_single_nonzero_warnv_p (@0, NULL)) | |
4418 | { constant_boolean_node (cmp == NE_EXPR, type); })) | |
4419 | ||
4420 | /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */ | |
4421 | (simplify | |
4422 | (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2)) | |
4423 | (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); }))) | |
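
/* Illustration only, not a pattern: source-level instances of the
   equality rules above include

     (x ^ y) == 0        ->  x == y
     (x ^ y) == y        ->  x == 0
     (x ^ 5) == 3        ->  x == 6   (5 ^ 3)
     (x & 8) == (y & 8)  ->  ((x ^ y) & 8) == 0  */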
4424 | ||
4425 | /* (X < 0) != (Y < 0) into (X ^ Y) < 0. | |
4426 | (X >= 0) != (Y >= 0) into (X ^ Y) < 0. | |
4427 | (X < 0) == (Y < 0) into (X ^ Y) >= 0. | |
4428 | (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */ | |
4429 | (for cmp (eq ne) | |
4430 | ncmp (ge lt) | |
4431 | (for sgncmp (ge lt) | |
4432 | (simplify | |
4433 | (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop)) | |
4434 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4435 | && !TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4436 | && types_match (@0, @1)) | |
4437 | (ncmp (bit_xor @0 @1) @2))))) | |
4438 | /* (X < 0) == (Y >= 0) into (X ^ Y) < 0. | |
4439 | (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */ | |
4440 | (for cmp (eq ne) | |
4441 | ncmp (lt ge) | |
4442 | (simplify | |
4443 | (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop)) | |
4444 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4445 | && !TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4446 | && types_match (@0, @1)) | |
4447 | (ncmp (bit_xor @0 @1) @2)))) | |
4448 | ||
4449 | /* If we have (A & C) == C where C is a power of 2, convert this into | |
4450 | (A & C) != 0. Similarly for NE_EXPR. */ | |
4451 | (for cmp (eq ne) | |
4452 | icmp (ne eq) | |
4453 | (simplify | |
4454 | (cmp (bit_and@2 @0 integer_pow2p@1) @1) | |
4455 | (icmp @2 { build_zero_cst (TREE_TYPE (@0)); }))) | |
4456 | ||
4457 | /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2, | |
4458 | convert this into a shift followed by ANDing with D. */ | |
4459 | (simplify | |
4460 | (cond | |
4461 | (ne (bit_and @0 integer_pow2p@1) integer_zerop) | |
4462 | INTEGER_CST@2 integer_zerop) | |
4463 | (if (integer_pow2p (@2)) | |
4464 | (with { | |
4465 | int shift = (wi::exact_log2 (wi::to_wide (@2)) | |
4466 | - wi::exact_log2 (wi::to_wide (@1))); | |
4467 | } | |
4468 | (if (shift > 0) | |
4469 | (bit_and | |
4470 | (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2) | |
4471 | (bit_and | |
4472 | (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) | |
4473 | @2))))) | |
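
/* Illustration only, not a pattern: with C == 4 and D == 16 the rule
   above computes shift == log2 (16) - log2 (4) == 2, so

     int f (int a) { return (a & 4) ? 16 : 0; }

   becomes, in effect,

     int f (int a) { return (a << 2) & 16; }

   while a negative shift would use a right shift instead.  */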
4474 | ||
4475 | /* If we have (A & C) != 0 where C is the sign bit of A, convert | |
4476 | this into A < 0. Similarly for (A & C) == 0 into A >= 0. */ | |
4477 | (for cmp (eq ne) | |
4478 | ncmp (ge lt) | |
4479 | (simplify | |
4480 | (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop) | |
4481 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4482 | && type_has_mode_precision_p (TREE_TYPE (@0)) | |
4483 | && element_precision (@2) >= element_precision (@0) | |
4484 | && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0))) | |
4485 | (with { tree stype = signed_type_for (TREE_TYPE (@0)); } | |
4486 | (ncmp (convert:stype @0) { build_zero_cst (stype); }))))) | |
4487 | ||
4488 | /* If we have A < 0 ? C : 0 where C is a power of 2, convert | |
4489 | this into a right shift or sign extension followed by ANDing with C. */ | |
4490 | (simplify | |
4491 | (cond | |
4492 | (lt @0 integer_zerop) | |
4493 | INTEGER_CST@1 integer_zerop) | |
4494 | (if (integer_pow2p (@1) | |
4495 | && !TYPE_UNSIGNED (TREE_TYPE (@0))) | |
4496 | (with { | |
4497 | int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1; | |
4498 | } | |
4499 | (if (shift >= 0) | |
4500 | (bit_and | |
4501 | (convert (rshift @0 { build_int_cst (integer_type_node, shift); })) | |
4502 | @1) | |
4503 | /* Otherwise ctype must be wider than TREE_TYPE (@0), and a pure | |
4504 | sign extension followed by ANDing with C achieves the effect. */ | |
4505 | (bit_and (convert @0) @1))))) | |
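
/* Illustration only, not a pattern: with 32-bit int and C == 16 the
   rule above computes shift == 32 - log2 (16) - 1 == 27, so

     int f (int a) { return a < 0 ? 16 : 0; }

   becomes, in effect,

     int f (int a) { return (a >> 27) & 16; }

   relying on the arithmetic right shift replicating the sign bit.  */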
4506 | ||
4507 | /* When the addresses are not directly of decls, compare base and offset. | |
4508 | This implements some remaining parts of fold_comparison's address | |
4509 | comparisons, but is still not a complete replacement. Still, it is good | |
4510 | enough to keep fold_stmt from regressing when not dispatching to fold_binary. */ | |
4511 | (for cmp (simple_comparison) | |
4512 | (simplify | |
4513 | (cmp (convert1?@2 addr@0) (convert2? addr@1)) | |
4514 | (with | |
4515 | { | |
4516 | poly_int64 off0, off1; | |
4517 | tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0); | |
4518 | tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1); | |
4519 | if (base0 && TREE_CODE (base0) == MEM_REF) | |
4520 | { | |
4521 | off0 += mem_ref_offset (base0).force_shwi (); | |
4522 | base0 = TREE_OPERAND (base0, 0); | |
4523 | } | |
4524 | if (base1 && TREE_CODE (base1) == MEM_REF) | |
4525 | { | |
4526 | off1 += mem_ref_offset (base1).force_shwi (); | |
4527 | base1 = TREE_OPERAND (base1, 0); | |
4528 | } | |
4529 | } | |
4530 | (if (base0 && base1) | |
4531 | (with | |
4532 | { | |
4533 | int equal = 2; | |
4534 | /* Punt in GENERIC on variables with value expressions; | |
4535 | the value expressions might point to fields/elements | |
4536 | of other vars etc. */ | |
4537 | if (GENERIC | |
4538 | && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0)) | |
4539 | || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1)))) | |
4540 | ; | |
4541 | else if (decl_in_symtab_p (base0) | |
4542 | && decl_in_symtab_p (base1)) | |
4543 | equal = symtab_node::get_create (base0) | |
4544 | ->equal_address_to (symtab_node::get_create (base1)); | |
4545 | else if ((DECL_P (base0) | |
4546 | || TREE_CODE (base0) == SSA_NAME | |
4547 | || TREE_CODE (base0) == STRING_CST) | |
4548 | && (DECL_P (base1) | |
4549 | || TREE_CODE (base1) == SSA_NAME | |
4550 | || TREE_CODE (base1) == STRING_CST)) | |
4551 | equal = (base0 == base1); | |
4552 | if (equal == 0) | |
4553 | { | |
4554 | HOST_WIDE_INT ioff0 = -1, ioff1 = -1; | |
4555 | off0.is_constant (&ioff0); | |
4556 | off1.is_constant (&ioff1); | |
4557 | if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST) | |
4558 | || (TREE_CODE (base0) == STRING_CST && DECL_P (base1)) | |
4559 | || (TREE_CODE (base0) == STRING_CST | |
4560 | && TREE_CODE (base1) == STRING_CST | |
4561 | && ioff0 >= 0 && ioff1 >= 0 | |
4562 | && ioff0 < TREE_STRING_LENGTH (base0) | |
4563 | && ioff1 < TREE_STRING_LENGTH (base1) | |
4564 | /* This is an overly conservative test that the STRING_CSTs | |
4565 | will not end up being string-merged. */ | |
4566 | && strncmp (TREE_STRING_POINTER (base0) + ioff0, | |
4567 | TREE_STRING_POINTER (base1) + ioff1, | |
4568 | MIN (TREE_STRING_LENGTH (base0) - ioff0, | |
4569 | TREE_STRING_LENGTH (base1) - ioff1)) != 0)) | |
4570 | ; | |
4571 | else if (!DECL_P (base0) || !DECL_P (base1)) | |
4572 | equal = 2; | |
4573 | else if (cmp != EQ_EXPR && cmp != NE_EXPR) | |
4574 | equal = 2; | |
4575 | /* If this is a pointer comparison, ignore for now even | |
4576 | valid equalities where one pointer is at offset zero | |
4577 | of one object and the other points one past the end of another one. */ | |
4578 | else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2))) | |
4579 | ; | |
4580 | /* Assume that automatic variables can't be adjacent to global | |
4581 | variables. */ | |
4582 | else if (is_global_var (base0) != is_global_var (base1)) | |
4583 | ; | |
4584 | else | |
4585 | { | |
4586 | tree sz0 = DECL_SIZE_UNIT (base0); | |
4587 | tree sz1 = DECL_SIZE_UNIT (base1); | |
4588 | /* If sizes are unknown, e.g. VLA or not representable, | |
4589 | punt. */ | |
4590 | if (!tree_fits_poly_int64_p (sz0) | |
4591 | || !tree_fits_poly_int64_p (sz1)) | |
4592 | equal = 2; | |
4593 | else | |
4594 | { | |
4595 | poly_int64 size0 = tree_to_poly_int64 (sz0); | |
4596 | poly_int64 size1 = tree_to_poly_int64 (sz1); | |
4597 | /* If one offset points (or could point) to the beginning | |
4598 | of one object and the other points to one past the | |
4599 | last byte of the other object, punt. */ | |
4600 | if (maybe_eq (off0, 0) && maybe_eq (off1, size1)) | |
4601 | equal = 2; | |
4602 | else if (maybe_eq (off1, 0) && maybe_eq (off0, size0)) | |
4603 | equal = 2; | |
4604 | /* If both offsets are the same, there are some cases | |
4605 | we know are OK: either we know the offsets aren't | |
4606 | zero, or we know both sizes are nonzero. */ | |
4607 | if (equal == 2 | |
4608 | && known_eq (off0, off1) | |
4609 | && (known_ne (off0, 0) | |
4610 | || (known_ne (size0, 0) && known_ne (size1, 0)))) | |
4611 | equal = 0; | |
4612 | } | |
4613 | } | |
4614 | } | |
4615 | } | |
4616 | (if (equal == 1 | |
4617 | && (cmp == EQ_EXPR || cmp == NE_EXPR | |
4618 | /* If the offsets are equal we can ignore overflow. */ | |
4619 | || known_eq (off0, off1) | |
4620 | || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) | |
4621 | /* Or if we compare using pointers to decls or strings. */ | |
4622 | || (POINTER_TYPE_P (TREE_TYPE (@2)) | |
4623 | && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST)))) | |
4624 | (switch | |
4625 | (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) | |
4626 | { constant_boolean_node (known_eq (off0, off1), type); }) | |
4627 | (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) | |
4628 | { constant_boolean_node (known_ne (off0, off1), type); }) | |
4629 | (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1))) | |
4630 | { constant_boolean_node (known_lt (off0, off1), type); }) | |
4631 | (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1))) | |
4632 | { constant_boolean_node (known_le (off0, off1), type); }) | |
4633 | (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1))) | |
4634 | { constant_boolean_node (known_ge (off0, off1), type); }) | |
4635 | (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1))) | |
4636 | { constant_boolean_node (known_gt (off0, off1), type); })) | |
4637 | (if (equal == 0) | |
4638 | (switch | |
4639 | (if (cmp == EQ_EXPR) | |
4640 | { constant_boolean_node (false, type); }) | |
4641 | (if (cmp == NE_EXPR) | |
4642 | { constant_boolean_node (true, type); }))))))))) | |
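
/* Illustration only, not a pattern: for distinct local decls and for
   offsets from a common base the rules above can fold, e.g.

     int f (void) { int x, y; return &x == &y; }       ->  0
     int g (void) { int a[4]; return &a[1] != &a[2]; } ->  1

   a sketch; for symtab decls the answer additionally depends on
   aliases and section placement.  */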
4643 | ||
4644 | /* Simplify pointer equality compares using PTA. */ | |
4645 | (for neeq (ne eq) | |
4646 | (simplify | |
4647 | (neeq @0 @1) | |
4648 | (if (POINTER_TYPE_P (TREE_TYPE (@0)) | |
4649 | && ptrs_compare_unequal (@0, @1)) | |
4650 | { constant_boolean_node (neeq != EQ_EXPR, type); }))) | |
4651 | ||
4652 | /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST, | |
4653 | and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST. | |
4654 | Disable the transform if either operand is a pointer to a function; | |
4655 | this broke pr22051-2.c for arm, where function pointer | |
4656 | canonicalization is not wanted. */ | |
4657 | ||
4658 | (for cmp (ne eq) | |
4659 | (simplify | |
4660 | (cmp (convert @0) INTEGER_CST@1) | |
4661 | (if (((POINTER_TYPE_P (TREE_TYPE (@0)) | |
4662 | && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0))) | |
4663 | && INTEGRAL_TYPE_P (TREE_TYPE (@1))) | |
4664 | || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4665 | && POINTER_TYPE_P (TREE_TYPE (@1)) | |
4666 | && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1))))) | |
4667 | && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) | |
4668 | (cmp @0 (convert @1))))) | |
4669 | ||
4670 | /* Non-equality compare simplifications from fold_binary */ | |
4671 | (for cmp (lt gt le ge) | |
4672 | /* Comparisons with the highest or lowest possible integer of | |
4673 | the specified precision will have known values. */ | |
4674 | (simplify | |
4675 | (cmp (convert?@2 @0) uniform_integer_cst_p@1) | |
4676 | (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) | |
4677 | || POINTER_TYPE_P (TREE_TYPE (@1)) | |
4678 | || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1))) | |
4679 | && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))) | |
4680 | (with | |
4681 | { | |
4682 | tree cst = uniform_integer_cst_p (@1); | |
4683 | tree arg1_type = TREE_TYPE (cst); | |
4684 | unsigned int prec = TYPE_PRECISION (arg1_type); | |
4685 | wide_int max = wi::max_value (arg1_type); | |
4686 | wide_int signed_max = wi::max_value (prec, SIGNED); | |
4687 | wide_int min = wi::min_value (arg1_type); | |
4688 | } | |
4689 | (switch | |
4690 | (if (wi::to_wide (cst) == max) | |
4691 | (switch | |
4692 | (if (cmp == GT_EXPR) | |
4693 | { constant_boolean_node (false, type); }) | |
4694 | (if (cmp == GE_EXPR) | |
4695 | (eq @2 @1)) | |
4696 | (if (cmp == LE_EXPR) | |
4697 | { constant_boolean_node (true, type); }) | |
4698 | (if (cmp == LT_EXPR) | |
4699 | (ne @2 @1)))) | |
4700 | (if (wi::to_wide (cst) == min) | |
4701 | (switch | |
4702 | (if (cmp == LT_EXPR) | |
4703 | { constant_boolean_node (false, type); }) | |
4704 | (if (cmp == LE_EXPR) | |
4705 | (eq @2 @1)) | |
4706 | (if (cmp == GE_EXPR) | |
4707 | { constant_boolean_node (true, type); }) | |
4708 | (if (cmp == GT_EXPR) | |
4709 | (ne @2 @1)))) | |
4710 | (if (wi::to_wide (cst) == max - 1) | |
4711 | (switch | |
4712 | (if (cmp == GT_EXPR) | |
4713 | (eq @2 { build_uniform_cst (TREE_TYPE (@1), | |
4714 | wide_int_to_tree (TREE_TYPE (cst), | |
4715 | wi::to_wide (cst) | |
4716 | + 1)); })) | |
4717 | (if (cmp == LE_EXPR) | |
4718 | (ne @2 { build_uniform_cst (TREE_TYPE (@1), | |
4719 | wide_int_to_tree (TREE_TYPE (cst), | |
4720 | wi::to_wide (cst) | |
4721 | + 1)); })))) | |
4722 | (if (wi::to_wide (cst) == min + 1) | |
4723 | (switch | |
4724 | (if (cmp == GE_EXPR) | |
4725 | (ne @2 { build_uniform_cst (TREE_TYPE (@1), | |
4726 | wide_int_to_tree (TREE_TYPE (cst), | |
4727 | wi::to_wide (cst) | |
4728 | - 1)); })) | |
4729 | (if (cmp == LT_EXPR) | |
4730 | (eq @2 { build_uniform_cst (TREE_TYPE (@1), | |
4731 | wide_int_to_tree (TREE_TYPE (cst), | |
4732 | wi::to_wide (cst) | |
4733 | - 1)); })))) | |
4734 | (if (wi::to_wide (cst) == signed_max | |
4735 | && TYPE_UNSIGNED (arg1_type) | |
4736 | /* We will flip the signedness of the comparison operator | |
4737 | associated with the mode of @1, so the sign bit is | |
4738 | specified by this mode. Check that @1 is the signed | |
4739 | max associated with this sign bit. */ | |
4740 | && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type)) | |
4741 | /* signed_type does not work on pointer types. */ | |
4742 | && INTEGRAL_TYPE_P (arg1_type)) | |
4743 | /* The following case also applies to X < signed_max+1 | |
4744 | and X >= signed_max+1 because of previous transformations. */ | |
4745 | (if (cmp == LE_EXPR || cmp == GT_EXPR) | |
4746 | (with { tree st = signed_type_for (TREE_TYPE (@1)); } | |
4747 | (switch | |
4748 | (if (cst == @1 && cmp == LE_EXPR) | |
4749 | (ge (convert:st @0) { build_zero_cst (st); })) | |
4750 | (if (cst == @1 && cmp == GT_EXPR) | |
4751 | (lt (convert:st @0) { build_zero_cst (st); })) | |
4752 | (if (cmp == LE_EXPR) | |
4753 | (ge (view_convert:st @0) { build_zero_cst (st); })) | |
4754 | (if (cmp == GT_EXPR) | |
4755 | (lt (view_convert:st @0) { build_zero_cst (st); }))))))))))) | |
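
/* Illustration only, not a pattern: for 32-bit unsigned x the rules
   above give, among others,

     x > 0xfffffffeu   ->  x == 0xffffffffu   (max - 1)
     x <= 0x7fffffffu  ->  (int) x >= 0       (signed max)
     x > 0x7fffffffu   ->  (int) x < 0

   where the last two flip the comparison to a signed sign-bit test.  */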
4756 | ||
4757 | (for cmp (unordered ordered unlt unle ungt unge uneq ltgt) | |
4758 | /* If the second operand is NaN, the result is constant. */ | |
4759 | (simplify | |
4760 | (cmp @0 REAL_CST@1) | |
4761 | (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) | |
4762 | && (cmp != LTGT_EXPR || ! flag_trapping_math)) | |
4763 | { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR | |
4764 | ? false : true, type); }))) | |
4765 | ||
4766 | /* bool_var != 0 becomes bool_var. */ | |
4767 | (simplify | |
4768 | (ne @0 integer_zerop) | |
4769 | (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE | |
4770 | && types_match (type, TREE_TYPE (@0))) | |
4771 | (non_lvalue @0))) | |
4772 | /* bool_var == 1 becomes bool_var. */ | |
4773 | (simplify | |
4774 | (eq @0 integer_onep) | |
4775 | (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE | |
4776 | && types_match (type, TREE_TYPE (@0))) | |
4777 | (non_lvalue @0))) | |
4778 | /* Do not handle | |
4779 | bool_var == 0 becomes !bool_var or | |
4780 | bool_var != 1 becomes !bool_var | |
4781 | here because that is only good in an assignment context as long | |
4782 | as we require a tcc_comparison in GIMPLE_CONDs, where we'd | |
4783 | replace if (x == 0) with tem = ~x; if (tem != 0), which is | |
4784 | clearly less optimal and which we'll transform again in forwprop. */ | |
4785 | ||
4786 | /* When one argument is a constant, overflow detection can be simplified. | |
4787 | Currently restricted to single use so as not to interfere too much with | |
4788 | ADD_OVERFLOW detection in tree-ssa-math-opts.c. | |
4789 | A + CST CMP A -> A CMP' CST' */ | |
4790 | (for cmp (lt le ge gt) | |
4791 | out (gt gt le le) | |
4792 | (simplify | |
4793 | (cmp:c (plus@2 @0 INTEGER_CST@1) @0) | |
4794 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4795 | && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) | |
4796 | && wi::to_wide (@1) != 0 | |
4797 | && single_use (@2)) | |
4798 | (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); } | |
4799 | (out @0 { wide_int_to_tree (TREE_TYPE (@0), | |
4800 | wi::max_value (prec, UNSIGNED) | |
4801 | - wi::to_wide (@1)); }))))) | |
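
/* Illustration only, not a pattern: for 32-bit unsigned a the rule
   above turns the classic wraparound check

     int f (unsigned a) { return a + 42 < a; }

   into the equivalent of

     int f (unsigned a) { return a > 4294967295u - 42; }

   i.e. a single compare against UINT_MAX - CST.  */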
4802 | ||
4803 | /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A. | |
4804 | However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c | |
4805 | expects the long form, so we restrict the transformation for now. */ | |
4806 | (for cmp (gt le) | |
4807 | (simplify | |
4808 | (cmp:c (minus@2 @0 @1) @0) | |
4809 | (if (single_use (@2) | |
4810 | && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4811 | && TYPE_UNSIGNED (TREE_TYPE (@0))) | |
4812 | (cmp @1 @0)))) | |
4813 | ||
4814 | /* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */ | |
4815 | (for cmp (ge lt) | |
4816 | (simplify | |
4817 | (cmp:c (plus (minus @0 @1) integer_minus_onep) @0) | |
4818 | (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4819 | && TYPE_UNSIGNED (TREE_TYPE (@0))) | |
4820 | (cmp @1 @0)))) | |
4821 | ||
4822 | /* Testing for overflow is unnecessary if we already know the result. */ | |
4823 | /* A - B > A */ | |
4824 | (for cmp (gt le) | |
4825 | out (ne eq) | |
4826 | (simplify | |
4827 | (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0) | |
4828 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4829 | && types_match (TREE_TYPE (@0), TREE_TYPE (@1))) | |
4830 | (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); })))) | |
4831 | /* A + B < A */ | |
4832 | (for cmp (lt ge) | |
4833 | out (ne eq) | |
4834 | (simplify | |
4835 | (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0) | |
4836 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4837 | && types_match (TREE_TYPE (@0), TREE_TYPE (@1))) | |
4838 | (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); })))) | |
4839 | ||
4840 | /* For unsigned operands, -1 / B < A checks whether A * B would overflow. | |
4841 | Simplify it to __builtin_mul_overflow (A, B, <unused>). */ | |
4842 | (for cmp (lt ge) | |
4843 | out (ne eq) | |
4844 | (simplify | |
4845 | (cmp:c (trunc_div:s integer_all_onesp @1) @0) | |
4846 | (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0))) | |
4847 | (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); } | |
4848 | (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); }))))) | |
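
/* Illustration only, not a pattern: the rule above recognizes the
   division-based overflow idiom

     int f (unsigned a, unsigned b)
     { return (unsigned) -1 / b < a; }

   and rewrites it to the equivalent of

     int f (unsigned a, unsigned b)
     { unsigned r; return __builtin_mul_overflow (a, b, &r); }

   keeping only the overflow flag of the multiplication.  */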
4849 | ||
4850 | /* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type | |
4851 | is at least twice as wide as type of A and B, simplify to | |
4852 | __builtin_mul_overflow (A, B, <unused>). */ | |
4853 | (for cmp (eq ne) | |
4854 | (simplify | |
4855 | (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2) | |
4856 | integer_zerop) | |
4857 | (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) | |
4858 | && INTEGRAL_TYPE_P (TREE_TYPE (@3)) | |
4859 | && TYPE_UNSIGNED (TREE_TYPE (@0)) | |
4860 | && (TYPE_PRECISION (TREE_TYPE (@3)) | |
4861 | >= 2 * TYPE_PRECISION (TREE_TYPE (@0))) | |
4862 | && tree_fits_uhwi_p (@2) | |
4863 | && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0)) | |
4864 | && types_match (@0, @1) | |
4865 | && type_has_mode_precision_p (TREE_TYPE (@0)) | |
4866 | && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0))) | |
4867 | != CODE_FOR_nothing)) | |
4868 | (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); } | |
4869 | (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); }))))) | |
4870 | ||
4871 | /* Simplification of math builtins. These rules must all be optimizations | |
4872 | as well as IL simplifications. If there is a possibility that the new | |
4873 | form could be a pessimization, the rule should go in the canonicalization | |
4874 | section that follows this one. | |
4875 | ||
4876 | Rules can generally go in this section if they satisfy one of | |
4877 | the following: | |
4878 | ||
4879 | - the rule describes an identity | |
4880 | ||
4881 | - the rule replaces calls with something as simple as addition or | |
4882 | multiplication | |
4883 | ||
4884 | - the rule contains unary calls only and simplifies the surrounding | |
4885 | arithmetic. (The idea here is to exclude non-unary calls in which | |
4886 | one operand is constant and in which the call is known to be cheap | |
4887 | when the operand has that value.) */ | |
4888 | ||
4889 | (if (flag_unsafe_math_optimizations) | |
4890 | /* Simplify sqrt(x) * sqrt(x) -> x. */ | |
4891 | (simplify | |
4892 | (mult (SQRT_ALL@1 @0) @1) | |
4893 | (if (!HONOR_SNANS (type)) | |
4894 | @0)) | |
4895 | ||
4896 | (for op (plus minus) | |
4897 | /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */ | |
4898 | (simplify | |
4899 | (op (rdiv @0 @1) | |
4900 | (rdiv @2 @1)) | |
4901 | (rdiv (op @0 @2) @1))) | |
4902 | ||
4903 | (for cmp (lt le gt ge) | |
4904 | neg_cmp (gt ge lt le) | |
4905 | /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */ | |
4906 | (simplify | |
4907 | (cmp (mult @0 REAL_CST@1) REAL_CST@2) | |
4908 | (with | |
4909 | { tree tem = const_binop (RDIV_EXPR, type, @2, @1); } | |
4910 | (if (tem | |
4911 | && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem)) | |
4912 | || (real_zerop (tem) && !real_zerop (@1)))) | |
4913 | (switch | |
4914 | (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1))) | |
4915 | (cmp @0 { tem; })) | |
4916 | (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0)) | |
4917 | (neg_cmp @0 { tem; }))))))) | |
4918 | ||
4919 | /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */ | |
4920 | (for root (SQRT CBRT) | |
4921 | (simplify | |
4922 | (mult (root:s @0) (root:s @1)) | |
4923 | (root (mult @0 @1)))) | |
4924 | ||
4925 | /* Simplify expN(x) * expN(y) -> expN(x+y). */ | |
4926 | (for exps (EXP EXP2 EXP10 POW10) | |
4927 | (simplify | |
4928 | (mult (exps:s @0) (exps:s @1)) | |
4929 | (exps (plus @0 @1)))) | |
4930 | ||
4931 | /* Simplify a/root(b/c) into a*root(c/b). */ | |
4932 | (for root (SQRT CBRT) | |
4933 | (simplify | |
4934 | (rdiv @0 (root:s (rdiv:s @1 @2))) | |
4935 | (mult @0 (root (rdiv @2 @1))))) | |
4936 | ||
4937 | /* Simplify x/expN(y) into x*expN(-y). */ | |
4938 | (for exps (EXP EXP2 EXP10 POW10) | |
4939 | (simplify | |
4940 | (rdiv @0 (exps:s @1)) | |
4941 | (mult @0 (exps (negate @1))))) | |
4942 | ||
4943 | (for logs (LOG LOG2 LOG10 LOG10) | |
4944 | exps (EXP EXP2 EXP10 POW10) | |
4945 | /* logN(expN(x)) -> x. */ | |
4946 | (simplify | |
4947 | (logs (exps @0)) | |
4948 | @0) | |
4949 | /* expN(logN(x)) -> x. */ | |
4950 | (simplify | |
4951 | (exps (logs @0)) | |
4952 | @0)) | |
4953 | ||
4954 | /* Optimize logN(func()) for various exponential functions. We | |
4955 | want to determine the value "x" and the power "exponent" in | |
4956 | order to transform logN(x**exponent) into exponent*logN(x). */ | |
4957 | (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10) | |
4958 | exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2) | |
4959 | (simplify | |
4960 | (logs (exps @0)) | |
4961 | (if (SCALAR_FLOAT_TYPE_P (type)) | |
4962 | (with { | |
4963 | tree x; | |
4964 | switch (exps) | |
4965 | { | |
4966 | CASE_CFN_EXP: | |
4967 | /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */ | |
4968 | x = build_real_truncate (type, dconst_e ()); | |
4969 | break; | |
4970 | CASE_CFN_EXP2: | |
4971 | /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */ | |
4972 | x = build_real (type, dconst2); | |
4973 | break; | |
4974 | CASE_CFN_EXP10: | |
4975 | CASE_CFN_POW10: | |
4976 | /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */ | |
4977 | { | |
4978 | REAL_VALUE_TYPE dconst10; | |
4979 | real_from_integer (&dconst10, VOIDmode, 10, SIGNED); | |
4980 | x = build_real (type, dconst10); | |
4981 | } | |
4982 | break; | |
4983 | default: | |
4984 | gcc_unreachable (); | |
4985 | } | |
4986 | } | |
4987 | (mult (logs { x; }) @0))))) | |
4988 | ||
4989 | (for logs (LOG LOG | |
4990 | LOG2 LOG2 | |
4991 | LOG10 LOG10) | |
4992 | exps (SQRT CBRT) | |
4993 | (simplify | |
4994 | (logs (exps @0)) | |
4995 | (if (SCALAR_FLOAT_TYPE_P (type)) | |
4996 | (with { | |
4997 | tree x; | |
4998 | switch (exps) | |
4999 | { | |
5000 | CASE_CFN_SQRT: | |
5001 | /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */ | |
5002 | x = build_real (type, dconsthalf); | |
5003 | break; | |
5004 | CASE_CFN_CBRT: | |
5005 | /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */ | |
5006 | x = build_real_truncate (type, dconst_third ()); | |
5007 | break; | |
5008 | default: | |
5009 | gcc_unreachable (); | |
5010 | } | |
5011 | } | |
5012 | (mult { x; } (logs @0)))))) | |
5013 | ||
5014 | /* logN(pow(x,exponent)) -> exponent*logN(x). */ | |
5015 | (for logs (LOG LOG2 LOG10) | |
5016 | pows (POW) | |
5017 | (simplify | |
5018 | (logs (pows @0 @1)) | |
5019 | (mult @1 (logs @0)))) | |
5020 | ||
5021 | /* pow(C,x) -> exp(log(C)*x) if C > 0, | |
5022 | or if C is a positive power of 2, | |
5023 | pow(C,x) -> exp2(log2(C)*x). */ | |
5024 | #if GIMPLE | |
5025 | (for pows (POW) | |
5026 | exps (EXP) | |
5027 | logs (LOG) | |
5028 | exp2s (EXP2) | |
5029 | log2s (LOG2) | |
5030 | (simplify | |
5031 | (pows REAL_CST@0 @1) | |
5032 | (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) | |
5033 | && real_isfinite (TREE_REAL_CST_PTR (@0)) | |
5034 | /* As libmvec doesn't have a vectorized exp2, defer optimizing | |
5035 | the use_exp2 case until after vectorization. It actually seems | |
5036 | beneficial to postpone this for all constants until later, | |
5037 | because exp(log(C)*x), while faster, has worse precision, | |
5038 | and if x also folds into a constant, the transform is an | |
5039 | unnecessary pessimization. */ | |
5040 | && canonicalize_math_after_vectorization_p ()) | |
5041 | (with { | |
5042 | const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0); | |
5043 | bool use_exp2 = false; | |
5044 | if (targetm.libc_has_function (function_c99_misc) | |
5045 | && value->cl == rvc_normal) | |
5046 | { | |
5047 | REAL_VALUE_TYPE frac_rvt = *value; | |
5048 | SET_REAL_EXP (&frac_rvt, 1); | |
5049 | if (real_equal (&frac_rvt, &dconst1)) | |
5050 | use_exp2 = true; | |
5051 | } | |
5052 | } | |
5053 | (if (!use_exp2) | |
5054 | (if (optimize_pow_to_exp (@0, @1)) | |
5055 | (exps (mult (logs @0) @1))) | |
5056 | (exp2s (mult (log2s @0) @1))))))) | |
5057 | #endif | |
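
/* Illustration only, not a pattern: with -funsafe-math-optimizations,
   after vectorization, the rule above gives for example

     pow (2.5, x)  ->  exp (log (2.5) * x)
     pow (4.0, x)  ->  exp2 (2.0 * x)   (4.0 is a power of 2)

   where log (2.5) and log2 (4.0) fold to constants.  */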
5058 | ||
5059 | /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */ | |
5060 | (for pows (POW) | |
5061 | exps (EXP EXP2 EXP10 POW10) | |
5062 | logs (LOG LOG2 LOG10 LOG10) | |
5063 | (simplify | |
5064 | (mult:c (pows:s REAL_CST@0 @1) (exps:s @2)) | |
5065 | (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) | |
5066 | && real_isfinite (TREE_REAL_CST_PTR (@0))) | |
5067 | (exps (plus (mult (logs @0) @1) @2))))) | |
5068 | ||
5069 | (for sqrts (SQRT) | |
5070 | cbrts (CBRT) | |
5071 | pows (POW) | |
5072 | exps (EXP EXP2 EXP10 POW10) | |
5073 | /* sqrt(expN(x)) -> expN(x*0.5). */ | |
5074 | (simplify | |
5075 | (sqrts (exps @0)) | |
5076 | (exps (mult @0 { build_real (type, dconsthalf); }))) | |
5077 | /* cbrt(expN(x)) -> expN(x/3). */ | |
5078 | (simplify | |
5079 | (cbrts (exps @0)) | |
5080 | (exps (mult @0 { build_real_truncate (type, dconst_third ()); }))) | |
5081 | /* pow(expN(x), y) -> expN(x*y). */ | |
5082 | (simplify | |
5083 | (pows (exps @0) @1) | |
5084 | (exps (mult @0 @1)))) | |
5085 | ||
5086 | /* tan(atan(x)) -> x. */ | |
5087 | (for tans (TAN) | |
5088 | atans (ATAN) | |
5089 | (simplify | |
5090 | (tans (atans @0)) | |
5091 | @0))) | |
5092 | ||
5093 | /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */ | |
5094 | (for sins (SIN) | |
5095 | atans (ATAN) | |
5096 | sqrts (SQRT) | |
5097 | copysigns (COPYSIGN) | |
5098 | (simplify | |
5099 | (sins (atans:s @0)) | |
5100 | (with | |
5101 | { | |
5102 | REAL_VALUE_TYPE r_cst; | |
5103 | build_sinatan_real (&r_cst, type); | |
5104 | tree t_cst = build_real (type, r_cst); | |
5105 | tree t_one = build_one_cst (type); | |
5106 | } | |
5107 | (if (SCALAR_FLOAT_TYPE_P (type)) | |
5108 | (cond (lt (abs @0) { t_cst; }) | |
5109 | (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; }))) | |
5110 | (copysigns { t_one; } @0)))))) | |
5111 | ||
5112 | /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */ | |
5113 | (for coss (COS) | |
5114 | atans (ATAN) | |
5115 | sqrts (SQRT) | |
5116 | copysigns (COPYSIGN) | |
5117 | (simplify | |
5118 | (coss (atans:s @0)) | |
5119 | (with | |
5120 | { | |
5121 | REAL_VALUE_TYPE r_cst; | |
5122 | build_sinatan_real (&r_cst, type); | |
5123 | tree t_cst = build_real (type, r_cst); | |
5124 | tree t_one = build_one_cst (type); | |
5125 | tree t_zero = build_zero_cst (type); | |
5126 | } | |
5127 | (if (SCALAR_FLOAT_TYPE_P (type)) | |
5128 | (cond (lt (abs @0) { t_cst; }) | |
5129 | (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; }))) | |
5130 | (copysigns { t_zero; } @0)))))) | |
5131 | ||
5132 | (if (!flag_errno_math) | |
5133 | /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */ | |
5134 | (for sinhs (SINH) | |
5135 | atanhs (ATANH) | |
5136 | sqrts (SQRT) | |
5137 | (simplify | |
5138 | (sinhs (atanhs:s @0)) | |
5139 | (with { tree t_one = build_one_cst (type); } | |
5140 | (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))) | |
5141 | ||
5142 | /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */ | |
5143 | (for coshs (COSH) | |
5144 | atanhs (ATANH) | |
5145 | sqrts (SQRT) | |
5146 | (simplify | |
5147 | (coshs (atanhs:s @0)) | |
5148 | (with { tree t_one = build_one_cst (type); } | |
5149 | (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))) | |
5150 | ||
5151 | /* cabs(x+0i) or cabs(0+xi) -> abs(x). */ | |
5152 | (simplify | |
5153 | (CABS (complex:C @0 real_zerop@1)) | |
5154 | (abs @0)) | |
5155 | ||
5156 | /* trunc(trunc(x)) -> trunc(x), etc. */ | |
5157 | (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL) | |
5158 | (simplify | |
5159 | (fns (fns @0)) | |
5160 | (fns @0))) | |
5161 | /* f(x) -> x if x is integer valued and f does nothing for such values. */ | |
5162 | (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL) | |
5163 | (simplify | |
5164 | (fns integer_valued_real_p@0) | |
5165 | @0)) | |
5166 | ||
5167 | /* hypot(x,0) and hypot(0,x) -> abs(x). */ | |
5168 | (simplify | |
5169 | (HYPOT:c @0 real_zerop@1) | |
5170 | (abs @0)) | |
5171 | ||
5172 | /* pow(1,x) -> 1. */ | |
5173 | (simplify | |
5174 | (POW real_onep@0 @1) | |
5175 | @0) | |
5176 | ||
5177 | (simplify | |
5178 | /* copysign(x,x) -> x. */ | |
5179 | (COPYSIGN_ALL @0 @0) | |
5180 | @0) | |
5181 | ||
5182 | (simplify | |
5183 | /* copysign(x,y) -> fabs(x) if y is nonnegative. */ | |
5184 | (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1) | |
5185 | (abs @0)) | |
5186 | ||
5187 | (for scale (LDEXP SCALBN SCALBLN) | |
5188 | /* ldexp(0, x) -> 0. */ | |
5189 | (simplify | |
5190 | (scale real_zerop@0 @1) | |
5191 | @0) | |
5192 | /* ldexp(x, 0) -> x. */ | |
5193 | (simplify | |
5194 | (scale @0 integer_zerop@1) | |
5195 | @0) | |
5196 | /* ldexp(x, y) -> x if x is +-Inf or NaN. */ | |
5197 | (simplify | |
5198 | (scale REAL_CST@0 @1) | |
5199 | (if (!real_isfinite (TREE_REAL_CST_PTR (@0))) | |
5200 | @0))) | |
5201 | ||
5202 | /* Canonicalization of sequences of math builtins. These rules represent | |
5203 | IL simplifications but are not necessarily optimizations. | |
5204 | ||
5205 | The sincos pass is responsible for picking "optimal" implementations | |
5206 | of math builtins, which may be more complicated and can sometimes go | |
5207 | the other way, e.g. converting pow into a sequence of sqrts. | |
5208 | We only want to do these canonicalizations before the pass has run. */ | |
5209 | ||
5210 | (if (flag_unsafe_math_optimizations && canonicalize_math_p ()) | |
5211 | /* Simplify tan(x) * cos(x) -> sin(x). */ | |
5212 | (simplify | |
5213 | (mult:c (TAN:s @0) (COS:s @0)) | |
5214 | (SIN @0)) | |
5215 | ||
5216 | /* Simplify x * pow(x,c) -> pow(x,c+1). */ | |
5217 | (simplify | |
5218 | (mult:c @0 (POW:s @0 REAL_CST@1)) | |
5219 | (if (!TREE_OVERFLOW (@1)) | |
5220 | (POW @0 (plus @1 { build_one_cst (type); })))) | |
5221 | ||
5222 | /* Simplify sin(x) / cos(x) -> tan(x). */ | |
5223 | (simplify | |
5224 | (rdiv (SIN:s @0) (COS:s @0)) | |
5225 | (TAN @0)) | |
5226 | ||
5227 | /* Simplify sinh(x) / cosh(x) -> tanh(x). */ | |
5228 | (simplify | |
5229 | (rdiv (SINH:s @0) (COSH:s @0)) | |
5230 | (TANH @0)) | |
5231 | ||
5232 | /* Simplify cos(x) / sin(x) -> 1 / tan(x). */ | |
5233 | (simplify | |
5234 | (rdiv (COS:s @0) (SIN:s @0)) | |
5235 | (rdiv { build_one_cst (type); } (TAN @0))) | |
5236 | ||
5237 | /* Simplify sin(x) / tan(x) -> cos(x). */ | |
5238 | (simplify | |
5239 | (rdiv (SIN:s @0) (TAN:s @0)) | |
5240 | (if (! HONOR_NANS (@0) | |
5241 | && ! HONOR_INFINITIES (@0)) | |
5242 | (COS @0))) | |
5243 | ||
5244 | /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */ | |
5245 | (simplify | |
5246 | (rdiv (TAN:s @0) (SIN:s @0)) | |
5247 | (if (! HONOR_NANS (@0) | |
5248 | && ! HONOR_INFINITIES (@0)) | |
5249 | (rdiv { build_one_cst (type); } (COS @0)))) | |
5250 | ||
5251 | /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */ | |
5252 | (simplify | |
5253 | (mult (POW:s @0 @1) (POW:s @0 @2)) | |
5254 | (POW @0 (plus @1 @2))) | |
5255 | ||
5256 | /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */ | |
5257 | (simplify | |
5258 | (mult (POW:s @0 @1) (POW:s @2 @1)) | |
5259 | (POW (mult @0 @2) @1)) | |
5260 | ||
5261 | /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */ | |
5262 | (simplify | |
5263 | (mult (POWI:s @0 @1) (POWI:s @2 @1)) | |
5264 | (POWI (mult @0 @2) @1)) | |
5265 | ||
5266 | /* Simplify pow(x,c) / x -> pow(x,c-1). */ | |
5267 | (simplify | |
5268 | (rdiv (POW:s @0 REAL_CST@1) @0) | |
5269 | (if (!TREE_OVERFLOW (@1)) | |
5270 | (POW @0 (minus @1 { build_one_cst (type); })))) | |
5271 | ||
5272 | /* Simplify x / pow (y,z) -> x * pow(y,-z). */ | |
5273 | (simplify | |
5274 | (rdiv @0 (POW:s @1 @2)) | |
5275 | (mult @0 (POW @1 (negate @2)))) | |
5276 | ||
5277 | (for sqrts (SQRT) | |
5278 | cbrts (CBRT) | |
5279 | pows (POW) | |
5280 | /* sqrt(sqrt(x)) -> pow(x,1/4). */ | |
5281 | (simplify | |
5282 | (sqrts (sqrts @0)) | |
5283 | (pows @0 { build_real (type, dconst_quarter ()); })) | |
5284 | /* sqrt(cbrt(x)) -> pow(x,1/6). */ | |
5285 | (simplify | |
5286 | (sqrts (cbrts @0)) | |
5287 | (pows @0 { build_real_truncate (type, dconst_sixth ()); })) | |
5288 | /* cbrt(sqrt(x)) -> pow(x,1/6). */ | |
5289 | (simplify | |
5290 | (cbrts (sqrts @0)) | |
5291 | (pows @0 { build_real_truncate (type, dconst_sixth ()); })) | |
5292 | /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */ | |
5293 | (simplify | |
5294 | (cbrts (cbrts tree_expr_nonnegative_p@0)) | |
5295 | (pows @0 { build_real_truncate (type, dconst_ninth ()); })) | |
5296 | /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */ | |
5297 | (simplify | |
5298 | (sqrts (pows @0 @1)) | |
5299 | (pows (abs @0) (mult @1 { build_real (type, dconsthalf); }))) | |
5300 | /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */ | |
5301 | (simplify | |
5302 | (cbrts (pows tree_expr_nonnegative_p@0 @1)) | |
5303 | (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); }))) | |
5304 | /* pow(sqrt(x),y) -> pow(x,y*0.5). */ | |
5305 | (simplify | |
5306 | (pows (sqrts @0) @1) | |
5307 | (pows @0 (mult @1 { build_real (type, dconsthalf); }))) | |
5308 | /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */ | |
5309 | (simplify | |
5310 | (pows (cbrts tree_expr_nonnegative_p@0) @1) | |
5311 | (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); }))) | |
5312 | /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */ | |
5313 | (simplify | |
5314 | (pows (pows tree_expr_nonnegative_p@0 @1) @2) | |
5315 | (pows @0 (mult @1 @2)))) | |
5316 | ||
5317 | /* cabs(x+xi) -> fabs(x)*sqrt(2). */ | |
5318 | (simplify | |
5319 | (CABS (complex @0 @0)) | |
5320 | (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); })) | |
5321 | ||
5322 | /* hypot(x,x) -> fabs(x)*sqrt(2). */ | |
5323 | (simplify | |
5324 | (HYPOT @0 @0) | |
5325 | (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); })) | |
5326 | ||
5327 | /* cexp(x+yi) -> exp(x)*cexpi(y). */ | |
5328 | (for cexps (CEXP) | |
5329 | exps (EXP) | |
5330 | cexpis (CEXPI) | |
5331 | (simplify | |
5332 | (cexps compositional_complex@0) | |
5333 | (if (targetm.libc_has_function (function_c99_math_complex)) | |
5334 | (complex | |
5335 | (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0)))) | |
5336 | (mult @1 (imagpart @2))))))) | |
5337 | ||
5338 | (if (canonicalize_math_p ()) | |
5339 | /* floor(x) -> trunc(x) if x is nonnegative. */ | |
5340 | (for floors (FLOOR_ALL) | |
5341 | truncs (TRUNC_ALL) | |
5342 | (simplify | |
5343 | (floors tree_expr_nonnegative_p@0) | |
5344 | (truncs @0)))) | |
5345 | ||
5346 | (match double_value_p | |
5347 | @0 | |
5348 | (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node))) | |
5349 | (for froms (BUILT_IN_TRUNCL | |
5350 | BUILT_IN_FLOORL | |
5351 | BUILT_IN_CEILL | |
5352 | BUILT_IN_ROUNDL | |
5353 | BUILT_IN_NEARBYINTL | |
5354 | BUILT_IN_RINTL) | |
5355 | tos (BUILT_IN_TRUNC | |
5356 | BUILT_IN_FLOOR | |
5357 | BUILT_IN_CEIL | |
5358 | BUILT_IN_ROUND | |
5359 | BUILT_IN_NEARBYINT | |
5360 | BUILT_IN_RINT) | |
5361 | /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */ | |
5362 | (if (optimize && canonicalize_math_p ()) | |
5363 | (simplify | |
5364 | (froms (convert double_value_p@0)) | |
5365 | (convert (tos @0))))) | |
5366 | ||
5367 | (match float_value_p | |
5368 | @0 | |
5369 | (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node))) | |
5370 | (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC | |
5371 | BUILT_IN_FLOORL BUILT_IN_FLOOR | |
5372 | BUILT_IN_CEILL BUILT_IN_CEIL | |
5373 | BUILT_IN_ROUNDL BUILT_IN_ROUND | |
5374 | BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT | |
5375 | BUILT_IN_RINTL BUILT_IN_RINT) | |
5376 | tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF | |
5377 | BUILT_IN_FLOORF BUILT_IN_FLOORF | |
5378 | BUILT_IN_CEILF BUILT_IN_CEILF | |
5379 | BUILT_IN_ROUNDF BUILT_IN_ROUNDF | |
5380 | BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF | |
5381 | BUILT_IN_RINTF BUILT_IN_RINTF) | |
5382 | /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., | |
5383 | if x is a float. */ | |
5384 | (if (optimize && canonicalize_math_p () | |
5385 | && targetm.libc_has_function (function_c99_misc)) | |
5386 | (simplify | |
5387 | (froms (convert float_value_p@0)) | |
5388 | (convert (tos @0))))) | |
5389 | ||
(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
	    XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
 (for fns (IFLOOR LFLOOR LLFLOOR
	   ICEIL LCEIL LLCEIL
	   IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long).  */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long).  */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))

/* cproj(x) -> x if we're ignoring infinities.  */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i).  */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

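/* For illustration (an editor's sketch, not a pattern): under
   -ffinite-math-only the projection onto the Riemann sphere is the
   identity, so

     _Complex double f (_Complex double z) { return __builtin_cproj (z); }

   simplifies to plain z; the two constant-operand cases above handle
   an infinite real or imaginary part explicitly.  */
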
(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1.  */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x.  */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x.  */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x).  */
    (if (flag_unsafe_math_optimizations
	 && canonicalize_math_p ()
	 && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x).  */
    (if (flag_unsafe_math_optimizations
	 && canonicalize_math_p ()
	 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
	     real_equal (value, &tmp)))
     (cbrts @0))))))

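/* For illustration (an editor's sketch, not a pattern): the constant
   cases above mean that

     double f (double x) { return __builtin_pow (x, 0.5); }

   becomes __builtin_sqrt (x) only under -funsafe-math-optimizations;
   the two calls are not interchangeable in general, e.g. pow (-0., 0.5)
   is +0. while sqrt (-0.) is -0.  The pow(x,-1) -> 1/x case, by
   contrast, is applied without the unsafe-math guard.  */
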
/* powi(1,x) -> 1.  */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1.  */
  (if (wi::to_wide (@1) == 0)
   { build_real (type, dconst1); })
  /* powi(x,1) -> x.  */
  (if (wi::to_wide (@1) == 1)
   @0)
  /* powi(x,-1) -> 1/x.  */
  (if (wi::to_wide (@1) == -1)
   (rdiv { build_real (type, dconst1); } @0))))

/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here.  */

/* Convert (outertype)((innertype0)a+(innertype1)b)
   into ((newtype)a+(newtype)b) where newtype
   is the widest mode from all of these.  */
(for op (plus minus mult rdiv)
 (simplify
  (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
  /* If we have a narrowing conversion of an arithmetic operation where
     both operands are widening conversions from the same type as the outer
     narrowing conversion, then convert the innermost operands to a
     suitable unsigned type (to avoid introducing undefined behavior),
     perform the operation and convert the result to the desired type.  */
  (if (INTEGRAL_TYPE_P (type)
       && op != MULT_EXPR
       && op != RDIV_EXPR
       /* We check for type compatibility between @0 and @1 below,
	  so there's no need to check that @2/@4 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && INTEGRAL_TYPE_P (TREE_TYPE (@3))
       /* The precision of the type of each operand must match the
	  precision of the mode of each operand, similarly for the
	  result.  */
       && type_has_mode_precision_p (TREE_TYPE (@1))
       && type_has_mode_precision_p (TREE_TYPE (@2))
       && type_has_mode_precision_p (type)
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
       && types_match (@1, type)
       && (types_match (@1, @2)
	   /* Or the second operand is const integer or converted const
	      integer from valueize.  */
	   || TREE_CODE (@2) == INTEGER_CST))
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
    (op @1 (convert @2))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
     (convert (op (convert:utype @1)
		  (convert:utype @2)))))
   (if (FLOAT_TYPE_P (type)
	&& DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
	   == DECIMAL_FLOAT_TYPE_P (type))
    (with { tree arg0 = strip_float_extensions (@1);
	    tree arg1 = strip_float_extensions (@2);
	    tree itype = TREE_TYPE (@0);
	    tree ty1 = TREE_TYPE (arg0);
	    tree ty2 = TREE_TYPE (arg1);
	    enum tree_code code = TREE_CODE (itype); }
     (if (FLOAT_TYPE_P (ty1)
	  && FLOAT_TYPE_P (ty2))
      (with { tree newtype = type;
	      if (TYPE_MODE (ty1) == SDmode
		  || TYPE_MODE (ty2) == SDmode
		  || TYPE_MODE (type) == SDmode)
		newtype = dfloat32_type_node;
	      if (TYPE_MODE (ty1) == DDmode
		  || TYPE_MODE (ty2) == DDmode
		  || TYPE_MODE (type) == DDmode)
		newtype = dfloat64_type_node;
	      if (TYPE_MODE (ty1) == TDmode
		  || TYPE_MODE (ty2) == TDmode
		  || TYPE_MODE (type) == TDmode)
		newtype = dfloat128_type_node; }
       (if ((newtype == dfloat32_type_node
	     || newtype == dfloat64_type_node
	     || newtype == dfloat128_type_node)
	    && newtype == type
	    && types_match (newtype, type))
	(op (convert:newtype @1) (convert:newtype @2))
	(with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
		  newtype = ty1;
		if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
		  newtype = ty2; }
	 /* Sometimes this transformation is safe (cannot
	    change results through affecting double rounding
	    cases) and sometimes it is not.  If NEWTYPE is
	    wider than TYPE, e.g. (float)((long double)double
	    + (long double)double) converted to
	    (float)(double + double), the transformation is
	    unsafe regardless of the details of the types
	    involved; double rounding can arise if the result
	    of NEWTYPE arithmetic is a NEWTYPE value half way
	    between two representable TYPE values but the
	    exact value is sufficiently different (in the
	    right direction) for this difference to be
	    visible in ITYPE arithmetic.  If NEWTYPE is the
	    same as TYPE, however, the transformation may be
	    safe depending on the types involved: it is safe
	    if the ITYPE has strictly more than twice as many
	    mantissa bits as TYPE, can represent infinities
	    and NaNs if the TYPE can, and has sufficient
	    exponent range for the product or ratio of two
	    values representable in the TYPE to be within the
	    range of normal values of ITYPE.  */
	 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
	      && (flag_unsafe_math_optimizations
		  || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
		      && real_can_shorten_arithmetic (TYPE_MODE (itype),
						      TYPE_MODE (type))
		      && !excess_precision_type (newtype)))
	      && !types_match (itype, newtype))
	  (convert:type (op (convert:newtype @1)
			    (convert:newtype @2)))))))))))

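/* For illustration (an editor's sketch, not a pattern): with 16-bit
   short and 32-bit int, C's promotion rules turn

     short f (short a, short b) { return a + b; }

   into (short) ((int) a + (int) b); the pattern above narrows that to
   a single short addition, performed in unsigned short so that no
   undefined signed overflow is introduced.  */
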
/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation.  */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
	  so there's no need to check that @1/@3 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
	  precision of the mode of each operand, similarly for the
	  result.  */
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@1))
       && type_has_mode_precision_p (type)
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
	   <= TYPE_PRECISION (TREE_TYPE (@0)))
       && (wi::to_wide (@4)
	   & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
		       true, TYPE_PRECISION (type))) == 0)
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
		       (convert:utype @4))))))))

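/* For illustration (an editor's sketch, not a pattern): in

     int f (unsigned char a, unsigned char b) { return (a - b) & 0xff; }

   the subtraction happens in int only because of promotion; since the
   mask discards every bit beyond unsigned char, the fold performs the
   subtraction in unsigned char and widens the masked result instead.  */
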
/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max.  */
(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
     op    (lt      le      gt      ge      lt      le      gt      ge     )
     ext   (min     min     max     max     max     max     min     min    )
 (simplify
  (logic (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))

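/* For illustration (an editor's sketch, not a pattern): once the &&
   has been turned into a bitwise AND of the two comparisons,

     bool f (int x, int a, int b) { return x < a && x < b; }

   becomes x < MIN (a, b), replacing two comparisons and an AND with a
   min and one comparison.  */
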
(simplify
 /* signbit(x) -> 0 if x is nonnegative.  */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros.  */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
	&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
	&& !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
	&& !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res)
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { TREE_OVERFLOW (res)
		 ? drop_tree_overflow (res) : res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
	fold_overflow_warning (("assuming signed overflow does not occur "
				"when simplifying conditional to constant"),
			       WARN_STRICT_OVERFLOW_CONDITIONAL);
	bool less = cmp == LE_EXPR || cmp == LT_EXPR;
	/* wi::ges_p (@2, 0) should be sufficient for a signed type.  */
	bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
				  TYPE_SIGN (TREE_TYPE (@1)))
			!= (op == MINUS_EXPR);
	constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
	{
	  fold_overflow_warning (("assuming signed overflow does not occur "
				  "when changing X +- C1 cmp C2 to "
				  "X cmp C2 -+ C1"),
				 WARN_STRICT_OVERFLOW_COMPARISON);
	}
	(cmp @0 { res; })))))))))

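/* For illustration (an editor's sketch, not a pattern): x + 10 == 20
   becomes x == 10 for any integer type, and when the adjusted constant
   would overflow, e.g. x + 10 < INT_MIN + 5 with signed x, the whole
   comparison folds to false (absent overflow, x + 10 >= INT_MIN + 10,
   which already exceeds INT_MIN + 5).  */
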
/* Canonicalizations of BIT_FIELD_REFs.  */

(simplify
 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))

(simplify
 (BIT_FIELD_REF (view_convert @0) @1 @2)
 (BIT_FIELD_REF @0 @1 @2))

(simplify
 (BIT_FIELD_REF @0 @1 integer_zerop)
 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
  (view_convert @0)))

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that references the full argument can be
	  stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
	    && integer_zerop (@2))
	   /* Low-parts can be reduced to integral conversions.
	      ???  The following doesn't work for PDP endian.  */
	   || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	       /* Don't even think about BITS_BIG_ENDIAN.  */
	       && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
	       && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
	       && compare_tree_int (@2, (BYTES_BIG_ENDIAN
					 ? (TYPE_PRECISION (TREE_TYPE (@0))
					    - TYPE_PRECISION (type))
					 : 0)) == 0)))
   (convert @0))))

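/* For illustration (an editor's sketch in GIMPLE dump syntax): on a
   little-endian target, extracting the low byte of a 32-bit register,

     _1 = BIT_FIELD_REF <x_2(D), 8, 0>;

   becomes the integral conversion _1 = (unsigned char) x_2(D), and a
   BIT_FIELD_REF selecting either half of a _Complex value becomes a
   REALPART_EXPR or IMAGPART_EXPR.  */
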
/* Simplify vector extracts.  */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
	  || (VECTOR_TYPE_P (type)
	      && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
		  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   (if (n != 0
	&& (idx % width) == 0
	&& (n % width) == 0
	&& known_le ((idx + n) / width,
		     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       poly_uint64 k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
	 {
	   tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
	   if (TREE_CODE (cons_elem) == VECTOR_TYPE)
	     k = TYPE_VECTOR_SUBPARTS (cons_elem);
	 }
       unsigned HOST_WIDE_INT elt, count, const_k;
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
	{ build_constructor (type, NULL); }
	(if (count == 1)
	 (if (elt < CONSTRUCTOR_NELTS (ctor))
	  (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
	  { build_zero_cst (type); })
	 /* We don't want to emit new CTORs unless the old one goes away.
	    ???  Eventually allow this if the CTOR ends up constant or
	    uniform.  */
	 (if (single_use (@0))
	  {
	    vec<constructor_elt, va_gc> *vals;
	    vec_alloc (vals, count);
	    for (unsigned i = 0;
		 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
	      CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
				      CONSTRUCTOR_ELT (ctor, elt + i)->value);
	    build_constructor (type, vals);
	  }))))
      /* The bitfield references a single constructor element.  */
      (if (k.is_constant (&const_k)
	   && idx + n <= (idx / const_k + 1) * const_k)
       (switch
	(if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
	 { build_zero_cst (type); })
	(if (n == const_k)
	 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
	(BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
		       @1 { bitsize_int ((idx % const_k) * width); })))))))))

/* Simplify a bit extraction from a bit insertion for the cases with
   the inserted element fully covering the extraction or the insertion
   not touching the extraction.  */
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
 (with
  {
    unsigned HOST_WIDE_INT isize;
    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
      isize = TYPE_PRECISION (TREE_TYPE (@1));
    else
      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
  }
  (switch
   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
	&& wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
						 wi::to_wide (@rpos)
						 - wi::to_wide (@ipos)); }))
   (if (wi::geu_p (wi::to_wide (@ipos),
		   wi::to_wide (@rpos) + wi::to_wide (@rsize))
	|| wi::geu_p (wi::to_wide (@rpos),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @0 @rsize @rpos)))))

(if (canonicalize_math_after_vectorization_p ())
 (for fmas (FMA)
  (simplify
   (fmas:c (negate @0) @1 @2)
   (IFN_FNMA @0 @1 @2))
  (simplify
   (fmas @0 @1 (negate @2))
   (IFN_FMS @0 @1 @2))
  (simplify
   (fmas:c (negate @0) @1 (negate @2))
   (IFN_FNMS @0 @1 @2))
  (simplify
   (negate (fmas@3 @0 @1 @2))
   (if (single_use (@3))
    (IFN_FNMS @0 @1 @2))))

 (simplify
  (IFN_FMS:c (negate @0) @1 @2)
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FMS @0 @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FMS:c (negate @0) @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (negate (IFN_FMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FNMA @0 @1 @2)))

 (simplify
  (IFN_FNMA:c (negate @0) @1 @2)
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FNMA @0 @1 (negate @2))
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FNMA:c (negate @0) @1 (negate @2))
  (IFN_FMS @0 @1 @2))
 (simplify
  (negate (IFN_FNMA@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMS @0 @1 @2)))

 (simplify
  (IFN_FNMS:c (negate @0) @1 @2)
  (IFN_FMS @0 @1 @2))
 (simplify
  (IFN_FNMS @0 @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (IFN_FNMS:c (negate @0) @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (negate (IFN_FNMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMA @0 @1 @2))))

/* POPCOUNT simplifications.  */
(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
	       BUILT_IN_POPCOUNTIMAX)
 /* popcount(X&1) is nop_expr(X&1).  */
 (simplify
  (popcount @0)
  (if (tree_nonzero_bits (@0) == 1)
   (convert @0)))
 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero.  */
 (simplify
  (plus (popcount:s @0) (popcount:s @1))
  (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
   (popcount (bit_ior @0 @1))))
 /* popcount(X) == 0 is X == 0, and related (in)equalities.  */
 (for cmp (le eq ne gt)
      rep (eq eq ne ne)
  (simplify
   (cmp (popcount @0) integer_zerop)
   (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))

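/* For illustration (an editor's sketch, not a pattern): the nonzero-bit
   masks of the two operands below are disjoint (0xff00 and 0x00ff), so

     int f (unsigned x, unsigned y)
     {
       return __builtin_popcount (x & 0xff00)
	      + __builtin_popcount (y & 0xff);
     }

   combines into a single __builtin_popcount ((x & 0xff00) | (y & 0xff)).  */
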
#if GIMPLE
/* 64- and 32-bit branchless implementations of popcount are detected:

   int popcount64c (uint64_t x)
   {
     x -= (x >> 1) & 0x5555555555555555ULL;
     x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
     x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
     return (x * 0x0101010101010101ULL) >> 56;
   }

   int popcount32c (uint32_t x)
   {
     x -= (x >> 1) & 0x55555555;
     x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
     x = (x + (x >> 4)) & 0x0f0f0f0f;
     return (x * 0x01010101) >> 24;
   }  */
(simplify
 (rshift
  (mult
   (bit_and
    (plus:c
     (rshift @8 INTEGER_CST@5)
     (plus:c@8
      (bit_and @6 INTEGER_CST@7)
      (bit_and
       (rshift
	(minus@6 @0
	 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
	INTEGER_CST@10)
       INTEGER_CST@9)))
    INTEGER_CST@3)
   INTEGER_CST@2)
  INTEGER_CST@1)
 /* Check constants and optab.  */
 (with { unsigned prec = TYPE_PRECISION (type);
	 int shift = (64 - prec) & 63;
	 unsigned HOST_WIDE_INT c1
	   = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
	 unsigned HOST_WIDE_INT c2
	   = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
	 unsigned HOST_WIDE_INT c3
	   = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
	 unsigned HOST_WIDE_INT c4
	   = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
  }
  (if (prec >= 16
       && prec <= 64
       && pow2p_hwi (prec)
       && TYPE_UNSIGNED (type)
       && integer_onep (@4)
       && wi::to_widest (@10) == 2
       && wi::to_widest (@5) == 4
       && wi::to_widest (@1) == prec - 8
       && tree_to_uhwi (@2) == c1
       && tree_to_uhwi (@3) == c2
       && tree_to_uhwi (@9) == c3
       && tree_to_uhwi (@7) == c3
       && tree_to_uhwi (@11) == c4
       && direct_internal_fn_supported_p (IFN_POPCOUNT, type,
					  OPTIMIZE_FOR_BOTH))
   (convert (IFN_POPCOUNT:type @0)))))

/* On many targets __builtin_ffs has to handle a possible zero
   argument.  If we know the argument is always nonzero, __builtin_ctz + 1
   should lead to better code.  */
(simplify
 (FFS tree_expr_nonzero_p@0)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
					 OPTIMIZE_FOR_SPEED))
  (plus (CTZ:type @0) { build_one_cst (type); })))
#endif

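/* For illustration (an editor's sketch, not a pattern): in

     int f (unsigned x) { return x ? __builtin_ffs (x) : 0; }

   value-range information shows the ffs argument is nonzero, so the
   call can become __builtin_ctz (x) + 1 on targets that expand CTZ
   directly.  */
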
/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

   if the target can do it in one go.  This makes the operation conditional
   on c, so could drop potentially-trapping arithmetic, but that's a valid
   simplification if the result of the operation isn't needed.

   Avoid speculatively generating a stand-alone vector comparison
   on targets that might not support them.  Any target implementing
   conditional internal functions must support the same comparisons
   inside and outside a VEC_COND_EXPR.  */

#if GIMPLE
(for uncond_op (UNCOND_BINARY)
     cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for uncond_op (UNCOND_TERNARY)
     cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 @4
		   (view_convert:op_type @1)))))))
#endif

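/* For illustration (an editor's sketch, not a pattern): in a vectorized
   loop whose body is

     r[i] = c[i] ? a[i] + b[i] : d[i];

   the fold above merges the VEC_COND_EXPR into the addition, yielding a
   single conditional operation (IFN_COND_ADD) with d[i] as the "else"
   value, on targets that provide such conditional instructions.  */
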
/* Detect cases in which a VEC_COND_EXPR effectively replaces the
   "else" value of an IFN_COND_*.  */
(for cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@3); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
  (with { tree op_type = TREE_TYPE (@6); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))

/* For pointers @0 and @2 and nonnegative constant offset @1, look for
   expressions like:

   A: (@0 + @1 < @2) | (@2 + @1 < @0)
   B: (@0 + @1 <= @2) | (@2 + @1 <= @0)

   If pointers are known not to wrap, B checks whether @1 bytes starting
   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
   bytes.  A is more efficiently tested as:

   A: (sizetype) (@0 + @1 - @2) > @1 * 2

   The equivalent expression for B is given by replacing @1 with @1 - 1:

   B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2

   @0 and @2 can be swapped in both expressions without changing the result.

   The folds rely on sizetype's being unsigned (which is always true)
   and on its being the same width as the pointer (which we have to check).

   The fold replaces two pointer_plus expressions, two comparisons and
   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
   the best case it's a saving of two operations.  The A fold retains one
   of the original pointer_pluses, so is a win even if both pointer_pluses
   are used elsewhere.  The B fold is a wash if both pointer_pluses are
   used elsewhere, since all we end up doing is replacing a comparison with
   a pointer_plus.  We do still apply the fold under those circumstances
   though, in case applying it to other conditions eventually makes one of the
   pointer_pluses dead.  */
(for ior (truth_orif truth_or bit_ior)
 (for cmp (le lt)
  (simplify
   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
	(cmp:cs (pointer_plus@4 @2 @1) @0))
   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_WRAPS (sizetype)
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
    /* Calculate the rhs constant.  */
    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
	    offset_int rhs = off * 2; }
     /* Always fails for negative values.  */
     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
	 pick a canonical order.  This increases the chances of using the
	 same pointer_plus in multiple checks.  */
      (with { bool swap_p = tree_swap_operands_p (@0, @2);
	      tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
       (if (cmp == LT_EXPR)
	(gt (convert:sizetype
	     (pointer_diff:ssizetype { swap_p ? @4 : @3; }
				     { swap_p ? @0 : @2; }))
	    { rhs_tree; })
	(gt (convert:sizetype
	     (pointer_diff:ssizetype
	      (pointer_plus { swap_p ? @2 : @0; }
			    { wide_int_to_tree (sizetype, off); })
	      { swap_p ? @0 : @2; }))
	    { rhs_tree; })))))))))

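/* For illustration (an editor's sketch, not a pattern): the B form with
   @1 == 16,

     bool f (char *p, char *q) { return p + 16 <= q || q + 16 <= p; }

   becomes, in effect, (sizetype) (p + 15 - q) > 30: one pointer_plus,
   one pointer_diff and one comparison instead of two pointer_pluses,
   two comparisons and an IOR.  */
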
/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
   element of @1.  */
(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
  (with { int i = single_nonzero_element (@1); }
   (if (i >= 0)
    (with { tree elt = vector_cst_elt (@1, i);
	    tree elt_type = TREE_TYPE (elt);
	    unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
	    tree size = bitsize_int (elt_bits);
	    tree pos = bitsize_int (elt_bits * i); }
     (view_convert
      (bit_and:elt_type
       (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
       { elt; })))))))

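/* For illustration (an editor's sketch, not a pattern): if @1 is
   { 0, 0, 4, 0 } for a vector of four 32-bit elements, only lane 2 can
   contribute to the reduction, so IFN_REDUC_PLUS (v & @1) reduces to
   BIT_FIELD_REF <v, 32, 64> & 4.  */
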
(simplify
 (vec_perm @0 @1 VECTOR_CST@2)
 (with
  {
    tree op0 = @0, op1 = @1, op2 = @2;

    /* Build a vector of integers from the tree mask.  */
    vec_perm_builder builder;
    if (!tree_to_vec_perm_builder (&builder, op2))
      return NULL_TREE;

    /* Create a vec_perm_indices for the integer vector.  */
    poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
    bool single_arg = (op0 == op1);
    vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
  }
  (if (sel.series_p (0, 1, 0, 1))
   { op0; }
   (if (sel.series_p (0, 1, nelts, 1))
    { op1; }
    (with
     {
       if (!single_arg)
	 {
	   if (sel.all_from_input_p (0))
	     op1 = op0;
	   else if (sel.all_from_input_p (1))
	     {
	       op0 = op1;
	       sel.rotate_inputs (1);
	     }
	   else if (known_ge (poly_uint64 (sel[0]), nelts))
	     {
	       std::swap (op0, op1);
	       sel.rotate_inputs (1);
	     }
	 }
       gassign *def;
       tree cop0 = op0, cop1 = op1;
       if (TREE_CODE (op0) == SSA_NAME
	   && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
	 cop0 = gimple_assign_rhs1 (def);
       if (TREE_CODE (op1) == SSA_NAME
	   && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
	 cop1 = gimple_assign_rhs1 (def);

       tree t;
     }
     (if ((TREE_CODE (cop0) == VECTOR_CST
	   || TREE_CODE (cop0) == CONSTRUCTOR)
	  && (TREE_CODE (cop1) == VECTOR_CST
	      || TREE_CODE (cop1) == CONSTRUCTOR)
	  && (t = fold_vec_perm (type, cop0, cop1, sel)))
      { t; }
      (with
       {
	 bool changed = (op0 == op1 && !single_arg);
	 tree ins = NULL_TREE;
	 unsigned at = 0;

	 /* See if the permutation is performing a single element
	    insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
	    in that case.  But only if the vector mode is supported,
	    otherwise this is invalid GIMPLE.  */
	 if (TYPE_MODE (type) != BLKmode
	     && (TREE_CODE (cop0) == VECTOR_CST
		 || TREE_CODE (cop0) == CONSTRUCTOR
		 || TREE_CODE (cop1) == VECTOR_CST
		 || TREE_CODE (cop1) == CONSTRUCTOR))
	   {
	     bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
	     if (insert_first_p)
	       {
		 /* After canonicalizing the first elt to come from the
		    first vector we only can insert the first elt from
		    the first vector.  */
		 at = 0;
		 if ((ins = fold_read_from_vector (cop0, sel[0])))
		   op0 = op1;
	       }
	     /* The above can fail for two-element vectors which always
		appear to insert the first element, so try inserting
		into the second lane as well.  For more than two
		elements that's wasted time.  */
	     if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
	       {
		 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
		 for (at = 0; at < encoded_nelts; ++at)
		   if (maybe_ne (sel[at], at))
		     break;
		 if (at < encoded_nelts
		     && (known_eq (at + 1, nelts)
			 || sel.series_p (at + 1, 1, at + 1, 1)))
		   {
		     if (known_lt (poly_uint64 (sel[at]), nelts))
		       ins = fold_read_from_vector (cop0, sel[at]);
		     else
		       ins = fold_read_from_vector (cop1, sel[at] - nelts);
		   }
	       }
	   }

	 /* Generate a canonical form of the selector.  */
	 if (!ins && sel.encoding () != builder)
	   {
	     /* Some targets are deficient and fail to expand a single
		argument permutation while still allowing an equivalent
		2-argument version.  */
	     tree oldop2 = op2;
	     if (sel.ninputs () == 2
		 || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
	       op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
	     else
	       {
		 vec_perm_indices sel2 (builder, 2, nelts);
		 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
		   op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
		 else
		   /* Not directly supported with either encoding,
		      so use the preferred form.  */
		   op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
	       }
	     if (!operand_equal_p (op2, oldop2, 0))
	       changed = true;
	   }
       }
       (if (ins)
	(bit_insert { op0; } { ins; }
		    { bitsize_int (at * vector_element_bits (type)); })
	(if (changed)
	 (vec_perm { op0; } { op1; } { op2; }))))))))))

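/* For illustration (an editor's sketch, not a pattern): with

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si f (v4si a, v4si b)
     { return __builtin_shuffle (a, b, (v4si) { 0, 1, 2, 3 }); }

   the selector is the identity series 0, 1, 2, 3, so the whole
   VEC_PERM_EXPR folds to a; a shuffle of two constant vectors instead
   folds to the permuted constant via fold_vec_perm.  */
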
/* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element.  */

(match vec_same_elem_p
 @0
 (if (uniform_vector_p (@0))))

(match vec_same_elem_p
 (vec_duplicate @0))

(simplify
 (vec_perm vec_same_elem_p@0 @0 @1)
 @0)

/* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
   The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
   constant which when multiplied by a power of 2 contains a unique value
   in the top 5 or 6 bits.  The result is then used to index into a table
   which maps it to the number of trailing zeroes.  */
(match (ctz_table_index @1 @2 @3)
 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
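
/* For illustration (an editor's sketch; the remaining table entries are
   elided and 0x077CB531 is one common choice of magic constant): the
   classic 32-bit de Bruijn idiom this matches looks like

     static const int table[32] = { 0, 1, 28, 2, ... };
     int ctz (unsigned x) { return table[((x & -x) * 0x077CB531u) >> 27]; }

   x & -x isolates the lowest set bit, the multiply places a value unique
   to that bit position in the top five bits, and the shift indexes the
   table; simplify_count_trailing_zeroes verifies the table contents
   before substituting a CTZ operation.  */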