Commit | Line | Data |
---|---|---|
3dec5460 | 1 | /* Reassociation for trees. |
fbd26352 | 2 | Copyright (C) 2005-2019 Free Software Foundation, Inc. |
3dec5460 | 3 | Contributed by Daniel Berlin <dan@dberlin.org> |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
8c4c00c1 | 9 | the Free Software Foundation; either version 3, or (at your option) |
3dec5460 | 10 | any later version. |
11 | ||
12 | GCC is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
3dec5460 | 20 | |
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
9ef16211 | 24 | #include "backend.h" |
7c29e30e | 25 | #include "target.h" |
26 | #include "rtl.h" | |
9ef16211 | 27 | #include "tree.h" |
28 | #include "gimple.h" | |
7c29e30e | 29 | #include "cfghooks.h" |
30 | #include "alloc-pool.h" | |
31 | #include "tree-pass.h" | |
ad7b10a2 | 32 | #include "memmodel.h" |
b0c0c879 | 33 | #include "tm_p.h" |
7c29e30e | 34 | #include "ssa.h" |
35 | #include "optabs-tree.h" | |
36 | #include "gimple-pretty-print.h" | |
37 | #include "diagnostic-core.h" | |
b20a8bb4 | 38 | #include "fold-const.h" |
9ed99284 | 39 | #include "stor-layout.h" |
94ea8568 | 40 | #include "cfganal.h" |
bc61cadb | 41 | #include "gimple-fold.h" |
42 | #include "tree-eh.h" | |
dcf1a1ec | 43 | #include "gimple-iterator.h" |
e795d6e1 | 44 | #include "gimplify-me.h" |
073c1fd5 | 45 | #include "tree-cfg.h" |
073c1fd5 | 46 | #include "tree-ssa-loop.h" |
d53441c8 | 47 | #include "flags.h" |
073c1fd5 | 48 | #include "tree-ssa.h" |
54aceb26 | 49 | #include "langhooks.h" |
a4c3fb95 | 50 | #include "cfgloop.h" |
5b1c765d | 51 | #include "params.h" |
f7715905 | 52 | #include "builtins.h" |
e3668db5 | 53 | #include "gimplify.h" |
390bb332 | 54 | #include "case-cfn-macros.h" |
3dec5460 | 55 | |
54aceb26 | 56 | /* This is a simple global reassociation pass. It is, in part, based |
57 | on the LLVM pass of the same name (they do some things more or less | |
58 | than we do, in different orders, etc.). | |
3dec5460 | 59 | |
54aceb26 | 60 | It consists of five steps: |
3dec5460 | 61 | |
54aceb26 | 62 | 1. Breaking up subtract operations into addition + negate, where |
63 | it would promote the reassociation of adds. | |
3dec5460 | 64 | |
54aceb26 | 65 | 2. Left linearization of the expression trees, so that (A+B)+(C+D) |
66 | becomes (((A+B)+C)+D), which is easier for us to rewrite later. | |
67 | During linearization, we place the operands of the binary | |
04009ada | 68 | expressions into a vector of operand_entry_* |
3dec5460 | 69 | |
54aceb26 | 70 | 3. Optimization of the operand lists, eliminating things like a + |
71 | -a, a & a, etc. | |
3dec5460 | 72 | |
8c5ac7f6 | 73 | 3a. Combine repeated factors with the same occurrence counts |
74 | into a __builtin_powi call that will later be optimized into | |
75 | an optimal number of multiplies. | |
76 | ||
54aceb26 | 77 | 4. Rewrite the expression trees we linearized and optimized so |
78 | they are in proper rank order. | |
3dec5460 | 79 | |
54aceb26 | 80 | 5. Repropagate negates, as nothing else will clean it up ATM. |
3dec5460 | 81 | |
54aceb26 | 82 | A bit of theory on #4, since nobody seems to write anything down |
83 | about why it makes sense to do it the way they do it: | |
3dec5460 | 84 | |
54aceb26 | 85 | We could do this much nicer theoretically, but don't (for reasons |
86 | explained after how to do it theoretically nice :P). | |
3dec5460 | 87 | |
54aceb26 | 88 | In order to promote the most redundancy elimination, you want |
89 | binary expressions whose operands are the same rank (or | |
191ec5a2 | 90 | preferably, the same value) exposed to the redundancy eliminator, |
54aceb26 | 91 | for possible elimination. |
3dec5460 | 92 | |
54aceb26 | 93 | So the way to do this if we really cared, is to build the new op |
94 | tree from the leaves to the roots, merging as you go, and putting the | |
95 | new op on the end of the worklist, until you are left with one | |
96 | thing on the worklist. | |
3dec5460 | 97 | |
54aceb26 | 98 | I.e., if you have to rewrite the following set of operands (listed with
99 | rank in parentheses), with opcode PLUS_EXPR: | |
3dec5460 | 100 | |
54aceb26 | 101 | a (1), b (1), c (1), d (2), e (2) |
3dec5460 | 102 | |
3dec5460 | 103 | |
54aceb26 | 104 | We start with our merge worklist empty, and the ops list with all of |
105 | those on it. | |
3dec5460 | 106 | |
54aceb26 | 107 | You want to first merge all leaves of the same rank, as much as |
108 | possible. | |
109 | ||
110 | So first build a binary op of | |
111 | ||
112 | mergetmp = a + b, and put "mergetmp" on the merge worklist. | |
113 | ||
114 | Because there is no three operand form of PLUS_EXPR, c is not going to | |
115 | be exposed to redundancy elimination as a rank 1 operand. | |
116 | ||
117 | So you might as well throw it on the merge worklist (you could also | |
118 | consider it to now be a rank two operand, and merge it with d and e, | |
119 | but in this case, you then have evicted e from a binary op. So at | |
120 | least in this situation, you can't win.) | |
121 | ||
122 | Then build a binary op of d + e | |
123 | mergetmp2 = d + e | |
124 | ||
125 | and put mergetmp2 on the merge worklist. | |
48e1416a | 126 | |
54aceb26 | 127 | so merge worklist = {mergetmp, c, mergetmp2} |
48e1416a | 128 | |
54aceb26 | 129 | Continue building binary ops of these operations until you have only |
130 | one operation left on the worklist. | |
48e1416a | 131 | |
54aceb26 | 132 | So we have |
48e1416a | 133 | |
54aceb26 | 134 | build binary op |
135 | mergetmp3 = mergetmp + c | |
48e1416a | 136 | |
54aceb26 | 137 | worklist = {mergetmp2, mergetmp3} |
48e1416a | 138 | |
54aceb26 | 139 | mergetmp4 = mergetmp2 + mergetmp3 |
48e1416a | 140 | |
54aceb26 | 141 | worklist = {mergetmp4} |
48e1416a | 142 | |
54aceb26 | 143 | because we have one operation left, we can now just set the original |
144 | statement equal to the result of that operation. | |
48e1416a | 145 | |
54aceb26 | 146 | This will at least expose a + b and d + e to redundancy elimination |
147 | as binary operations. | |
48e1416a | 148 | |
54aceb26 | 149 | For extra points, you can reuse the old statements to build the |
150 | mergetmps, since you shouldn't run out. | |
151 | ||
152 | So why don't we do this? | |
48e1416a | 153 | |
54aceb26 | 154 | Because it's expensive, and rarely will help. Most trees we are |
155 | reassociating have 3 or fewer ops. If they have 2 ops, they already | |
156 | will be written into a nice single binary op. If you have 3 ops, a | |
157 | single simple check suffices to tell you whether the first two are of the | |
158 | same rank. If so, you know to order it | |
159 | ||
160 | mergetmp = op1 + op2 | |
161 | newstmt = mergetmp + op3 | |
48e1416a | 162 | |
54aceb26 | 163 | instead of |
164 | mergetmp = op2 + op3 | |
165 | newstmt = mergetmp + op1 | |
48e1416a | 166 | |
54aceb26 | 167 | If all three are of the same rank, you can't expose them all in a |
168 | single binary operator anyway, so the above is *still* the best you | |
169 | can do. | |
48e1416a | 170 | |
54aceb26 | 171 | Thus, this is what we do. When we have three ops left, we check to see |
172 | what order to put them in, and call it a day. As a nod to vector sum | |
2d97a789 | 173 | reduction, we check if any of the ops are really a phi node that is a |
54aceb26 | 174 | destructive update for the associating op, and keep the destructive |
175 | update together for vector sum reduction recognition. */ | |
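
As a hedged aside, the merge-worklist order described above can be modeled in a few dozen lines of plain C. Everything here (`struct op`, the greedy equal-rank search, assigning the merged temporary the higher rank plus one) is invented for illustration and is not pass code; this simplified greedy version will not reproduce the exact mergetmp order of the worked example, but it shows why pairing equal-rank leaves first leaves a + b and d + e visible as binary operations.

```c
#include <stdio.h>

struct op { const char *name; int rank; };

int
main (void)
{
  char tmpnames[8][16];
  struct op work[8] = {
    {"a", 1}, {"b", 1}, {"c", 1}, {"d", 2}, {"e", 2}
  };
  int n = 5, tmp = 0;

  while (n > 1)
    {
      /* Prefer a pair of equal rank; otherwise take the first two
         entries left on the worklist.  */
      int i = 0, j = 1, k, l;
      for (k = 0; k < n; k++)
        for (l = k + 1; l < n; l++)
          if (work[k].rank == work[l].rank)
            {
              i = k;
              j = l;
              goto found;
            }
    found:
      tmp++;
      snprintf (tmpnames[tmp], sizeof tmpnames[tmp], "mergetmp%d", tmp);
      printf ("%s = %s + %s\n", tmpnames[tmp], work[i].name, work[j].name);
      /* The merged temporary replaces the pair, one rank higher.  */
      work[i].name = tmpnames[tmp];
      work[i].rank = (work[i].rank > work[j].rank
                      ? work[i].rank : work[j].rank) + 1;
      work[j] = work[--n];
    }
  printf ("result = %s\n", work[0].name);
  return 0;
}
```
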
3dec5460 | 176 | |
83505659 | 177 | /* Enable insertion of __builtin_powi calls during execute_reassoc. See |
178 | point 3a in the pass header comment. */ | |
179 | static bool reassoc_insert_powi_p; | |
3dec5460 | 180 | |
54aceb26 | 181 | /* Statistics */ |
182 | static struct | |
183 | { | |
184 | int linearized; | |
185 | int constants_eliminated; | |
186 | int ops_eliminated; | |
187 | int rewritten; | |
8c5ac7f6 | 188 | int pows_encountered; |
189 | int pows_created; | |
54aceb26 | 190 | } reassociate_stats; |
3dec5460 | 191 | |
54aceb26 | 192 | /* Operator, rank pair. */ |
04009ada | 193 | struct operand_entry |
3dec5460 | 194 | {
54aceb26 | 195 | unsigned int rank; |
7faa1ffa | 196 | unsigned int id; |
54aceb26 | 197 | tree op; |
8c5ac7f6 | 198 | unsigned int count; |
527d8479 | 199 | gimple *stmt_to_insert; |
04009ada | 200 | }; |
54aceb26 | 201 | |
1dc6c44d | 202 | static object_allocator<operand_entry> operand_entry_pool |
203 | ("operand entry pool"); | |
54aceb26 | 204 | |
17b5ea6f | 205 | /* This is used to assign a unique ID to each struct operand_entry |
206 | so that qsort results are identical on different hosts. */ | |
7faa1ffa | 207 | static unsigned int next_operand_entry_id; |
3dec5460 | 208 | |
209 | /* Starting rank number for a given basic block, so that we can rank | |
210 | operations using unmovable instructions in that BB based on the bb | |
211 | depth. */ | |
b30a8715 | 212 | static long *bb_rank; |
3dec5460 | 213 | |
54aceb26 | 214 | /* Operand->rank hashtable. */ |
06ecf488 | 215 | static hash_map<tree, long> *operand_rank; |
3dec5460 | 216 | |
e3668db5 | 217 | /* Vector of SSA_NAMEs for which, after reassociate_bb is done with
218 | all basic blocks, the CFG should be adjusted: each basic block is | |
219 | split right after that SSA_NAME's definition statement and before | |
220 | its only use, which must be a bit ior. */ | |
221 | static vec<tree> reassoc_branch_fixups; | |
222 | ||
b248bf30 | 223 | /* Forward decls. */ |
224 | static long get_rank (tree); | |
42acab1c | 225 | static bool reassoc_stmt_dominates_stmt_p (gimple *, gimple *); |
b248bf30 | 226 | |
54675e05 | 227 | /* Wrapper around gsi_remove, which adjusts gimple_uid of debug stmts |
228 | possibly added by gsi_remove. */ | |
229 | ||
230 | bool | |
231 | reassoc_remove_stmt (gimple_stmt_iterator *gsi) | |
232 | { | |
42acab1c | 233 | gimple *stmt = gsi_stmt (*gsi); |
54675e05 | 234 | |
c64f38bf | 235 | if (!MAY_HAVE_DEBUG_BIND_STMTS || gimple_code (stmt) == GIMPLE_PHI) |
54675e05 | 236 | return gsi_remove (gsi, true); |
237 | ||
238 | gimple_stmt_iterator prev = *gsi; | |
239 | gsi_prev (&prev); | |
240 | unsigned uid = gimple_uid (stmt); | |
241 | basic_block bb = gimple_bb (stmt); | |
242 | bool ret = gsi_remove (gsi, true); | |
243 | if (!gsi_end_p (prev)) | |
244 | gsi_next (&prev); | |
245 | else | |
246 | prev = gsi_start_bb (bb); | |
42acab1c | 247 | gimple *end_stmt = gsi_stmt (*gsi); |
54675e05 | 248 | while ((stmt = gsi_stmt (prev)) != end_stmt) |
249 | { | |
250 | gcc_assert (stmt && is_gimple_debug (stmt) && gimple_uid (stmt) == 0); | |
251 | gimple_set_uid (stmt, uid); | |
252 | gsi_next (&prev); | |
253 | } | |
254 | return ret; | |
255 | } | |
b248bf30 | 256 | |
257 | /* Bias amount for loop-carried phis. We want this to be larger than | |
258 | the depth of any reassociation tree we can see, but not larger than | |
259 | the rank difference between two blocks. */ | |
260 | #define PHI_LOOP_BIAS (1 << 15) | |
261 | ||
262 | /* Rank assigned to a phi statement. If STMT is a loop-carried phi of | |
263 | an innermost loop, and the phi has only a single use which is inside | |
264 | the loop, then the rank is the block rank of the loop latch plus an | |
265 | extra bias for the loop-carried dependence. This causes expressions | |
266 | calculated into an accumulator variable to be independent for each | |
267 | iteration of the loop. If STMT is some other phi, the rank is the | |
268 | block rank of its containing block. */ | |
269 | static long | |
42acab1c | 270 | phi_rank (gimple *stmt) |
b248bf30 | 271 | { |
272 | basic_block bb = gimple_bb (stmt); | |
273 | struct loop *father = bb->loop_father; | |
274 | tree res; | |
275 | unsigned i; | |
276 | use_operand_p use; | |
42acab1c | 277 | gimple *use_stmt; |
b248bf30 | 278 | |
279 | /* We only care about real loops (those with a latch). */ | |
280 | if (!father->latch) | |
281 | return bb_rank[bb->index]; | |
282 | ||
283 | /* Interesting phis must be in headers of innermost loops. */ | |
284 | if (bb != father->header | |
285 | || father->inner) | |
286 | return bb_rank[bb->index]; | |
287 | ||
288 | /* Ignore virtual SSA_NAMEs. */ | |
289 | res = gimple_phi_result (stmt); | |
7c782c9b | 290 | if (virtual_operand_p (res)) |
b248bf30 | 291 | return bb_rank[bb->index]; |
292 | ||
293 | /* The phi definition must have a single use, and that use must be | |
294 | within the loop. Otherwise this isn't an accumulator pattern. */ | |
295 | if (!single_imm_use (res, &use, &use_stmt) | |
296 | || gimple_bb (use_stmt)->loop_father != father) | |
297 | return bb_rank[bb->index]; | |
298 | ||
299 | /* Look for phi arguments from within the loop. If found, bias this phi. */ | |
300 | for (i = 0; i < gimple_phi_num_args (stmt); i++) | |
301 | { | |
302 | tree arg = gimple_phi_arg_def (stmt, i); | |
303 | if (TREE_CODE (arg) == SSA_NAME | |
304 | && !SSA_NAME_IS_DEFAULT_DEF (arg)) | |
305 | { | |
42acab1c | 306 | gimple *def_stmt = SSA_NAME_DEF_STMT (arg); |
b248bf30 | 307 | if (gimple_bb (def_stmt)->loop_father == father) |
308 | return bb_rank[father->latch->index] + PHI_LOOP_BIAS; | |
309 | } | |
310 | } | |
311 | ||
312 | /* Must be an uninteresting phi. */ | |
313 | return bb_rank[bb->index]; | |
314 | } | |
315 | ||
316 | /* If EXP is an SSA_NAME defined by a PHI statement that represents a | |
317 | loop-carried dependence of an innermost loop, return TRUE; else | |
318 | return FALSE. */ | |
319 | static bool | |
320 | loop_carried_phi (tree exp) | |
321 | { | |
42acab1c | 322 | gimple *phi_stmt; |
b248bf30 | 323 | long block_rank; |
324 | ||
325 | if (TREE_CODE (exp) != SSA_NAME | |
326 | || SSA_NAME_IS_DEFAULT_DEF (exp)) | |
327 | return false; | |
328 | ||
329 | phi_stmt = SSA_NAME_DEF_STMT (exp); | |
330 | ||
331 | if (gimple_code (SSA_NAME_DEF_STMT (exp)) != GIMPLE_PHI) | |
332 | return false; | |
333 | ||
334 | /* Non-loop-carried phis have block rank. Loop-carried phis have | |
335 | an additional bias added in. If this phi doesn't have block rank, | |
336 | it's biased and should not be propagated. */ | |
337 | block_rank = bb_rank[gimple_bb (phi_stmt)->index]; | |
338 | ||
339 | if (phi_rank (phi_stmt) != block_rank) | |
340 | return true; | |
341 | ||
342 | return false; | |
343 | } | |
344 | ||
345 | /* Return the maximum of RANK and the rank that should be propagated | |
346 | from expression OP. For most operands, this is just the rank of OP. | |
347 | For loop-carried phis, the value is zero to avoid undoing the bias | |
348 | in favor of the phi. */ | |
349 | static long | |
350 | propagate_rank (long rank, tree op) | |
351 | { | |
352 | long op_rank; | |
353 | ||
354 | if (loop_carried_phi (op)) | |
355 | return rank; | |
356 | ||
357 | op_rank = get_rank (op); | |
358 | ||
359 | return MAX (rank, op_rank); | |
360 | } | |
3dec5460 | 361 | |
54aceb26 | 362 | /* Look up the operand rank structure for expression E. */ |
3dec5460 | 363 | |
b30a8715 | 364 | static inline long |
54aceb26 | 365 | find_operand_rank (tree e) |
3dec5460 | 366 | {
06ecf488 | 367 | long *slot = operand_rank->get (e); |
368 | return slot ? *slot : -1; | |
3dec5460 | 369 | } |
370 | ||
54aceb26 | 371 | /* Insert {E,RANK} into the operand rank hashtable. */ |
3dec5460 | 372 | |
b30a8715 | 373 | static inline void |
374 | insert_operand_rank (tree e, long rank) | |
3dec5460 | 375 | {
b30a8715 | 376 | gcc_assert (rank > 0); |
06ecf488 | 377 | gcc_assert (!operand_rank->put (e, rank)); |
3dec5460 | 378 | } |
379 | ||
3dec5460 | 380 | /* Given an expression E, return the rank of the expression. */ |
381 | ||
b30a8715 | 382 | static long |
3dec5460 | 383 | get_rank (tree e) |
384 | { | |
3dec5460 | 385 | /* SSA_NAMEs have the rank of the expression they are the result
386 | of. | |
387 | For globals and uninitialized values, the rank is 0. | |
388 | For function arguments, use the pre-setup rank. | |
389 | For PHI nodes, stores, asm statements, etc, we use the rank of | |
390 | the BB. | |
391 | For simple operations, the rank is the maximum rank of any of | |
392 | its operands, or the bb_rank, whichever is less. | |
393 | I make no claims that this is optimal; however, it gives good | |
394 | results. */ | |
395 | ||
b248bf30 | 396 | /* We make an exception to the normal ranking system to break |
397 | dependences of accumulator variables in loops. Suppose we | |
398 | have a simple one-block loop containing: | |
399 | ||
400 | x_1 = phi(x_0, x_2) | |
401 | b = a + x_1 | |
402 | c = b + d | |
403 | x_2 = c + e | |
404 | ||
405 | As shown, each iteration of the calculation into x is fully | |
406 | dependent upon the iteration before it. We would prefer to | |
407 | see this in the form: | |
408 | ||
409 | x_1 = phi(x_0, x_2) | |
410 | b = a + d | |
411 | c = b + e | |
412 | x_2 = c + x_1 | |
413 | ||
414 | If the loop is unrolled, the calculations of b and c from | |
415 | different iterations can be interleaved. | |
416 | ||
417 | To obtain this result during reassociation, we bias the rank | |
418 | of the phi definition x_1 upward, when it is recognized as an | |
419 | accumulator pattern. The artificial rank causes it to be | |
420 | added last, providing the desired independence. */ | |
421 | ||
3dec5460 | 422 | if (TREE_CODE (e) == SSA_NAME) |
423 | { | |
e86e65ef | 424 | ssa_op_iter iter; |
42acab1c | 425 | gimple *stmt; |
aa52f48f | 426 | long rank; |
b248bf30 | 427 | tree op; |
54aceb26 | 428 | |
61e371b0 | 429 | if (SSA_NAME_IS_DEFAULT_DEF (e)) |
b30a8715 | 430 | return find_operand_rank (e); |
54aceb26 | 431 | |
3dec5460 | 432 | stmt = SSA_NAME_DEF_STMT (e); |
b248bf30 | 433 | if (gimple_code (stmt) == GIMPLE_PHI) |
434 | return phi_rank (stmt); | |
435 | ||
e86e65ef | 436 | if (!is_gimple_assign (stmt)) |
75a70cf9 | 437 | return bb_rank[gimple_bb (stmt)->index]; |
3dec5460 | 438 | |
439 | /* If we already have a rank for this expression, use that. */ | |
b30a8715 | 440 | rank = find_operand_rank (e); |
441 | if (rank != -1) | |
442 | return rank; | |
3dec5460 | 443 | |
b248bf30 | 444 | /* Otherwise, find the maximum rank for the operands. As an |
445 | exception, remove the bias from loop-carried phis when propagating | |
446 | the rank so that dependent operations are not also biased. */ | |
e86e65ef | 447 | /* Simply walk over all SSA uses - this takes advantage of the
448 | fact that non-SSA operands are is_gimple_min_invariant and | |
449 | thus have rank 0. */ | |
3dec5460 | 450 | rank = 0; |
e86e65ef | 451 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE) |
452 | rank = propagate_rank (rank, op); | |
54aceb26 | 453 | |
3dec5460 | 454 | if (dump_file && (dump_flags & TDF_DETAILS)) |
455 | { | |
456 | fprintf (dump_file, "Rank for "); | |
1ffa4346 | 457 | print_generic_expr (dump_file, e); |
b30a8715 | 458 | fprintf (dump_file, " is %ld\n", (rank + 1)); |
3dec5460 | 459 | } |
54aceb26 | 460 | |
3dec5460 | 461 | /* Note the rank in the hashtable so we don't recompute it. */ |
54aceb26 | 462 | insert_operand_rank (e, (rank + 1)); |
3dec5460 | 463 | return (rank + 1); |
464 | } | |
465 | ||
e86e65ef | 466 | /* Constants, globals, etc., are rank 0 */ |
3dec5460 | 467 | return 0; |
468 | } | |
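
The accumulator bias is easiest to see at source level. A hedged illustration (function and variable names invented, mirroring the x_1 example in the comment above): both loops compute the same sum, but the second shape, which the biased phi rank steers the rewrite toward, keeps x out of the intermediate adds so the work of successive iterations can overlap once the loop is unrolled.

```c
int
acc_before (int a, int d, int e, int n)
{
  int x = 0;
  for (int i = 0; i < n; i++)
    x = ((x + a) + d) + e;   /* x is on the critical path of every add.  */
  return x;
}

int
acc_after (int a, int d, int e, int n)
{
  int x = 0;
  for (int i = 0; i < n; i++)
    x = ((a + d) + e) + x;   /* only the final add waits for x.  */
  return x;
}
```
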
469 | ||
54aceb26 | 470 | |
471 | /* We want integer ones to end up last no matter what, since they are | |
472 | the ones we can do the most with. */ | |
b520fc9d | 473 | #define INTEGER_CONST_TYPE 1 << 4 |
474 | #define FLOAT_ONE_CONST_TYPE 1 << 3 | |
54aceb26 | 475 | #define FLOAT_CONST_TYPE 1 << 2 |
476 | #define OTHER_CONST_TYPE 1 << 1 | |
477 | ||
478 | /* Classify an invariant tree into integer, float, or other, so that | |
479 | we can sort them to be near other constants of the same type. */ | |
480 | static inline int | |
481 | constant_type (tree t) | |
482 | { | |
483 | if (INTEGRAL_TYPE_P (TREE_TYPE (t))) | |
484 | return INTEGER_CONST_TYPE; | |
485 | else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t))) | |
b520fc9d | 486 | { |
487 | /* Sort -1.0 and 1.0 constants last, while in some cases | |
488 | const_binop can't optimize some inexact operations, multiplication | |
489 | by -1.0 or 1.0 can be always merged with others. */ | |
490 | if (real_onep (t) || real_minus_onep (t)) | |
491 | return FLOAT_ONE_CONST_TYPE; | |
492 | return FLOAT_CONST_TYPE; | |
493 | } | |
54aceb26 | 494 | else |
495 | return OTHER_CONST_TYPE; | |
496 | } | |
497 | ||
498 | /* qsort comparison function to sort operand entries PA and PB by rank | |
499 | so that the sorted array is ordered by rank in decreasing order. */ | |
500 | static int | |
501 | sort_by_operand_rank (const void *pa, const void *pb) | |
502 | { | |
04009ada | 503 | const operand_entry *oea = *(const operand_entry *const *)pa; |
504 | const operand_entry *oeb = *(const operand_entry *const *)pb; | |
54aceb26 | 505 | |
20977abe | 506 | if (oeb->rank != oea->rank) |
507 | return oeb->rank > oea->rank ? 1 : -1; | |
508 | ||
54aceb26 | 509 | /* It's nicer for optimize_expression if constants that are likely |
20977abe | 510 | to fold when added/multiplied/whatever are put next to each |
54aceb26 | 511 | other. Since all constants have rank 0, order them by type. */ |
20977abe | 512 | if (oea->rank == 0) |
17b5ea6f | 513 | { |
514 | if (constant_type (oeb->op) != constant_type (oea->op)) | |
b520fc9d | 515 | return constant_type (oea->op) - constant_type (oeb->op); |
17b5ea6f | 516 | else |
517 | /* To make sorting result stable, we use unique IDs to determine | |
518 | order. */ | |
7faa1ffa | 519 | return oeb->id > oea->id ? 1 : -1; |
17b5ea6f | 520 | } |
54aceb26 | 521 | |
20977abe | 522 | if (TREE_CODE (oea->op) != SSA_NAME) |
523 | { | |
524 | if (TREE_CODE (oeb->op) != SSA_NAME) | |
525 | return oeb->id > oea->id ? 1 : -1; | |
526 | else | |
527 | return 1; | |
528 | } | |
529 | else if (TREE_CODE (oeb->op) != SSA_NAME) | |
530 | return -1; | |
531 | ||
54aceb26 | 532 | /* Lastly, make sure the versions that are the same go next to each |
c89eca7b | 533 | other. */ |
20977abe | 534 | if (SSA_NAME_VERSION (oeb->op) != SSA_NAME_VERSION (oea->op)) |
17b5ea6f | 535 | { |
20977abe | 536 | /* SSA_NAME_VERSION is assigned pretty randomly because we reuse
537 | versions of removed SSA_NAMEs, so if possible, prefer to sort | |
538 | based on basic block and gimple_uid of the SSA_NAME_DEF_STMT. | |
539 | See PR60418. */ | |
540 | gimple *stmta = SSA_NAME_DEF_STMT (oea->op); | |
541 | gimple *stmtb = SSA_NAME_DEF_STMT (oeb->op); | |
542 | basic_block bba = gimple_bb (stmta); | |
543 | basic_block bbb = gimple_bb (stmtb); | |
544 | if (bbb != bba) | |
c89eca7b | 545 | { |
20977abe | 546 | /* One of the SSA_NAMEs can be defined in oeN->stmt_to_insert |
547 | but the other might not be. | |
548 | if (!bba) | |
549 | return 1; | |
550 | if (!bbb) | |
551 | return -1; | |
552 | /* If neither is, compare bb_rank. */ | |
553 | if (bb_rank[bbb->index] != bb_rank[bba->index]) | |
bf27ea99 | 554 | return (bb_rank[bbb->index] >> 16) - (bb_rank[bba->index] >> 16); |
d869d6e4 | 555 | } |
20977abe | 556 | |
557 | bool da = reassoc_stmt_dominates_stmt_p (stmta, stmtb); | |
558 | bool db = reassoc_stmt_dominates_stmt_p (stmtb, stmta); | |
559 | if (da != db) | |
560 | return da ? 1 : -1; | |
561 | ||
562 | return SSA_NAME_VERSION (oeb->op) > SSA_NAME_VERSION (oea->op) ? 1 : -1; | |
17b5ea6f | 563 | } |
54aceb26 | 564 | |
20977abe | 565 | return oeb->id > oea->id ? 1 : -1; |
54aceb26 | 566 | } |
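
A reduced model of the comparator contract above, assuming only rank and the unique id matter: rank sorts decreasing, and the id is the final tie-break so qsort, which is not stable, still yields a host-independent order. `struct entry` and the sample data are invented for this sketch.

```c
#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned rank; unsigned id; };

/* Rank decreasing, then id as the tie-break; never return 0 for
   distinct entries, so the output cannot depend on qsort's
   internal (unstable) order.  */
static int
cmp_entry (const void *pa, const void *pb)
{
  const struct entry *a = *(const struct entry *const *) pa;
  const struct entry *b = *(const struct entry *const *) pb;
  if (b->rank != a->rank)
    return b->rank > a->rank ? 1 : -1;
  return b->id > a->id ? 1 : -1;
}

int
main (void)
{
  struct entry e[4] = { {1, 0}, {2, 1}, {1, 2}, {2, 3} };
  const struct entry *p[4] = { &e[0], &e[1], &e[2], &e[3] };
  qsort (p, 4, sizeof *p, cmp_entry);
  for (int i = 0; i < 4; i++)
    printf ("rank %u id %u\n", p[i]->rank, p[i]->id);
  return 0;
}
```
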
567 | ||
568 | /* Add an operand entry to *OPS for the tree operand OP. */ | |
569 | ||
570 | static void | |
527d8479 | 571 | add_to_ops_vec (vec<operand_entry *> *ops, tree op, gimple *stmt_to_insert = NULL) |
54aceb26 | 572 | { |
04009ada | 573 | operand_entry *oe = operand_entry_pool.allocate (); |
54aceb26 | 574 | |
575 | oe->op = op; | |
576 | oe->rank = get_rank (op); | |
17b5ea6f | 577 | oe->id = next_operand_entry_id++; |
8c5ac7f6 | 578 | oe->count = 1; |
527d8479 | 579 | oe->stmt_to_insert = stmt_to_insert; |
f1f41a6c | 580 | ops->safe_push (oe); |
54aceb26 | 581 | } |
3dec5460 | 582 | |
8c5ac7f6 | 583 | /* Add an operand entry to *OPS for the tree operand OP with repeat |
584 | count REPEAT. */ | |
585 | ||
586 | static void | |
04009ada | 587 | add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op, |
8c5ac7f6 | 588 | HOST_WIDE_INT repeat) |
589 | { | |
04009ada | 590 | operand_entry *oe = operand_entry_pool.allocate (); |
8c5ac7f6 | 591 | |
592 | oe->op = op; | |
593 | oe->rank = get_rank (op); | |
594 | oe->id = next_operand_entry_id++; | |
595 | oe->count = repeat; | |
527d8479 | 596 | oe->stmt_to_insert = NULL; |
f1f41a6c | 597 | ops->safe_push (oe); |
8c5ac7f6 | 598 | |
599 | reassociate_stats.pows_encountered++; | |
600 | } | |
601 | ||
54aceb26 | 602 | /* Return true if STMT is a reassociable operation containing a binary
a4c3fb95 | 603 | operation with tree code CODE, and is inside LOOP. */ |
3dec5460 | 604 | |
605 | static bool | |
42acab1c | 606 | is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop) |
54aceb26 | 607 | { |
75a70cf9 | 608 | basic_block bb = gimple_bb (stmt); |
a4c3fb95 | 609 | |
75a70cf9 | 610 | if (gimple_bb (stmt) == NULL) |
a4c3fb95 | 611 | return false; |
612 | ||
a4c3fb95 | 613 | if (!flow_bb_inside_loop_p (loop, bb)) |
614 | return false; | |
615 | ||
75a70cf9 | 616 | if (is_gimple_assign (stmt) |
617 | && gimple_assign_rhs_code (stmt) == code | |
618 | && has_single_use (gimple_assign_lhs (stmt))) | |
d4d349db | 619 | { |
620 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
84489950 | 621 | tree rhs2 = gimple_assign_rhs2 (stmt); |
d4d349db | 622 | if (TREE_CODE (rhs1) == SSA_NAME |
623 | && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)) | |
624 | return false; | |
625 | if (rhs2 | |
626 | && TREE_CODE (rhs2) == SSA_NAME | |
627 | && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2)) | |
628 | return false; | |
629 | return true; | |
630 | } | |
75a70cf9 | 631 | |
54aceb26 | 632 | return false; |
633 | } | |
634 | ||
635 | ||
5b849c60 | 636 | /* Return true if STMT is a nop-conversion. */ |
637 | ||
638 | static bool | |
639 | gimple_nop_conversion_p (gimple *stmt) | |
640 | { | |
641 | if (gassign *ass = dyn_cast <gassign *> (stmt)) | |
642 | { | |
643 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass)) | |
644 | && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (ass)), | |
645 | TREE_TYPE (gimple_assign_rhs1 (ass)))) | |
646 | return true; | |
647 | } | |
648 | return false; | |
649 | } | |
650 | ||
54aceb26 | 651 | /* Given NAME, if NAME is defined by a unary operation OPCODE, return
652 | the operand of that operation. Otherwise, return NULL_TREE. | |
653 | ||
654 | static tree | |
655 | get_unary_op (tree name, enum tree_code opcode) | |
656 | { | |
42acab1c | 657 | gimple *stmt = SSA_NAME_DEF_STMT (name); |
54aceb26 | 658 | |
5b849c60 | 659 | /* Look through nop conversions (sign changes). */ |
660 | if (gimple_nop_conversion_p (stmt) | |
661 | && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME) | |
662 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
663 | ||
75a70cf9 | 664 | if (!is_gimple_assign (stmt)) |
54aceb26 | 665 | return NULL_TREE; |
666 | ||
75a70cf9 | 667 | if (gimple_assign_rhs_code (stmt) == opcode) |
668 | return gimple_assign_rhs1 (stmt); | |
54aceb26 | 669 | return NULL_TREE; |
670 | } | |
671 | ||
5b849c60 | 672 | /* Return true if OP1 and OP2 have the same value when cast to either type. */
673 | ||
674 | static bool | |
675 | ops_equal_values_p (tree op1, tree op2) | |
676 | { | |
677 | if (op1 == op2) | |
678 | return true; | |
679 | ||
6fb59218 | 680 | tree orig_op1 = op1; |
5b849c60 | 681 | if (TREE_CODE (op1) == SSA_NAME) |
682 | { | |
683 | gimple *stmt = SSA_NAME_DEF_STMT (op1); | |
684 | if (gimple_nop_conversion_p (stmt)) | |
685 | { | |
686 | op1 = gimple_assign_rhs1 (stmt); | |
687 | if (op1 == op2) | |
688 | return true; | |
689 | } | |
690 | } | |
691 | ||
692 | if (TREE_CODE (op2) == SSA_NAME) | |
693 | { | |
694 | gimple *stmt = SSA_NAME_DEF_STMT (op2); | |
695 | if (gimple_nop_conversion_p (stmt)) | |
696 | { | |
697 | op2 = gimple_assign_rhs1 (stmt); | |
6fb59218 | 698 | if (op1 == op2 |
699 | || orig_op1 == op2) | |
5b849c60 | 700 | return true; |
701 | } | |
702 | } | |
703 | ||
704 | return false; | |
705 | } | |
706 | ||
707 | ||
54aceb26 | 708 | /* If CURR and LAST are a pair of ops that OPCODE allows us to |
709 | eliminate through equivalences, do so, remove them from OPS, and | |
710 | return true. Otherwise, return false. */ | |
711 | ||
712 | static bool | |
713 | eliminate_duplicate_pair (enum tree_code opcode, | |
04009ada | 714 | vec<operand_entry *> *ops, |
54aceb26 | 715 | bool *all_done, |
716 | unsigned int i, | |
04009ada | 717 | operand_entry *curr, |
718 | operand_entry *last) | |
54aceb26 | 719 | { |
720 | ||
91543a50 | 721 | /* If we have two of the same op, and the opcode is &, |, min, or max,
722 | we can eliminate one of them. | |
54aceb26 | 723 | If we have two of the same op, and the opcode is ^, we can |
724 | eliminate both of them. */ | |
3dec5460 | 725 | |
54aceb26 | 726 | if (last && last->op == curr->op) |
3dec5460 | 727 | { |
54aceb26 | 728 | switch (opcode) |
729 | { | |
91543a50 | 730 | case MAX_EXPR: |
731 | case MIN_EXPR: | |
54aceb26 | 732 | case BIT_IOR_EXPR: |
733 | case BIT_AND_EXPR: | |
734 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
735 | { | |
736 | fprintf (dump_file, "Equivalence: "); | |
1ffa4346 | 737 | print_generic_expr (dump_file, curr->op); |
91543a50 | 738 | fprintf (dump_file, " [&|minmax] "); |
1ffa4346 | 739 | print_generic_expr (dump_file, last->op); |
54aceb26 | 740 | fprintf (dump_file, " -> "); |
1ffa4346 | 741 | print_generic_stmt (dump_file, last->op); |
54aceb26 | 742 | } |
743 | ||
f1f41a6c | 744 | ops->ordered_remove (i); |
54aceb26 | 745 | reassociate_stats.ops_eliminated ++; |
746 | ||
747 | return true; | |
748 | ||
749 | case BIT_XOR_EXPR: | |
750 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
751 | { | |
752 | fprintf (dump_file, "Equivalence: "); | |
1ffa4346 | 753 | print_generic_expr (dump_file, curr->op); |
54aceb26 | 754 | fprintf (dump_file, " ^ "); |
1ffa4346 | 755 | print_generic_expr (dump_file, last->op); |
54aceb26 | 756 | fprintf (dump_file, " -> nothing\n"); |
757 | } | |
758 | ||
759 | reassociate_stats.ops_eliminated += 2; | |
760 | ||
f1f41a6c | 761 | if (ops->length () == 2) |
54aceb26 | 762 | { |
a014ba8f | 763 | ops->truncate (0); |
385f3f36 | 764 | add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op))); |
54aceb26 | 765 | *all_done = true; |
766 | } | |
767 | else | |
768 | { | |
f1f41a6c | 769 | ops->ordered_remove (i-1); |
770 | ops->ordered_remove (i-1); | |
54aceb26 | 771 | } |
772 | ||
773 | return true; | |
774 | ||
775 | default: | |
776 | break; | |
777 | } | |
778 | } | |
3dec5460 | 779 | return false; |
780 | } | |
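
The pair equivalences this function exploits, written out as a small self-check: for &, |, min and max a duplicated operand is idempotent, so one copy can be dropped, while for ^ the pair cancels to zero.

```c
#include <assert.h>

int
main (void)
{
  int a = 0x5a5a;
  assert ((a & a) == a);   /* & : drop one copy.  */
  assert ((a | a) == a);   /* | : drop one copy.  */
  assert ((a ^ a) == 0);   /* ^ : both copies cancel to 0.  */
  /* MIN_EXPR and MAX_EXPR behave like & and |:
     min (a, a) == max (a, a) == a.  */
  return 0;
}
```
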
781 | ||
f1f41a6c | 782 | static vec<tree> plus_negates; |
c2f29260 | 783 | |
50fa62c6 | 784 | /* If OPCODE is PLUS_EXPR and CURR->OP is a negate expression or a bitwise not
785 | expression, look in OPS for a corresponding positive operation to cancel | |
786 | it out. If we find one, remove the other from OPS, replace | |
787 | OPS[CURRINDEX] with 0 or -1, respectively, and return true. Otherwise, | |
788 | return false. */ | |
3dec5460 | 789 | |
790 | static bool | |
54aceb26 | 791 | eliminate_plus_minus_pair (enum tree_code opcode, |
04009ada | 792 | vec<operand_entry *> *ops, |
54aceb26 | 793 | unsigned int currindex, |
04009ada | 794 | operand_entry *curr) |
54aceb26 | 795 | { |
796 | tree negateop; | |
50fa62c6 | 797 | tree notop; |
54aceb26 | 798 | unsigned int i; |
04009ada | 799 | operand_entry *oe; |
54aceb26 | 800 | |
801 | if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME) | |
3dec5460 | 802 | return false; |
54aceb26 | 803 | |
804 | negateop = get_unary_op (curr->op, NEGATE_EXPR); | |
50fa62c6 | 805 | notop = get_unary_op (curr->op, BIT_NOT_EXPR); |
806 | if (negateop == NULL_TREE && notop == NULL_TREE) | |
54aceb26 | 807 | return false; |
808 | ||
809 | /* Any non-negated version will have a rank that is one less than | |
810 | the current rank. So once we hit those ranks, if we don't find | |
811 | one, we can stop. */ | |
812 | ||
813 | for (i = currindex + 1; | |
f1f41a6c | 814 | ops->iterate (i, &oe) |
54aceb26 | 815 | && oe->rank >= curr->rank - 1 ; |
816 | i++) | |
3dec5460 | 817 | { |
5b849c60 | 818 | if (negateop |
819 | && ops_equal_values_p (oe->op, negateop)) | |
54aceb26 | 820 | { |
54aceb26 | 821 | if (dump_file && (dump_flags & TDF_DETAILS)) |
822 | { | |
823 | fprintf (dump_file, "Equivalence: "); | |
1ffa4346 | 824 | print_generic_expr (dump_file, negateop); |
54aceb26 | 825 | fprintf (dump_file, " + -"); |
1ffa4346 | 826 | print_generic_expr (dump_file, oe->op); |
54aceb26 | 827 | fprintf (dump_file, " -> 0\n"); |
828 | } | |
829 | ||
f1f41a6c | 830 | ops->ordered_remove (i); |
385f3f36 | 831 | add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op))); |
f1f41a6c | 832 | ops->ordered_remove (currindex); |
54aceb26 | 833 | reassociate_stats.ops_eliminated ++; |
834 | ||
50fa62c6 | 835 | return true; |
836 | } | |
5b849c60 | 837 | else if (notop |
838 | && ops_equal_values_p (oe->op, notop)) | |
50fa62c6 | 839 | { |
840 | tree op_type = TREE_TYPE (oe->op); | |
841 | ||
842 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
843 | { | |
844 | fprintf (dump_file, "Equivalence: "); | |
1ffa4346 | 845 | print_generic_expr (dump_file, notop); |
50fa62c6 | 846 | fprintf (dump_file, " + ~"); |
1ffa4346 | 847 | print_generic_expr (dump_file, oe->op); |
50fa62c6 | 848 | fprintf (dump_file, " -> -1\n"); |
849 | } | |
850 | ||
f1f41a6c | 851 | ops->ordered_remove (i); |
a4225fff | 852 | add_to_ops_vec (ops, build_all_ones_cst (op_type)); |
f1f41a6c | 853 | ops->ordered_remove (currindex); |
50fa62c6 | 854 | reassociate_stats.ops_eliminated ++; |
855 | ||
54aceb26 | 856 | return true; |
857 | } | |
3dec5460 | 858 | } |
859 | ||
5b849c60 | 860 | /* If CURR->OP is a negate expr without nop conversion in a plus expr: |
861 | save it for later inspection in repropagate_negates(). */ | |
862 | if (negateop != NULL_TREE | |
863 | && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (curr->op)) == NEGATE_EXPR) | |
f1f41a6c | 864 | plus_negates.safe_push (curr->op); |
c2f29260 | 865 | |
54aceb26 | 866 | return false; |
867 | } | |
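
The two cancellations searched for here, as a tiny self-check. On two's-complement integers a + ~a is all-ones, i.e. -1, which is why the not-pair is replaced by -1 rather than 0.

```c
#include <assert.h>

int
main (void)
{
  int a = 12345;
  assert (a + -a == 0);    /* negate pair -> 0.  */
  assert (a + ~a == -1);   /* not pair -> -1, since ~a == -a - 1.  */
  return 0;
}
```
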
868 | ||
869 | /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR, and CURR->OP is really a | |
870 | bitwise not expression, look in OPS for a corresponding operand to | |
871 | cancel it out. If we find one, remove the other from OPS, replace | |
872 | OPS[CURRINDEX] with 0, and return true. Otherwise, return | |
873 | false. */ | |
874 | ||
875 | static bool | |
876 | eliminate_not_pairs (enum tree_code opcode, | |
04009ada | 877 | vec<operand_entry *> *ops, |
54aceb26 | 878 | unsigned int currindex, |
04009ada | 879 | operand_entry *curr) |
54aceb26 | 880 | { |
881 | tree notop; | |
882 | unsigned int i; | |
04009ada | 883 | operand_entry *oe; |
54aceb26 | 884 | |
885 | if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) | |
886 | || TREE_CODE (curr->op) != SSA_NAME) | |
887 | return false; | |
888 | ||
889 | notop = get_unary_op (curr->op, BIT_NOT_EXPR); | |
890 | if (notop == NULL_TREE) | |
891 | return false; | |
892 | ||
893 | /* Any non-not version will have a rank that is one less than | |
894 | the current rank. So once we hit those ranks, if we don't find | |
895 | one, we can stop. */ | |
896 | ||
897 | for (i = currindex + 1; | |
f1f41a6c | 898 | ops->iterate (i, &oe) |
54aceb26 | 899 | && oe->rank >= curr->rank - 1; |
900 | i++) | |
3dec5460 | 901 | { |
54aceb26 | 902 | if (oe->op == notop) |
3dec5460 | 903 | { |
54aceb26 | 904 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3dec5460 | 905 | { |
54aceb26 | 906 | fprintf (dump_file, "Equivalence: "); |
1ffa4346 | 907 | print_generic_expr (dump_file, notop); |
54aceb26 | 908 | if (opcode == BIT_AND_EXPR) |
909 | fprintf (dump_file, " & ~"); | |
910 | else if (opcode == BIT_IOR_EXPR) | |
911 | fprintf (dump_file, " | ~"); | |
1ffa4346 | 912 | print_generic_expr (dump_file, oe->op); |
54aceb26 | 913 | if (opcode == BIT_AND_EXPR) |
914 | fprintf (dump_file, " -> 0\n"); | |
915 | else if (opcode == BIT_IOR_EXPR) | |
916 | fprintf (dump_file, " -> -1\n"); | |
3dec5460 | 917 | } |
54aceb26 | 918 | |
919 | if (opcode == BIT_AND_EXPR) | |
385f3f36 | 920 | oe->op = build_zero_cst (TREE_TYPE (oe->op)); |
54aceb26 | 921 | else if (opcode == BIT_IOR_EXPR) |
c2c07119 | 922 | oe->op = build_all_ones_cst (TREE_TYPE (oe->op)); |
54aceb26 | 923 | |
f1f41a6c | 924 | reassociate_stats.ops_eliminated += ops->length () - 1; |
925 | ops->truncate (0); | |
926 | ops->quick_push (oe); | |
54aceb26 | 927 | return true; |
3dec5460 | 928 | } |
929 | } | |
54aceb26 | 930 | |
931 | return false; | |
3dec5460 | 932 | } |
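
The bitwise analogues handled by eliminate_not_pairs, as a one-line self-check per identity; note that the resulting constant absorbs the whole operand list (0 under &, -1 under |), which is why the function truncates OPS down to that single constant.

```c
#include <assert.h>

int
main (void)
{
  int a = 0x0ff0;
  assert ((a & ~a) == 0);    /* x & ~x -> 0, and 0 absorbs &.  */
  assert ((a | ~a) == -1);   /* x | ~x -> -1, and -1 absorbs |.  */
  return 0;
}
```
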
933 | ||
54aceb26 | 934 | /* Use constant value that may be present in OPS to try to eliminate |
935 | operands. Note that this function is only really used when we've | |
936 | eliminated ops for other reasons, or merged constants. Across | |
937 | single statements, fold already does all of this, plus more. There | |
938 | is little point in duplicating logic, so I've only included the | |
939 | identities that I could ever construct testcases to trigger. */ | |
3dec5460 | 940 | |
54aceb26 | 941 | static void |
942 | eliminate_using_constants (enum tree_code opcode, | |
04009ada | 943 | vec<operand_entry *> *ops) |
3dec5460 | 944 | { |
04009ada | 945 | operand_entry *oelast = ops->last (); |
46ef5347 | 946 | tree type = TREE_TYPE (oelast->op); |
3dec5460 | 947 | |
46ef5347 | 948 | if (oelast->rank == 0 |
c02417ad | 949 | && (ANY_INTEGRAL_TYPE_P (type) || FLOAT_TYPE_P (type))) |
3dec5460 | 950 | { |
54aceb26 | 951 | switch (opcode) |
3dec5460 | 952 | { |
54aceb26 | 953 | case BIT_AND_EXPR: |
954 | if (integer_zerop (oelast->op)) | |
955 | { | |
f1f41a6c | 956 | if (ops->length () != 1) |
54aceb26 | 957 | { |
958 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
959 | fprintf (dump_file, "Found & 0, removing all other ops\n"); | |
960 | ||
f1f41a6c | 961 | reassociate_stats.ops_eliminated += ops->length () - 1; |
48e1416a | 962 | |
f1f41a6c | 963 | ops->truncate (0); |
964 | ops->quick_push (oelast); | |
54aceb26 | 965 | return; |
966 | } | |
967 | } | |
968 | else if (integer_all_onesp (oelast->op)) | |
969 | { | |
f1f41a6c | 970 | if (ops->length () != 1) |
54aceb26 | 971 | { |
972 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
973 | fprintf (dump_file, "Found & -1, removing\n"); | |
f1f41a6c | 974 | ops->pop (); |
54aceb26 | 975 | reassociate_stats.ops_eliminated++; |
976 | } | |
977 | } | |
978 | break; | |
979 | case BIT_IOR_EXPR: | |
980 | if (integer_all_onesp (oelast->op)) | |
981 | { | |
f1f41a6c | 982 | if (ops->length () != 1) |
54aceb26 | 983 | { |
984 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
985 | fprintf (dump_file, "Found | -1, removing all other ops\n"); | |
986 | ||
f1f41a6c | 987 | reassociate_stats.ops_eliminated += ops->length () - 1; |
48e1416a | 988 | |
f1f41a6c | 989 | ops->truncate (0); |
990 | ops->quick_push (oelast); | |
54aceb26 | 991 | return; |
992 | } | |
48e1416a | 993 | } |
54aceb26 | 994 | else if (integer_zerop (oelast->op)) |
995 | { | |
f1f41a6c | 996 | if (ops->length () != 1) |
54aceb26 | 997 | { |
998 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
999 | fprintf (dump_file, "Found | 0, removing\n"); | |
f1f41a6c | 1000 | ops->pop (); |
54aceb26 | 1001 | reassociate_stats.ops_eliminated++; |
1002 | } | |
1003 | } | |
1004 | break; | |
1005 | case MULT_EXPR: | |
46ef5347 | 1006 | if (integer_zerop (oelast->op) |
1007 | || (FLOAT_TYPE_P (type) | |
93633022 | 1008 | && !HONOR_NANS (type) |
fe994837 | 1009 | && !HONOR_SIGNED_ZEROS (type) |
46ef5347 | 1010 | && real_zerop (oelast->op))) |
54aceb26 | 1011 | { |
f1f41a6c | 1012 | if (ops->length () != 1) |
54aceb26 | 1013 | { |
1014 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1015 | fprintf (dump_file, "Found * 0, removing all other ops\n"); | |
48e1416a | 1016 | |
f1f41a6c | 1017 | reassociate_stats.ops_eliminated += ops->length () - 1; |
a74580b3 | 1018 | ops->truncate (0); |
f1f41a6c | 1019 | ops->quick_push (oelast); |
54aceb26 | 1020 | return; |
1021 | } | |
1022 | } | |
46ef5347 | 1023 | else if (integer_onep (oelast->op) |
1024 | || (FLOAT_TYPE_P (type) | |
fe994837 | 1025 | && !HONOR_SNANS (type) |
46ef5347 | 1026 | && real_onep (oelast->op))) |
54aceb26 | 1027 | { |
f1f41a6c | 1028 | if (ops->length () != 1) |
54aceb26 | 1029 | { |
1030 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1031 | fprintf (dump_file, "Found * 1, removing\n"); | |
f1f41a6c | 1032 | ops->pop (); |
54aceb26 | 1033 | reassociate_stats.ops_eliminated++; |
1034 | return; | |
1035 | } | |
1036 | } | |
1037 | break; | |
1038 | case BIT_XOR_EXPR: | |
1039 | case PLUS_EXPR: | |
1040 | case MINUS_EXPR: | |
46ef5347 | 1041 | if (integer_zerop (oelast->op) |
1042 | || (FLOAT_TYPE_P (type) | |
1043 | && (opcode == PLUS_EXPR || opcode == MINUS_EXPR) | |
1044 | && fold_real_zero_addition_p (type, oelast->op, | |
1045 | opcode == MINUS_EXPR))) | |
3dec5460 | 1046 | { |
f1f41a6c | 1047 | if (ops->length () != 1) |
3dec5460 | 1048 | { |
54aceb26 | 1049 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1050 | fprintf (dump_file, "Found [|^+] 0, removing\n"); | |
f1f41a6c | 1051 | ops->pop (); |
54aceb26 | 1052 | reassociate_stats.ops_eliminated++; |
1053 | return; | |
3dec5460 | 1054 | } |
3dec5460 | 1055 | } |
54aceb26 | 1056 | break; |
1057 | default: | |
1058 | break; | |
3dec5460 | 1059 | } |
1060 | } | |
3dec5460 | 1061 | }
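
The constant identities above, plus the reason for the floating-point guards: folding x * 0.0 to 0.0 is unsafe when NaNs or signed zeros matter, since 0.0 * NaN is NaN and 0.0 * -1.0 is -0.0. A hedged demonstration:

```c
#include <assert.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  int x = 42;
  assert ((x & 0) == 0);     /* & 0 absorbs all other ops.  */
  assert ((x | -1) == -1);   /* | -1 likewise.  */
  assert (x * 1 == x);       /* * 1 is removable.  */
  assert (x + 0 == x);       /* [|^+] 0 is removable.  */

  /* Why the HONOR_NANS / HONOR_SIGNED_ZEROS guards exist: neither
     result below may be folded to plain 0.0.  */
  double nan = NAN;
  printf ("0.0 * NaN  = %g\n", 0.0 * nan);    /* prints nan.  */
  printf ("0.0 * -1.0 = %g\n", 0.0 * -1.0);   /* prints -0.  */
  return 0;
}
```
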
1062 | ||
dddf5036 | 1063 | |
04009ada | 1064 | static void linearize_expr_tree (vec<operand_entry *> *, gimple *, |
dddf5036 | 1065 | bool, bool); |
1066 | ||
1067 | /* Structure for tracking and counting operands. */ | |
6dc50383 | 1068 | struct oecount { |
7faa1ffa | 1069 | unsigned int cnt; |
1070 | unsigned int id; | |
dddf5036 | 1071 | enum tree_code oecode; |
1072 | tree op; | |
6dc50383 | 1073 | }; |
dddf5036 | 1074 | |
dddf5036 | 1075 | |
1076 | /* The heap for the oecount hashtable and the sorted list of operands. */ | |
f1f41a6c | 1077 | static vec<oecount> cvec; |
dddf5036 | 1078 | |
3e871d4d | 1079 | |
1080 | /* Oecount hashtable helpers. */ | |
1081 | ||
613732c1 | 1082 | struct oecount_hasher : int_hash <int, 0, 1> |
3e871d4d | 1083 | { |
613732c1 | 1084 | static inline hashval_t hash (int); |
1085 | static inline bool equal (int, int); | |
3e871d4d | 1086 | }; |
1087 | ||
dddf5036 | 1088 | /* Hash function for oecount. */ |
1089 | ||
3e871d4d | 1090 | inline hashval_t |
613732c1 | 1091 | oecount_hasher::hash (int p) |
dddf5036 | 1092 | { |
2933f7af | 1093 | const oecount *c = &cvec[p - 42]; |
dddf5036 | 1094 | return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode; |
1095 | } | |
1096 | ||
1097 | /* Comparison function for oecount. */ | |
1098 | ||
3e871d4d | 1099 | inline bool |
613732c1 | 1100 | oecount_hasher::equal (int p1, int p2) |
dddf5036 | 1101 | { |
2933f7af | 1102 | const oecount *c1 = &cvec[p1 - 42]; |
1103 | const oecount *c2 = &cvec[p2 - 42]; | |
7faa1ffa | 1104 | return c1->oecode == c2->oecode && c1->op == c2->op; |
dddf5036 | 1105 | } |
1106 | ||
1107 | /* Comparison function for qsort sorting oecount elements by count. */ | |
1108 | ||
1109 | static int | |
1110 | oecount_cmp (const void *p1, const void *p2) | |
1111 | { | |
1112 | const oecount *c1 = (const oecount *)p1; | |
1113 | const oecount *c2 = (const oecount *)p2; | |
17b5ea6f | 1114 | if (c1->cnt != c2->cnt) |
7faa1ffa | 1115 | return c1->cnt > c2->cnt ? 1 : -1; |
17b5ea6f | 1116 | else |
1117 | /* If counts are identical, use unique IDs to stabilize qsort. */ | |
7faa1ffa | 1118 | return c1->id > c2->id ? 1 : -1; |
dddf5036 | 1119 | } |
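
The hash entries here are plain ints: an index into cvec biased by 42 (see the `cvec[p - 42]` accesses above), so that a real entry can never collide with the empty (0) and deleted (1) values that `int_hash <int, 0, 1>` reserves. A reduced model of that encoding:

```c
#include <assert.h>

enum { EMPTY = 0, DELETED = 1, BIAS = 42 };

static int encode (int cvec_index) { return cvec_index + BIAS; }
static int decode (int key)        { return key - BIAS; }

int
main (void)
{
  /* Even cvec index 0 lands well clear of the reserved values.  */
  assert (encode (0) != EMPTY && encode (0) != DELETED);
  assert (decode (encode (17)) == 17);
  return 0;
}
```
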
1120 | ||
56e650d6 | 1121 | /* Return TRUE iff STMT represents a builtin call that raises OP |
1122 | to some exponent. */ | |
1123 | ||
1124 | static bool | |
42acab1c | 1125 | stmt_is_power_of_op (gimple *stmt, tree op) |
56e650d6 | 1126 | { |
56e650d6 | 1127 | if (!is_gimple_call (stmt)) |
1128 | return false; | |
1129 | ||
390bb332 | 1130 | switch (gimple_call_combined_fn (stmt)) |
56e650d6 | 1131 | { |
390bb332 | 1132 | CASE_CFN_POW: |
1133 | CASE_CFN_POWI: | |
56e650d6 | 1134 | return (operand_equal_p (gimple_call_arg (stmt, 0), op, 0)); |
1135 | ||
1136 | default: | |
1137 | return false; | |
1138 | } | |
1139 | } | |
1140 | ||
1141 | /* Given STMT which is a __builtin_pow* call, decrement its exponent | |
1142 | in place and return the result. Assumes that stmt_is_power_of_op | |
1143 | was previously called for STMT and returned TRUE. */ | |
1144 | ||
1145 | static HOST_WIDE_INT | |
42acab1c | 1146 | decrement_power (gimple *stmt) |
56e650d6 | 1147 | { |
1148 | REAL_VALUE_TYPE c, cint; | |
1149 | HOST_WIDE_INT power; | |
1150 | tree arg1; | |
1151 | ||
390bb332 | 1152 | switch (gimple_call_combined_fn (stmt)) |
56e650d6 | 1153 | { |
390bb332 | 1154 | CASE_CFN_POW: |
56e650d6 | 1155 | arg1 = gimple_call_arg (stmt, 1); |
1156 | c = TREE_REAL_CST (arg1); | |
1157 | power = real_to_integer (&c) - 1; | |
e913b5cd | 1158 | real_from_integer (&cint, VOIDmode, power, SIGNED); |
56e650d6 | 1159 | gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint)); |
1160 | return power; | |
1161 | ||
390bb332 | 1162 | CASE_CFN_POWI: |
56e650d6 | 1163 | arg1 = gimple_call_arg (stmt, 1); |
f9ae6f95 | 1164 | power = TREE_INT_CST_LOW (arg1) - 1; |
56e650d6 | 1165 | gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power)); |
1166 | return power; | |
1167 | ||
1168 | default: | |
1169 | gcc_unreachable (); | |
1170 | } | |
1171 | } | |
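
What decrement_power amounts to arithmetically: one factor of the base is peeled off by rewriting the exponent argument in place, and the old exponent minus one is returned so the caller knows how many factors remain. A self-check of that identity (values invented; the real code edits the GIMPLE call's argument):

```c
#include <assert.h>
#include <math.h>

int
main (void)
{
  double x = 1.5;
  /* Peeling one factor preserves the product: x**5 == x * x**4,
     so rewriting pow (x, 5.0) to pow (x, 4.0) while extracting one
     factor of x is value-preserving.  */
  assert (fabs (pow (x, 5.0) - x * pow (x, 4.0)) < 1e-9);
  return 0;
}
```
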
1172 | ||
8ccdb0a4 | 1173 | /* Replace the SSA name defined by STMT with a new SSA name, replace
1174 | all uses of the old name, and return the new name. */ | |
1175 | ||
1176 | static tree | |
51e9c45e | 1177 | make_new_ssa_for_def (gimple *stmt, enum tree_code opcode, tree op) |
8ccdb0a4 | 1178 | { |
1179 | gimple *use_stmt; | |
1180 | use_operand_p use; | |
1181 | imm_use_iterator iter; | |
51e9c45e | 1182 | tree new_lhs, new_debug_lhs = NULL_TREE; |
140cd7ff | 1183 | tree lhs = gimple_get_lhs (stmt); |
8ccdb0a4 | 1184 | |
1185 | new_lhs = make_ssa_name (TREE_TYPE (lhs)); | |
1186 | gimple_set_lhs (stmt, new_lhs); | |
1187 | ||
1188 | /* Also need to update GIMPLE_DEBUGs. */ | |
1189 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) | |
1190 | { | |
51e9c45e | 1191 | tree repl = new_lhs; |
1192 | if (is_gimple_debug (use_stmt)) | |
1193 | { | |
1194 | if (new_debug_lhs == NULL_TREE) | |
1195 | { | |
1196 | new_debug_lhs = make_node (DEBUG_EXPR_DECL); | |
1197 | gdebug *def_temp | |
1198 | = gimple_build_debug_bind (new_debug_lhs, | |
1199 | build2 (opcode, TREE_TYPE (lhs), | |
1200 | new_lhs, op), | |
1201 | stmt); | |
1202 | DECL_ARTIFICIAL (new_debug_lhs) = 1; | |
1203 | TREE_TYPE (new_debug_lhs) = TREE_TYPE (lhs); | |
1204 | SET_DECL_MODE (new_debug_lhs, TYPE_MODE (TREE_TYPE (lhs))); | |
1205 | gimple_set_uid (def_temp, gimple_uid (stmt)); | |
1206 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
1207 | gsi_insert_after (&gsi, def_temp, GSI_SAME_STMT); | |
1208 | } | |
1209 | repl = new_debug_lhs; | |
1210 | } | |
8ccdb0a4 | 1211 | FOR_EACH_IMM_USE_ON_STMT (use, iter) |
51e9c45e | 1212 | SET_USE (use, repl); |
8ccdb0a4 | 1213 | update_stmt (use_stmt); |
1214 | } | |
1215 | return new_lhs; | |
1216 | } | |
1217 | ||
1218 | /* Replace all SSA names defined in STMTS_TO_FIX with new SSA names | |
1219 | and replace their uses. Also do this for the stmt that defines DEF | |
1220 | if *DEF is not OP. */ | |
1221 | ||
1222 | static void | |
51e9c45e | 1223 | make_new_ssa_for_all_defs (tree *def, enum tree_code opcode, tree op, |
8ccdb0a4 | 1224 | vec<gimple *> &stmts_to_fix) |
1225 | { | |
1226 | unsigned i; | |
1227 | gimple *stmt; | |
1228 | ||
1229 | if (*def != op | |
1230 | && TREE_CODE (*def) == SSA_NAME | |
1231 | && (stmt = SSA_NAME_DEF_STMT (*def)) | |
1232 | && gimple_code (stmt) != GIMPLE_NOP) | |
51e9c45e | 1233 | *def = make_new_ssa_for_def (stmt, opcode, op); |
8ccdb0a4 | 1234 | |
1235 | FOR_EACH_VEC_ELT (stmts_to_fix, i, stmt) | |
51e9c45e | 1236 | make_new_ssa_for_def (stmt, opcode, op); |
8ccdb0a4 | 1237 | } |
1238 | ||
56e650d6 | 1239 | /* Find the single immediate use of STMT's LHS, and replace it |
1240 | with OP. Remove STMT. If STMT's LHS is the same as *DEF, | |
1241 | replace *DEF with OP as well. */ | |
1242 | ||
1243 | static void | |
42acab1c | 1244 | propagate_op_to_single_use (tree op, gimple *stmt, tree *def) |
56e650d6 | 1245 | { |
1246 | tree lhs; | |
42acab1c | 1247 | gimple *use_stmt; |
56e650d6 | 1248 | use_operand_p use; |
1249 | gimple_stmt_iterator gsi; | |
1250 | ||
1251 | if (is_gimple_call (stmt)) | |
1252 | lhs = gimple_call_lhs (stmt); | |
1253 | else | |
1254 | lhs = gimple_assign_lhs (stmt); | |
1255 | ||
1256 | gcc_assert (has_single_use (lhs)); | |
1257 | single_imm_use (lhs, &use, &use_stmt); | |
1258 | if (lhs == *def) | |
1259 | *def = op; | |
1260 | SET_USE (use, op); | |
1261 | if (TREE_CODE (op) != SSA_NAME) | |
1262 | update_stmt (use_stmt); | |
1263 | gsi = gsi_for_stmt (stmt); | |
e6b37e57 | 1264 | unlink_stmt_vdef (stmt); |
54675e05 | 1265 | reassoc_remove_stmt (&gsi); |
56e650d6 | 1266 | release_defs (stmt); |
56e650d6 | 1267 | } |
1268 | ||
dddf5036 | 1269 | /* Walks the linear chain with result *DEF searching for an operation |
1270 | with operand OP and code OPCODE, removing that from the chain. *DEF | |
1271 | is updated if there is only one operand but no operation left. */ | |
1272 | ||
1273 | static void | |
1274 | zero_one_operation (tree *def, enum tree_code opcode, tree op) | |
1275 | { | |
51e9c45e | 1276 | tree orig_def = *def; |
42acab1c | 1277 | gimple *stmt = SSA_NAME_DEF_STMT (*def); |
8ccdb0a4 | 1278 | /* PR72835 - Record the stmt chain that has to be updated such that |
1279 | we don't use the same LHS when the values computed are different. */ | |
1280 | auto_vec<gimple *, 64> stmts_to_fix; | |
dddf5036 | 1281 | |
1282 | do | |
1283 | { | |
56e650d6 | 1284 | tree name; |
1285 | ||
30633bcc | 1286 | if (opcode == MULT_EXPR) |
56e650d6 | 1287 | { |
30633bcc | 1288 | if (stmt_is_power_of_op (stmt, op)) |
1289 | { | |
1290 | if (decrement_power (stmt) == 1) | |
8ccdb0a4 | 1291 | { |
1292 | if (stmts_to_fix.length () > 0) | |
1293 | stmts_to_fix.pop (); | |
1294 | propagate_op_to_single_use (op, stmt, def); | |
1295 | } | |
1296 | break; | |
30633bcc | 1297 | } |
e1f7cc4b | 1298 | else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR) |
30633bcc | 1299 | { |
e1f7cc4b | 1300 | if (gimple_assign_rhs1 (stmt) == op) |
1301 | { | |
6cec5c6b | 1302 | tree cst = build_minus_one_cst (TREE_TYPE (op)); |
8ccdb0a4 | 1303 | if (stmts_to_fix.length () > 0) |
1304 | stmts_to_fix.pop (); | |
6cec5c6b | 1305 | propagate_op_to_single_use (cst, stmt, def); |
8ccdb0a4 | 1306 | break; |
e1f7cc4b | 1307 | } |
1308 | else if (integer_minus_onep (op) | |
1309 | || real_minus_onep (op)) | |
1310 | { | |
1311 | gimple_assign_set_rhs_code | |
1312 | (stmt, TREE_CODE (gimple_assign_rhs1 (stmt))); | |
8ccdb0a4 | 1313 | break; |
e1f7cc4b | 1314 | } |
30633bcc | 1315 | } |
56e650d6 | 1316 | } |
1317 | ||
1318 | name = gimple_assign_rhs1 (stmt); | |
dddf5036 | 1319 | |
1320 | /* If this is the operation we look for and one of the operands | |
1321 | is ours simply propagate the other operand into the stmts | |
1322 | single use. */ | |
1323 | if (gimple_assign_rhs_code (stmt) == opcode | |
1324 | && (name == op | |
1325 | || gimple_assign_rhs2 (stmt) == op)) | |
1326 | { | |
dddf5036 | 1327 | if (name == op) |
1328 | name = gimple_assign_rhs2 (stmt); | |
8ccdb0a4 | 1329 | if (stmts_to_fix.length () > 0) |
1330 | stmts_to_fix.pop (); | |
56e650d6 | 1331 | propagate_op_to_single_use (name, stmt, def); |
8ccdb0a4 | 1332 | break; |
dddf5036 | 1333 | } |
1334 | ||
56e650d6 | 1335 | /* We might have a multiply of two __builtin_pow* calls, and |
30633bcc | 1336 | the operand might be hiding in the rightmost one. Likewise |
1337 | this can happen for a negate. */ | |
56e650d6 | 1338 | if (opcode == MULT_EXPR |
1339 | && gimple_assign_rhs_code (stmt) == opcode | |
bdb5a35a | 1340 | && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME |
1341 | && has_single_use (gimple_assign_rhs2 (stmt))) | |
56e650d6 | 1342 | { |
42acab1c | 1343 | gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); |
56e650d6 | 1344 | if (stmt_is_power_of_op (stmt2, op)) |
1345 | { | |
1346 | if (decrement_power (stmt2) == 1) | |
1347 | propagate_op_to_single_use (op, stmt2, def); | |
8ccdb0a4 | 1348 | else |
1349 | stmts_to_fix.safe_push (stmt2); | |
1350 | break; | |
56e650d6 | 1351 | } |
30633bcc | 1352 | else if (is_gimple_assign (stmt2) |
e1f7cc4b | 1353 | && gimple_assign_rhs_code (stmt2) == NEGATE_EXPR) |
30633bcc | 1354 | { |
e1f7cc4b | 1355 | if (gimple_assign_rhs1 (stmt2) == op) |
1356 | { | |
6cec5c6b | 1357 | tree cst = build_minus_one_cst (TREE_TYPE (op)); |
1358 | propagate_op_to_single_use (cst, stmt2, def); | |
8ccdb0a4 | 1359 | break; |
e1f7cc4b | 1360 | } |
1361 | else if (integer_minus_onep (op) | |
1362 | || real_minus_onep (op)) | |
1363 | { | |
8ccdb0a4 | 1364 | stmts_to_fix.safe_push (stmt2); |
e1f7cc4b | 1365 | gimple_assign_set_rhs_code |
1366 | (stmt2, TREE_CODE (gimple_assign_rhs1 (stmt2))); | |
8ccdb0a4 | 1367 | break; |
e1f7cc4b | 1368 | } |
30633bcc | 1369 | } |
56e650d6 | 1370 | } |
1371 | ||
dddf5036 | 1372 | /* Continue walking the chain. */ |
1373 | gcc_assert (name != op | |
1374 | && TREE_CODE (name) == SSA_NAME); | |
1375 | stmt = SSA_NAME_DEF_STMT (name); | |
8ccdb0a4 | 1376 | stmts_to_fix.safe_push (stmt); |
dddf5036 | 1377 | } |
1378 | while (1); | |
8ccdb0a4 | 1379 | |
51e9c45e | 1380 | if (stmts_to_fix.length () > 0 || *def == orig_def) |
1381 | make_new_ssa_for_all_defs (def, opcode, op, stmts_to_fix); | |
dddf5036 | 1382 | } |
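
The net effect of zero_one_operation on a linear chain, modeled at source level with invented names: exactly one occurrence of OP disappears from the chain, so for MULT_EXPR the chain's value is divided by OP, and every statement downstream of the edit gets a fresh LHS (the PR72835 fix above) because its value changed.

```c
#include <assert.h>

/* Chain before: t1 = a * b; t2 = t1 * c.  After removing factor b,
   the chain computes a * c, so multiplying the new value by b must
   give back the old one.  */
static int chain_before (int a, int b, int c) { return (a * b) * c; }
static int chain_after  (int a, int c)        { return a * c; }

int
main (void)
{
  int a = 3, b = 5, c = 7;
  assert (chain_before (a, b, c) == chain_after (a, c) * b);
  return 0;
}
```
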
1383 | ||
82cb4e1c | 1384 | /* Returns true if statement S1 dominates statement S2. Like |
1385 | stmt_dominates_stmt_p, but uses stmt UIDs to optimize. */ | |
c9dbb922 | 1386 | |
82cb4e1c | 1387 | static bool |
42acab1c | 1388 | reassoc_stmt_dominates_stmt_p (gimple *s1, gimple *s2) |
c9dbb922 | 1389 | { |
82cb4e1c | 1390 | basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2); |
1391 | ||
1392 | /* If bb1 is NULL, it should be a GIMPLE_NOP def stmt of an (D) | |
1393 | SSA_NAME. Assume it lives at the beginning of function and | |
1394 | thus dominates everything. */ | |
1395 | if (!bb1 || s1 == s2) | |
1396 | return true; | |
1397 | ||
1398 | /* If bb2 is NULL, it doesn't dominate any stmt with a bb. */ | |
1399 | if (!bb2) | |
1400 | return false; | |
1401 | ||
1402 | if (bb1 == bb2) | |
1403 | { | |
1404 | /* PHIs in the same basic block are assumed to be | |
1405 | executed all in parallel, if only one stmt is a PHI, | |
1406 | it dominates the other stmt in the same basic block. */ | |
1407 | if (gimple_code (s1) == GIMPLE_PHI) | |
1408 | return true; | |
1409 | ||
1410 | if (gimple_code (s2) == GIMPLE_PHI) | |
1411 | return false; | |
1412 | ||
1413 | gcc_assert (gimple_uid (s1) && gimple_uid (s2)); | |
1414 | ||
1415 | if (gimple_uid (s1) < gimple_uid (s2)) | |
1416 | return true; | |
1417 | ||
1418 | if (gimple_uid (s1) > gimple_uid (s2)) | |
1419 | return false; | |
1420 | ||
1421 | gimple_stmt_iterator gsi = gsi_for_stmt (s1); | |
1422 | unsigned int uid = gimple_uid (s1); | |
1423 | for (gsi_next (&gsi); !gsi_end_p (gsi); gsi_next (&gsi)) | |
1424 | { | |
42acab1c | 1425 | gimple *s = gsi_stmt (gsi); |
82cb4e1c | 1426 | if (gimple_uid (s) != uid) |
1427 | break; | |
1428 | if (s == s2) | |
1429 | return true; | |
1430 | } | |
1431 | ||
1432 | return false; | |
1433 | } | |
1434 | ||
1435 | return dominated_by_p (CDI_DOMINATORS, bb2, bb1); | |
1436 | } | |
1437 | ||
1438 | /* Insert STMT after INSERT_POINT. */ | |
1439 | ||
1440 | static void | |
42acab1c | 1441 | insert_stmt_after (gimple *stmt, gimple *insert_point) |
82cb4e1c | 1442 | { |
1443 | gimple_stmt_iterator gsi; | |
1444 | basic_block bb; | |
1445 | ||
1446 | if (gimple_code (insert_point) == GIMPLE_PHI) | |
1447 | bb = gimple_bb (insert_point); | |
1448 | else if (!stmt_ends_bb_p (insert_point)) | |
1449 | { | |
1450 | gsi = gsi_for_stmt (insert_point); | |
1451 | gimple_set_uid (stmt, gimple_uid (insert_point)); | |
1452 | gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); | |
1453 | return; | |
1454 | } | |
1455 | else | |
    /* We assume INSERT_POINT is the SSA_NAME_DEF_STMT of some SSA_NAME,
       so if it must end a basic block it should be a call that can
       throw, or some assignment that can throw.  If it throws, the LHS
       of it will not be initialized, so only valid places using
       the SSA_NAME should be dominated by the fallthru edge.  */
1461 | bb = find_fallthru_edge (gimple_bb (insert_point)->succs)->dest; | |
1462 | gsi = gsi_after_labels (bb); | |
1463 | if (gsi_end_p (gsi)) | |
1464 | { | |
1465 | gimple_stmt_iterator gsi2 = gsi_last_bb (bb); | |
1466 | gimple_set_uid (stmt, | |
1467 | gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); | |
1468 | } | |
1469 | else | |
1470 | gimple_set_uid (stmt, gimple_uid (gsi_stmt (gsi))); | |
1471 | gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); | |
c9dbb922 | 1472 | } |
1473 | ||
/* Build one statement performing OP1 OPCODE OP2, making a new SSA name
   of TYPE for the result.  Place the statement after the definition of
   either OP1 or OP2.  Return the new statement.  */
1477 | ||
42acab1c | 1478 | static gimple * |
03d37e4e | 1479 | build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode) |
dddf5036 | 1480 | { |
42acab1c | 1481 | gimple *op1def = NULL, *op2def = NULL; |
dddf5036 | 1482 | gimple_stmt_iterator gsi; |
1483 | tree op; | |
1a91d914 | 1484 | gassign *sum; |
dddf5036 | 1485 | |
1486 | /* Create the addition statement. */ | |
f9e245b2 | 1487 | op = make_ssa_name (type); |
e9cf809e | 1488 | sum = gimple_build_assign (op, opcode, op1, op2); |
dddf5036 | 1489 | |
1490 | /* Find an insertion place and insert. */ | |
1491 | if (TREE_CODE (op1) == SSA_NAME) | |
1492 | op1def = SSA_NAME_DEF_STMT (op1); | |
1493 | if (TREE_CODE (op2) == SSA_NAME) | |
1494 | op2def = SSA_NAME_DEF_STMT (op2); | |
1495 | if ((!op1def || gimple_nop_p (op1def)) | |
1496 | && (!op2def || gimple_nop_p (op2def))) | |
1497 | { | |
34154e27 | 1498 | gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
82cb4e1c | 1499 | if (gsi_end_p (gsi)) |
dddf5036 | 1500 | { |
82cb4e1c | 1501 | gimple_stmt_iterator gsi2 |
34154e27 | 1502 | = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
82cb4e1c | 1503 | gimple_set_uid (sum, |
1504 | gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2))); | |
dddf5036 | 1505 | } |
1506 | else | |
82cb4e1c | 1507 | gimple_set_uid (sum, gimple_uid (gsi_stmt (gsi))); |
1508 | gsi_insert_before (&gsi, sum, GSI_NEW_STMT); | |
dddf5036 | 1509 | } |
1510 | else | |
1511 | { | |
42acab1c | 1512 | gimple *insert_point; |
82cb4e1c | 1513 | if ((!op1def || gimple_nop_p (op1def)) |
1514 | || (op2def && !gimple_nop_p (op2def) | |
1515 | && reassoc_stmt_dominates_stmt_p (op1def, op2def))) | |
1516 | insert_point = op2def; | |
dddf5036 | 1517 | else |
82cb4e1c | 1518 | insert_point = op1def; |
1519 | insert_stmt_after (sum, insert_point); | |
dddf5036 | 1520 | } |
1521 | update_stmt (sum); | |
1522 | ||
1523 | return sum; | |
1524 | } | |
1525 | ||
1526 | /* Perform un-distribution of divisions and multiplications. | |
   A * X + B * X is transformed into (A + B) * X, and A / X + B / X
   into (A + B) / X for real X.
1529 | ||
1530 | The algorithm is organized as follows. | |
1531 | ||
1532 | - First we walk the addition chain *OPS looking for summands that | |
1533 | are defined by a multiplication or a real division. This results | |
1534 | in the candidates bitmap with relevant indices into *OPS. | |
1535 | ||
1536 | - Second we build the chains of multiplications or divisions for | |
9d75589a | 1537 | these candidates, counting the number of occurrences of (operand, code) |
dddf5036 | 1538 | pairs in all of the candidates chains. |
1539 | ||
   - Third we sort the (operand, code) pairs by number of occurrences and
dddf5036 | 1541 | process them starting with the pair with the most uses. |
1542 | ||
1543 | * For each such pair we walk the candidates again to build a | |
1544 | second candidate bitmap noting all multiplication/division chains | |
9d75589a | 1545 | that have at least one occurrence of (operand, code). |
dddf5036 | 1546 | |
1547 | * We build an alternate addition chain only covering these | |
1548 | candidates with one (operand, code) operation removed from their | |
1549 | multiplication/division chain. | |
1550 | ||
1551 | * The first candidate gets replaced by the alternate addition chain | |
1552 | multiplied/divided by the operand. | |
1553 | ||
1554 | * All candidate chains get disabled for further processing and | |
1555 | processing of (operand, code) pairs continues. | |
1556 | ||
1557 | The alternate addition chains built are re-processed by the main | |
1558 | reassociation algorithm which allows optimizing a * x * y + b * y * x | |
   to (a + b) * x * y in one invocation of the reassociation pass.  */
1560 | ||
1561 | static bool | |
1562 | undistribute_ops_list (enum tree_code opcode, | |
04009ada | 1563 | vec<operand_entry *> *ops, struct loop *loop) |
dddf5036 | 1564 | { |
f1f41a6c | 1565 | unsigned int length = ops->length (); |
04009ada | 1566 | operand_entry *oe1; |
dddf5036 | 1567 | unsigned i, j; |
dddf5036 | 1568 | unsigned nr_candidates, nr_candidates2; |
1569 | sbitmap_iterator sbi0; | |
04009ada | 1570 | vec<operand_entry *> *subops; |
dddf5036 | 1571 | bool changed = false; |
7faa1ffa | 1572 | unsigned int next_oecount_id = 0; |
dddf5036 | 1573 | |
1574 | if (length <= 1 | |
1575 | || opcode != PLUS_EXPR) | |
1576 | return false; | |
1577 | ||
1578 | /* Build a list of candidates to process. */ | |
3c6549f8 | 1579 | auto_sbitmap candidates (length); |
53c5d9d4 | 1580 | bitmap_clear (candidates); |
dddf5036 | 1581 | nr_candidates = 0; |
f1f41a6c | 1582 | FOR_EACH_VEC_ELT (*ops, i, oe1) |
dddf5036 | 1583 | { |
1584 | enum tree_code dcode; | |
42acab1c | 1585 | gimple *oe1def; |
dddf5036 | 1586 | |
1587 | if (TREE_CODE (oe1->op) != SSA_NAME) | |
1588 | continue; | |
1589 | oe1def = SSA_NAME_DEF_STMT (oe1->op); | |
1590 | if (!is_gimple_assign (oe1def)) | |
1591 | continue; | |
1592 | dcode = gimple_assign_rhs_code (oe1def); | |
1593 | if ((dcode != MULT_EXPR | |
1594 | && dcode != RDIV_EXPR) | |
1595 | || !is_reassociable_op (oe1def, dcode, loop)) | |
1596 | continue; | |
1597 | ||
08b7917c | 1598 | bitmap_set_bit (candidates, i); |
dddf5036 | 1599 | nr_candidates++; |
1600 | } | |
1601 | ||
1602 | if (nr_candidates < 2) | |
3c6549f8 | 1603 | return false; |
dddf5036 | 1604 | |
1605 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1606 | { | |
1607 | fprintf (dump_file, "searching for un-distribute opportunities "); | |
1608 | print_generic_expr (dump_file, | |
54e7de93 | 1609 | (*ops)[bitmap_first_set_bit (candidates)]->op, TDF_NONE); |
dddf5036 | 1610 | fprintf (dump_file, " %d\n", nr_candidates); |
1611 | } | |
1612 | ||
1613 | /* Build linearized sub-operand lists and the counting table. */ | |
f1f41a6c | 1614 | cvec.create (0); |
c1f445d2 | 1615 | |
1616 | hash_table<oecount_hasher> ctable (15); | |
1617 | ||
  /* ??? Macro arguments cannot have multi-argument template types in
     them.  This typedef is needed to work around that limitation.  */
04009ada | 1620 | typedef vec<operand_entry *> vec_operand_entry_t_heap; |
f1f41a6c | 1621 | subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ()); |
0d211963 | 1622 | EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
dddf5036 | 1623 | { |
42acab1c | 1624 | gimple *oedef; |
dddf5036 | 1625 | enum tree_code oecode; |
1626 | unsigned j; | |
1627 | ||
f1f41a6c | 1628 | oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op); |
dddf5036 | 1629 | oecode = gimple_assign_rhs_code (oedef); |
1630 | linearize_expr_tree (&subops[i], oedef, | |
1631 | associative_tree_code (oecode), false); | |
1632 | ||
f1f41a6c | 1633 | FOR_EACH_VEC_ELT (subops[i], j, oe1) |
dddf5036 | 1634 | { |
1635 | oecount c; | |
2933f7af | 1636 | int *slot; |
1637 | int idx; | |
dddf5036 | 1638 | c.oecode = oecode; |
1639 | c.cnt = 1; | |
17b5ea6f | 1640 | c.id = next_oecount_id++; |
dddf5036 | 1641 | c.op = oe1->op; |
f1f41a6c | 1642 | cvec.safe_push (c); |
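	  /* Editorial note: the stored value is the new entry's index
	     in cvec biased by 42 (cvec.length () is that index + 1,
	     plus 41), so it can be mapped back with "- 42" below and
	     never collides with the small sentinel values the int
	     hasher reserves for empty and deleted slots.  */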
1643 | idx = cvec.length () + 41; | |
2933f7af | 1644 | slot = ctable.find_slot (idx, INSERT); |
dddf5036 | 1645 | if (!*slot) |
1646 | { | |
2933f7af | 1647 | *slot = idx; |
dddf5036 | 1648 | } |
1649 | else | |
1650 | { | |
f1f41a6c | 1651 | cvec.pop (); |
2933f7af | 1652 | cvec[*slot - 42].cnt++; |
dddf5036 | 1653 | } |
1654 | } | |
1655 | } | |
dddf5036 | 1656 | |
1657 | /* Sort the counting table. */ | |
f1f41a6c | 1658 | cvec.qsort (oecount_cmp); |
dddf5036 | 1659 | |
1660 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1661 | { | |
1662 | oecount *c; | |
1663 | fprintf (dump_file, "Candidates:\n"); | |
f1f41a6c | 1664 | FOR_EACH_VEC_ELT (cvec, j, c) |
dddf5036 | 1665 | { |
1666 | fprintf (dump_file, " %u %s: ", c->cnt, | |
1667 | c->oecode == MULT_EXPR | |
1668 | ? "*" : c->oecode == RDIV_EXPR ? "/" : "?"); | |
1ffa4346 | 1669 | print_generic_expr (dump_file, c->op); |
dddf5036 | 1670 | fprintf (dump_file, "\n"); |
1671 | } | |
1672 | } | |
1673 | ||
  /* Process the (operand, code) pairs in decreasing order of
     occurrence count.  */
3c6549f8 | 1675 | auto_sbitmap candidates2 (length); |
f1f41a6c | 1676 | while (!cvec.is_empty ()) |
dddf5036 | 1677 | { |
f1f41a6c | 1678 | oecount *c = &cvec.last (); |
dddf5036 | 1679 | if (c->cnt < 2) |
1680 | break; | |
1681 | ||
1682 | /* Now collect the operands in the outer chain that contain | |
1683 | the common operand in their inner chain. */ | |
53c5d9d4 | 1684 | bitmap_clear (candidates2); |
dddf5036 | 1685 | nr_candidates2 = 0; |
0d211963 | 1686 | EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0) |
dddf5036 | 1687 | { |
42acab1c | 1688 | gimple *oedef; |
dddf5036 | 1689 | enum tree_code oecode; |
1690 | unsigned j; | |
f1f41a6c | 1691 | tree op = (*ops)[i]->op; |
dddf5036 | 1692 | |
1693 | /* If we undistributed in this chain already this may be | |
1694 | a constant. */ | |
1695 | if (TREE_CODE (op) != SSA_NAME) | |
1696 | continue; | |
1697 | ||
1698 | oedef = SSA_NAME_DEF_STMT (op); | |
1699 | oecode = gimple_assign_rhs_code (oedef); | |
1700 | if (oecode != c->oecode) | |
1701 | continue; | |
1702 | ||
f1f41a6c | 1703 | FOR_EACH_VEC_ELT (subops[i], j, oe1) |
dddf5036 | 1704 | { |
56e650d6 | 1705 | if (oe1->op == c->op) |
dddf5036 | 1706 | { |
08b7917c | 1707 | bitmap_set_bit (candidates2, i); |
dddf5036 | 1708 | ++nr_candidates2; |
1709 | break; | |
1710 | } | |
1711 | } | |
1712 | } | |
1713 | ||
1714 | if (nr_candidates2 >= 2) | |
1715 | { | |
04009ada | 1716 | operand_entry *oe1, *oe2; |
42acab1c | 1717 | gimple *prod; |
53c5d9d4 | 1718 | int first = bitmap_first_set_bit (candidates2); |
dddf5036 | 1719 | |
1720 | /* Build the new addition chain. */ | |
f1f41a6c | 1721 | oe1 = (*ops)[first]; |
dddf5036 | 1722 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1723 | { | |
1724 | fprintf (dump_file, "Building ("); | |
1ffa4346 | 1725 | print_generic_expr (dump_file, oe1->op); |
dddf5036 | 1726 | } |
dddf5036 | 1727 | zero_one_operation (&oe1->op, c->oecode, c->op); |
0d211963 | 1728 | EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0) |
dddf5036 | 1729 | { |
42acab1c | 1730 | gimple *sum; |
f1f41a6c | 1731 | oe2 = (*ops)[i]; |
dddf5036 | 1732 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1733 | { | |
1734 | fprintf (dump_file, " + "); | |
1ffa4346 | 1735 | print_generic_expr (dump_file, oe2->op); |
dddf5036 | 1736 | } |
1737 | zero_one_operation (&oe2->op, c->oecode, c->op); | |
03d37e4e | 1738 | sum = build_and_add_sum (TREE_TYPE (oe1->op), |
1739 | oe1->op, oe2->op, opcode); | |
385f3f36 | 1740 | oe2->op = build_zero_cst (TREE_TYPE (oe2->op)); |
dddf5036 | 1741 | oe2->rank = 0; |
1742 | oe1->op = gimple_get_lhs (sum); | |
1743 | } | |
1744 | ||
1745 | /* Apply the multiplication/division. */ | |
03d37e4e | 1746 | prod = build_and_add_sum (TREE_TYPE (oe1->op), |
1747 | oe1->op, c->op, c->oecode); | |
dddf5036 | 1748 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1749 | { | |
1750 | fprintf (dump_file, ") %s ", c->oecode == MULT_EXPR ? "*" : "/"); | |
1ffa4346 | 1751 | print_generic_expr (dump_file, c->op); |
dddf5036 | 1752 | fprintf (dump_file, "\n"); |
1753 | } | |
1754 | ||
1755 | /* Record it in the addition chain and disable further | |
1756 | undistribution with this op. */ | |
1757 | oe1->op = gimple_assign_lhs (prod); | |
1758 | oe1->rank = get_rank (oe1->op); | |
f1f41a6c | 1759 | subops[first].release (); |
dddf5036 | 1760 | |
1761 | changed = true; | |
1762 | } | |
1763 | ||
f1f41a6c | 1764 | cvec.pop (); |
dddf5036 | 1765 | } |
1766 | ||
f1f41a6c | 1767 | for (i = 0; i < ops->length (); ++i) |
1768 | subops[i].release (); | |
dddf5036 | 1769 | free (subops); |
f1f41a6c | 1770 | cvec.release (); |
dddf5036 | 1771 | |
1772 | return changed; | |
1773 | } | |
1774 | ||
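/* Editorial walk-through (operand names invented) of the algorithm
   above on the chain  a*x + b*x + c*y + d*y :
   - all four summands are candidates, each defined by a MULT_EXPR;
   - the occurrence counts over the inner chains are x: 2, y: 2 and
     a, b, c, d: 1 each;
   - processing (x, MULT_EXPR) removes one x from the chains of a*x and
     b*x, builds the alternate chain  a + b,  replaces the first
     candidate with  (a + b) * x  and zeros out the second;
   - processing (y, MULT_EXPR) likewise yields  (c + d) * y;
   the outer chain thus becomes  (a + b) * x + 0 + (c + d) * y + 0,
   which the main reassociation algorithm then cleans up.  */
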
5d6da7da | 1775 | /* If OPCODE is BIT_IOR_EXPR or BIT_AND_EXPR and CURR is a comparison |
1776 | expression, examine the other OPS to see if any of them are comparisons | |
1777 | of the same values, which we may be able to combine or eliminate. | |
1778 | For example, we can rewrite (a < b) | (a == b) as (a <= b). */ | |
1779 | ||
1780 | static bool | |
1781 | eliminate_redundant_comparison (enum tree_code opcode, | |
04009ada | 1782 | vec<operand_entry *> *ops, |
5d6da7da | 1783 | unsigned int currindex, |
04009ada | 1784 | operand_entry *curr) |
5d6da7da | 1785 | { |
1786 | tree op1, op2; | |
1787 | enum tree_code lcode, rcode; | |
42acab1c | 1788 | gimple *def1, *def2; |
5d6da7da | 1789 | int i; |
04009ada | 1790 | operand_entry *oe; |
5d6da7da | 1791 | |
1792 | if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR) | |
1793 | return false; | |
1794 | ||
1795 | /* Check that CURR is a comparison. */ | |
1796 | if (TREE_CODE (curr->op) != SSA_NAME) | |
1797 | return false; | |
1798 | def1 = SSA_NAME_DEF_STMT (curr->op); | |
1799 | if (!is_gimple_assign (def1)) | |
1800 | return false; | |
1801 | lcode = gimple_assign_rhs_code (def1); | |
1802 | if (TREE_CODE_CLASS (lcode) != tcc_comparison) | |
1803 | return false; | |
1804 | op1 = gimple_assign_rhs1 (def1); | |
1805 | op2 = gimple_assign_rhs2 (def1); | |
1806 | ||
1807 | /* Now look for a similar comparison in the remaining OPS. */ | |
f1f41a6c | 1808 | for (i = currindex + 1; ops->iterate (i, &oe); i++) |
5d6da7da | 1809 | { |
1810 | tree t; | |
1811 | ||
1812 | if (TREE_CODE (oe->op) != SSA_NAME) | |
1813 | continue; | |
1814 | def2 = SSA_NAME_DEF_STMT (oe->op); | |
1815 | if (!is_gimple_assign (def2)) | |
1816 | continue; | |
1817 | rcode = gimple_assign_rhs_code (def2); | |
1818 | if (TREE_CODE_CLASS (rcode) != tcc_comparison) | |
1819 | continue; | |
5d6da7da | 1820 | |
1821 | /* If we got here, we have a match. See if we can combine the | |
1822 | two comparisons. */ | |
c82d157a | 1823 | if (opcode == BIT_IOR_EXPR) |
1824 | t = maybe_fold_or_comparisons (lcode, op1, op2, | |
1825 | rcode, gimple_assign_rhs1 (def2), | |
1826 | gimple_assign_rhs2 (def2)); | |
1827 | else | |
1828 | t = maybe_fold_and_comparisons (lcode, op1, op2, | |
1829 | rcode, gimple_assign_rhs1 (def2), | |
1830 | gimple_assign_rhs2 (def2)); | |
5d6da7da | 1831 | if (!t) |
1832 | continue; | |
c82d157a | 1833 | |
1834 | /* maybe_fold_and_comparisons and maybe_fold_or_comparisons | |
1835 | always give us a boolean_type_node value back. If the original | |
1836 | BIT_AND_EXPR or BIT_IOR_EXPR was of a wider integer type, | |
1837 | we need to convert. */ | |
1838 | if (!useless_type_conversion_p (TREE_TYPE (curr->op), TREE_TYPE (t))) | |
1839 | t = fold_convert (TREE_TYPE (curr->op), t); | |
1840 | ||
89c993b6 | 1841 | if (TREE_CODE (t) != INTEGER_CST |
1842 | && !operand_equal_p (t, curr->op, 0)) | |
1843 | { | |
1844 | enum tree_code subcode; | |
1845 | tree newop1, newop2; | |
1846 | if (!COMPARISON_CLASS_P (t)) | |
1847 | continue; | |
1848 | extract_ops_from_tree (t, &subcode, &newop1, &newop2); | |
1849 | STRIP_USELESS_TYPE_CONVERSION (newop1); | |
1850 | STRIP_USELESS_TYPE_CONVERSION (newop2); | |
1851 | if (!is_gimple_val (newop1) || !is_gimple_val (newop2)) | |
1852 | continue; | |
1853 | } | |
1854 | ||
5d6da7da | 1855 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1856 | { | |
1857 | fprintf (dump_file, "Equivalence: "); | |
1ffa4346 | 1858 | print_generic_expr (dump_file, curr->op); |
5d6da7da | 1859 | fprintf (dump_file, " %s ", op_symbol_code (opcode)); |
1ffa4346 | 1860 | print_generic_expr (dump_file, oe->op); |
5d6da7da | 1861 | fprintf (dump_file, " -> "); |
1ffa4346 | 1862 | print_generic_expr (dump_file, t); |
5d6da7da | 1863 | fprintf (dump_file, "\n"); |
1864 | } | |
1865 | ||
1866 | /* Now we can delete oe, as it has been subsumed by the new combined | |
1867 | expression t. */ | |
f1f41a6c | 1868 | ops->ordered_remove (i); |
5d6da7da | 1869 | reassociate_stats.ops_eliminated ++; |
1870 | ||
1871 | /* If t is the same as curr->op, we're done. Otherwise we must | |
1872 | replace curr->op with t. Special case is if we got a constant | |
1873 | back, in which case we add it to the end instead of in place of | |
1874 | the current entry. */ | |
1875 | if (TREE_CODE (t) == INTEGER_CST) | |
1876 | { | |
f1f41a6c | 1877 | ops->ordered_remove (currindex); |
5d6da7da | 1878 | add_to_ops_vec (ops, t); |
1879 | } | |
c82d157a | 1880 | else if (!operand_equal_p (t, curr->op, 0)) |
5d6da7da | 1881 | { |
42acab1c | 1882 | gimple *sum; |
5d6da7da | 1883 | enum tree_code subcode; |
1884 | tree newop1; | |
1885 | tree newop2; | |
b2d33090 | 1886 | gcc_assert (COMPARISON_CLASS_P (t)); |
5d6da7da | 1887 | extract_ops_from_tree (t, &subcode, &newop1, &newop2); |
b2d33090 | 1888 | STRIP_USELESS_TYPE_CONVERSION (newop1); |
1889 | STRIP_USELESS_TYPE_CONVERSION (newop2); | |
1890 | gcc_checking_assert (is_gimple_val (newop1) | |
1891 | && is_gimple_val (newop2)); | |
03d37e4e | 1892 | sum = build_and_add_sum (TREE_TYPE (t), newop1, newop2, subcode); |
5d6da7da | 1893 | curr->op = gimple_get_lhs (sum); |
1894 | } | |
1895 | return true; | |
1896 | } | |
1897 | ||
1898 | return false; | |
1899 | } | |
dddf5036 | 1900 | |
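/* Editorial sketch (helper name invented) showing, at the source level,
   the two fold outcomes the loop above distinguishes: a combined
   comparison, and a constant that is appended to the operand list.  */

static int
redundant_cmp_sketch (int a, int b)
{
  int t1 = (a < b) | (a == b);	/* combines to the comparison a <= b */
  int t2 = (a < b) & (a > b);	/* folds to the constant 0 */
  return t1 + t2;
}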
527d8479 | 1901 | |
/* Transform repeated addition of the same value into multiplication by
   a constant.  */
1904 | static bool | |
527d8479 | 1905 | transform_add_to_multiply (vec<operand_entry *> *ops) |
0d7ddd44 | 1906 | { |
1907 | operand_entry *oe; | |
1908 | tree op = NULL_TREE; | |
1909 | int j; | |
1910 | int i, start = -1, end = 0, count = 0; | |
7e6da23d | 1911 | auto_vec<std::pair <int, int> > indxs; |
0d7ddd44 | 1912 | bool changed = false; |
1913 | ||
1914 | if (!INTEGRAL_TYPE_P (TREE_TYPE ((*ops)[0]->op)) | |
933b9f7f | 1915 | && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE ((*ops)[0]->op)) |
1916 | || !flag_unsafe_math_optimizations)) | |
0d7ddd44 | 1917 | return false; |
1918 | ||
1919 | /* Look for repeated operands. */ | |
1920 | FOR_EACH_VEC_ELT (*ops, i, oe) | |
1921 | { | |
1922 | if (start == -1) | |
1923 | { | |
1924 | count = 1; | |
1925 | op = oe->op; | |
1926 | start = i; | |
1927 | } | |
1928 | else if (operand_equal_p (oe->op, op, 0)) | |
1929 | { | |
1930 | count++; | |
1931 | end = i; | |
1932 | } | |
1933 | else | |
1934 | { | |
1935 | if (count > 1) | |
1936 | indxs.safe_push (std::make_pair (start, end)); | |
1937 | count = 1; | |
1938 | op = oe->op; | |
1939 | start = i; | |
1940 | } | |
1941 | } | |
1942 | ||
1943 | if (count > 1) | |
1944 | indxs.safe_push (std::make_pair (start, end)); | |
1945 | ||
1946 | for (j = indxs.length () - 1; j >= 0; --j) | |
1947 | { | |
1948 | /* Convert repeated operand addition to multiplication. */ | |
1949 | start = indxs[j].first; | |
1950 | end = indxs[j].second; | |
1951 | op = (*ops)[start]->op; | |
1952 | count = end - start + 1; | |
1953 | for (i = end; i >= start; --i) | |
1954 | ops->unordered_remove (i); | |
1955 | tree tmp = make_ssa_name (TREE_TYPE (op)); | |
1956 | tree cst = build_int_cst (integer_type_node, count); | |
0d7ddd44 | 1957 | gassign *mul_stmt |
1958 | = gimple_build_assign (tmp, MULT_EXPR, | |
1959 | op, fold_convert (TREE_TYPE (op), cst)); | |
0d7ddd44 | 1960 | gimple_set_visited (mul_stmt, true); |
527d8479 | 1961 | add_to_ops_vec (ops, tmp, mul_stmt); |
0d7ddd44 | 1962 | changed = true; |
1963 | } | |
1964 | ||
1965 | return changed; | |
1966 | } | |
1967 | ||
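/* Editorial sketch (function name invented) of the rewrite done above;
   for floating point it is guarded by -funsafe-math-optimizations.  */

static double
repeated_add_sketch (double x, double y)
{
  /* The chain  x + x + x + y + y  becomes  x * 3.0 + y * 2.0.  */
  return x + x + x + y + y;
}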
1968 | ||
54aceb26 | 1969 | /* Perform various identities and other optimizations on the list of |
1970 | operand entries, stored in OPS. The tree code for the binary | |
1971 | operation between all the operands is OPCODE. */ | |
3dec5460 | 1972 | |
54aceb26 | 1973 | static void |
1974 | optimize_ops_list (enum tree_code opcode, | |
04009ada | 1975 | vec<operand_entry *> *ops) |
54aceb26 | 1976 | { |
f1f41a6c | 1977 | unsigned int length = ops->length (); |
54aceb26 | 1978 | unsigned int i; |
04009ada | 1979 | operand_entry *oe; |
1980 | operand_entry *oelast = NULL; | |
54aceb26 | 1981 | bool iterate = false; |
3dec5460 | 1982 | |
54aceb26 | 1983 | if (length == 1) |
1984 | return; | |
3dec5460 | 1985 | |
f1f41a6c | 1986 | oelast = ops->last (); |
3dec5460 | 1987 | |
54aceb26 | 1988 | /* If the last two are constants, pop the constants off, merge them |
1989 | and try the next two. */ | |
1990 | if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op)) | |
1991 | { | |
04009ada | 1992 | operand_entry *oelm1 = (*ops)[length - 2]; |
54aceb26 | 1993 | |
1994 | if (oelm1->rank == 0 | |
1995 | && is_gimple_min_invariant (oelm1->op) | |
c8ca3ee7 | 1996 | && useless_type_conversion_p (TREE_TYPE (oelm1->op), |
1997 | TREE_TYPE (oelast->op))) | |
54aceb26 | 1998 | { |
5f9acd88 | 1999 | tree folded = fold_binary (opcode, TREE_TYPE (oelm1->op), |
54aceb26 | 2000 | oelm1->op, oelast->op); |
2001 | ||
5f9acd88 | 2002 | if (folded && is_gimple_min_invariant (folded)) |
54aceb26 | 2003 | { |
2004 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2005 | fprintf (dump_file, "Merging constants\n"); | |
2006 | ||
f1f41a6c | 2007 | ops->pop (); |
2008 | ops->pop (); | |
54aceb26 | 2009 | |
2010 | add_to_ops_vec (ops, folded); | |
2011 | reassociate_stats.constants_eliminated++; | |
2012 | ||
2013 | optimize_ops_list (opcode, ops); | |
2014 | return; | |
2015 | } | |
2016 | } | |
2017 | } | |
2018 | ||
2019 | eliminate_using_constants (opcode, ops); | |
2020 | oelast = NULL; | |
2021 | ||
f1f41a6c | 2022 | for (i = 0; ops->iterate (i, &oe);) |
54aceb26 | 2023 | { |
2024 | bool done = false; | |
2025 | ||
2026 | if (eliminate_not_pairs (opcode, ops, i, oe)) | |
2027 | return; | |
2028 | if (eliminate_duplicate_pair (opcode, ops, &done, i, oe, oelast) | |
5d6da7da | 2029 | || (!done && eliminate_plus_minus_pair (opcode, ops, i, oe)) |
2030 | || (!done && eliminate_redundant_comparison (opcode, ops, i, oe))) | |
54aceb26 | 2031 | { |
2032 | if (done) | |
2033 | return; | |
2034 | iterate = true; | |
2035 | oelast = NULL; | |
2036 | continue; | |
2037 | } | |
2038 | oelast = oe; | |
2039 | i++; | |
2040 | } | |
2041 | ||
f1f41a6c | 2042 | length = ops->length (); |
2043 | oelast = ops->last (); | |
54aceb26 | 2044 | |
2045 | if (iterate) | |
2046 | optimize_ops_list (opcode, ops); | |
2047 | } | |
2048 | ||
946e9eb4 | 2049 | /* The following functions are subroutines to optimize_range_tests and allow |
2050 | it to try to change a logical combination of comparisons into a range | |
2051 | test. | |
2052 | ||
2053 | For example, both | |
2054 | X == 2 || X == 5 || X == 3 || X == 4 | |
2055 | and | |
2056 | X >= 2 && X <= 5 | |
2057 | are converted to | |
2058 | (unsigned) (X - 2) <= 3 | |
2059 | ||
   For more information see the comments above fold_range_test in
   fold-const.c; this implementation is for GIMPLE.  */
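
/* Editorial sketch (helper name invented) of why the unsigned rewrite
   above is equivalent, assuming a 32-bit int:  */

static int
range_check_sketch (int x)
{
  /* After subtracting the low bound in unsigned arithmetic, any x < 2
     wraps around to a huge value, so a single comparison checks both
     2 <= x and x <= 5.  */
  return ((unsigned) x - 2u) <= 3u;
}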
2062 | ||
2063 | struct range_entry | |
2064 | { | |
2065 | tree exp; | |
2066 | tree low; | |
2067 | tree high; | |
2068 | bool in_p; | |
2069 | bool strict_overflow_p; | |
2070 | unsigned int idx, next; | |
2071 | }; | |
2072 | ||
2073 | /* This is similar to make_range in fold-const.c, but on top of | |
8a2c7744 | 2074 | GIMPLE instead of trees. If EXP is non-NULL, it should be |
2075 | an SSA_NAME and STMT argument is ignored, otherwise STMT | |
2076 | argument should be a GIMPLE_COND. */ | |
946e9eb4 | 2077 | |
2078 | static void | |
42acab1c | 2079 | init_range_entry (struct range_entry *r, tree exp, gimple *stmt) |
946e9eb4 | 2080 | { |
2081 | int in_p; | |
2082 | tree low, high; | |
2083 | bool is_bool, strict_overflow_p; | |
2084 | ||
2085 | r->exp = NULL_TREE; | |
2086 | r->in_p = false; | |
2087 | r->strict_overflow_p = false; | |
2088 | r->low = NULL_TREE; | |
2089 | r->high = NULL_TREE; | |
8a2c7744 | 2090 | if (exp != NULL_TREE |
2091 | && (TREE_CODE (exp) != SSA_NAME || !INTEGRAL_TYPE_P (TREE_TYPE (exp)))) | |
946e9eb4 | 2092 | return; |
2093 | ||
2094 | /* Start with simply saying "EXP != 0" and then look at the code of EXP | |
2095 | and see if we can refine the range. Some of the cases below may not | |
2096 | happen, but it doesn't seem worth worrying about this. We "continue" | |
2097 | the outer loop when we've changed something; otherwise we "break" | |
2098 | the switch, which will "break" the while. */ | |
8a2c7744 | 2099 | low = exp ? build_int_cst (TREE_TYPE (exp), 0) : boolean_false_node; |
946e9eb4 | 2100 | high = low; |
2101 | in_p = 0; | |
2102 | strict_overflow_p = false; | |
2103 | is_bool = false; | |
8a2c7744 | 2104 | if (exp == NULL_TREE) |
2105 | is_bool = true; | |
2106 | else if (TYPE_PRECISION (TREE_TYPE (exp)) == 1) | |
946e9eb4 | 2107 | { |
2108 | if (TYPE_UNSIGNED (TREE_TYPE (exp))) | |
2109 | is_bool = true; | |
2110 | else | |
2111 | return; | |
2112 | } | |
2113 | else if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE) | |
2114 | is_bool = true; | |
2115 | ||
2116 | while (1) | |
2117 | { | |
946e9eb4 | 2118 | enum tree_code code; |
2119 | tree arg0, arg1, exp_type; | |
2120 | tree nexp; | |
2121 | location_t loc; | |
2122 | ||
8a2c7744 | 2123 | if (exp != NULL_TREE) |
2124 | { | |
81cab242 | 2125 | if (TREE_CODE (exp) != SSA_NAME |
2126 | || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp)) | |
8a2c7744 | 2127 | break; |
946e9eb4 | 2128 | |
8a2c7744 | 2129 | stmt = SSA_NAME_DEF_STMT (exp); |
2130 | if (!is_gimple_assign (stmt)) | |
2131 | break; | |
2132 | ||
2133 | code = gimple_assign_rhs_code (stmt); | |
2134 | arg0 = gimple_assign_rhs1 (stmt); | |
2135 | arg1 = gimple_assign_rhs2 (stmt); | |
2136 | exp_type = TREE_TYPE (exp); | |
2137 | } | |
2138 | else | |
2139 | { | |
2140 | code = gimple_cond_code (stmt); | |
2141 | arg0 = gimple_cond_lhs (stmt); | |
2142 | arg1 = gimple_cond_rhs (stmt); | |
2143 | exp_type = boolean_type_node; | |
2144 | } | |
946e9eb4 | 2145 | |
9adacac7 | 2146 | if (TREE_CODE (arg0) != SSA_NAME) |
2147 | break; | |
946e9eb4 | 2148 | loc = gimple_location (stmt); |
2149 | switch (code) | |
2150 | { | |
2151 | case BIT_NOT_EXPR: | |
e423480a | 2152 | if (TREE_CODE (TREE_TYPE (exp)) == BOOLEAN_TYPE |
2153 | /* Ensure the range is either +[-,0], +[0,0], | |
2154 | -[-,0], -[0,0] or +[1,-], +[1,1], -[1,-] or | |
2155 | -[1,1]. If it is e.g. +[-,-] or -[-,-] | |
2156 | or similar expression of unconditional true or | |
2157 | false, it should not be negated. */ | |
2158 | && ((high && integer_zerop (high)) | |
2159 | || (low && integer_onep (low)))) | |
946e9eb4 | 2160 | { |
2161 | in_p = !in_p; | |
2162 | exp = arg0; | |
2163 | continue; | |
2164 | } | |
2165 | break; | |
2166 | case SSA_NAME: | |
2167 | exp = arg0; | |
2168 | continue; | |
2169 | CASE_CONVERT: | |
2170 | if (is_bool) | |
c7aed3df | 2171 | { |
2172 | if ((TYPE_PRECISION (exp_type) == 1 | |
2173 | || TREE_CODE (exp_type) == BOOLEAN_TYPE) | |
2174 | && TYPE_PRECISION (TREE_TYPE (arg0)) > 1) | |
2175 | return; | |
2176 | } | |
2177 | else if (TYPE_PRECISION (TREE_TYPE (arg0)) == 1) | |
946e9eb4 | 2178 | { |
2179 | if (TYPE_UNSIGNED (TREE_TYPE (arg0))) | |
2180 | is_bool = true; | |
2181 | else | |
2182 | return; | |
2183 | } | |
2184 | else if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE) | |
2185 | is_bool = true; | |
2186 | goto do_default; | |
2187 | case EQ_EXPR: | |
2188 | case NE_EXPR: | |
2189 | case LT_EXPR: | |
2190 | case LE_EXPR: | |
2191 | case GE_EXPR: | |
2192 | case GT_EXPR: | |
2193 | is_bool = true; | |
2194 | /* FALLTHRU */ | |
2195 | default: | |
2196 | if (!is_bool) | |
2197 | return; | |
2198 | do_default: | |
2199 | nexp = make_range_step (loc, code, arg0, arg1, exp_type, | |
2200 | &low, &high, &in_p, | |
2201 | &strict_overflow_p); | |
2202 | if (nexp != NULL_TREE) | |
2203 | { | |
2204 | exp = nexp; | |
2205 | gcc_assert (TREE_CODE (exp) == SSA_NAME); | |
2206 | continue; | |
2207 | } | |
2208 | break; | |
2209 | } | |
2210 | break; | |
2211 | } | |
2212 | if (is_bool) | |
2213 | { | |
2214 | r->exp = exp; | |
2215 | r->in_p = in_p; | |
2216 | r->low = low; | |
2217 | r->high = high; | |
2218 | r->strict_overflow_p = strict_overflow_p; | |
2219 | } | |
2220 | } | |
2221 | ||
2222 | /* Comparison function for qsort. Sort entries | |
2223 | without SSA_NAME exp first, then with SSA_NAMEs sorted | |
2224 | by increasing SSA_NAME_VERSION, and for the same SSA_NAMEs | |
2225 | by increasing ->low and if ->low is the same, by increasing | |
   ->high.  ->low == NULL_TREE means minimum, ->high == NULL_TREE
   means maximum.  */
2228 | ||
2229 | static int | |
2230 | range_entry_cmp (const void *a, const void *b) | |
2231 | { | |
2232 | const struct range_entry *p = (const struct range_entry *) a; | |
2233 | const struct range_entry *q = (const struct range_entry *) b; | |
2234 | ||
2235 | if (p->exp != NULL_TREE && TREE_CODE (p->exp) == SSA_NAME) | |
2236 | { | |
2237 | if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) | |
2238 | { | |
2239 | /* Group range_entries for the same SSA_NAME together. */ | |
2240 | if (SSA_NAME_VERSION (p->exp) < SSA_NAME_VERSION (q->exp)) | |
2241 | return -1; | |
2242 | else if (SSA_NAME_VERSION (p->exp) > SSA_NAME_VERSION (q->exp)) | |
2243 | return 1; | |
2244 | /* If ->low is different, NULL low goes first, then by | |
2245 | ascending low. */ | |
2246 | if (p->low != NULL_TREE) | |
2247 | { | |
2248 | if (q->low != NULL_TREE) | |
2249 | { | |
2250 | tree tem = fold_binary (LT_EXPR, boolean_type_node, | |
2251 | p->low, q->low); | |
2252 | if (tem && integer_onep (tem)) | |
2253 | return -1; | |
2254 | tem = fold_binary (GT_EXPR, boolean_type_node, | |
2255 | p->low, q->low); | |
2256 | if (tem && integer_onep (tem)) | |
2257 | return 1; | |
2258 | } | |
2259 | else | |
2260 | return 1; | |
2261 | } | |
2262 | else if (q->low != NULL_TREE) | |
2263 | return -1; | |
2264 | /* If ->high is different, NULL high goes last, before that by | |
2265 | ascending high. */ | |
2266 | if (p->high != NULL_TREE) | |
2267 | { | |
2268 | if (q->high != NULL_TREE) | |
2269 | { | |
2270 | tree tem = fold_binary (LT_EXPR, boolean_type_node, | |
2271 | p->high, q->high); | |
2272 | if (tem && integer_onep (tem)) | |
2273 | return -1; | |
2274 | tem = fold_binary (GT_EXPR, boolean_type_node, | |
2275 | p->high, q->high); | |
2276 | if (tem && integer_onep (tem)) | |
2277 | return 1; | |
2278 | } | |
2279 | else | |
2280 | return -1; | |
2281 | } | |
eff8617c | 2282 | else if (q->high != NULL_TREE) |
946e9eb4 | 2283 | return 1; |
2284 | /* If both ranges are the same, sort below by ascending idx. */ | |
2285 | } | |
2286 | else | |
2287 | return 1; | |
2288 | } | |
2289 | else if (q->exp != NULL_TREE && TREE_CODE (q->exp) == SSA_NAME) | |
2290 | return -1; | |
2291 | ||
2292 | if (p->idx < q->idx) | |
2293 | return -1; | |
2294 | else | |
2295 | { | |
2296 | gcc_checking_assert (p->idx > q->idx); | |
2297 | return 1; | |
2298 | } | |
2299 | } | |
2300 | ||
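/* Editorial example (SSA names and bounds invented) of the order this
   comparison produces:
     <no SSA_NAME>: [3, 3], x_5: [-, 10], x_5: [1, 4], x_5: [1, 8],
     x_7: [0, 0]
   i.e. all entries for one SSA_NAME end up adjacent and sorted by
   bounds, which the merging loops below rely on.  */
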
/* Helper function for update_range_test.  Force EXPR into an SSA_NAME,
   inserting any needed statements before or after GSI, as selected by
   BEFORE.  */
2303 | ||
2304 | static tree | |
2305 | force_into_ssa_name (gimple_stmt_iterator *gsi, tree expr, bool before) | |
2306 | { | |
2307 | enum gsi_iterator_update m = before ? GSI_SAME_STMT : GSI_CONTINUE_LINKING; | |
2308 | tree ret = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, before, m); | |
2309 | if (TREE_CODE (ret) != SSA_NAME) | |
2310 | { | |
2311 | gimple *g = gimple_build_assign (make_ssa_name (TREE_TYPE (ret)), ret); | |
2312 | if (before) | |
2313 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2314 | else | |
2315 | gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); | |
2316 | ret = gimple_assign_lhs (g); | |
2317 | } | |
2318 | return ret; | |
2319 | } | |
2320 | ||
/* Helper routine of optimize_range_tests.
2322 | [EXP, IN_P, LOW, HIGH, STRICT_OVERFLOW_P] is a merged range for | |
2323 | RANGE and OTHERRANGE through OTHERRANGE + COUNT - 1 ranges, | |
e3668db5 | 2324 | OPCODE and OPS are arguments of optimize_range_tests. If OTHERRANGE |
   is NULL, OTHERRANGEP must not be NULL; it then points to
2326 | an array of COUNT pointers to other ranges. Return | |
8a2c7744 | 2327 | true if the range merge has been successful. |
2328 | If OPCODE is ERROR_MARK, this is called from within | |
2329 | maybe_optimize_range_tests and is performing inter-bb range optimization. | |
82cb4e1c | 2330 | In that case, whether an op is BIT_AND_EXPR or BIT_IOR_EXPR is found in |
2331 | oe->rank. */ | |
946e9eb4 | 2332 | |
2333 | static bool | |
2334 | update_range_test (struct range_entry *range, struct range_entry *otherrange, | |
e3668db5 | 2335 | struct range_entry **otherrangep, |
946e9eb4 | 2336 | unsigned int count, enum tree_code opcode, |
04009ada | 2337 | vec<operand_entry *> *ops, tree exp, gimple_seq seq, |
e3668db5 | 2338 | bool in_p, tree low, tree high, bool strict_overflow_p) |
946e9eb4 | 2339 | { |
04009ada | 2340 | operand_entry *oe = (*ops)[range->idx]; |
8a2c7744 | 2341 | tree op = oe->op; |
14a72c4e | 2342 | gimple *stmt = op ? SSA_NAME_DEF_STMT (op) |
2343 | : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); | |
8a2c7744 | 2344 | location_t loc = gimple_location (stmt); |
2345 | tree optype = op ? TREE_TYPE (op) : boolean_type_node; | |
e3668db5 | 2346 | tree tem = build_range_check (loc, optype, unshare_expr (exp), |
2347 | in_p, low, high); | |
946e9eb4 | 2348 | enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; |
2349 | gimple_stmt_iterator gsi; | |
14a72c4e | 2350 | unsigned int i, uid; |
946e9eb4 | 2351 | |
2352 | if (tem == NULL_TREE) | |
2353 | return false; | |
2354 | ||
  /* If op is a default-def SSA_NAME, there is no place to insert the
2356 | new comparison. Give up, unless we can use OP itself as the | |
2357 | range test. */ | |
2358 | if (op && SSA_NAME_IS_DEFAULT_DEF (op)) | |
2359 | { | |
2360 | if (op == range->exp | |
2361 | && ((TYPE_PRECISION (optype) == 1 && TYPE_UNSIGNED (optype)) | |
2362 | || TREE_CODE (optype) == BOOLEAN_TYPE) | |
2363 | && (op == tem | |
2364 | || (TREE_CODE (tem) == EQ_EXPR | |
2365 | && TREE_OPERAND (tem, 0) == op | |
2366 | && integer_onep (TREE_OPERAND (tem, 1)))) | |
2367 | && opcode != BIT_IOR_EXPR | |
2368 | && (opcode != ERROR_MARK || oe->rank != BIT_IOR_EXPR)) | |
2369 | { | |
2370 | stmt = NULL; | |
2371 | tem = op; | |
2372 | } | |
2373 | else | |
2374 | return false; | |
2375 | } | |
2376 | ||
946e9eb4 | 2377 | if (strict_overflow_p && issue_strict_overflow_warning (wc)) |
2378 | warning_at (loc, OPT_Wstrict_overflow, | |
2379 | "assuming signed overflow does not occur " | |
2380 | "when simplifying range test"); | |
2381 | ||
2382 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2383 | { | |
2384 | struct range_entry *r; | |
2385 | fprintf (dump_file, "Optimizing range tests "); | |
1ffa4346 | 2386 | print_generic_expr (dump_file, range->exp); |
946e9eb4 | 2387 | fprintf (dump_file, " %c[", range->in_p ? '+' : '-'); |
1ffa4346 | 2388 | print_generic_expr (dump_file, range->low); |
946e9eb4 | 2389 | fprintf (dump_file, ", "); |
1ffa4346 | 2390 | print_generic_expr (dump_file, range->high); |
946e9eb4 | 2391 | fprintf (dump_file, "]"); |
e3668db5 | 2392 | for (i = 0; i < count; i++) |
946e9eb4 | 2393 | { |
e3668db5 | 2394 | if (otherrange) |
2395 | r = otherrange + i; | |
2396 | else | |
2397 | r = otherrangep[i]; | |
4c168df0 | 2398 | if (r->exp |
2399 | && r->exp != range->exp | |
2400 | && TREE_CODE (r->exp) == SSA_NAME) | |
2401 | { | |
2402 | fprintf (dump_file, " and "); | |
2403 | print_generic_expr (dump_file, r->exp); | |
2404 | } | |
2405 | else | |
2406 | fprintf (dump_file, " and"); | |
2407 | fprintf (dump_file, " %c[", r->in_p ? '+' : '-'); | |
1ffa4346 | 2408 | print_generic_expr (dump_file, r->low); |
946e9eb4 | 2409 | fprintf (dump_file, ", "); |
1ffa4346 | 2410 | print_generic_expr (dump_file, r->high); |
946e9eb4 | 2411 | fprintf (dump_file, "]"); |
2412 | } | |
2413 | fprintf (dump_file, "\n into "); | |
1ffa4346 | 2414 | print_generic_expr (dump_file, tem); |
946e9eb4 | 2415 | fprintf (dump_file, "\n"); |
2416 | } | |
2417 | ||
8a2c7744 | 2418 | if (opcode == BIT_IOR_EXPR |
2419 | || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) | |
946e9eb4 | 2420 | tem = invert_truthvalue_loc (loc, tem); |
2421 | ||
8a2c7744 | 2422 | tem = fold_convert_loc (loc, optype, tem); |
14a72c4e | 2423 | if (stmt) |
2424 | { | |
2425 | gsi = gsi_for_stmt (stmt); | |
2426 | uid = gimple_uid (stmt); | |
2427 | } | |
2428 | else | |
2429 | { | |
2430 | gsi = gsi_none (); | |
2431 | uid = 0; | |
2432 | } | |
2433 | if (stmt == NULL) | |
2434 | gcc_checking_assert (tem == op); | |
  /* In rare cases range->exp can be equal to the lhs of stmt.
     In that case we have to insert after the stmt rather than before
     it.  If stmt is a PHI, insert it at the start of the basic block.  */
14a72c4e | 2438 | else if (op != range->exp) |
927d5076 | 2439 | { |
2440 | gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); | |
070dd4d4 | 2441 | tem = force_into_ssa_name (&gsi, tem, true); |
927d5076 | 2442 | gsi_prev (&gsi); |
2443 | } | |
2444 | else if (gimple_code (stmt) != GIMPLE_PHI) | |
e3668db5 | 2445 | { |
2446 | gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); | |
070dd4d4 | 2447 | tem = force_into_ssa_name (&gsi, tem, false); |
e3668db5 | 2448 | } |
a0ebffaa | 2449 | else |
2450 | { | |
927d5076 | 2451 | gsi = gsi_after_labels (gimple_bb (stmt)); |
2452 | if (!gsi_end_p (gsi)) | |
2453 | uid = gimple_uid (gsi_stmt (gsi)); | |
2454 | else | |
2455 | { | |
2456 | gsi = gsi_start_bb (gimple_bb (stmt)); | |
2457 | uid = 1; | |
2458 | while (!gsi_end_p (gsi)) | |
2459 | { | |
2460 | uid = gimple_uid (gsi_stmt (gsi)); | |
2461 | gsi_next (&gsi); | |
2462 | } | |
2463 | } | |
e3668db5 | 2464 | gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); |
070dd4d4 | 2465 | tem = force_into_ssa_name (&gsi, tem, true); |
927d5076 | 2466 | if (gsi_end_p (gsi)) |
2467 | gsi = gsi_last_bb (gimple_bb (stmt)); | |
2468 | else | |
2469 | gsi_prev (&gsi); | |
a0ebffaa | 2470 | } |
2471 | for (; !gsi_end_p (gsi); gsi_prev (&gsi)) | |
82cb4e1c | 2472 | if (gimple_uid (gsi_stmt (gsi))) |
2473 | break; | |
2474 | else | |
927d5076 | 2475 | gimple_set_uid (gsi_stmt (gsi), uid); |
946e9eb4 | 2476 | |
8a2c7744 | 2477 | oe->op = tem; |
946e9eb4 | 2478 | range->exp = exp; |
2479 | range->low = low; | |
2480 | range->high = high; | |
2481 | range->in_p = in_p; | |
2482 | range->strict_overflow_p = false; | |
2483 | ||
e3668db5 | 2484 | for (i = 0; i < count; i++) |
946e9eb4 | 2485 | { |
e3668db5 | 2486 | if (otherrange) |
2487 | range = otherrange + i; | |
2488 | else | |
2489 | range = otherrangep[i]; | |
f1f41a6c | 2490 | oe = (*ops)[range->idx]; |
8a2c7744 | 2491 | /* Now change all the other range test immediate uses, so that |
2492 | those tests will be optimized away. */ | |
2493 | if (opcode == ERROR_MARK) | |
2494 | { | |
2495 | if (oe->op) | |
82cb4e1c | 2496 | oe->op = build_int_cst (TREE_TYPE (oe->op), |
2497 | oe->rank == BIT_IOR_EXPR ? 0 : 1); | |
8a2c7744 | 2498 | else |
82cb4e1c | 2499 | oe->op = (oe->rank == BIT_IOR_EXPR |
2500 | ? boolean_false_node : boolean_true_node); | |
8a2c7744 | 2501 | } |
82cb4e1c | 2502 | else |
2503 | oe->op = error_mark_node; | |
946e9eb4 | 2504 | range->exp = NULL_TREE; |
fafde080 | 2505 | range->low = NULL_TREE; |
2506 | range->high = NULL_TREE; | |
946e9eb4 | 2507 | } |
2508 | return true; | |
2509 | } | |
2510 | ||
b0c0c879 | 2511 | /* Optimize X == CST1 || X == CST2 |
2512 | if popcount (CST1 ^ CST2) == 1 into | |
2513 | (X & ~(CST1 ^ CST2)) == (CST1 & ~(CST1 ^ CST2)). | |
2514 | Similarly for ranges. E.g. | |
2515 | X != 2 && X != 3 && X != 10 && X != 11 | |
2516 | will be transformed by the previous optimization into | |
2517 | !((X - 2U) <= 1U || (X - 10U) <= 1U) | |
2518 | and this loop can transform that into | |
2519 | !(((X & ~8) - 2U) <= 1U). */ | |
2520 | ||
2521 | static bool | |
2522 | optimize_range_tests_xor (enum tree_code opcode, tree type, | |
2523 | tree lowi, tree lowj, tree highi, tree highj, | |
04009ada | 2524 | vec<operand_entry *> *ops, |
b0c0c879 | 2525 | struct range_entry *rangei, |
2526 | struct range_entry *rangej) | |
2527 | { | |
2528 | tree lowxor, highxor, tem, exp; | |
e3668db5 | 2529 | /* Check lowi ^ lowj == highi ^ highj and |
2530 | popcount (lowi ^ lowj) == 1. */ | |
b0c0c879 | 2531 | lowxor = fold_binary (BIT_XOR_EXPR, type, lowi, lowj); |
2532 | if (lowxor == NULL_TREE || TREE_CODE (lowxor) != INTEGER_CST) | |
2533 | return false; | |
8016354a | 2534 | if (!integer_pow2p (lowxor)) |
b0c0c879 | 2535 | return false; |
2536 | highxor = fold_binary (BIT_XOR_EXPR, type, highi, highj); | |
2537 | if (!tree_int_cst_equal (lowxor, highxor)) | |
2538 | return false; | |
2539 | ||
fe7507dc | 2540 | exp = rangei->exp; |
2541 | scalar_int_mode mode = as_a <scalar_int_mode> (TYPE_MODE (type)); | |
2542 | int prec = GET_MODE_PRECISION (mode); | |
2543 | if (TYPE_PRECISION (type) < prec | |
2544 | || (wi::to_wide (TYPE_MIN_VALUE (type)) | |
2545 | != wi::min_value (prec, TYPE_SIGN (type))) | |
2546 | || (wi::to_wide (TYPE_MAX_VALUE (type)) | |
2547 | != wi::max_value (prec, TYPE_SIGN (type)))) | |
2548 | { | |
2549 | type = build_nonstandard_integer_type (prec, TYPE_UNSIGNED (type)); | |
2550 | exp = fold_convert (type, exp); | |
2551 | lowxor = fold_convert (type, lowxor); | |
2552 | lowi = fold_convert (type, lowi); | |
2553 | highi = fold_convert (type, highi); | |
2554 | } | |
b0c0c879 | 2555 | tem = fold_build1 (BIT_NOT_EXPR, type, lowxor); |
fe7507dc | 2556 | exp = fold_build2 (BIT_AND_EXPR, type, exp, tem); |
b0c0c879 | 2557 | lowj = fold_build2 (BIT_AND_EXPR, type, lowi, tem); |
2558 | highj = fold_build2 (BIT_AND_EXPR, type, highi, tem); | |
e3668db5 | 2559 | if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, exp, |
2560 | NULL, rangei->in_p, lowj, highj, | |
b0c0c879 | 2561 | rangei->strict_overflow_p |
2562 | || rangej->strict_overflow_p)) | |
2563 | return true; | |
2564 | return false; | |
2565 | } | |
2566 | ||
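/* Editorial check (helper name invented) of the example above:
   2 ^ 10 == 3 ^ 11 == 8, a single bit, so masking that bit away folds
   the ranges [2, 3] and [10, 11] into one test:  */

static bool
xor_merge_sketch (unsigned x)
{
  return ((x & ~8u) - 2u) <= 1u;	/* true iff x is 2, 3, 10 or 11 */
}
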
2567 | /* Optimize X == CST1 || X == CST2 | |
2568 | if popcount (CST2 - CST1) == 1 into | |
2569 | ((X - CST1) & ~(CST2 - CST1)) == 0. | |
2570 | Similarly for ranges. E.g. | |
2571 | X == 43 || X == 76 || X == 44 || X == 78 || X == 77 || X == 46 | |
2572 | || X == 75 || X == 45 | |
2573 | will be transformed by the previous optimization into | |
2574 | (X - 43U) <= 3U || (X - 75U) <= 3U | |
2575 | and this loop can transform that into | |
2576 | ((X - 43U) & ~(75U - 43U)) <= 3U. */ | |
2577 | static bool | |
2578 | optimize_range_tests_diff (enum tree_code opcode, tree type, | |
fafde080 | 2579 | tree lowi, tree lowj, tree highi, tree highj, |
2580 | vec<operand_entry *> *ops, | |
2581 | struct range_entry *rangei, | |
2582 | struct range_entry *rangej) | |
b0c0c879 | 2583 | { |
2584 | tree tem1, tem2, mask; | |
2585 | /* Check highi - lowi == highj - lowj. */ | |
2586 | tem1 = fold_binary (MINUS_EXPR, type, highi, lowi); | |
2587 | if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) | |
2588 | return false; | |
2589 | tem2 = fold_binary (MINUS_EXPR, type, highj, lowj); | |
2590 | if (!tree_int_cst_equal (tem1, tem2)) | |
2591 | return false; | |
2592 | /* Check popcount (lowj - lowi) == 1. */ | |
2593 | tem1 = fold_binary (MINUS_EXPR, type, lowj, lowi); | |
2594 | if (tem1 == NULL_TREE || TREE_CODE (tem1) != INTEGER_CST) | |
2595 | return false; | |
8016354a | 2596 | if (!integer_pow2p (tem1)) |
b0c0c879 | 2597 | return false; |
2598 | ||
fe7507dc | 2599 | scalar_int_mode mode = as_a <scalar_int_mode> (TYPE_MODE (type)); |
2600 | int prec = GET_MODE_PRECISION (mode); | |
2601 | if (TYPE_PRECISION (type) < prec | |
2602 | || (wi::to_wide (TYPE_MIN_VALUE (type)) | |
2603 | != wi::min_value (prec, TYPE_SIGN (type))) | |
2604 | || (wi::to_wide (TYPE_MAX_VALUE (type)) | |
2605 | != wi::max_value (prec, TYPE_SIGN (type)))) | |
2606 | type = build_nonstandard_integer_type (prec, 1); | |
2607 | else | |
2608 | type = unsigned_type_for (type); | |
db7164e3 | 2609 | tem1 = fold_convert (type, tem1); |
2610 | tem2 = fold_convert (type, tem2); | |
2611 | lowi = fold_convert (type, lowi); | |
b0c0c879 | 2612 | mask = fold_build1 (BIT_NOT_EXPR, type, tem1); |
da78c088 | 2613 | tem1 = fold_build2 (MINUS_EXPR, type, |
db7164e3 | 2614 | fold_convert (type, rangei->exp), lowi); |
b0c0c879 | 2615 | tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask); |
2616 | lowj = build_int_cst (type, 0); | |
e3668db5 | 2617 | if (update_range_test (rangei, rangej, NULL, 1, opcode, ops, tem1, |
2618 | NULL, rangei->in_p, lowj, tem2, | |
b0c0c879 | 2619 | rangei->strict_overflow_p |
2620 | || rangej->strict_overflow_p)) | |
2621 | return true; | |
2622 | return false; | |
2623 | } | |
2624 | ||
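/* Editorial check (helper name invented) of the example above: both
   ranges have width 3 and 75 - 43 == 32 is a single bit, so the two
   tests collapse into one masked test:  */

static bool
diff_merge_sketch (unsigned x)
{
  /* True iff 43 <= x <= 46 or 75 <= x <= 78.  */
  return ((x - 43u) & ~32u) <= 3u;
}
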
/* Perform the checks common to optimize_range_tests_xor and
   optimize_range_tests_diff.
   If OPTIMIZE_XOR is TRUE, call optimize_range_tests_xor,
   else call optimize_range_tests_diff.  */
2629 | ||
2630 | static bool | |
2631 | optimize_range_tests_1 (enum tree_code opcode, int first, int length, | |
04009ada | 2632 | bool optimize_xor, vec<operand_entry *> *ops, |
b0c0c879 | 2633 | struct range_entry *ranges) |
2634 | { | |
2635 | int i, j; | |
2636 | bool any_changes = false; | |
2637 | for (i = first; i < length; i++) | |
2638 | { | |
2639 | tree lowi, highi, lowj, highj, type, tem; | |
2640 | ||
2641 | if (ranges[i].exp == NULL_TREE || ranges[i].in_p) | |
2642 | continue; | |
2643 | type = TREE_TYPE (ranges[i].exp); | |
2644 | if (!INTEGRAL_TYPE_P (type)) | |
2645 | continue; | |
2646 | lowi = ranges[i].low; | |
2647 | if (lowi == NULL_TREE) | |
2648 | lowi = TYPE_MIN_VALUE (type); | |
2649 | highi = ranges[i].high; | |
2650 | if (highi == NULL_TREE) | |
2651 | continue; | |
2652 | for (j = i + 1; j < length && j < i + 64; j++) | |
2653 | { | |
2654 | bool changes; | |
2655 | if (ranges[i].exp != ranges[j].exp || ranges[j].in_p) | |
2656 | continue; | |
2657 | lowj = ranges[j].low; | |
2658 | if (lowj == NULL_TREE) | |
2659 | continue; | |
2660 | highj = ranges[j].high; | |
2661 | if (highj == NULL_TREE) | |
2662 | highj = TYPE_MAX_VALUE (type); | |
2663 | /* Check lowj > highi. */ | |
2664 | tem = fold_binary (GT_EXPR, boolean_type_node, | |
2665 | lowj, highi); | |
2666 | if (tem == NULL_TREE || !integer_onep (tem)) | |
2667 | continue; | |
2668 | if (optimize_xor) | |
2669 | changes = optimize_range_tests_xor (opcode, type, lowi, lowj, | |
2670 | highi, highj, ops, | |
2671 | ranges + i, ranges + j); | |
2672 | else | |
2673 | changes = optimize_range_tests_diff (opcode, type, lowi, lowj, | |
2674 | highi, highj, ops, | |
2675 | ranges + i, ranges + j); | |
2676 | if (changes) | |
2677 | { | |
2678 | any_changes = true; | |
2679 | break; | |
2680 | } | |
2681 | } | |
2682 | } | |
2683 | return any_changes; | |
2684 | } | |
2685 | ||
/* Helper function of optimize_range_tests_to_bit_test.  Handle a single
   range [LOW, HIGH] of EXP: compute the bit mask of bits to test and
   return EXP (possibly adjusted) on success, NULL otherwise.  */
2689 | ||
2690 | static tree | |
2691 | extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high, | |
2692 | wide_int *mask, tree *totallowp) | |
2693 | { | |
2694 | tree tem = int_const_binop (MINUS_EXPR, high, low); | |
2695 | if (tem == NULL_TREE | |
2696 | || TREE_CODE (tem) != INTEGER_CST | |
2697 | || TREE_OVERFLOW (tem) | |
2698 | || tree_int_cst_sgn (tem) == -1 | |
2699 | || compare_tree_int (tem, prec) != -1) | |
2700 | return NULL_TREE; | |
2701 | ||
2702 | unsigned HOST_WIDE_INT max = tree_to_uhwi (tem) + 1; | |
2703 | *mask = wi::shifted_mask (0, max, false, prec); | |
2704 | if (TREE_CODE (exp) == BIT_AND_EXPR | |
2705 | && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) | |
2706 | { | |
2707 | widest_int msk = wi::to_widest (TREE_OPERAND (exp, 1)); | |
2708 | msk = wi::zext (~msk, TYPE_PRECISION (TREE_TYPE (exp))); | |
2709 | if (wi::popcount (msk) == 1 | |
2710 | && wi::ltu_p (msk, prec - max)) | |
2711 | { | |
2712 | *mask |= wi::shifted_mask (msk.to_uhwi (), max, false, prec); | |
2713 | max += msk.to_uhwi (); | |
2714 | exp = TREE_OPERAND (exp, 0); | |
2715 | if (integer_zerop (low) | |
2716 | && TREE_CODE (exp) == PLUS_EXPR | |
2717 | && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) | |
2718 | { | |
f6835415 | 2719 | tree ret = TREE_OPERAND (exp, 0); |
2720 | STRIP_NOPS (ret); | |
e3668db5 | 2721 | widest_int bias |
2722 | = wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)), | |
2723 | TYPE_PRECISION (TREE_TYPE (low)))); | |
f6835415 | 2724 | tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias); |
e3668db5 | 2725 | if (totallowp) |
2726 | { | |
2727 | *totallowp = tbias; | |
f6835415 | 2728 | return ret; |
e3668db5 | 2729 | } |
2730 | else if (!tree_int_cst_lt (totallow, tbias)) | |
2731 | return NULL_TREE; | |
f6835415 | 2732 | bias = wi::to_widest (tbias); |
e3668db5 | 2733 | bias -= wi::to_widest (totallow); |
32115eac | 2734 | if (bias >= 0 && bias < prec - max) |
e3668db5 | 2735 | { |
2736 | *mask = wi::lshift (*mask, bias); | |
f6835415 | 2737 | return ret; |
e3668db5 | 2738 | } |
2739 | } | |
2740 | } | |
2741 | } | |
2742 | if (totallowp) | |
2743 | return exp; | |
2744 | if (!tree_int_cst_lt (totallow, low)) | |
2745 | return exp; | |
2746 | tem = int_const_binop (MINUS_EXPR, low, totallow); | |
2747 | if (tem == NULL_TREE | |
2748 | || TREE_CODE (tem) != INTEGER_CST | |
2749 | || TREE_OVERFLOW (tem) | |
2750 | || compare_tree_int (tem, prec - max) == 1) | |
2751 | return NULL_TREE; | |
2752 | ||
2753 | *mask = wi::lshift (*mask, wi::to_widest (tem)); | |
2754 | return exp; | |
2755 | } | |
2756 | ||
2757 | /* Attempt to optimize small range tests using bit test. | |
2758 | E.g. | |
2759 | X != 43 && X != 76 && X != 44 && X != 78 && X != 49 | |
2760 | && X != 77 && X != 46 && X != 75 && X != 45 && X != 82 | |
   has been optimized by the earlier steps of this pass into:
   ((X - 43U) & ~32U) > 3U && X != 49 && X != 82
   As the whole 43 through 82 range spans fewer than 64 values,
   for 64-bit word targets optimize that into:
   (X - 43U) > 40U || ((1 << (X - 43U)) & 0x8F0000004FULL) == 0 */
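/* Editorial walk-through of the mask above: subtracting 43 from the
   excluded values 43,44,45,46,49 and 75,76,77,78,82 gives the bit
   positions 0-3,6 and 32-35,39; bits 0-3 and 6 form 0x4F, bits 32-35
   and 39 form 0x8F shifted left by 32, hence 0x8F0000004FULL.  */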
2766 | ||
2767 | static bool | |
2768 | optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length, | |
04009ada | 2769 | vec<operand_entry *> *ops, |
e3668db5 | 2770 | struct range_entry *ranges) |
2771 | { | |
2772 | int i, j; | |
2773 | bool any_changes = false; | |
2774 | int prec = GET_MODE_BITSIZE (word_mode); | |
2775 | auto_vec<struct range_entry *, 64> candidates; | |
2776 | ||
2777 | for (i = first; i < length - 2; i++) | |
2778 | { | |
2779 | tree lowi, highi, lowj, highj, type; | |
2780 | ||
2781 | if (ranges[i].exp == NULL_TREE || ranges[i].in_p) | |
2782 | continue; | |
2783 | type = TREE_TYPE (ranges[i].exp); | |
2784 | if (!INTEGRAL_TYPE_P (type)) | |
2785 | continue; | |
2786 | lowi = ranges[i].low; | |
2787 | if (lowi == NULL_TREE) | |
2788 | lowi = TYPE_MIN_VALUE (type); | |
2789 | highi = ranges[i].high; | |
2790 | if (highi == NULL_TREE) | |
2791 | continue; | |
2792 | wide_int mask; | |
2793 | tree exp = extract_bit_test_mask (ranges[i].exp, prec, lowi, lowi, | |
2794 | highi, &mask, &lowi); | |
2795 | if (exp == NULL_TREE) | |
2796 | continue; | |
2797 | bool strict_overflow_p = ranges[i].strict_overflow_p; | |
2798 | candidates.truncate (0); | |
2799 | int end = MIN (i + 64, length); | |
2800 | for (j = i + 1; j < end; j++) | |
2801 | { | |
2802 | tree exp2; | |
2803 | if (ranges[j].exp == NULL_TREE || ranges[j].in_p) | |
2804 | continue; | |
2805 | if (ranges[j].exp == exp) | |
2806 | ; | |
2807 | else if (TREE_CODE (ranges[j].exp) == BIT_AND_EXPR) | |
2808 | { | |
2809 | exp2 = TREE_OPERAND (ranges[j].exp, 0); | |
2810 | if (exp2 == exp) | |
2811 | ; | |
2812 | else if (TREE_CODE (exp2) == PLUS_EXPR) | |
2813 | { | |
2814 | exp2 = TREE_OPERAND (exp2, 0); | |
2815 | STRIP_NOPS (exp2); | |
2816 | if (exp2 != exp) | |
2817 | continue; | |
2818 | } | |
2819 | else | |
2820 | continue; | |
2821 | } | |
2822 | else | |
2823 | continue; | |
2824 | lowj = ranges[j].low; | |
2825 | if (lowj == NULL_TREE) | |
2826 | continue; | |
2827 | highj = ranges[j].high; | |
2828 | if (highj == NULL_TREE) | |
2829 | highj = TYPE_MAX_VALUE (type); | |
2830 | wide_int mask2; | |
2831 | exp2 = extract_bit_test_mask (ranges[j].exp, prec, lowi, lowj, | |
2832 | highj, &mask2, NULL); | |
2833 | if (exp2 != exp) | |
2834 | continue; | |
2835 | mask |= mask2; | |
2836 | strict_overflow_p |= ranges[j].strict_overflow_p; | |
2837 | candidates.safe_push (&ranges[j]); | |
2838 | } | |
2839 | ||
      /* If we would otherwise need 3 or more comparisons, use a bit
	 test.  */
2841 | if (candidates.length () >= 2) | |
2842 | { | |
2843 | tree high = wide_int_to_tree (TREE_TYPE (lowi), | |
2844 | wi::to_widest (lowi) | |
8f936b5e | 2845 | + prec - 1 - wi::clz (mask)); |
04009ada | 2846 | operand_entry *oe = (*ops)[ranges[i].idx]; |
e3668db5 | 2847 | tree op = oe->op; |
42acab1c | 2848 | gimple *stmt = op ? SSA_NAME_DEF_STMT (op) |
14a72c4e | 2849 | : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); |
e3668db5 | 2850 | location_t loc = gimple_location (stmt); |
2851 | tree optype = op ? TREE_TYPE (op) : boolean_type_node; | |
2852 | ||
2853 | /* See if it isn't cheaper to pretend the minimum value of the | |
	     range is 0, if the maximum value is small enough.
	     We can then avoid the subtraction of the minimum value, but
	     the mask constant could perhaps be more expensive.  */
2857 | if (compare_tree_int (lowi, 0) > 0 | |
2858 | && compare_tree_int (high, prec) < 0) | |
2859 | { | |
2860 | int cost_diff; | |
2861 | HOST_WIDE_INT m = tree_to_uhwi (lowi); | |
2862 | rtx reg = gen_raw_REG (word_mode, 10000); | |
2863 | bool speed_p = optimize_bb_for_speed_p (gimple_bb (stmt)); | |
2864 | cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg, | |
2865 | GEN_INT (-m)), speed_p); | |
2866 | rtx r = immed_wide_int_const (mask, word_mode); | |
2867 | cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r), | |
5ae4887d | 2868 | word_mode, speed_p); |
e3668db5 | 2869 | r = immed_wide_int_const (wi::lshift (mask, m), word_mode); |
2870 | cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r), | |
5ae4887d | 2871 | word_mode, speed_p); |
e3668db5 | 2872 | if (cost_diff > 0) |
2873 | { | |
2874 | mask = wi::lshift (mask, m); | |
2875 | lowi = build_zero_cst (TREE_TYPE (lowi)); | |
2876 | } | |
2877 | } | |
2878 | ||
2879 | tree tem = build_range_check (loc, optype, unshare_expr (exp), | |
2880 | false, lowi, high); | |
2881 | if (tem == NULL_TREE || is_gimple_val (tem)) | |
2882 | continue; | |
2883 | tree etype = unsigned_type_for (TREE_TYPE (exp)); | |
2884 | exp = fold_build2_loc (loc, MINUS_EXPR, etype, | |
2885 | fold_convert_loc (loc, etype, exp), | |
2886 | fold_convert_loc (loc, etype, lowi)); | |
2887 | exp = fold_convert_loc (loc, integer_type_node, exp); | |
2888 | tree word_type = lang_hooks.types.type_for_mode (word_mode, 1); | |
2889 | exp = fold_build2_loc (loc, LSHIFT_EXPR, word_type, | |
2890 | build_int_cst (word_type, 1), exp); | |
2891 | exp = fold_build2_loc (loc, BIT_AND_EXPR, word_type, exp, | |
2892 | wide_int_to_tree (word_type, mask)); | |
2893 | exp = fold_build2_loc (loc, EQ_EXPR, optype, exp, | |
2894 | build_zero_cst (word_type)); | |
2895 | if (is_gimple_val (exp)) | |
2896 | continue; | |
2897 | ||
2898 | /* The shift might have undefined behavior if TEM is true, | |
2899 | but reassociate_bb isn't prepared to have basic blocks | |
	     split when it is running.  So, temporarily emit code
	     with BIT_IOR_EXPR instead of &&, and fix it up in
2902 | branch_fixup. */ | |
2903 | gimple_seq seq; | |
2904 | tem = force_gimple_operand (tem, &seq, true, NULL_TREE); | |
2905 | gcc_assert (TREE_CODE (tem) == SSA_NAME); | |
2906 | gimple_set_visited (SSA_NAME_DEF_STMT (tem), true); | |
2907 | gimple_seq seq2; | |
2908 | exp = force_gimple_operand (exp, &seq2, true, NULL_TREE); | |
2909 | gimple_seq_add_seq_without_update (&seq, seq2); | |
2910 | gcc_assert (TREE_CODE (exp) == SSA_NAME); | |
2911 | gimple_set_visited (SSA_NAME_DEF_STMT (exp), true); | |
42acab1c | 2912 | gimple *g = gimple_build_assign (make_ssa_name (optype), |
14a72c4e | 2913 | BIT_IOR_EXPR, tem, exp); |
e3668db5 | 2914 | gimple_set_location (g, loc); |
2915 | gimple_seq_add_stmt_without_update (&seq, g); | |
2916 | exp = gimple_assign_lhs (g); | |
2917 | tree val = build_zero_cst (optype); | |
2918 | if (update_range_test (&ranges[i], NULL, candidates.address (), | |
2919 | candidates.length (), opcode, ops, exp, | |
2920 | seq, false, val, val, strict_overflow_p)) | |
2921 | { | |
2922 | any_changes = true; | |
2923 | reassoc_branch_fixups.safe_push (tem); | |
2924 | } | |
2925 | else | |
2926 | gimple_seq_discard (seq); | |
2927 | } | |
2928 | } | |
2929 | return any_changes; | |
2930 | } | |
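
/* Illustrative sketch, not part of the pass: the source-level effect of
   the bit-test rewrite above, on hypothetical helpers.  Three or more
   equality tests against constants in a small range become a subtract,
   a shift and a mask test.  */

static int
is_vowel (unsigned int c)
{
  /* Before: five separate comparisons.  */
  return c == 'a' || c == 'e' || c == 'i' || c == 'o' || c == 'u';
}

static int
is_vowel_bittest (unsigned int c)
{
  /* After: one range check plus one bit test.  Bit I of MASK is set iff
     LOWI + I is an accepted value; LOWI is 'a' here.  The range check
     also keeps the shift amount defined, which is what the BIT_IOR_EXPR
     plus branch_fixup arrangement above ensures at the GIMPLE level.  */
  const unsigned long mask = (1UL << ('a' - 'a')) | (1UL << ('e' - 'a'))
			     | (1UL << ('i' - 'a')) | (1UL << ('o' - 'a'))
			     | (1UL << ('u' - 'a'));
  return c - 'a' <= 'u' - 'a' && ((1UL << (c - 'a')) & mask) != 0;
}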
2931 | ||
4c168df0 | 2932 | /* Optimize x != 0 || y != 0 || z != 0 into (x | y | z) != 0
2933 | and similarly x != -1 || y != -1 || z != -1 into (x & y & z) != -1. */ | |
2934 | ||
2935 | static bool | |
2936 | optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length, | |
2937 | vec<operand_entry *> *ops, | |
2938 | struct range_entry *ranges) | |
2939 | { | |
2940 | int i; | |
2941 | unsigned int b; | |
2942 | bool any_changes = false; | |
2943 | auto_vec<int, 128> buckets; | |
2944 | auto_vec<int, 32> chains; | |
2945 | auto_vec<struct range_entry *, 32> candidates; | |
2946 | ||
2947 | for (i = first; i < length; i++) | |
2948 | { | |
2949 | if (ranges[i].exp == NULL_TREE | |
2950 | || TREE_CODE (ranges[i].exp) != SSA_NAME | |
2951 | || !ranges[i].in_p | |
2952 | || TYPE_PRECISION (TREE_TYPE (ranges[i].exp)) <= 1 | |
2953 | || TREE_CODE (TREE_TYPE (ranges[i].exp)) == BOOLEAN_TYPE | |
2954 | || ranges[i].low == NULL_TREE | |
2955 | || ranges[i].low != ranges[i].high) | |
2956 | continue; | |
2957 | ||
2958 | bool zero_p = integer_zerop (ranges[i].low); | |
2959 | if (!zero_p && !integer_all_onesp (ranges[i].low)) | |
2960 | continue; | |
2961 | ||
2962 | b = TYPE_PRECISION (TREE_TYPE (ranges[i].exp)) * 2 + !zero_p; | |
2963 | if (buckets.length () <= b) | |
2964 | buckets.safe_grow_cleared (b + 1); | |
2965 | if (chains.length () <= (unsigned) i) | |
2966 | chains.safe_grow (i + 1); | |
2967 | chains[i] = buckets[b]; | |
2968 | buckets[b] = i + 1; | |
2969 | } | |
2970 | ||
2971 | FOR_EACH_VEC_ELT (buckets, b, i) | |
2972 | if (i && chains[i - 1]) | |
2973 | { | |
2974 | int j, k = i; | |
2975 | for (j = chains[i - 1]; j; j = chains[j - 1]) | |
2976 | { | |
2977 | gimple *gk = SSA_NAME_DEF_STMT (ranges[k - 1].exp); | |
2978 | gimple *gj = SSA_NAME_DEF_STMT (ranges[j - 1].exp); | |
2979 | if (reassoc_stmt_dominates_stmt_p (gk, gj)) | |
2980 | k = j; | |
2981 | } | |
2982 | tree type1 = TREE_TYPE (ranges[k - 1].exp); | |
2983 | tree type2 = NULL_TREE; | |
2984 | bool strict_overflow_p = false; | |
2985 | candidates.truncate (0); | |
2986 | for (j = i; j; j = chains[j - 1]) | |
2987 | { | |
2988 | tree type = TREE_TYPE (ranges[j - 1].exp); | |
2989 | strict_overflow_p |= ranges[j - 1].strict_overflow_p; | |
2990 | if (j == k | |
2991 | || useless_type_conversion_p (type1, type)) | |
2992 | ; | |
2993 | else if (type2 == NULL_TREE | |
2994 | || useless_type_conversion_p (type2, type)) | |
2995 | { | |
2996 | if (type2 == NULL_TREE) | |
2997 | type2 = type; | |
2998 | candidates.safe_push (&ranges[j - 1]); | |
2999 | } | |
3000 | } | |
3001 | unsigned l = candidates.length (); | |
3002 | for (j = i; j; j = chains[j - 1]) | |
3003 | { | |
3004 | tree type = TREE_TYPE (ranges[j - 1].exp); | |
3005 | if (j == k) | |
3006 | continue; | |
3007 | if (useless_type_conversion_p (type1, type)) | |
3008 | ; | |
3009 | else if (type2 == NULL_TREE | |
3010 | || useless_type_conversion_p (type2, type)) | |
3011 | continue; | |
3012 | candidates.safe_push (&ranges[j - 1]); | |
3013 | } | |
3014 | gimple_seq seq = NULL; | |
3015 | tree op = NULL_TREE; | |
3016 | unsigned int id; | |
3017 | struct range_entry *r; | |
3018 | candidates.safe_push (&ranges[k - 1]); | |
3019 | FOR_EACH_VEC_ELT (candidates, id, r) | |
3020 | { | |
3021 | gimple *g; | |
3022 | if (id == 0) | |
3023 | { | |
3024 | op = r->exp; | |
3025 | continue; | |
3026 | } | |
3027 | if (id == l) | |
3028 | { | |
3029 | g = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, op); | |
3030 | gimple_seq_add_stmt_without_update (&seq, g); | |
3031 | op = gimple_assign_lhs (g); | |
3032 | } | |
3033 | tree type = TREE_TYPE (r->exp); | |
3034 | tree exp = r->exp; | |
3035 | if (id >= l && !useless_type_conversion_p (type1, type)) | |
3036 | { | |
3037 | g = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, exp); | |
3038 | gimple_seq_add_stmt_without_update (&seq, g); | |
3039 | exp = gimple_assign_lhs (g); | |
3040 | } | |
3041 | g = gimple_build_assign (make_ssa_name (id >= l ? type1 : type2), | |
3042 | (b & 1) ? BIT_AND_EXPR : BIT_IOR_EXPR, | |
3043 | op, exp); | |
3044 | gimple_seq_add_stmt_without_update (&seq, g); | |
3045 | op = gimple_assign_lhs (g); | |
3046 | } | |
3047 | candidates.pop (); | |
3048 | if (update_range_test (&ranges[k - 1], NULL, candidates.address (), | |
3049 | candidates.length (), opcode, ops, op, | |
3050 | seq, true, ranges[k - 1].low, | |
3051 | ranges[k - 1].low, strict_overflow_p)) | |
3052 | any_changes = true; | |
3053 | else | |
3054 | gimple_seq_discard (seq); | |
3055 | } | |
3056 | ||
3057 | return any_changes; | |
3058 | } | |
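
/* Illustrative sketch, not part of the pass: the source-level shape of
   the rewrite above, on hypothetical operands.  */

static int
all_zero (int x, int y, int z)
{
  /* Before: three comparisons and two logical combinations.  */
  return x == 0 && y == 0 && z == 0;
}

static int
all_zero_bitwise (int x, int y, int z)
{
  /* After: one BIT_IOR_EXPR chain and a single comparison.  The dual
     form uses BIT_AND_EXPR for tests against -1, e.g.
     x == -1 && y == -1 && z == -1 becomes (x & y & z) == -1.  */
  return (x | y | z) == 0;
}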
3059 | ||
fafde080 | 3060 | /* Attempt to optimize for signed a and b where b is known to be >= 0: |
3061 | a >= 0 && a < b into (unsigned) a < (unsigned) b | |
3062 | a >= 0 && a <= b into (unsigned) a <= (unsigned) b */ | |
3063 | ||
3064 | static bool | |
3065 | optimize_range_tests_var_bound (enum tree_code opcode, int first, int length, | |
3066 | vec<operand_entry *> *ops, | |
54ec8b11 | 3067 | struct range_entry *ranges, |
3068 | basic_block first_bb) | |
fafde080 | 3069 | { |
3070 | int i; | |
3071 | bool any_changes = false; | |
3072 | hash_map<tree, int> *map = NULL; | |
3073 | ||
3074 | for (i = first; i < length; i++) | |
3075 | { | |
b4d09fc1 | 3076 | if (ranges[i].exp == NULL_TREE |
3077 | || TREE_CODE (ranges[i].exp) != SSA_NAME | |
3078 | || !ranges[i].in_p) | |
fafde080 | 3079 | continue; |
3080 | ||
3081 | tree type = TREE_TYPE (ranges[i].exp); | |
3082 | if (!INTEGRAL_TYPE_P (type) | |
3083 | || TYPE_UNSIGNED (type) | |
3084 | || ranges[i].low == NULL_TREE | |
3085 | || !integer_zerop (ranges[i].low) | |
3086 | || ranges[i].high != NULL_TREE) | |
3087 | continue; | |
3088 | /* EXP >= 0 here. */ | |
3089 | if (map == NULL) | |
3090 | map = new hash_map <tree, int>; | |
3091 | map->put (ranges[i].exp, i); | |
3092 | } | |
3093 | ||
3094 | if (map == NULL) | |
3095 | return false; | |
3096 | ||
3097 | for (i = 0; i < length; i++) | |
3098 | { | |
493a1c52 | 3099 | bool in_p = ranges[i].in_p; |
fafde080 | 3100 | if (ranges[i].low == NULL_TREE |
493a1c52 | 3101 | || ranges[i].high == NULL_TREE) |
fafde080 | 3102 | continue; |
493a1c52 | 3103 | if (!integer_zerop (ranges[i].low) |
3104 | || !integer_zerop (ranges[i].high)) | |
3105 | { | |
3106 | if (ranges[i].exp | |
3107 | && TYPE_PRECISION (TREE_TYPE (ranges[i].exp)) == 1 | |
3108 | && TYPE_UNSIGNED (TREE_TYPE (ranges[i].exp)) | |
3109 | && integer_onep (ranges[i].low) | |
3110 | && integer_onep (ranges[i].high)) | |
3111 | in_p = !in_p; | |
3112 | else | |
3113 | continue; | |
3114 | } | |
fafde080 | 3115 | |
3116 | gimple *stmt; | |
3117 | tree_code ccode; | |
3118 | tree rhs1, rhs2; | |
3119 | if (ranges[i].exp) | |
3120 | { | |
b4d09fc1 | 3121 | if (TREE_CODE (ranges[i].exp) != SSA_NAME) |
3122 | continue; | |
fafde080 | 3123 | stmt = SSA_NAME_DEF_STMT (ranges[i].exp); |
3124 | if (!is_gimple_assign (stmt)) | |
3125 | continue; | |
3126 | ccode = gimple_assign_rhs_code (stmt); | |
3127 | rhs1 = gimple_assign_rhs1 (stmt); | |
3128 | rhs2 = gimple_assign_rhs2 (stmt); | |
3129 | } | |
3130 | else | |
3131 | { | |
3132 | operand_entry *oe = (*ops)[ranges[i].idx]; | |
3133 | stmt = last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id)); | |
3134 | if (gimple_code (stmt) != GIMPLE_COND) | |
3135 | continue; | |
3136 | ccode = gimple_cond_code (stmt); | |
3137 | rhs1 = gimple_cond_lhs (stmt); | |
3138 | rhs2 = gimple_cond_rhs (stmt); | |
3139 | } | |
3140 | ||
3141 | if (TREE_CODE (rhs1) != SSA_NAME | |
3142 | || rhs2 == NULL_TREE | |
3143 | || TREE_CODE (rhs2) != SSA_NAME) | |
3144 | continue; | |
3145 | ||
3146 | switch (ccode) | |
3147 | { | |
3148 | case GT_EXPR: | |
3149 | case GE_EXPR: | |
42fad061 | 3150 | case LT_EXPR: |
3151 | case LE_EXPR: | |
3152 | break; | |
3153 | default: | |
3154 | continue; | |
3155 | } | |
493a1c52 | 3156 | if (in_p) |
42fad061 | 3157 | ccode = invert_tree_comparison (ccode, false); |
3158 | switch (ccode) | |
3159 | { | |
3160 | case GT_EXPR: | |
3161 | case GE_EXPR: | |
3162 | std::swap (rhs1, rhs2); | |
fafde080 | 3163 | ccode = swap_tree_comparison (ccode); |
3164 | break; | |
3165 | case LT_EXPR: | |
3166 | case LE_EXPR: | |
fafde080 | 3167 | break; |
3168 | default: | |
42fad061 | 3169 | gcc_unreachable (); |
fafde080 | 3170 | } |
3171 | ||
3172 | int *idx = map->get (rhs1); | |
3173 | if (idx == NULL) | |
3174 | continue; | |
3175 | ||
54ec8b11 | 3176 | /* maybe_optimize_range_tests allows statements without side-effects |
3177 | in the basic blocks as long as they are consumed in the same bb. | |
3178 | Make sure rhs2's def stmt is not among them, otherwise we can't | |
3179 | safely use get_nonzero_bits on it. E.g. in: | |
3180 | # RANGE [-83, 1] NONZERO 173 | |
3181 | # k_32 = PHI <k_47(13), k_12(9)> | |
3182 | ... | |
3183 | if (k_32 >= 0) | |
3184 | goto <bb 5>; [26.46%] | |
3185 | else | |
3186 | goto <bb 9>; [73.54%] | |
3187 | ||
3188 | <bb 5> [local count: 140323371]: | |
3189 | # RANGE [0, 1] NONZERO 1 | |
3190 | _5 = (int) k_32; | |
3191 | # RANGE [0, 4] NONZERO 4 | |
3192 | _21 = _5 << 2; | |
3193 | # RANGE [0, 4] NONZERO 4 | |
3194 | iftmp.0_44 = (char) _21; | |
3195 | if (k_32 < iftmp.0_44) | |
3196 | goto <bb 6>; [84.48%] | |
3197 | else | |
3198 | goto <bb 9>; [15.52%] | |
3199 | the ranges on _5/_21/iftmp.0_44 are flow sensitive and assume that | |
3200 | k_32 >= 0. If we'd optimize k_32 >= 0 to true and k_32 < iftmp.0_44 | |
3201 | to (unsigned) k_32 < (unsigned) iftmp.0_44, then we would execute | |
3202 | those stmts even for negative k_32, the value ranges would no | |
3203 | longer be guaranteed, and so the optimization would be invalid. */ | |
ccf8d652 | 3204 | while (opcode == ERROR_MARK) |
54ec8b11 | 3205 | { |
3206 | gimple *g = SSA_NAME_DEF_STMT (rhs2); | |
3207 | basic_block bb2 = gimple_bb (g); | |
3208 | if (bb2 | |
3209 | && bb2 != first_bb | |
3210 | && dominated_by_p (CDI_DOMINATORS, bb2, first_bb)) | |
3211 | { | |
3212 | /* As an exception, handle a few common cases. */ | |
3213 | if (gimple_assign_cast_p (g) | |
ccf8d652 | 3214 | && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (g)))) |
3215 | { | |
3216 | tree op0 = gimple_assign_rhs1 (g); | |
3217 | if (TYPE_UNSIGNED (TREE_TYPE (op0)) | |
3218 | && (TYPE_PRECISION (TREE_TYPE (rhs2)) | |
3219 | > TYPE_PRECISION (TREE_TYPE (op0)))) | |
3220 | /* Zero-extension is always ok. */ | |
3221 | break; | |
3222 | else if (TYPE_PRECISION (TREE_TYPE (rhs2)) | |
3223 | == TYPE_PRECISION (TREE_TYPE (op0)) | |
3224 | && TREE_CODE (op0) == SSA_NAME) | |
3225 | { | |
3226 | /* Cast from signed to unsigned or vice versa. Retry | |
3227 | with the op0 as new rhs2. */ | |
3228 | rhs2 = op0; | |
3229 | continue; | |
3230 | } | |
3231 | } | |
54ec8b11 | 3232 | else if (is_gimple_assign (g) |
3233 | && gimple_assign_rhs_code (g) == BIT_AND_EXPR | |
3234 | && TREE_CODE (gimple_assign_rhs2 (g)) == INTEGER_CST | |
3235 | && !wi::neg_p (wi::to_wide (gimple_assign_rhs2 (g)))) | |
3236 | /* Masking with an INTEGER_CST with the MSB clear is always ok | |
ccf8d652 | 3237 | too. */ |
3238 | break; | |
3239 | rhs2 = NULL_TREE; | |
54ec8b11 | 3240 | } |
ccf8d652 | 3241 | break; |
54ec8b11 | 3242 | } |
ccf8d652 | 3243 | if (rhs2 == NULL_TREE) |
3244 | continue; | |
54ec8b11 | 3245 | |
fafde080 | 3246 | wide_int nz = get_nonzero_bits (rhs2); |
3247 | if (wi::neg_p (nz)) | |
3248 | continue; | |
3249 | ||
3250 | /* We have EXP < RHS2 or EXP <= RHS2 where EXP >= 0 | |
3251 | and RHS2 is known to be >= 0. */ | |
3252 | tree utype = unsigned_type_for (TREE_TYPE (rhs1)); | |
3253 | ||
3254 | enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; | |
3255 | if ((ranges[*idx].strict_overflow_p | |
3256 | || ranges[i].strict_overflow_p) | |
3257 | && issue_strict_overflow_warning (wc)) | |
3258 | warning_at (gimple_location (stmt), OPT_Wstrict_overflow, | |
3259 | "assuming signed overflow does not occur " | |
3260 | "when simplifying range test"); | |
3261 | ||
3262 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
3263 | { | |
3264 | struct range_entry *r = &ranges[*idx]; | |
3265 | fprintf (dump_file, "Optimizing range test "); | |
1ffa4346 | 3266 | print_generic_expr (dump_file, r->exp); |
fafde080 | 3267 | fprintf (dump_file, " +["); |
1ffa4346 | 3268 | print_generic_expr (dump_file, r->low); |
fafde080 | 3269 | fprintf (dump_file, ", "); |
1ffa4346 | 3270 | print_generic_expr (dump_file, r->high); |
fafde080 | 3271 | fprintf (dump_file, "] and comparison "); |
1ffa4346 | 3272 | print_generic_expr (dump_file, rhs1); |
fafde080 | 3273 | fprintf (dump_file, " %s ", op_symbol_code (ccode)); |
1ffa4346 | 3274 | print_generic_expr (dump_file, rhs2); |
fafde080 | 3275 | fprintf (dump_file, "\n into ("); |
1ffa4346 | 3276 | print_generic_expr (dump_file, utype); |
fafde080 | 3277 | fprintf (dump_file, ") "); |
1ffa4346 | 3278 | print_generic_expr (dump_file, rhs1); |
fafde080 | 3279 | fprintf (dump_file, " %s (", op_symbol_code (ccode)); |
1ffa4346 | 3280 | print_generic_expr (dump_file, utype); |
fafde080 | 3281 | fprintf (dump_file, ") "); |
1ffa4346 | 3282 | print_generic_expr (dump_file, rhs2); |
fafde080 | 3283 | fprintf (dump_file, "\n"); |
3284 | } | |
3285 | ||
42fad061 | 3286 | operand_entry *oe = (*ops)[ranges[i].idx]; |
3287 | ranges[i].in_p = 0; | |
3288 | if (opcode == BIT_IOR_EXPR | |
3289 | || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) | |
3290 | { | |
3291 | ranges[i].in_p = 1; | |
3292 | ccode = invert_tree_comparison (ccode, false); | |
3293 | } | |
fafde080 | 3294 | |
3295 | unsigned int uid = gimple_uid (stmt); | |
3296 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
3297 | gimple *g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs1); | |
3298 | gimple_set_uid (g, uid); | |
3299 | rhs1 = gimple_assign_lhs (g); | |
3300 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
ccf8d652 | 3301 | if (!useless_type_conversion_p (utype, TREE_TYPE (rhs2))) |
3302 | { | |
3303 | g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, rhs2); | |
3304 | gimple_set_uid (g, uid); | |
3305 | rhs2 = gimple_assign_lhs (g); | |
3306 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
3307 | } | |
48baf518 | 3308 | if (tree_swap_operands_p (rhs1, rhs2)) |
fafde080 | 3309 | { |
3310 | std::swap (rhs1, rhs2); | |
3311 | ccode = swap_tree_comparison (ccode); | |
3312 | } | |
3313 | if (gimple_code (stmt) == GIMPLE_COND) | |
3314 | { | |
3315 | gcond *c = as_a <gcond *> (stmt); | |
3316 | gimple_cond_set_code (c, ccode); | |
3317 | gimple_cond_set_lhs (c, rhs1); | |
3318 | gimple_cond_set_rhs (c, rhs2); | |
3319 | update_stmt (stmt); | |
3320 | } | |
3321 | else | |
3322 | { | |
221d7858 | 3323 | tree ctype = oe->op ? TREE_TYPE (oe->op) : boolean_type_node; |
3324 | if (!INTEGRAL_TYPE_P (ctype) | |
3325 | || (TREE_CODE (ctype) != BOOLEAN_TYPE | |
3326 | && TYPE_PRECISION (ctype) != 1)) | |
3327 | ctype = boolean_type_node; | |
3328 | g = gimple_build_assign (make_ssa_name (ctype), ccode, rhs1, rhs2); | |
fafde080 | 3329 | gimple_set_uid (g, uid); |
3330 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
221d7858 | 3331 | if (oe->op && ctype != TREE_TYPE (oe->op)) |
3332 | { | |
3333 | g = gimple_build_assign (make_ssa_name (TREE_TYPE (oe->op)), | |
3334 | NOP_EXPR, gimple_assign_lhs (g)); | |
3335 | gimple_set_uid (g, uid); | |
3336 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
3337 | } | |
fafde080 | 3338 | ranges[i].exp = gimple_assign_lhs (g); |
221d7858 | 3339 | oe->op = ranges[i].exp; |
3340 | ranges[i].low = build_zero_cst (TREE_TYPE (ranges[i].exp)); | |
3341 | ranges[i].high = ranges[i].low; | |
fafde080 | 3342 | } |
3343 | ranges[i].strict_overflow_p = false; | |
42fad061 | 3344 | oe = (*ops)[ranges[*idx].idx]; |
fafde080 | 3345 | /* Now change all the other range test immediate uses, so that |
3346 | those tests will be optimized away. */ | |
3347 | if (opcode == ERROR_MARK) | |
3348 | { | |
3349 | if (oe->op) | |
3350 | oe->op = build_int_cst (TREE_TYPE (oe->op), | |
3351 | oe->rank == BIT_IOR_EXPR ? 0 : 1); | |
3352 | else | |
3353 | oe->op = (oe->rank == BIT_IOR_EXPR | |
3354 | ? boolean_false_node : boolean_true_node); | |
3355 | } | |
3356 | else | |
3357 | oe->op = error_mark_node; | |
3358 | ranges[*idx].exp = NULL_TREE; | |
3359 | ranges[*idx].low = NULL_TREE; | |
3360 | ranges[*idx].high = NULL_TREE; | |
3361 | any_changes = true; | |
3362 | } | |
3363 | ||
3364 | delete map; | |
3365 | return any_changes; | |
3366 | } | |
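
/* Illustrative sketch, not part of the pass: the transformation above at
   the source level, for hypothetical signed A and B where B is known to
   be non-negative (here via get_nonzero_bits on RHS2).  */

static int
index_ok (int a, int b)
{
  /* Before: two signed comparisons.  */
  return a >= 0 && a < b;
}

static int
index_ok_unsigned (int a, int b)
{
  /* After: a single unsigned comparison.  A negative A wraps to a large
     unsigned value and compares >= (unsigned) B, because B's sign bit
     is known to be clear.  */
  return (unsigned int) a < (unsigned int) b;
}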
3367 | ||
946e9eb4 | 3368 | /* Optimize range tests, similarly to how fold_range_test optimizes
3369 | them on trees. The tree code for the binary | |
8a2c7744 | 3370 | operation between all the operands is OPCODE. |
3371 | If OPCODE is ERROR_MARK, optimize_range_tests is called from within | |
3372 | maybe_optimize_range_tests for inter-bb range optimization. | |
3373 | In that case if oe->op is NULL, oe->id is bb->index whose | |
3374 | GIMPLE_COND is && or ||ed into the test, and oe->rank says | |
54ec8b11 | 3375 | the actual opcode. |
3376 | FIRST_BB is the first basic block if OPCODE is ERROR_MARK. */ | |
946e9eb4 | 3377 | |
82cb4e1c | 3378 | static bool |
946e9eb4 | 3379 | optimize_range_tests (enum tree_code opcode, |
54ec8b11 | 3380 | vec<operand_entry *> *ops, basic_block first_bb) |
946e9eb4 | 3381 | { |
f1f41a6c | 3382 | unsigned int length = ops->length (), i, j, first; |
04009ada | 3383 | operand_entry *oe; |
946e9eb4 | 3384 | struct range_entry *ranges; |
3385 | bool any_changes = false; | |
3386 | ||
3387 | if (length == 1) | |
82cb4e1c | 3388 | return false; |
946e9eb4 | 3389 | |
3390 | ranges = XNEWVEC (struct range_entry, length); | |
3391 | for (i = 0; i < length; i++) | |
3392 | { | |
f1f41a6c | 3393 | oe = (*ops)[i]; |
946e9eb4 | 3394 | ranges[i].idx = i; |
8a2c7744 | 3395 | init_range_entry (ranges + i, oe->op, |
14a72c4e | 3396 | oe->op |
3397 | ? NULL | |
3398 | : last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id))); | |
946e9eb4 | 3399 | /* For | invert it now, we will invert it again before emitting |
3400 | the optimized expression. */ | |
8a2c7744 | 3401 | if (opcode == BIT_IOR_EXPR |
3402 | || (opcode == ERROR_MARK && oe->rank == BIT_IOR_EXPR)) | |
946e9eb4 | 3403 | ranges[i].in_p = !ranges[i].in_p; |
3404 | } | |
3405 | ||
3406 | qsort (ranges, length, sizeof (*ranges), range_entry_cmp); | |
3407 | for (i = 0; i < length; i++) | |
3408 | if (ranges[i].exp != NULL_TREE && TREE_CODE (ranges[i].exp) == SSA_NAME) | |
3409 | break; | |
3410 | ||
3411 | /* Try to merge ranges. */ | |
3412 | for (first = i; i < length; i++) | |
3413 | { | |
3414 | tree low = ranges[i].low; | |
3415 | tree high = ranges[i].high; | |
3416 | int in_p = ranges[i].in_p; | |
3417 | bool strict_overflow_p = ranges[i].strict_overflow_p; | |
3418 | int update_fail_count = 0; | |
3419 | ||
3420 | for (j = i + 1; j < length; j++) | |
3421 | { | |
3422 | if (ranges[i].exp != ranges[j].exp) | |
3423 | break; | |
3424 | if (!merge_ranges (&in_p, &low, &high, in_p, low, high, | |
3425 | ranges[j].in_p, ranges[j].low, ranges[j].high)) | |
3426 | break; | |
3427 | strict_overflow_p |= ranges[j].strict_overflow_p; | |
3428 | } | |
3429 | ||
3430 | if (j == i + 1) | |
3431 | continue; | |
3432 | ||
e3668db5 | 3433 | if (update_range_test (ranges + i, ranges + i + 1, NULL, j - i - 1, |
3434 | opcode, ops, ranges[i].exp, NULL, in_p, | |
3435 | low, high, strict_overflow_p)) | |
946e9eb4 | 3436 | { |
3437 | i = j - 1; | |
3438 | any_changes = true; | |
3439 | } | |
3440 | /* Avoid quadratic complexity if all merge_ranges calls would succeed, | |
3441 | while update_range_test would fail. */ | |
3442 | else if (update_fail_count == 64) | |
3443 | i = j - 1; | |
3444 | else | |
3445 | ++update_fail_count; | |
3446 | } | |
3447 | ||
b0c0c879 | 3448 | any_changes |= optimize_range_tests_1 (opcode, first, length, true, |
3449 | ops, ranges); | |
946e9eb4 | 3450 | |
b0c0c879 | 3451 | if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2) |
3452 | any_changes |= optimize_range_tests_1 (opcode, first, length, false, | |
3453 | ops, ranges); | |
e3668db5 | 3454 | if (lshift_cheap_p (optimize_function_for_speed_p (cfun))) |
3455 | any_changes |= optimize_range_tests_to_bit_test (opcode, first, length, | |
3456 | ops, ranges); | |
4c168df0 | 3457 | any_changes |= optimize_range_tests_cmp_bitwise (opcode, first, length, |
3458 | ops, ranges); | |
fafde080 | 3459 | any_changes |= optimize_range_tests_var_bound (opcode, first, length, ops, |
54ec8b11 | 3460 | ranges, first_bb); |
946e9eb4 | 3461 | |
8a2c7744 | 3462 | if (any_changes && opcode != ERROR_MARK) |
946e9eb4 | 3463 | { |
3464 | j = 0; | |
f1f41a6c | 3465 | FOR_EACH_VEC_ELT (*ops, i, oe) |
946e9eb4 | 3466 | { |
3467 | if (oe->op == error_mark_node) | |
3468 | continue; | |
3469 | else if (i != j) | |
f1f41a6c | 3470 | (*ops)[j] = oe; |
946e9eb4 | 3471 | j++; |
3472 | } | |
f1f41a6c | 3473 | ops->truncate (j); |
946e9eb4 | 3474 | } |
3475 | ||
3476 | XDELETEVEC (ranges); | |
82cb4e1c | 3477 | return any_changes; |
946e9eb4 | 3478 | } |
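
/* Illustrative sketch, not part of the pass: the range-merging loop above
   combining two adjacent single-value ranges (hypothetical code).  */

static int
is_binary_digit (int c)
{
  /* Before: two single-value ranges [48,48] and [49,49] on C.  */
  return c == '0' || c == '1';
}

static int
is_binary_digit_merged (int c)
{
  /* After: the merged range [48,49], which build_range_check emits as a
     subtraction plus one unsigned comparison.  */
  return (unsigned int) (c - '0') <= 1U;
}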
3479 | ||
026f2137 | 3480 | /* A subroutine of optimize_vec_cond_expr to extract and canonicalize |
3481 | the operands of the VEC_COND_EXPR. Returns ERROR_MARK on failure, | |
3482 | otherwise the comparison code. */ | |
3483 | ||
3484 | static tree_code | |
3485 | ovce_extract_ops (tree var, gassign **rets, bool *reti) | |
3486 | { | |
3487 | if (TREE_CODE (var) != SSA_NAME) | |
3488 | return ERROR_MARK; | |
3489 | ||
3490 | gassign *stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (var)); | |
3491 | if (stmt == NULL) | |
3492 | return ERROR_MARK; | |
3493 | ||
3494 | /* ??? If we start creating more COND_EXPR, we could perform | |
3495 | this same optimization with them. For now, simplify. */ | |
3496 | if (gimple_assign_rhs_code (stmt) != VEC_COND_EXPR) | |
3497 | return ERROR_MARK; | |
3498 | ||
3499 | tree cond = gimple_assign_rhs1 (stmt); | |
3500 | tree_code cmp = TREE_CODE (cond); | |
3501 | if (TREE_CODE_CLASS (cmp) != tcc_comparison) | |
3502 | return ERROR_MARK; | |
3503 | ||
3504 | /* ??? For now, allow only canonical true and false result vectors. | |
3505 | We could expand this to other constants should the need arise, | |
3506 | but at the moment we don't create them. */ | |
3507 | tree t = gimple_assign_rhs2 (stmt); | |
3508 | tree f = gimple_assign_rhs3 (stmt); | |
3509 | bool inv; | |
3510 | if (integer_all_onesp (t)) | |
3511 | inv = false; | |
3512 | else if (integer_all_onesp (f)) | |
3513 | { | |
3514 | cmp = invert_tree_comparison (cmp, false); | |
3515 | inv = true; | |
3516 | } | |
3517 | else | |
3518 | return ERROR_MARK; | |
3519 | if (!integer_zerop (f)) | |
3520 | return ERROR_MARK; | |
3521 | ||
3522 | /* Success! */ | |
3523 | if (rets) | |
3524 | *rets = stmt; | |
3525 | if (reti) | |
3526 | *reti = inv; | |
3527 | return cmp; | |
3528 | } | |
3529 | ||
3530 | /* Optimize the condition of VEC_COND_EXPRs which have been combined | |
3531 | with OPCODE (either BIT_AND_EXPR or BIT_IOR_EXPR). */ | |
3532 | ||
3533 | static bool | |
3534 | optimize_vec_cond_expr (tree_code opcode, vec<operand_entry *> *ops) | |
3535 | { | |
3536 | unsigned int length = ops->length (), i, j; | |
3537 | bool any_changes = false; | |
3538 | ||
3539 | if (length == 1) | |
3540 | return false; | |
3541 | ||
3542 | for (i = 0; i < length; ++i) | |
3543 | { | |
3544 | tree elt0 = (*ops)[i]->op; | |
3545 | ||
3546 | gassign *stmt0; | |
3547 | bool invert; | |
3548 | tree_code cmp0 = ovce_extract_ops (elt0, &stmt0, &invert); | |
3549 | if (cmp0 == ERROR_MARK) | |
3550 | continue; | |
3551 | ||
3552 | for (j = i + 1; j < length; ++j) | |
3553 | { | |
3554 | tree &elt1 = (*ops)[j]->op; | |
3555 | ||
3556 | gassign *stmt1; | |
3557 | tree_code cmp1 = ovce_extract_ops (elt1, &stmt1, NULL); | |
3558 | if (cmp1 == ERROR_MARK) | |
3559 | continue; | |
3560 | ||
3561 | tree cond0 = gimple_assign_rhs1 (stmt0); | |
3562 | tree x0 = TREE_OPERAND (cond0, 0); | |
3563 | tree y0 = TREE_OPERAND (cond0, 1); | |
3564 | ||
3565 | tree cond1 = gimple_assign_rhs1 (stmt1); | |
3566 | tree x1 = TREE_OPERAND (cond1, 0); | |
3567 | tree y1 = TREE_OPERAND (cond1, 1); | |
3568 | ||
3569 | tree comb; | |
3570 | if (opcode == BIT_AND_EXPR) | |
3571 | comb = maybe_fold_and_comparisons (cmp0, x0, y0, cmp1, x1, y1); | |
3572 | else if (opcode == BIT_IOR_EXPR) | |
3573 | comb = maybe_fold_or_comparisons (cmp0, x0, y0, cmp1, x1, y1); | |
3574 | else | |
3575 | gcc_unreachable (); | |
3576 | if (comb == NULL) | |
3577 | continue; | |
3578 | ||
3579 | /* Success! */ | |
3580 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
3581 | { | |
3582 | fprintf (dump_file, "Transforming "); | |
1ffa4346 | 3583 | print_generic_expr (dump_file, cond0); |
026f2137 | 3584 | fprintf (dump_file, " %c ", opcode == BIT_AND_EXPR ? '&' : '|'); |
1ffa4346 | 3585 | print_generic_expr (dump_file, cond1); |
026f2137 | 3586 | fprintf (dump_file, " into "); |
1ffa4346 | 3587 | print_generic_expr (dump_file, comb); |
026f2137 | 3588 | fputc ('\n', dump_file); |
3589 | } | |
3590 | ||
3591 | gimple_assign_set_rhs1 (stmt0, comb); | |
3592 | if (invert) | |
3593 | std::swap (*gimple_assign_rhs2_ptr (stmt0), | |
3594 | *gimple_assign_rhs3_ptr (stmt0)); | |
3595 | update_stmt (stmt0); | |
3596 | ||
3597 | elt1 = error_mark_node; | |
3598 | any_changes = true; | |
3599 | } | |
3600 | } | |
3601 | ||
3602 | if (any_changes) | |
3603 | { | |
3604 | operand_entry *oe; | |
3605 | j = 0; | |
3606 | FOR_EACH_VEC_ELT (*ops, i, oe) | |
3607 | { | |
3608 | if (oe->op == error_mark_node) | |
3609 | continue; | |
3610 | else if (i != j) | |
3611 | (*ops)[j] = oe; | |
3612 | j++; | |
3613 | } | |
3614 | ops->truncate (j); | |
3615 | } | |
3616 | ||
3617 | return any_changes; | |
3618 | } | |
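
/* Illustrative sketch, not part of the pass, using the GNU vector
   extension: the effect of combining two VEC_COND_EXPR conditions.
   The function names are hypothetical.  */

typedef int v4si __attribute__ ((vector_size (16)));

static v4si
mask_before (v4si a)
{
  v4si two = { 2, 2, 2, 2 }, five = { 5, 5, 5, 5 };
  /* Before: two comparisons, each a VEC_COND_EXPR producing a -1/0
     mask, combined with BIT_AND_EXPR.  */
  return (a > two) & (a > five);
}

static v4si
mask_after (v4si a)
{
  v4si five = { 5, 5, 5, 5 };
  /* After: maybe_fold_and_comparisons folds a > 2 && a > 5 to a > 5,
     leaving a single comparison.  */
  return a > five;
}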
3619 | ||
8a2c7744 | 3620 | /* Return true if STMT is a cast like: |
3621 | <bb N>: | |
3622 | ... | |
3623 | _123 = (int) _234; | |
3624 | ||
3625 | <bb M>: | |
3626 | # _345 = PHI <_123(N), 1(...), 1(...)> | |
3627 | where _234 has bool type, _123 has a single use and | |
3628 | bb N has a single successor M. This is commonly used in | |
be9f4473 | 3629 | the last block of a range test. |
3630 | ||
3631 | Also return true if STMT is a tcc_comparison like: | |
3632 | <bb N>: | |
3633 | ... | |
3634 | _234 = a_2(D) == 2; | |
3635 | ||
3636 | <bb M>: | |
3637 | # _345 = PHI <_234(N), 1(...), 1(...)> | |
3638 | _346 = (int) _345; | |
3639 | where _234 has bool type, a single use and | |
3640 | bb N has a single successor M. This is commonly used in | |
8a2c7744 | 3641 | the last block of a range test. */ |
3642 | ||
3643 | static bool | |
42acab1c | 3644 | final_range_test_p (gimple *stmt) |
8a2c7744 | 3645 | { |
be9f4473 | 3646 | basic_block bb, rhs_bb, lhs_bb; |
8a2c7744 | 3647 | edge e; |
3648 | tree lhs, rhs; | |
3649 | use_operand_p use_p; | |
42acab1c | 3650 | gimple *use_stmt; |
8a2c7744 | 3651 | |
be9f4473 | 3652 | if (!gimple_assign_cast_p (stmt) |
3653 | && (!is_gimple_assign (stmt) | |
3654 | || (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) | |
3655 | != tcc_comparison))) | |
8a2c7744 | 3656 | return false; |
3657 | bb = gimple_bb (stmt); | |
3658 | if (!single_succ_p (bb)) | |
3659 | return false; | |
3660 | e = single_succ_edge (bb); | |
3661 | if (e->flags & EDGE_COMPLEX) | |
3662 | return false; | |
3663 | ||
3664 | lhs = gimple_assign_lhs (stmt); | |
3665 | rhs = gimple_assign_rhs1 (stmt); | |
be9f4473 | 3666 | if (gimple_assign_cast_p (stmt) |
3667 | && (!INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | |
3668 | || TREE_CODE (rhs) != SSA_NAME | |
3669 | || TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)) | |
8a2c7744 | 3670 | return false; |
3671 | ||
be9f4473 | 3672 | if (!gimple_assign_cast_p (stmt) |
3673 | && (TREE_CODE (TREE_TYPE (lhs)) != BOOLEAN_TYPE)) | |
3674 | return false; | |
3675 | ||
8a2c7744 | 3676 | /* Test whether lhs is consumed only by a PHI in the only successor bb. */ |
3677 | if (!single_imm_use (lhs, &use_p, &use_stmt)) | |
3678 | return false; | |
3679 | ||
3680 | if (gimple_code (use_stmt) != GIMPLE_PHI | |
3681 | || gimple_bb (use_stmt) != e->dest) | |
3682 | return false; | |
3683 | ||
3684 | /* And that the rhs is defined in the same loop. */ | |
be9f4473 | 3685 | if (gimple_assign_cast_p (stmt)) |
3686 | { | |
3687 | if (TREE_CODE (rhs) != SSA_NAME | |
3688 | || !(rhs_bb = gimple_bb (SSA_NAME_DEF_STMT (rhs))) | |
3689 | || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), rhs_bb)) | |
3690 | return false; | |
3691 | } | |
3692 | else | |
3693 | { | |
3694 | if (TREE_CODE (lhs) != SSA_NAME | |
3695 | || !(lhs_bb = gimple_bb (SSA_NAME_DEF_STMT (lhs))) | |
3696 | || !flow_bb_inside_loop_p (loop_containing_stmt (stmt), lhs_bb)) | |
3697 | return false; | |
3698 | } | |
8a2c7744 | 3699 | |
3700 | return true; | |
3701 | } | |
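
/* Illustrative sketch, not part of the pass: source code whose GIMPLE
   takes the second shape described above (hypothetical names).  The
   b == 3 comparison becomes _234, its block feeds a PHI <_234, 1> in
   the join block, followed by a cast to the return type.  */

static int
either_test (int a, int b)
{
  return a == 2 || b == 3;
}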
3702 | ||
3703 | /* Return true if BB is a suitable basic block for inter-bb range test | |
3704 | optimization. If BACKWARD is true, BB should be the only predecessor | |
3705 | of TEST_BB, and *OTHER_BB is either NULL and filled by the routine, | |
3706 | or compared with to find a common basic block to which all conditions | |
3707 | branch if true resp. false. If BACKWARD is false, TEST_BB should | |
3708 | be the only predecessor of BB. */ | |
3709 | ||
3710 | static bool | |
3711 | suitable_cond_bb (basic_block bb, basic_block test_bb, basic_block *other_bb, | |
3712 | bool backward) | |
3713 | { | |
3714 | edge_iterator ei, ei2; | |
3715 | edge e, e2; | |
42acab1c | 3716 | gimple *stmt; |
1a91d914 | 3717 | gphi_iterator gsi; |
8a2c7744 | 3718 | bool other_edge_seen = false; |
3719 | bool is_cond; | |
3720 | ||
3721 | if (test_bb == bb) | |
3722 | return false; | |
3723 | /* Check last stmt first. */ | |
3724 | stmt = last_stmt (bb); | |
3725 | if (stmt == NULL | |
3726 | || (gimple_code (stmt) != GIMPLE_COND | |
3727 | && (backward || !final_range_test_p (stmt))) | |
3728 | || gimple_visited_p (stmt) | |
aac19106 | 3729 | || stmt_could_throw_p (cfun, stmt) |
8a2c7744 | 3730 | || *other_bb == bb) |
3731 | return false; | |
3732 | is_cond = gimple_code (stmt) == GIMPLE_COND; | |
3733 | if (is_cond) | |
3734 | { | |
3735 | /* If last stmt is GIMPLE_COND, verify that one of the succ edges | |
3736 | goes to the next bb (if BACKWARD, it is TEST_BB), and the other | |
3737 | to *OTHER_BB (if not set yet, try to find it out). */ | |
3738 | if (EDGE_COUNT (bb->succs) != 2) | |
3739 | return false; | |
3740 | FOR_EACH_EDGE (e, ei, bb->succs) | |
3741 | { | |
3742 | if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) | |
3743 | return false; | |
3744 | if (e->dest == test_bb) | |
3745 | { | |
3746 | if (backward) | |
3747 | continue; | |
3748 | else | |
3749 | return false; | |
3750 | } | |
3751 | if (e->dest == bb) | |
3752 | return false; | |
3753 | if (*other_bb == NULL) | |
3754 | { | |
3755 | FOR_EACH_EDGE (e2, ei2, test_bb->succs) | |
3756 | if (!(e2->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) | |
3757 | return false; | |
3758 | else if (e->dest == e2->dest) | |
3759 | *other_bb = e->dest; | |
3760 | if (*other_bb == NULL) | |
3761 | return false; | |
3762 | } | |
3763 | if (e->dest == *other_bb) | |
3764 | other_edge_seen = true; | |
3765 | else if (backward) | |
3766 | return false; | |
3767 | } | |
3768 | if (*other_bb == NULL || !other_edge_seen) | |
3769 | return false; | |
3770 | } | |
3771 | else if (single_succ (bb) != *other_bb) | |
3772 | return false; | |
3773 | ||
3774 | /* Now check all PHIs of *OTHER_BB. */ | |
3775 | e = find_edge (bb, *other_bb); | |
3776 | e2 = find_edge (test_bb, *other_bb); | |
3777 | for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3778 | { | |
1a91d914 | 3779 | gphi *phi = gsi.phi (); |
8a2c7744 | 3780 | /* If both BB and TEST_BB end with GIMPLE_COND, all PHI arguments |
3781 | corresponding to BB and TEST_BB predecessor must be the same. */ | |
3782 | if (!operand_equal_p (gimple_phi_arg_def (phi, e->dest_idx), | |
3783 | gimple_phi_arg_def (phi, e2->dest_idx), 0)) | |
3784 | { | |
3785 | /* Otherwise, if one of the blocks doesn't end with GIMPLE_COND, | |
3786 | one of the PHIs should have the lhs of the last stmt in | |
3787 | that block as PHI arg and that PHI should have 0 or 1 | |
3788 | corresponding to it in all other range test basic blocks | |
3789 | considered. */ | |
3790 | if (!is_cond) | |
3791 | { | |
3792 | if (gimple_phi_arg_def (phi, e->dest_idx) | |
3793 | == gimple_assign_lhs (stmt) | |
3794 | && (integer_zerop (gimple_phi_arg_def (phi, e2->dest_idx)) | |
3795 | || integer_onep (gimple_phi_arg_def (phi, | |
3796 | e2->dest_idx)))) | |
3797 | continue; | |
3798 | } | |
3799 | else | |
3800 | { | |
42acab1c | 3801 | gimple *test_last = last_stmt (test_bb); |
8a2c7744 | 3802 | if (gimple_code (test_last) != GIMPLE_COND |
3803 | && gimple_phi_arg_def (phi, e2->dest_idx) | |
3804 | == gimple_assign_lhs (test_last) | |
3805 | && (integer_zerop (gimple_phi_arg_def (phi, e->dest_idx)) | |
3806 | || integer_onep (gimple_phi_arg_def (phi, e->dest_idx)))) | |
3807 | continue; | |
3808 | } | |
3809 | ||
3810 | return false; | |
3811 | } | |
3812 | } | |
3813 | return true; | |
3814 | } | |
3815 | ||
3816 | /* Return true if BB doesn't have side-effects that would disallow | |
3817 | range test optimization, all SSA_NAMEs set in the bb are consumed | |
3818 | in the bb and there are no PHIs. */ | |
3819 | ||
3820 | static bool | |
3821 | no_side_effect_bb (basic_block bb) | |
3822 | { | |
3823 | gimple_stmt_iterator gsi; | |
42acab1c | 3824 | gimple *last; |
8a2c7744 | 3825 | |
3826 | if (!gimple_seq_empty_p (phi_nodes (bb))) | |
3827 | return false; | |
3828 | last = last_stmt (bb); | |
3829 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3830 | { | |
42acab1c | 3831 | gimple *stmt = gsi_stmt (gsi); |
8a2c7744 | 3832 | tree lhs; |
3833 | imm_use_iterator imm_iter; | |
3834 | use_operand_p use_p; | |
3835 | ||
3836 | if (is_gimple_debug (stmt)) | |
3837 | continue; | |
3838 | if (gimple_has_side_effects (stmt)) | |
3839 | return false; | |
3840 | if (stmt == last) | |
3841 | return true; | |
3842 | if (!is_gimple_assign (stmt)) | |
3843 | return false; | |
3844 | lhs = gimple_assign_lhs (stmt); | |
3845 | if (TREE_CODE (lhs) != SSA_NAME) | |
3846 | return false; | |
3847 | if (gimple_assign_rhs_could_trap_p (stmt)) | |
3848 | return false; | |
3849 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) | |
3850 | { | |
42acab1c | 3851 | gimple *use_stmt = USE_STMT (use_p); |
8a2c7744 | 3852 | if (is_gimple_debug (use_stmt)) |
3853 | continue; | |
3854 | if (gimple_bb (use_stmt) != bb) | |
3855 | return false; | |
3856 | } | |
3857 | } | |
3858 | return false; | |
3859 | } | |
3860 | ||
3861 | /* If VAR is set by CODE (BIT_{AND,IOR}_EXPR) which is reassociable, | |
3862 | return true and fill in *OPS recursively. */ | |
3863 | ||
3864 | static bool | |
04009ada | 3865 | get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops, |
8a2c7744 | 3866 | struct loop *loop) |
3867 | { | |
42acab1c | 3868 | gimple *stmt = SSA_NAME_DEF_STMT (var); |
8a2c7744 | 3869 | tree rhs[2]; |
3870 | int i; | |
3871 | ||
3872 | if (!is_reassociable_op (stmt, code, loop)) | |
3873 | return false; | |
3874 | ||
3875 | rhs[0] = gimple_assign_rhs1 (stmt); | |
3876 | rhs[1] = gimple_assign_rhs2 (stmt); | |
3877 | gimple_set_visited (stmt, true); | |
3878 | for (i = 0; i < 2; i++) | |
3879 | if (TREE_CODE (rhs[i]) == SSA_NAME | |
3880 | && !get_ops (rhs[i], code, ops, loop) | |
3881 | && has_single_use (rhs[i])) | |
3882 | { | |
04009ada | 3883 | operand_entry *oe = operand_entry_pool.allocate (); |
8a2c7744 | 3884 | |
3885 | oe->op = rhs[i]; | |
3886 | oe->rank = code; | |
3887 | oe->id = 0; | |
3888 | oe->count = 1; | |
527d8479 | 3889 | oe->stmt_to_insert = NULL; |
f1f41a6c | 3890 | ops->safe_push (oe); |
8a2c7744 | 3891 | } |
3892 | return true; | |
3893 | } | |
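
/* Illustrative sketch, not part of the pass: given the GIMPLE

     _1 = x_2 | y_3;
     _4 = _1 | z_5;

   calling get_ops (_4, BIT_IOR_EXPR, &ops, loop) flattens the tree and
   pushes the leaf operands x_2, y_3 and z_5 into OPS, each with
   rank == BIT_IOR_EXPR, provided the intermediate and leaf SSA names
   have single uses.  */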
3894 | ||
82cb4e1c | 3895 | /* Find the ops that were added by get_ops starting from VAR, see if |
3896 | they were changed during update_range_test and, if so, create new | |
3897 | stmts. */ | |
3898 | ||
3899 | static tree | |
04009ada | 3900 | update_ops (tree var, enum tree_code code, vec<operand_entry *> ops, |
82cb4e1c | 3901 | unsigned int *pidx, struct loop *loop) |
3902 | { | |
42acab1c | 3903 | gimple *stmt = SSA_NAME_DEF_STMT (var); |
82cb4e1c | 3904 | tree rhs[4]; |
3905 | int i; | |
3906 | ||
3907 | if (!is_reassociable_op (stmt, code, loop)) | |
3908 | return NULL; | |
3909 | ||
3910 | rhs[0] = gimple_assign_rhs1 (stmt); | |
3911 | rhs[1] = gimple_assign_rhs2 (stmt); | |
3912 | rhs[2] = rhs[0]; | |
3913 | rhs[3] = rhs[1]; | |
3914 | for (i = 0; i < 2; i++) | |
3915 | if (TREE_CODE (rhs[i]) == SSA_NAME) | |
3916 | { | |
3917 | rhs[2 + i] = update_ops (rhs[i], code, ops, pidx, loop); | |
3918 | if (rhs[2 + i] == NULL_TREE) | |
3919 | { | |
3920 | if (has_single_use (rhs[i])) | |
3921 | rhs[2 + i] = ops[(*pidx)++]->op; | |
3922 | else | |
3923 | rhs[2 + i] = rhs[i]; | |
3924 | } | |
3925 | } | |
3926 | if ((rhs[2] != rhs[0] || rhs[3] != rhs[1]) | |
3927 | && (rhs[2] != rhs[1] || rhs[3] != rhs[0])) | |
3928 | { | |
3929 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
f9e245b2 | 3930 | var = make_ssa_name (TREE_TYPE (var)); |
e9cf809e | 3931 | gassign *g = gimple_build_assign (var, gimple_assign_rhs_code (stmt), |
3932 | rhs[2], rhs[3]); | |
82cb4e1c | 3933 | gimple_set_uid (g, gimple_uid (stmt)); |
3934 | gimple_set_visited (g, true); | |
3935 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
3936 | } | |
3937 | return var; | |
3938 | } | |
3939 | ||
3940 | /* Structure to track the initial value passed to get_ops and | |
3941 | the range in the ops vector for each basic block. */ | |
3942 | ||
3943 | struct inter_bb_range_test_entry | |
3944 | { | |
3945 | tree op; | |
3946 | unsigned int first_idx, last_idx; | |
3947 | }; | |
3948 | ||
55a2d273 | 3949 | /* Inter-bb range test optimization. |
8a2c7744 | 3950 | |
55a2d273 | 3951 | Returns TRUE if a gimple conditional is optimized to a constant |
3952 | true/false, otherwise returns FALSE. | |
3953 | ||
3954 | This indicates to the caller that it should run a CFG cleanup pass | |
3955 | once reassociation is completed. */ | |
3956 | ||
3957 | static bool | |
42acab1c | 3958 | maybe_optimize_range_tests (gimple *stmt) |
8a2c7744 | 3959 | { |
3960 | basic_block first_bb = gimple_bb (stmt); | |
3961 | basic_block last_bb = first_bb; | |
3962 | basic_block other_bb = NULL; | |
3963 | basic_block bb; | |
3964 | edge_iterator ei; | |
3965 | edge e; | |
04009ada | 3966 | auto_vec<operand_entry *> ops; |
c2078b80 | 3967 | auto_vec<inter_bb_range_test_entry> bbinfo; |
3c8e4ef8 | 3968 | bool any_changes = false; |
55a2d273 | 3969 | bool cfg_cleanup_needed = false; |
8a2c7744 | 3970 | |
3971 | /* Consider only basic blocks that end with GIMPLE_COND or | |
3972 | a cast statement satisfying final_range_test_p. All | |
3973 | but the last bb in the first_bb .. last_bb range | |
3974 | should end with GIMPLE_COND. */ | |
3975 | if (gimple_code (stmt) == GIMPLE_COND) | |
3976 | { | |
3977 | if (EDGE_COUNT (first_bb->succs) != 2) | |
55a2d273 | 3978 | return cfg_cleanup_needed; |
8a2c7744 | 3979 | } |
3980 | else if (final_range_test_p (stmt)) | |
3981 | other_bb = single_succ (first_bb); | |
3982 | else | |
55a2d273 | 3983 | return cfg_cleanup_needed; |
8a2c7744 | 3984 | |
aac19106 | 3985 | if (stmt_could_throw_p (cfun, stmt)) |
55a2d273 | 3986 | return cfg_cleanup_needed; |
8a2c7744 | 3987 | |
3988 | /* As relative ordering of post-dominator sons isn't fixed, | |
3989 | maybe_optimize_range_tests can be called first on any | |
3990 | bb in the range we want to optimize. So, start searching | |
3991 | backwards, if first_bb can be set to a predecessor. */ | |
3992 | while (single_pred_p (first_bb)) | |
3993 | { | |
3994 | basic_block pred_bb = single_pred (first_bb); | |
3995 | if (!suitable_cond_bb (pred_bb, first_bb, &other_bb, true)) | |
3996 | break; | |
3997 | if (!no_side_effect_bb (first_bb)) | |
3998 | break; | |
3999 | first_bb = pred_bb; | |
4000 | } | |
4001 | /* If first_bb is last_bb, other_bb hasn't been computed yet. | |
4002 | Before starting forward search in last_bb successors, find | |
4003 | out the other_bb. */ | |
4004 | if (first_bb == last_bb) | |
4005 | { | |
4006 | other_bb = NULL; | |
4007 | /* As a non-GIMPLE_COND last stmt always terminates the range, | |
4008 | if the backward search didn't discover anything, just give up. */ | |
4009 | if (gimple_code (stmt) != GIMPLE_COND) | |
55a2d273 | 4010 | return cfg_cleanup_needed; |
8a2c7744 | 4011 | /* Look at both successors. Either it ends with a GIMPLE_COND |
4012 | and satisfies suitable_cond_bb, or ends with a cast and | |
4013 | other_bb is that cast's successor. */ | |
4014 | FOR_EACH_EDGE (e, ei, first_bb->succs) | |
4015 | if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)) | |
4016 | || e->dest == first_bb) | |
55a2d273 | 4017 | return cfg_cleanup_needed; |
8a2c7744 | 4018 | else if (single_pred_p (e->dest)) |
4019 | { | |
4020 | stmt = last_stmt (e->dest); | |
4021 | if (stmt | |
4022 | && gimple_code (stmt) == GIMPLE_COND | |
4023 | && EDGE_COUNT (e->dest->succs) == 2) | |
4024 | { | |
4025 | if (suitable_cond_bb (first_bb, e->dest, &other_bb, true)) | |
4026 | break; | |
4027 | else | |
4028 | other_bb = NULL; | |
4029 | } | |
4030 | else if (stmt | |
4031 | && final_range_test_p (stmt) | |
4032 | && find_edge (first_bb, single_succ (e->dest))) | |
4033 | { | |
4034 | other_bb = single_succ (e->dest); | |
4035 | if (other_bb == first_bb) | |
4036 | other_bb = NULL; | |
4037 | } | |
4038 | } | |
4039 | if (other_bb == NULL) | |
55a2d273 | 4040 | return cfg_cleanup_needed; |
8a2c7744 | 4041 | } |
4042 | /* Now do the forward search, moving last_bb to successor bbs | |
4043 | that aren't other_bb. */ | |
4044 | while (EDGE_COUNT (last_bb->succs) == 2) | |
4045 | { | |
4046 | FOR_EACH_EDGE (e, ei, last_bb->succs) | |
4047 | if (e->dest != other_bb) | |
4048 | break; | |
4049 | if (e == NULL) | |
4050 | break; | |
4051 | if (!single_pred_p (e->dest)) | |
4052 | break; | |
4053 | if (!suitable_cond_bb (e->dest, last_bb, &other_bb, false)) | |
4054 | break; | |
4055 | if (!no_side_effect_bb (e->dest)) | |
4056 | break; | |
4057 | last_bb = e->dest; | |
4058 | } | |
4059 | if (first_bb == last_bb) | |
55a2d273 | 4060 | return cfg_cleanup_needed; |
8a2c7744 | 4061 | /* Here basic blocks first_bb through last_bb's predecessor |
4062 | end with GIMPLE_COND, all of them have one of the edges to | |
4063 | other_bb and another to another block in the range, | |
4064 | all blocks except first_bb don't have side-effects and | |
4065 | last_bb ends with either GIMPLE_COND, or cast satisfying | |
4066 | final_range_test_p. */ | |
4067 | for (bb = last_bb; ; bb = single_pred (bb)) | |
4068 | { | |
4069 | enum tree_code code; | |
4070 | tree lhs, rhs; | |
82cb4e1c | 4071 | inter_bb_range_test_entry bb_ent; |
8a2c7744 | 4072 | |
82cb4e1c | 4073 | bb_ent.op = NULL_TREE; |
4074 | bb_ent.first_idx = ops.length (); | |
4075 | bb_ent.last_idx = bb_ent.first_idx; | |
8a2c7744 | 4076 | e = find_edge (bb, other_bb); |
4077 | stmt = last_stmt (bb); | |
4078 | gimple_set_visited (stmt, true); | |
4079 | if (gimple_code (stmt) != GIMPLE_COND) | |
4080 | { | |
4081 | use_operand_p use_p; | |
42acab1c | 4082 | gimple *phi; |
8a2c7744 | 4083 | edge e2; |
4084 | unsigned int d; | |
4085 | ||
4086 | lhs = gimple_assign_lhs (stmt); | |
4087 | rhs = gimple_assign_rhs1 (stmt); | |
4088 | gcc_assert (bb == last_bb); | |
4089 | ||
4090 | /* stmt is | |
4091 | _123 = (int) _234; | |
be9f4473 | 4092 | OR |
4093 | _234 = a_2(D) == 2; | |
8a2c7744 | 4094 | |
4095 | followed by: | |
4096 | <bb M>: | |
4097 | # _345 = PHI <_123(N), 1(...), 1(...)> | |
4098 | ||
4099 | or 0 instead of 1. If it is 0, the _234 | |
4100 | range test is ANDed together with all the | |
4101 | other range tests; if it is 1, it is ORed with | |
4102 | them. */ | |
4103 | single_imm_use (lhs, &use_p, &phi); | |
4104 | gcc_assert (gimple_code (phi) == GIMPLE_PHI); | |
4105 | e2 = find_edge (first_bb, other_bb); | |
4106 | d = e2->dest_idx; | |
4107 | gcc_assert (gimple_phi_arg_def (phi, e->dest_idx) == lhs); | |
4108 | if (integer_zerop (gimple_phi_arg_def (phi, d))) | |
4109 | code = BIT_AND_EXPR; | |
4110 | else | |
4111 | { | |
4112 | gcc_checking_assert (integer_onep (gimple_phi_arg_def (phi, d))); | |
4113 | code = BIT_IOR_EXPR; | |
4114 | } | |
4115 | ||
4116 | /* If _234 SSA_NAME_DEF_STMT is | |
4117 | _234 = _567 | _789; | |
4118 | (or &, corresponding to 1/0 in the phi arguments), | |
4119 | push into ops the individual range test arguments | |
4120 | of the bitwise or resp. and, recursively. */ | |
5b2b88ce | 4121 | if (TREE_CODE (rhs) == SSA_NAME |
be9f4473 | 4122 | && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) |
4123 | != tcc_comparison) | |
ec6d2d63 | 4124 | && !get_ops (rhs, code, &ops, |
4125 | loop_containing_stmt (stmt)) | |
8a2c7744 | 4126 | && has_single_use (rhs)) |
4127 | { | |
4128 | /* Otherwise, push the _234 range test itself. */ | |
04009ada | 4129 | operand_entry *oe = operand_entry_pool.allocate (); |
8a2c7744 | 4130 | |
4131 | oe->op = rhs; | |
4132 | oe->rank = code; | |
4133 | oe->id = 0; | |
4134 | oe->count = 1; | |
527d8479 | 4135 | oe->stmt_to_insert = NULL; |
f1f41a6c | 4136 | ops.safe_push (oe); |
82cb4e1c | 4137 | bb_ent.last_idx++; |
be9f4473 | 4138 | bb_ent.op = rhs; |
4139 | } | |
4140 | else if (is_gimple_assign (stmt) | |
4141 | && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) | |
4142 | == tcc_comparison) | |
5b2b88ce | 4143 | && !get_ops (lhs, code, &ops, |
4144 | loop_containing_stmt (stmt)) | |
be9f4473 | 4145 | && has_single_use (lhs)) |
4146 | { | |
4147 | operand_entry *oe = operand_entry_pool.allocate (); | |
4148 | oe->op = lhs; | |
4149 | oe->rank = code; | |
4150 | oe->id = 0; | |
4151 | oe->count = 1; | |
4152 | ops.safe_push (oe); | |
4153 | bb_ent.last_idx++; | |
4154 | bb_ent.op = lhs; | |
8a2c7744 | 4155 | } |
82cb4e1c | 4156 | else |
be9f4473 | 4157 | { |
4158 | bb_ent.last_idx = ops.length (); | |
4159 | bb_ent.op = rhs; | |
4160 | } | |
82cb4e1c | 4161 | bbinfo.safe_push (bb_ent); |
8a2c7744 | 4162 | continue; |
4163 | } | |
4164 | /* Otherwise stmt is GIMPLE_COND. */ | |
4165 | code = gimple_cond_code (stmt); | |
4166 | lhs = gimple_cond_lhs (stmt); | |
4167 | rhs = gimple_cond_rhs (stmt); | |
4168 | if (TREE_CODE (lhs) == SSA_NAME | |
4169 | && INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | |
4170 | && ((code != EQ_EXPR && code != NE_EXPR) | |
4171 | || rhs != boolean_false_node | |
4172 | /* Either push into ops the individual bitwise | |
4173 | or resp. and operands, depending on which | |
4174 | edge is other_bb. */ | |
4175 | || !get_ops (lhs, (((e->flags & EDGE_TRUE_VALUE) == 0) | |
4176 | ^ (code == EQ_EXPR)) | |
4177 | ? BIT_AND_EXPR : BIT_IOR_EXPR, &ops, | |
4178 | loop_containing_stmt (stmt)))) | |
4179 | { | |
4180 | /* Or push the GIMPLE_COND stmt itself. */ | |
04009ada | 4181 | operand_entry *oe = operand_entry_pool.allocate (); |
8a2c7744 | 4182 | |
4183 | oe->op = NULL; | |
4184 | oe->rank = (e->flags & EDGE_TRUE_VALUE) | |
4185 | ? BIT_IOR_EXPR : BIT_AND_EXPR; | |
4186 | /* oe->op = NULL signals that there is no SSA_NAME | |
4187 | for the range test, and oe->id instead is the | |
4188 | number of the basic block at whose end the | |
4189 | GIMPLE_COND is. */ | |
4190 | oe->id = bb->index; | |
4191 | oe->count = 1; | |
527d8479 | 4192 | oe->stmt_to_insert = NULL; |
f1f41a6c | 4193 | ops.safe_push (oe); |
82cb4e1c | 4194 | bb_ent.op = NULL; |
4195 | bb_ent.last_idx++; | |
8a2c7744 | 4196 | } |
82cb4e1c | 4197 | else if (ops.length () > bb_ent.first_idx) |
4198 | { | |
4199 | bb_ent.op = lhs; | |
4200 | bb_ent.last_idx = ops.length (); | |
4201 | } | |
4202 | bbinfo.safe_push (bb_ent); | |
8a2c7744 | 4203 | if (bb == first_bb) |
4204 | break; | |
4205 | } | |
f1f41a6c | 4206 | if (ops.length () > 1) |
54ec8b11 | 4207 | any_changes = optimize_range_tests (ERROR_MARK, &ops, first_bb); |
3c8e4ef8 | 4208 | if (any_changes) |
82cb4e1c | 4209 | { |
b7975a3d | 4210 | unsigned int idx, max_idx = 0; |
3c8e4ef8 | 4211 | /* update_ops relies on has_single_use predicates returning the |
4212 | same values as they did during get_ops earlier. Additionally it | |
4213 | never removes statements, only adds new ones and it should walk | |
4214 | from the single imm use and check the predicate already before | |
4215 | making those changes. | |
4216 | On the other side, the handling of GIMPLE_COND directly can turn | |
4217 | previously multiply used SSA_NAMEs into single use SSA_NAMEs, so | |
4218 | it needs to be done in a separate loop afterwards. */ | |
4219 | for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++) | |
82cb4e1c | 4220 | { |
3c8e4ef8 | 4221 | if (bbinfo[idx].first_idx < bbinfo[idx].last_idx |
4222 | && bbinfo[idx].op != NULL_TREE) | |
82cb4e1c | 4223 | { |
82cb4e1c | 4224 | tree new_op; |
4225 | ||
b7975a3d | 4226 | max_idx = idx; |
3c8e4ef8 | 4227 | stmt = last_stmt (bb); |
4228 | new_op = update_ops (bbinfo[idx].op, | |
4229 | (enum tree_code) | |
4230 | ops[bbinfo[idx].first_idx]->rank, | |
4231 | ops, &bbinfo[idx].first_idx, | |
4232 | loop_containing_stmt (stmt)); | |
82cb4e1c | 4233 | if (new_op == NULL_TREE) |
4234 | { | |
4235 | gcc_assert (bb == last_bb); | |
4236 | new_op = ops[bbinfo[idx].first_idx++]->op; | |
4237 | } | |
4238 | if (bbinfo[idx].op != new_op) | |
4239 | { | |
4240 | imm_use_iterator iter; | |
4241 | use_operand_p use_p; | |
be9f4473 | 4242 | gimple *use_stmt, *cast_or_tcc_cmp_stmt = NULL; |
82cb4e1c | 4243 | |
4244 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, bbinfo[idx].op) | |
f36f5c1b | 4245 | if (is_gimple_debug (use_stmt)) |
82cb4e1c | 4246 | continue; |
f36f5c1b | 4247 | else if (gimple_code (use_stmt) == GIMPLE_COND |
4248 | || gimple_code (use_stmt) == GIMPLE_PHI) | |
4249 | FOR_EACH_IMM_USE_ON_STMT (use_p, iter) | |
4250 | SET_USE (use_p, new_op); | |
be9f4473 | 4251 | else if ((is_gimple_assign (use_stmt) |
4252 | && (TREE_CODE_CLASS | |
4253 | (gimple_assign_rhs_code (use_stmt)) | |
4254 | == tcc_comparison))) | |
4255 | cast_or_tcc_cmp_stmt = use_stmt; | |
82cb4e1c | 4256 | else if (gimple_assign_cast_p (use_stmt)) |
be9f4473 | 4257 | cast_or_tcc_cmp_stmt = use_stmt; |
f36f5c1b | 4258 | else |
4259 | gcc_unreachable (); | |
be9f4473 | 4260 | |
4261 | if (cast_or_tcc_cmp_stmt) | |
82cb4e1c | 4262 | { |
4263 | gcc_assert (bb == last_bb); | |
be9f4473 | 4264 | tree lhs = gimple_assign_lhs (cast_or_tcc_cmp_stmt); |
f9e245b2 | 4265 | tree new_lhs = make_ssa_name (TREE_TYPE (lhs)); |
82cb4e1c | 4266 | enum tree_code rhs_code |
be9f4473 | 4267 | = gimple_assign_cast_p (cast_or_tcc_cmp_stmt) |
4268 | ? gimple_assign_rhs_code (cast_or_tcc_cmp_stmt) | |
4269 | : CONVERT_EXPR; | |
1a91d914 | 4270 | gassign *g; |
4c0d6cf7 | 4271 | if (is_gimple_min_invariant (new_op)) |
4272 | { | |
4273 | new_op = fold_convert (TREE_TYPE (lhs), new_op); | |
4274 | g = gimple_build_assign (new_lhs, new_op); | |
4275 | } | |
4276 | else | |
e9cf809e | 4277 | g = gimple_build_assign (new_lhs, rhs_code, new_op); |
be9f4473 | 4278 | gimple_stmt_iterator gsi |
4279 | = gsi_for_stmt (cast_or_tcc_cmp_stmt); | |
4280 | gimple_set_uid (g, gimple_uid (cast_or_tcc_cmp_stmt)); | |
82cb4e1c | 4281 | gimple_set_visited (g, true); |
f36f5c1b | 4282 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); |
82cb4e1c | 4283 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) |
4284 | if (is_gimple_debug (use_stmt)) | |
4285 | continue; | |
4286 | else if (gimple_code (use_stmt) == GIMPLE_COND | |
4287 | || gimple_code (use_stmt) == GIMPLE_PHI) | |
4288 | FOR_EACH_IMM_USE_ON_STMT (use_p, iter) | |
4289 | SET_USE (use_p, new_lhs); | |
4290 | else | |
4291 | gcc_unreachable (); | |
4292 | } | |
4293 | } | |
4294 | } | |
4295 | if (bb == first_bb) | |
4296 | break; | |
4297 | } | |
3c8e4ef8 | 4298 | for (bb = last_bb, idx = 0; ; bb = single_pred (bb), idx++) |
4299 | { | |
4300 | if (bbinfo[idx].first_idx < bbinfo[idx].last_idx | |
4301 | && bbinfo[idx].op == NULL_TREE | |
4302 | && ops[bbinfo[idx].first_idx]->op != NULL_TREE) | |
4303 | { | |
1a91d914 | 4304 | gcond *cond_stmt = as_a <gcond *> (last_stmt (bb)); |
b7975a3d | 4305 | |
4306 | if (idx > max_idx) | |
4307 | max_idx = idx; | |
4308 | ||
55a2d273 | 4309 | /* If we collapse the conditional to a true/false |
4310 | condition, then bubble that knowledge up to our caller. */ | |
3c8e4ef8 | 4311 | if (integer_zerop (ops[bbinfo[idx].first_idx]->op)) |
55a2d273 | 4312 | { |
4313 | gimple_cond_make_false (cond_stmt); | |
4314 | cfg_cleanup_needed = true; | |
4315 | } | |
3c8e4ef8 | 4316 | else if (integer_onep (ops[bbinfo[idx].first_idx]->op)) |
55a2d273 | 4317 | { |
4318 | gimple_cond_make_true (cond_stmt); | |
4319 | cfg_cleanup_needed = true; | |
4320 | } | |
3c8e4ef8 | 4321 | else |
4322 | { | |
1a91d914 | 4323 | gimple_cond_set_code (cond_stmt, NE_EXPR); |
4324 | gimple_cond_set_lhs (cond_stmt, | |
4325 | ops[bbinfo[idx].first_idx]->op); | |
4326 | gimple_cond_set_rhs (cond_stmt, boolean_false_node); | |
3c8e4ef8 | 4327 | } |
1a91d914 | 4328 | update_stmt (cond_stmt); |
3c8e4ef8 | 4329 | } |
4330 | if (bb == first_bb) | |
4331 | break; | |
4332 | } | |
b7975a3d | 4333 | |
4334 | /* The above changes could result in basic blocks after the first | |
4335 | modified one, up to and including last_bb, to be executed even if | |
4336 | they would not be in the original program. If the value ranges of | |
4337 | assignment lhs' in those bbs were dependent on the conditions | |
4338 | guarding those basic blocks which now can change, the VRs might | |
4339 | be incorrect. As no_side_effect_bb should ensure those SSA_NAMEs | |
4340 | are only used within the same bb, it should not be a big deal if | |
4341 | we just reset all the VRs in those bbs. See PR68671. */ | |
4342 | for (bb = last_bb, idx = 0; idx < max_idx; bb = single_pred (bb), idx++) | |
4343 | reset_flow_sensitive_info_in_bb (bb); | |
82cb4e1c | 4344 | } |
55a2d273 | 4345 | return cfg_cleanup_needed; |
8a2c7744 | 4346 | } |
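
/* Illustrative sketch, not part of the pass: the inter-bb case handled
   above, on hypothetical code.  Short-circuit evaluation puts each
   comparison in its own basic block ending in a GIMPLE_COND, yet the
   chain is still collected and rewritten as one range test, as if it
   had been written return (unsigned int) a <= 2;  */

static int
is_small (int a)
{
  if (a == 0)
    return 1;
  if (a == 1)
    return 1;
  if (a == 2)
    return 1;
  return 0;
}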
4347 | ||
54aceb26 | 4348 | /* Return true if OPERAND is defined by a PHI node which uses the LHS |
4349 | of STMT in its operands. This is also known as a "destructive | |
4350 | update" operation. */ | |
4351 | ||
4352 | static bool | |
42acab1c | 4353 | is_phi_for_stmt (gimple *stmt, tree operand) |
54aceb26 | 4354 | { |
42acab1c | 4355 | gimple *def_stmt; |
1a91d914 | 4356 | gphi *def_phi; |
75a70cf9 | 4357 | tree lhs; |
54aceb26 | 4358 | use_operand_p arg_p; |
4359 | ssa_op_iter i; | |
4360 | ||
4361 | if (TREE_CODE (operand) != SSA_NAME) | |
4362 | return false; | |
4363 | ||
75a70cf9 | 4364 | lhs = gimple_assign_lhs (stmt); |
4365 | ||
54aceb26 | 4366 | def_stmt = SSA_NAME_DEF_STMT (operand); |
1a91d914 | 4367 | def_phi = dyn_cast <gphi *> (def_stmt); |
4368 | if (!def_phi) | |
54aceb26 | 4369 | return false; |
4370 | ||
1a91d914 | 4371 | FOR_EACH_PHI_ARG (arg_p, def_phi, i, SSA_OP_USE) |
54aceb26 | 4372 | if (lhs == USE_FROM_PTR (arg_p)) |
4373 | return true; | |
4374 | return false; | |
4375 | } | |
4376 | ||
9ce93694 | 4377 | /* Remove def stmt of VAR if VAR has zero uses and recurse |
4378 | on rhs1 operand if so. */ | |
4379 | ||
4380 | static void | |
4381 | remove_visited_stmt_chain (tree var) | |
4382 | { | |
42acab1c | 4383 | gimple *stmt; |
9ce93694 | 4384 | gimple_stmt_iterator gsi; |
4385 | ||
4386 | while (1) | |
4387 | { | |
4388 | if (TREE_CODE (var) != SSA_NAME || !has_zero_uses (var)) | |
4389 | return; | |
4390 | stmt = SSA_NAME_DEF_STMT (var); | |
8c5ac7f6 | 4391 | if (is_gimple_assign (stmt) && gimple_visited_p (stmt)) |
4392 | { | |
4393 | var = gimple_assign_rhs1 (stmt); | |
8c5ac7f6 | 4394 | gsi = gsi_for_stmt (stmt); |
54675e05 | 4395 | reassoc_remove_stmt (&gsi); |
8c5ac7f6 | 4396 | release_defs (stmt); |
8c5ac7f6 | 4397 | } |
4398 | else | |
9ce93694 | 4399 | return; |
9ce93694 | 4400 | } |
4401 | } | |
4402 | ||
5b1c765d | 4403 | /* This function checks three consecutive operands in |
4404 | the passed operands vector OPS starting from OPINDEX and | |
4405 | swaps two operands if it is profitable for the binary operation | |
4406 | consuming the OPINDEX + 1 and OPINDEX + 2 operands. | |
4407 | ||
4408 | We pair ops with the same rank if possible. | |
4409 | ||
4410 | The alternative we try is to see if STMT is a destructive | |
4411 | update style statement, which is like: | |
4412 | b = phi (a, ...) | |
4413 | a = c + b; | |
4414 | In that case, we want to use the destructive update form to | |
4415 | expose the possible vectorizer sum reduction opportunity. | |
4416 | In that case, the third operand will be the phi node. This | |
4417 | check is not performed if STMT is null. | |
4418 | ||
4419 | We could, of course, try to be better as noted above, and do a | |
4420 | lot of work to try to find these opportunities in >3 operand | |
4421 | cases, but it is unlikely to be worth it. */ | |
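/* An illustrative example (editorial, not from the original sources):
   with operand ranks (2, 2, 1) at OPINDEX, the first condition above
   swaps the first and third entries to give (1, 2, 2), so the statement
   consuming the OPINDEX + 1 and OPINDEX + 2 operands pairs the two
   equal-rank operands. */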
4422 | ||
4423 | static void | |
04009ada | 4424 | swap_ops_for_binary_stmt (vec<operand_entry *> ops, |
42acab1c | 4425 | unsigned int opindex, gimple *stmt) |
5b1c765d | 4426 | { |
04009ada | 4427 | operand_entry *oe1, *oe2, *oe3; |
5b1c765d | 4428 | |
f1f41a6c | 4429 | oe1 = ops[opindex]; |
4430 | oe2 = ops[opindex + 1]; | |
4431 | oe3 = ops[opindex + 2]; | |
5b1c765d | 4432 | |
4433 | if ((oe1->rank == oe2->rank | |
4434 | && oe2->rank != oe3->rank) | |
4435 | || (stmt && is_phi_for_stmt (stmt, oe3->op) | |
4436 | && !is_phi_for_stmt (stmt, oe1->op) | |
4437 | && !is_phi_for_stmt (stmt, oe2->op))) | |
ce352457 | 4438 | std::swap (*oe1, *oe3); |
5b1c765d | 4439 | else if ((oe1->rank == oe3->rank |
4440 | && oe2->rank != oe3->rank) | |
4441 | || (stmt && is_phi_for_stmt (stmt, oe2->op) | |
4442 | && !is_phi_for_stmt (stmt, oe1->op) | |
4443 | && !is_phi_for_stmt (stmt, oe3->op))) | |
5de40da3 | 4444 | std::swap (*oe1, *oe2); |
bb88a6c7 | 4445 | } |
4446 | ||
82cb4e1c | 4447 | /* If definition of RHS1 or RHS2 dominates STMT, return the later of those |
4448 | two definitions, otherwise return STMT. */ | |
a2bd0c99 | 4449 | |
42acab1c | 4450 | static inline gimple * |
4451 | find_insert_point (gimple *stmt, tree rhs1, tree rhs2) | |
a2bd0c99 | 4452 | { |
82cb4e1c | 4453 | if (TREE_CODE (rhs1) == SSA_NAME |
4454 | && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs1))) | |
4455 | stmt = SSA_NAME_DEF_STMT (rhs1); | |
4456 | if (TREE_CODE (rhs2) == SSA_NAME | |
4457 | && reassoc_stmt_dominates_stmt_p (stmt, SSA_NAME_DEF_STMT (rhs2))) | |
4458 | stmt = SSA_NAME_DEF_STMT (rhs2); | |
4459 | return stmt; | |
a2bd0c99 | 4460 | } |
4461 | ||
fc3b1c44 | 4462 | /* If the stmt that defines operand has to be inserted, insert it |
4463 | before the use. */ | |
4464 | static void | |
4465 | insert_stmt_before_use (gimple *stmt, gimple *stmt_to_insert) | |
4466 | { | |
4467 | gcc_assert (is_gimple_assign (stmt_to_insert)); | |
4468 | tree rhs1 = gimple_assign_rhs1 (stmt_to_insert); | |
4469 | tree rhs2 = gimple_assign_rhs2 (stmt_to_insert); | |
4470 | gimple *insert_point = find_insert_point (stmt, rhs1, rhs2); | |
4471 | gimple_stmt_iterator gsi = gsi_for_stmt (insert_point); | |
4472 | gimple_set_uid (stmt_to_insert, gimple_uid (insert_point)); | |
4473 | ||
4474 | /* If the insert point is not stmt, then insert_point would be | |
4475 | the point where operand rhs1 or rhs2 is defined. In this case, | |
4476 | stmt_to_insert has to be inserted afterwards. This would | |
4477 | only happen when the stmt insertion point is flexible. */ | |
4478 | if (stmt == insert_point) | |
4479 | gsi_insert_before (&gsi, stmt_to_insert, GSI_NEW_STMT); | |
4480 | else | |
4481 | insert_stmt_after (stmt_to_insert, insert_point); | |
4482 | } | |
4483 | ||
4484 | ||
54aceb26 | 4485 | /* Recursively rewrite our linearized statements so that the operators |
4486 | match those in OPS[OPINDEX], putting the computation in rank | |
e70cda06 | 4487 | order. Return new lhs. |
4488 | CHANGED is true if we shouldn't reuse the lhs SSA_NAME both in | |
4489 | the current stmt and during recursive invocations. | |
4490 | NEXT_CHANGED is true if we shouldn't reuse the lhs SSA_NAME in | |
4491 | recursive invocations. */ | |
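/* A sketch of the rewrite (editorial illustration): given the
   linearized chain
     t1 = x + y;
     t2 = t1 + z;
   and OPS = {a, b, c}, calling this function on t2 with OPINDEX == 0
   produces
     t1 = b + c;
     t2 = t1 + a;
   i.e. OPS[OPINDEX] becomes the rhs2 of the current statement and the
   last two entries feed the deepest statement. */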
54aceb26 | 4492 | |
82cb4e1c | 4493 | static tree |
42acab1c | 4494 | rewrite_expr_tree (gimple *stmt, unsigned int opindex, |
e70cda06 | 4495 | vec<operand_entry *> ops, bool changed, bool next_changed) |
54aceb26 | 4496 | { |
75a70cf9 | 4497 | tree rhs1 = gimple_assign_rhs1 (stmt); |
4498 | tree rhs2 = gimple_assign_rhs2 (stmt); | |
82cb4e1c | 4499 | tree lhs = gimple_assign_lhs (stmt); |
04009ada | 4500 | operand_entry *oe; |
54aceb26 | 4501 | |
54aceb26 | 4502 | /* The final recursion case for this function is that you have |
4503 | exactly two operations left. | |
229fa37d | 4504 | If we had exactly one op in the entire list to start with, we |
54aceb26 | 4505 | would have never called this function, and the tail recursion |
4506 | rewrites them one at a time. */ | |
f1f41a6c | 4507 | if (opindex + 2 == ops.length ()) |
54aceb26 | 4508 | { |
04009ada | 4509 | operand_entry *oe1, *oe2; |
54aceb26 | 4510 | |
f1f41a6c | 4511 | oe1 = ops[opindex]; |
4512 | oe2 = ops[opindex + 1]; | |
54aceb26 | 4513 | |
75a70cf9 | 4514 | if (rhs1 != oe1->op || rhs2 != oe2->op) |
54aceb26 | 4515 | { |
82cb4e1c | 4516 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
4517 | unsigned int uid = gimple_uid (stmt); | |
4518 | ||
54aceb26 | 4519 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4520 | { | |
4521 | fprintf (dump_file, "Transforming "); | |
1ffa4346 | 4522 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 4523 | } |
4524 | ||
fc3b1c44 | 4525 | /* If the stmt that defines operand has to be inserted, insert it |
4526 | before the use. */ | |
4527 | if (oe1->stmt_to_insert) | |
4528 | insert_stmt_before_use (stmt, oe1->stmt_to_insert); | |
4529 | if (oe2->stmt_to_insert) | |
4530 | insert_stmt_before_use (stmt, oe2->stmt_to_insert); | |
229fa37d | 4531 | /* Even when changed is false, reassociation could have e.g. removed |
4532 | some redundant operations, so unless we are just swapping the | |
4533 | arguments or unless there is no change at all (then we just | |
4534 | return lhs), force creation of a new SSA_NAME. */ | |
4535 | if (changed || ((rhs1 != oe2->op || rhs2 != oe1->op) && opindex)) | |
82cb4e1c | 4536 | { |
42acab1c | 4537 | gimple *insert_point |
4538 | = find_insert_point (stmt, oe1->op, oe2->op); | |
f9e245b2 | 4539 | lhs = make_ssa_name (TREE_TYPE (lhs)); |
82cb4e1c | 4540 | stmt |
e9cf809e | 4541 | = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt), |
4542 | oe1->op, oe2->op); | |
82cb4e1c | 4543 | gimple_set_uid (stmt, uid); |
4544 | gimple_set_visited (stmt, true); | |
4545 | if (insert_point == gsi_stmt (gsi)) | |
4546 | gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); | |
4547 | else | |
4548 | insert_stmt_after (stmt, insert_point); | |
4549 | } | |
4550 | else | |
4551 | { | |
4552 | gcc_checking_assert (find_insert_point (stmt, oe1->op, oe2->op) | |
4553 | == stmt); | |
4554 | gimple_assign_set_rhs1 (stmt, oe1->op); | |
4555 | gimple_assign_set_rhs2 (stmt, oe2->op); | |
4556 | update_stmt (stmt); | |
4557 | } | |
4558 | ||
9ce93694 | 4559 | if (rhs1 != oe1->op && rhs1 != oe2->op) |
4560 | remove_visited_stmt_chain (rhs1); | |
54aceb26 | 4561 | |
4562 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4563 | { | |
4564 | fprintf (dump_file, " into "); | |
1ffa4346 | 4565 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 4566 | } |
54aceb26 | 4567 | } |
82cb4e1c | 4568 | return lhs; |
54aceb26 | 4569 | } |
4570 | ||
4571 | /* If we hit here, we should have 3 or more ops left. */ | |
f1f41a6c | 4572 | gcc_assert (opindex + 2 < ops.length ()); |
54aceb26 | 4573 | |
4574 | /* Rewrite the next operator. */ | |
f1f41a6c | 4575 | oe = ops[opindex]; |
54aceb26 | 4576 | |
527d8479 | 4577 | /* If the stmt that defines operand has to be inserted, insert it |
4578 | before the use. */ | |
4579 | if (oe->stmt_to_insert) | |
4580 | insert_stmt_before_use (stmt, oe->stmt_to_insert); | |
4581 | ||
82cb4e1c | 4582 | /* Recurse on the LHS of the binary operator, which is guaranteed to |
4583 | be the non-leaf side. */ | |
4584 | tree new_rhs1 | |
4585 | = rewrite_expr_tree (SSA_NAME_DEF_STMT (rhs1), opindex + 1, ops, | |
e70cda06 | 4586 | changed || oe->op != rhs2 || next_changed, |
4587 | false); | |
54aceb26 | 4588 | |
82cb4e1c | 4589 | if (oe->op != rhs2 || new_rhs1 != rhs1) |
4590 | { | |
54aceb26 | 4591 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4592 | { | |
4593 | fprintf (dump_file, "Transforming "); | |
1ffa4346 | 4594 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 4595 | } |
4596 | ||
82cb4e1c | 4597 | /* If changed is false, this is either opindex == 0 |
4598 | or all outer rhs2's were equal to corresponding oe->op, | |
4599 | and powi_result is NULL. | |
4600 | That means lhs is equivalent before and after reassociation. | |
4601 | Otherwise ensure the old lhs SSA_NAME is not reused and | |
4602 | create a new stmt as well, so that any debug stmts will be | |
4603 | properly adjusted. */ | |
4604 | if (changed) | |
4605 | { | |
4606 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
4607 | unsigned int uid = gimple_uid (stmt); | |
42acab1c | 4608 | gimple *insert_point = find_insert_point (stmt, new_rhs1, oe->op); |
82cb4e1c | 4609 | |
f9e245b2 | 4610 | lhs = make_ssa_name (TREE_TYPE (lhs)); |
e9cf809e | 4611 | stmt = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt), |
4612 | new_rhs1, oe->op); | |
82cb4e1c | 4613 | gimple_set_uid (stmt, uid); |
4614 | gimple_set_visited (stmt, true); | |
4615 | if (insert_point == gsi_stmt (gsi)) | |
4616 | gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); | |
4617 | else | |
4618 | insert_stmt_after (stmt, insert_point); | |
4619 | } | |
4620 | else | |
4621 | { | |
4622 | gcc_checking_assert (find_insert_point (stmt, new_rhs1, oe->op) | |
4623 | == stmt); | |
4624 | gimple_assign_set_rhs1 (stmt, new_rhs1); | |
4625 | gimple_assign_set_rhs2 (stmt, oe->op); | |
4626 | update_stmt (stmt); | |
4627 | } | |
54aceb26 | 4628 | |
4629 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4630 | { | |
4631 | fprintf (dump_file, " into "); | |
1ffa4346 | 4632 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 4633 | } |
4634 | } | |
82cb4e1c | 4635 | return lhs; |
54aceb26 | 4636 | } |
4637 | ||
5b1c765d | 4638 | /* Find out how many cycles we need to compute a statement chain. | |
4639 | OPS_NUM holds the number of statements in the chain. CPU_WIDTH is the | |
4640 | maximum number of independent statements we may execute per cycle. */ | |
4641 | ||
4642 | static int | |
4643 | get_required_cycles (int ops_num, int cpu_width) | |
4644 | { | |
4645 | int res; | |
4646 | int elog; | |
4647 | unsigned int rest; | |
4648 | ||
4649 | /* While we have more than 2 * cpu_width operands | |
4650 | we may reduce number of operands by cpu_width | |
4651 | per cycle. */ | |
4652 | res = ops_num / (2 * cpu_width); | |
4653 | ||
4654 | /* The count of remaining operands may be halved each cycle | |
4655 | until we have only one operand left. */ | |
4656 | rest = (unsigned)(ops_num - res * cpu_width); | |
4657 | elog = exact_log2 (rest); | |
4658 | if (elog >= 0) | |
4659 | res += elog; | |
4660 | else | |
4661 | res += floor_log2 (rest) + 1; | |
4662 | ||
4663 | return res; | |
4664 | } | |
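/* A worked example (editorial): for ops_num == 10 and cpu_width == 2,
   10 / (2 * 2) = 2 full-width cycles leave 10 - 2 * 2 = 6 operands;
   6 is not a power of two, so floor_log2 (6) + 1 = 3 halving cycles
   remain, giving 5 cycles in total. */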
4665 | ||
4666 | /* Returns an optimal number of registers to use for computation of | |
4667 | given statements. */ | |
4668 | ||
4669 | static int | |
4670 | get_reassociation_width (int ops_num, enum tree_code opc, | |
3754d046 | 4671 | machine_mode mode) |
5b1c765d | 4672 | { |
4673 | int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH); | |
4674 | int width; | |
4675 | int width_min; | |
4676 | int cycles_best; | |
4677 | ||
4678 | if (param_width > 0) | |
4679 | width = param_width; | |
4680 | else | |
4681 | width = targetm.sched.reassociation_width (opc, mode); | |
4682 | ||
4683 | if (width == 1) | |
4684 | return width; | |
4685 | ||
4686 | /* Get the minimal time required for sequence computation. */ | |
4687 | cycles_best = get_required_cycles (ops_num, width); | |
4688 | ||
4689 | /* Check if we may use less width and still compute sequence for | |
4690 | the same time. It will allow us to reduce registers usage. | |
4691 | get_required_cycles is monotonically non-increasing in the width, | |
4692 | so we can perform a binary search for the minimal width that still | |
4693 | results in the optimal cycle count. */ | |
4694 | width_min = 1; | |
4695 | while (width > width_min) | |
4696 | { | |
4697 | int width_mid = (width + width_min) / 2; | |
4698 | ||
4699 | if (get_required_cycles (ops_num, width_mid) == cycles_best) | |
4700 | width = width_mid; | |
4701 | else if (width_min < width_mid) | |
4702 | width_min = width_mid; | |
4703 | else | |
4704 | break; | |
4705 | } | |
4706 | ||
4707 | return width; | |
4708 | } | |
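/* For instance (editorial example): with ops_num == 8 and a target
   width of 4, cycles_best is 3; widths 2 and 3 both need 4 cycles,
   so the binary search keeps width == 4 as the minimal width that
   still achieves the optimal cycle count. */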
4709 | ||
4710 | /* Recursively rewrite our linearized statements so that the operators | |
4711 | match those in OPS[OPINDEX], putting the computation in rank | |
4712 | order and trying to allow operations to be executed in | |
4713 | parallel. */ | |
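/* Conceptually (editorial note), with WIDTH == 2 a chain a + b + c + d
   is rewritten along the lines of
     t1 = a + b;
     t2 = c + d;
     t3 = t1 + t2;
   so that t1 and t2 are independent and may execute in the same cycle. */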
4714 | ||
4715 | static void | |
1a91d914 | 4716 | rewrite_expr_tree_parallel (gassign *stmt, int width, |
04009ada | 4717 | vec<operand_entry *> ops) |
5b1c765d | 4718 | { |
4719 | enum tree_code opcode = gimple_assign_rhs_code (stmt); | |
f1f41a6c | 4720 | int op_num = ops.length (); |
5db2fd79 | 4721 | gcc_assert (op_num > 0); |
5b1c765d | 4722 | int stmt_num = op_num - 1; |
42acab1c | 4723 | gimple **stmts = XALLOCAVEC (gimple *, stmt_num); |
5b1c765d | 4724 | int op_index = op_num - 1; |
4725 | int stmt_index = 0; | |
4726 | int ready_stmts_end = 0; | |
4727 | int i = 0; | |
527d8479 | 4728 | gimple *stmt1 = NULL, *stmt2 = NULL; |
5b1c765d | 4729 | tree last_rhs1 = gimple_assign_rhs1 (stmt); |
5b1c765d | 4730 | |
4731 | /* We start expression rewriting from the top statements. | |
4732 | So, in this loop we create a full list of statements | |
4733 | we will work with. */ | |
4734 | stmts[stmt_num - 1] = stmt; | |
4735 | for (i = stmt_num - 2; i >= 0; i--) | |
4736 | stmts[i] = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmts[i+1])); | |
4737 | ||
5b1c765d | 4738 | for (i = 0; i < stmt_num; i++) |
4739 | { | |
4740 | tree op1, op2; | |
4741 | ||
4742 | /* Determine whether we should use results of | |
4743 | already handled statements or not. */ | |
4744 | if (ready_stmts_end == 0 | |
4745 | && (i - stmt_index >= width || op_index < 1)) | |
4746 | ready_stmts_end = i; | |
4747 | ||
4748 | /* Now we choose operands for the next statement. A nonzero | |
4749 | value in ready_stmts_end means that we should use | |
4750 | the results of already generated statements as new operands. */ | |
4751 | if (ready_stmts_end > 0) | |
4752 | { | |
4753 | op1 = gimple_assign_lhs (stmts[stmt_index++]); | |
4754 | if (ready_stmts_end > stmt_index) | |
4755 | op2 = gimple_assign_lhs (stmts[stmt_index++]); | |
4756 | else if (op_index >= 0) | |
527d8479 | 4757 | { |
4758 | operand_entry *oe = ops[op_index--]; | |
4759 | stmt2 = oe->stmt_to_insert; | |
4760 | op2 = oe->op; | |
4761 | } | |
5b1c765d | 4762 | else |
4763 | { | |
4764 | gcc_assert (stmt_index < i); | |
4765 | op2 = gimple_assign_lhs (stmts[stmt_index++]); | |
4766 | } | |
4767 | ||
4768 | if (stmt_index >= ready_stmts_end) | |
4769 | ready_stmts_end = 0; | |
4770 | } | |
4771 | else | |
4772 | { | |
4773 | if (op_index > 1) | |
4774 | swap_ops_for_binary_stmt (ops, op_index - 2, NULL); | |
527d8479 | 4775 | operand_entry *oe2 = ops[op_index--]; |
4776 | operand_entry *oe1 = ops[op_index--]; | |
4777 | op2 = oe2->op; | |
4778 | stmt2 = oe2->stmt_to_insert; | |
4779 | op1 = oe1->op; | |
4780 | stmt1 = oe1->stmt_to_insert; | |
5b1c765d | 4781 | } |
4782 | ||
4783 | /* If we emit the last statement then we should put | |
4784 | operands into the last statement. It will also | |
4785 | break the loop. */ | |
4786 | if (op_index < 0 && stmt_index == i) | |
4787 | i = stmt_num - 1; | |
4788 | ||
4789 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4790 | { | |
4791 | fprintf (dump_file, "Transforming "); | |
1ffa4346 | 4792 | print_gimple_stmt (dump_file, stmts[i], 0); |
5b1c765d | 4793 | } |
4794 | ||
fc3b1c44 | 4795 | /* If the stmt that defines operand has to be inserted, insert it |
4796 | before the use. */ | |
4797 | if (stmt1) | |
4798 | insert_stmt_before_use (stmts[i], stmt1); | |
4799 | if (stmt2) | |
4800 | insert_stmt_before_use (stmts[i], stmt2); | |
4801 | stmt1 = stmt2 = NULL; | |
4802 | ||
5b1c765d | 4803 | /* We keep original statement only for the last one. All |
4804 | others are recreated. */ | |
4805 | if (i == stmt_num - 1) | |
4806 | { | |
4807 | gimple_assign_set_rhs1 (stmts[i], op1); | |
4808 | gimple_assign_set_rhs2 (stmts[i], op2); | |
4809 | update_stmt (stmts[i]); | |
4810 | } | |
4811 | else | |
3bdaecd5 | 4812 | { |
4813 | stmts[i] = build_and_add_sum (TREE_TYPE (last_rhs1), op1, op2, opcode); | |
3bdaecd5 | 4814 | } |
5b1c765d | 4815 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4816 | { | |
4817 | fprintf (dump_file, " into "); | |
1ffa4346 | 4818 | print_gimple_stmt (dump_file, stmts[i], 0); |
5b1c765d | 4819 | } |
4820 | } | |
4821 | ||
4822 | remove_visited_stmt_chain (last_rhs1); | |
4823 | } | |
4824 | ||
54aceb26 | 4825 | /* Transform STMT, which is really (A + B) + (C + D), into the left | |
4826 | linear form, ((A + B) + C) + D. | |
4827 | Recurse on D if necessary. */ | |
4828 | ||
4829 | static void | |
42acab1c | 4830 | linearize_expr (gimple *stmt) |
54aceb26 | 4831 | { |
82cb4e1c | 4832 | gimple_stmt_iterator gsi; |
42acab1c | 4833 | gimple *binlhs = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); |
4834 | gimple *binrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); | |
4835 | gimple *oldbinrhs = binrhs; | |
75a70cf9 | 4836 | enum tree_code rhscode = gimple_assign_rhs_code (stmt); |
42acab1c | 4837 | gimple *newbinrhs = NULL; |
a4c3fb95 | 4838 | struct loop *loop = loop_containing_stmt (stmt); |
82cb4e1c | 4839 | tree lhs = gimple_assign_lhs (stmt); |
54aceb26 | 4840 | |
75a70cf9 | 4841 | gcc_assert (is_reassociable_op (binlhs, rhscode, loop) |
4842 | && is_reassociable_op (binrhs, rhscode, loop)); | |
4843 | ||
82cb4e1c | 4844 | gsi = gsi_for_stmt (stmt); |
54aceb26 | 4845 | |
75a70cf9 | 4846 | gimple_assign_set_rhs2 (stmt, gimple_assign_rhs1 (binrhs)); |
e9cf809e | 4847 | binrhs = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)), |
4848 | gimple_assign_rhs_code (binrhs), | |
4849 | gimple_assign_lhs (binlhs), | |
4850 | gimple_assign_rhs2 (binrhs)); | |
75a70cf9 | 4851 | gimple_assign_set_rhs1 (stmt, gimple_assign_lhs (binrhs)); |
82cb4e1c | 4852 | gsi_insert_before (&gsi, binrhs, GSI_SAME_STMT); |
4853 | gimple_set_uid (binrhs, gimple_uid (stmt)); | |
54aceb26 | 4854 | |
75a70cf9 | 4855 | if (TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME) |
4856 | newbinrhs = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); | |
54aceb26 | 4857 | |
4858 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4859 | { | |
4860 | fprintf (dump_file, "Linearized: "); | |
1ffa4346 | 4861 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 4862 | } |
4863 | ||
4864 | reassociate_stats.linearized++; | |
54aceb26 | 4865 | update_stmt (stmt); |
75a70cf9 | 4866 | |
82cb4e1c | 4867 | gsi = gsi_for_stmt (oldbinrhs); |
54675e05 | 4868 | reassoc_remove_stmt (&gsi); |
82cb4e1c | 4869 | release_defs (oldbinrhs); |
4870 | ||
75a70cf9 | 4871 | gimple_set_visited (stmt, true); |
4872 | gimple_set_visited (binlhs, true); | |
4873 | gimple_set_visited (binrhs, true); | |
54aceb26 | 4874 | |
4875 | /* Tail recurse on the new rhs if it still needs reassociation. */ | |
a4c3fb95 | 4876 | if (newbinrhs && is_reassociable_op (newbinrhs, rhscode, loop)) |
75a70cf9 | 4877 | /* ??? This should probably be linearize_expr (newbinrhs) but I don't |
4878 | want to change the algorithm while converting to tuples. */ | |
54aceb26 | 4879 | linearize_expr (stmt); |
54aceb26 | 4880 | } |
4881 | ||
75a70cf9 | 4882 | /* If LHS has a single immediate use that is a GIMPLE_ASSIGN statement, return |
54aceb26 | 4883 | it. Otherwise, return NULL. */ |
4884 | ||
42acab1c | 4885 | static gimple * |
54aceb26 | 4886 | get_single_immediate_use (tree lhs) |
4887 | { | |
4888 | use_operand_p immuse; | |
42acab1c | 4889 | gimple *immusestmt; |
54aceb26 | 4890 | |
4891 | if (TREE_CODE (lhs) == SSA_NAME | |
75a70cf9 | 4892 | && single_imm_use (lhs, &immuse, &immusestmt) |
4893 | && is_gimple_assign (immusestmt)) | |
4894 | return immusestmt; | |
4895 | ||
4896 | return NULL; | |
54aceb26 | 4897 | } |
54aceb26 | 4898 | |
54aceb26 | 4899 | /* Recursively negate the value of TONEGATE, and return the SSA_NAME |
4900 | representing the negated value. Insertions of any necessary | |
75a70cf9 | 4901 | instructions go before GSI. |
54aceb26 | 4902 | This function is recursive in that, if you hand it "a_5" as the |
4903 | value to negate, and a_5 is defined by "a_5 = b_3 + b_4", it will | |
4904 | transform b_3 + b_4 into a_5 = -b_3 + -b_4. */ | |
4905 | ||
4906 | static tree | |
82cb4e1c | 4907 | negate_value (tree tonegate, gimple_stmt_iterator *gsip) |
54aceb26 | 4908 | { |
42acab1c | 4909 | gimple *negatedefstmt = NULL; |
54aceb26 | 4910 | tree resultofnegate; |
82cb4e1c | 4911 | gimple_stmt_iterator gsi; |
4912 | unsigned int uid; | |
54aceb26 | 4913 | |
54aceb26 | 4914 | /* If we are trying to negate a name, defined by an add, negate the |
4915 | add operands instead. */ | |
75a70cf9 | 4916 | if (TREE_CODE (tonegate) == SSA_NAME) |
4917 | negatedefstmt = SSA_NAME_DEF_STMT (tonegate); | |
54aceb26 | 4918 | if (TREE_CODE (tonegate) == SSA_NAME |
75a70cf9 | 4919 | && is_gimple_assign (negatedefstmt) |
4920 | && TREE_CODE (gimple_assign_lhs (negatedefstmt)) == SSA_NAME | |
4921 | && has_single_use (gimple_assign_lhs (negatedefstmt)) | |
4922 | && gimple_assign_rhs_code (negatedefstmt) == PLUS_EXPR) | |
54aceb26 | 4923 | { |
75a70cf9 | 4924 | tree rhs1 = gimple_assign_rhs1 (negatedefstmt); |
4925 | tree rhs2 = gimple_assign_rhs2 (negatedefstmt); | |
82cb4e1c | 4926 | tree lhs = gimple_assign_lhs (negatedefstmt); |
42acab1c | 4927 | gimple *g; |
75a70cf9 | 4928 | |
4929 | gsi = gsi_for_stmt (negatedefstmt); | |
4930 | rhs1 = negate_value (rhs1, &gsi); | |
75a70cf9 | 4931 | |
4932 | gsi = gsi_for_stmt (negatedefstmt); | |
4933 | rhs2 = negate_value (rhs2, &gsi); | |
75a70cf9 | 4934 | |
82cb4e1c | 4935 | gsi = gsi_for_stmt (negatedefstmt); |
f9e245b2 | 4936 | lhs = make_ssa_name (TREE_TYPE (lhs)); |
82cb4e1c | 4937 | gimple_set_visited (negatedefstmt, true); |
e9cf809e | 4938 | g = gimple_build_assign (lhs, PLUS_EXPR, rhs1, rhs2); |
82cb4e1c | 4939 | gimple_set_uid (g, gimple_uid (negatedefstmt)); |
4940 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
4941 | return lhs; | |
54aceb26 | 4942 | } |
4943 | ||
4944 | tonegate = fold_build1 (NEGATE_EXPR, TREE_TYPE (tonegate), tonegate); | |
82cb4e1c | 4945 | resultofnegate = force_gimple_operand_gsi (gsip, tonegate, true, |
75a70cf9 | 4946 | NULL_TREE, true, GSI_SAME_STMT); |
82cb4e1c | 4947 | gsi = *gsip; |
4948 | uid = gimple_uid (gsi_stmt (gsi)); | |
4949 | for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
4950 | { | |
42acab1c | 4951 | gimple *stmt = gsi_stmt (gsi); |
82cb4e1c | 4952 | if (gimple_uid (stmt) != 0) |
4953 | break; | |
4954 | gimple_set_uid (stmt, uid); | |
4955 | } | |
54aceb26 | 4956 | return resultofnegate; |
54aceb26 | 4957 | } |
4958 | ||
4959 | /* Return true if we should break up the subtract in STMT into an add | |
4960 | with negate. This is true when the subtract operands are really | |
4961 | adds, or the subtract itself is used in an add expression. In | |
4962 | either case, breaking up the subtract into an add with negate | |
4963 | exposes the adds to reassociation. */ | |
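/* For example (editorial): for c = a - b whose only use is d = c + e,
   this returns true, and break_up_subtract then rewrites the statement
   to c = a + -b. */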
4964 | ||
4965 | static bool | |
42acab1c | 4966 | should_break_up_subtract (gimple *stmt) |
54aceb26 | 4967 | { |
75a70cf9 | 4968 | tree lhs = gimple_assign_lhs (stmt); |
4969 | tree binlhs = gimple_assign_rhs1 (stmt); | |
4970 | tree binrhs = gimple_assign_rhs2 (stmt); | |
42acab1c | 4971 | gimple *immusestmt; |
a4c3fb95 | 4972 | struct loop *loop = loop_containing_stmt (stmt); |
54aceb26 | 4973 | |
4974 | if (TREE_CODE (binlhs) == SSA_NAME | |
a4c3fb95 | 4975 | && is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop)) |
54aceb26 | 4976 | return true; |
4977 | ||
4978 | if (TREE_CODE (binrhs) == SSA_NAME | |
a4c3fb95 | 4979 | && is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), PLUS_EXPR, loop)) |
54aceb26 | 4980 | return true; |
4981 | ||
4982 | if (TREE_CODE (lhs) == SSA_NAME | |
4983 | && (immusestmt = get_single_immediate_use (lhs)) | |
75a70cf9 | 4984 | && is_gimple_assign (immusestmt) |
dddf5036 | 4985 | && (gimple_assign_rhs_code (immusestmt) == PLUS_EXPR |
29a45e93 | 4986 | || (gimple_assign_rhs_code (immusestmt) == MINUS_EXPR |
4987 | && gimple_assign_rhs1 (immusestmt) == lhs) | |
4988 | || gimple_assign_rhs_code (immusestmt) == MULT_EXPR)) | |
54aceb26 | 4989 | return true; |
4990 | return false; | |
54aceb26 | 4991 | } |
4992 | ||
4993 | /* Transform STMT from A - B into A + -B. */ | |
4994 | ||
4995 | static void | |
42acab1c | 4996 | break_up_subtract (gimple *stmt, gimple_stmt_iterator *gsip) |
54aceb26 | 4997 | { |
75a70cf9 | 4998 | tree rhs1 = gimple_assign_rhs1 (stmt); |
4999 | tree rhs2 = gimple_assign_rhs2 (stmt); | |
54aceb26 | 5000 | |
5001 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5002 | { | |
5003 | fprintf (dump_file, "Breaking up subtract "); | |
1ffa4346 | 5004 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 5005 | } |
5006 | ||
75a70cf9 | 5007 | rhs2 = negate_value (rhs2, gsip); |
5008 | gimple_assign_set_rhs_with_ops (gsip, PLUS_EXPR, rhs1, rhs2); | |
54aceb26 | 5009 | update_stmt (stmt); |
5010 | } | |
5011 | ||
8c5ac7f6 | 5012 | /* Determine whether STMT is a builtin call that raises an SSA name |
5013 | to an integer power and has only one use. If so, and this is early | |
5014 | reassociation and unsafe math optimizations are permitted, place | |
5015 | the SSA name in *BASE and the exponent in *EXPONENT, and return TRUE. | |
5016 | If any of these conditions does not hold, return FALSE. */ | |
5017 | ||
5018 | static bool | |
5403ed8b | 5019 | acceptable_pow_call (gcall *stmt, tree *base, HOST_WIDE_INT *exponent) |
8c5ac7f6 | 5020 | { |
390bb332 | 5021 | tree arg1; |
8c5ac7f6 | 5022 | REAL_VALUE_TYPE c, cint; |
5023 | ||
390bb332 | 5024 | switch (gimple_call_combined_fn (stmt)) |
8c5ac7f6 | 5025 | { |
390bb332 | 5026 | CASE_CFN_POW: |
54faaad5 | 5027 | if (flag_errno_math) |
5028 | return false; | |
5029 | ||
8c5ac7f6 | 5030 | *base = gimple_call_arg (stmt, 0); |
5031 | arg1 = gimple_call_arg (stmt, 1); | |
5032 | ||
5033 | if (TREE_CODE (arg1) != REAL_CST) | |
5034 | return false; | |
5035 | ||
5036 | c = TREE_REAL_CST (arg1); | |
5037 | ||
5038 | if (REAL_EXP (&c) > HOST_BITS_PER_WIDE_INT) | |
5039 | return false; | |
5040 | ||
5041 | *exponent = real_to_integer (&c); | |
e913b5cd | 5042 | real_from_integer (&cint, VOIDmode, *exponent, SIGNED); |
8c5ac7f6 | 5043 | if (!real_identical (&c, &cint)) |
5044 | return false; | |
5045 | ||
5046 | break; | |
5047 | ||
390bb332 | 5048 | CASE_CFN_POWI: |
8c5ac7f6 | 5049 | *base = gimple_call_arg (stmt, 0); |
5050 | arg1 = gimple_call_arg (stmt, 1); | |
5051 | ||
e913b5cd | 5052 | if (!tree_fits_shwi_p (arg1)) |
8c5ac7f6 | 5053 | return false; |
5054 | ||
e913b5cd | 5055 | *exponent = tree_to_shwi (arg1); |
8c5ac7f6 | 5056 | break; |
5057 | ||
5058 | default: | |
5059 | return false; | |
5060 | } | |
5061 | ||
5062 | /* Expanding negative exponents is generally unproductive, so we don't | |
5063 | complicate matters with those. Exponents of zero and one should | |
5064 | have been handled by expression folding. */ | |
5065 | if (*exponent < 2 || TREE_CODE (*base) != SSA_NAME) | |
5066 | return false; | |
5067 | ||
5068 | return true; | |
5069 | } | |
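/* For instance (editorial example, assuming -funsafe-math-optimizations
   and -fno-math-errno): __builtin_pow (x, 3.0) yields *BASE = x and
   *EXPONENT = 3, while __builtin_pow (x, 2.5) is rejected because the
   exponent is not an exact integer. */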
5070 | ||
9eafdd7b | 5071 | /* Try to derive and add operand entry for OP to *OPS. Return false if |
5072 | unsuccessful. */ | |
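/* Two illustrative cases (editorial): in a multiply chain, an operand
   defined by t = __builtin_powi (y, 3) becomes a repeated-factor entry
   (y, 3), and an operand defined by t = -y is entered as y together
   with a -1 constant so the negation can fold into the constant. */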
5073 | ||
5074 | static bool | |
5075 | try_special_add_to_ops (vec<operand_entry *> *ops, | |
5076 | enum tree_code code, | |
5077 | tree op, gimple* def_stmt) | |
5078 | { | |
5079 | tree base = NULL_TREE; | |
5080 | HOST_WIDE_INT exponent = 0; | |
5081 | ||
5403ed8b | 5082 | if (TREE_CODE (op) != SSA_NAME |
5083 | || ! has_single_use (op)) | |
9eafdd7b | 5084 | return false; |
5085 | ||
5086 | if (code == MULT_EXPR | |
5403ed8b | 5087 | && reassoc_insert_powi_p |
5088 | && flag_unsafe_math_optimizations | |
5089 | && is_gimple_call (def_stmt) | |
5090 | && acceptable_pow_call (as_a <gcall *> (def_stmt), &base, &exponent)) | |
9eafdd7b | 5091 | { |
5092 | add_repeat_to_ops_vec (ops, base, exponent); | |
5093 | gimple_set_visited (def_stmt, true); | |
5094 | return true; | |
5095 | } | |
5096 | else if (code == MULT_EXPR | |
5097 | && is_gimple_assign (def_stmt) | |
5098 | && gimple_assign_rhs_code (def_stmt) == NEGATE_EXPR | |
5099 | && !HONOR_SNANS (TREE_TYPE (op)) | |
5100 | && (!HONOR_SIGNED_ZEROS (TREE_TYPE (op)) | |
5101 | || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (op)))) | |
5102 | { | |
5103 | tree rhs1 = gimple_assign_rhs1 (def_stmt); | |
5104 | tree cst = build_minus_one_cst (TREE_TYPE (op)); | |
5105 | add_to_ops_vec (ops, rhs1); | |
5106 | add_to_ops_vec (ops, cst); | |
5107 | gimple_set_visited (def_stmt, true); | |
5108 | return true; | |
5109 | } | |
5110 | ||
5111 | return false; | |
5112 | } | |
5113 | ||
54aceb26 | 5114 | /* Recursively linearize a binary expression that is the RHS of STMT. |
5115 | Place the operands of the expression tree in the vector named OPS. */ | |
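/* For instance (editorial note): for w = ((a + b) + c) + d, with every
   intermediate result reassociable, OPS ends up holding the four
   leaves a, b, c and d. */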
5116 | ||
5117 | static void | |
04009ada | 5118 | linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt, |
dddf5036 | 5119 | bool is_associative, bool set_visited) |
54aceb26 | 5120 | { |
75a70cf9 | 5121 | tree binlhs = gimple_assign_rhs1 (stmt); |
5122 | tree binrhs = gimple_assign_rhs2 (stmt); | |
42acab1c | 5123 | gimple *binlhsdef = NULL, *binrhsdef = NULL; |
54aceb26 | 5124 | bool binlhsisreassoc = false; |
5125 | bool binrhsisreassoc = false; | |
75a70cf9 | 5126 | enum tree_code rhscode = gimple_assign_rhs_code (stmt); |
a4c3fb95 | 5127 | struct loop *loop = loop_containing_stmt (stmt); |
54aceb26 | 5128 | |
dddf5036 | 5129 | if (set_visited) |
5130 | gimple_set_visited (stmt, true); | |
54aceb26 | 5131 | |
5132 | if (TREE_CODE (binlhs) == SSA_NAME) | |
5133 | { | |
5134 | binlhsdef = SSA_NAME_DEF_STMT (binlhs); | |
df0675b8 | 5135 | binlhsisreassoc = (is_reassociable_op (binlhsdef, rhscode, loop) |
aac19106 | 5136 | && !stmt_could_throw_p (cfun, binlhsdef)); |
54aceb26 | 5137 | } |
5138 | ||
5139 | if (TREE_CODE (binrhs) == SSA_NAME) | |
5140 | { | |
5141 | binrhsdef = SSA_NAME_DEF_STMT (binrhs); | |
df0675b8 | 5142 | binrhsisreassoc = (is_reassociable_op (binrhsdef, rhscode, loop) |
aac19106 | 5143 | && !stmt_could_throw_p (cfun, binrhsdef)); |
54aceb26 | 5144 | } |
5145 | ||
5146 | /* If the LHS is not reassociable, but the RHS is, we need to swap | |
5147 | them. If neither is reassociable, there is nothing we can do, so | |
5148 | just put them in the ops vector. If the LHS is reassociable, | |
5149 | linearize it. If both are reassociable, then linearize the RHS | |
5150 | and the LHS. */ | |
5151 | ||
5152 | if (!binlhsisreassoc) | |
5153 | { | |
dddf5036 | 5154 | /* If this is not an associative operation, like division, give up. */ | |
5155 | if (!is_associative) | |
5156 | { | |
5157 | add_to_ops_vec (ops, binrhs); | |
5158 | return; | |
5159 | } | |
5160 | ||
54aceb26 | 5161 | if (!binrhsisreassoc) |
5162 | { | |
9eafdd7b | 5163 | if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef)) |
8c5ac7f6 | 5164 | add_to_ops_vec (ops, binrhs); |
5165 | ||
9eafdd7b | 5166 | if (!try_special_add_to_ops (ops, rhscode, binlhs, binlhsdef)) |
8c5ac7f6 | 5167 | add_to_ops_vec (ops, binlhs); |
5168 | ||
54aceb26 | 5169 | return; |
5170 | } | |
5171 | ||
5172 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5173 | { | |
5174 | fprintf (dump_file, "swapping operands of "); | |
1ffa4346 | 5175 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 5176 | } |
5177 | ||
8f6fa493 | 5178 | swap_ssa_operands (stmt, |
5179 | gimple_assign_rhs1_ptr (stmt), | |
5180 | gimple_assign_rhs2_ptr (stmt)); | |
54aceb26 | 5181 | update_stmt (stmt); |
5182 | ||
5183 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5184 | { | |
5185 | fprintf (dump_file, " is now "); | |
1ffa4346 | 5186 | print_gimple_stmt (dump_file, stmt, 0); |
54aceb26 | 5187 | } |
5188 | ||
5189 | /* We want to make it so the lhs is always the reassociative op, | |
5190 | so swap. */ | |
dfcf26a5 | 5191 | std::swap (binlhs, binrhs); |
54aceb26 | 5192 | } |
5193 | else if (binrhsisreassoc) | |
5194 | { | |
5195 | linearize_expr (stmt); | |
75a70cf9 | 5196 | binlhs = gimple_assign_rhs1 (stmt); |
5197 | binrhs = gimple_assign_rhs2 (stmt); | |
54aceb26 | 5198 | } |
5199 | ||
5200 | gcc_assert (TREE_CODE (binrhs) != SSA_NAME | |
a4c3fb95 | 5201 | || !is_reassociable_op (SSA_NAME_DEF_STMT (binrhs), |
5202 | rhscode, loop)); | |
dddf5036 | 5203 | linearize_expr_tree (ops, SSA_NAME_DEF_STMT (binlhs), |
5204 | is_associative, set_visited); | |
8c5ac7f6 | 5205 | |
9eafdd7b | 5206 | if (!try_special_add_to_ops (ops, rhscode, binrhs, binrhsdef)) |
8c5ac7f6 | 5207 | add_to_ops_vec (ops, binrhs); |
54aceb26 | 5208 | } |
5209 | ||
5210 | /* Repropagate the negates back into subtracts, since no other pass | |
5211 | currently does it. */ | |
5212 | ||
5213 | static void | |
5214 | repropagate_negates (void) | |
5215 | { | |
5216 | unsigned int i = 0; | |
5217 | tree negate; | |
5218 | ||
f1f41a6c | 5219 | FOR_EACH_VEC_ELT (plus_negates, i, negate) |
54aceb26 | 5220 | { |
42acab1c | 5221 | gimple *user = get_single_immediate_use (negate); |
54aceb26 | 5222 | |
c07e5b8b | 5223 | if (!user || !is_gimple_assign (user)) |
5224 | continue; | |
5225 | ||
54aceb26 | 5226 | /* The negate operand can be either operand of a PLUS_EXPR |
5227 | (it can be the LHS if the RHS is a constant for example). | |
5228 | ||
5229 | Force the negate operand to the RHS of the PLUS_EXPR, then | |
5230 | transform the PLUS_EXPR into a MINUS_EXPR. */ | |
c07e5b8b | 5231 | if (gimple_assign_rhs_code (user) == PLUS_EXPR) |
54aceb26 | 5232 | { |
54aceb26 | 5233 | /* If the negated operand appears on the LHS of the |
5234 | PLUS_EXPR, exchange the operands of the PLUS_EXPR | |
5235 | to force the negated operand to the RHS of the PLUS_EXPR. */ | |
75a70cf9 | 5236 | if (gimple_assign_rhs1 (user) == negate) |
54aceb26 | 5237 | { |
8f6fa493 | 5238 | swap_ssa_operands (user, |
5239 | gimple_assign_rhs1_ptr (user), | |
5240 | gimple_assign_rhs2_ptr (user)); | |
54aceb26 | 5241 | } |
5242 | ||
5243 | /* Now transform the PLUS_EXPR into a MINUS_EXPR and replace | |
5244 | the RHS of the PLUS_EXPR with the operand of the NEGATE_EXPR. */ | |
75a70cf9 | 5245 | if (gimple_assign_rhs2 (user) == negate) |
54aceb26 | 5246 | { |
75a70cf9 | 5247 | tree rhs1 = gimple_assign_rhs1 (user); |
5b849c60 | 5248 | tree rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (negate)); |
75a70cf9 | 5249 | gimple_stmt_iterator gsi = gsi_for_stmt (user); |
5250 | gimple_assign_set_rhs_with_ops (&gsi, MINUS_EXPR, rhs1, rhs2); | |
54aceb26 | 5251 | update_stmt (user); |
5252 | } | |
5253 | } | |
c07e5b8b | 5254 | else if (gimple_assign_rhs_code (user) == MINUS_EXPR) |
5255 | { | |
5256 | if (gimple_assign_rhs1 (user) == negate) | |
5257 | { | |
5258 | /* We have | |
5259 | x = -a | |
5260 | y = x - b | |
5261 | which we transform into | |
5262 | x = a + b | |
5263 | y = -x . | |
5264 | This pushes down the negate which we possibly can merge | |
5265 | into some other operation, hence insert it into the | |
5266 | plus_negates vector. */ | |
42acab1c | 5267 | gimple *feed = SSA_NAME_DEF_STMT (negate); |
c07e5b8b | 5268 | tree a = gimple_assign_rhs1 (feed); |
82cb4e1c | 5269 | tree b = gimple_assign_rhs2 (user); |
5270 | gimple_stmt_iterator gsi = gsi_for_stmt (feed); | |
5271 | gimple_stmt_iterator gsi2 = gsi_for_stmt (user); | |
f9e245b2 | 5272 | tree x = make_ssa_name (TREE_TYPE (gimple_assign_lhs (feed))); |
42acab1c | 5273 | gimple *g = gimple_build_assign (x, PLUS_EXPR, a, b); |
82cb4e1c | 5274 | gsi_insert_before (&gsi2, g, GSI_SAME_STMT); |
806413d2 | 5275 | gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, x); |
82cb4e1c | 5276 | user = gsi_stmt (gsi2); |
5277 | update_stmt (user); | |
54675e05 | 5278 | reassoc_remove_stmt (&gsi); |
82cb4e1c | 5279 | release_defs (feed); |
5280 | plus_negates.safe_push (gimple_assign_lhs (user)); | |
c07e5b8b | 5281 | } |
5282 | else | |
5283 | { | |
5284 | /* Transform "x = -a; y = b - x" into "y = b + a", getting | |
5285 | rid of one operation. */ | |
42acab1c | 5286 | gimple *feed = SSA_NAME_DEF_STMT (negate); |
c07e5b8b | 5287 | tree a = gimple_assign_rhs1 (feed); |
5288 | tree rhs1 = gimple_assign_rhs1 (user); | |
5289 | gimple_stmt_iterator gsi = gsi_for_stmt (user); | |
5290 | gimple_assign_set_rhs_with_ops (&gsi, PLUS_EXPR, rhs1, a); | |
5291 | update_stmt (gsi_stmt (gsi)); | |
5292 | } | |
5293 | } | |
54aceb26 | 5294 | } |
5295 | } | |
5296 | ||
c07e5b8b | 5297 | /* Returns true if OP is of a type for which we can do reassociation. |
5298 | That is for integral or non-saturating fixed-point types, and for | |
5299 | floating-point types when associative-math is enabled. */ | |
5300 | ||
5301 | static bool | |
5302 | can_reassociate_p (tree op) | |
5303 | { | |
5304 | tree type = TREE_TYPE (op); | |
d4d349db | 5305 | if (TREE_CODE (op) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) |
5306 | return false; | |
026f2137 | 5307 | if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) |
c07e5b8b | 5308 | || NON_SAT_FIXED_POINT_TYPE_P (type) |
f4a50267 | 5309 | || (flag_associative_math && FLOAT_TYPE_P (type))) |
c07e5b8b | 5310 | return true; |
5311 | return false; | |
5312 | } | |
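/* Examples (editorial): an unsigned int operand qualifies because its
   overflow wraps, a plain signed int does not (without -fwrapv), and a
   double qualifies only under -fassociative-math; an SSA name occurring
   in an abnormal PHI never qualifies. */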
5313 | ||
54aceb26 | 5314 | /* Break up subtract operations in block BB. |
5315 | ||
5316 | We do this top down because we don't know whether the subtract is | |
5317 | part of a possible chain of reassociation except at the top. | |
48e1416a | 5318 | |
54aceb26 | 5319 | IE given |
5320 | d = f + g | |
5321 | c = a + e | |
5322 | b = c - d | |
5323 | q = b - r | |
5324 | k = t - q | |
48e1416a | 5325 | |
54aceb26 | 5326 | we want to break up k = t - q, but we won't until we've transformed q |
75a70cf9 | 5327 | = b - r, which won't be broken up until we transform b = c - d. |
5328 | ||
82cb4e1c | 5329 | En passant, clear the GIMPLE visited flag on every statement |
5330 | and set UIDs within each basic block. */ | |
54aceb26 | 5331 | |
5332 | static void | |
5333 | break_up_subtract_bb (basic_block bb) | |
5334 | { | |
75a70cf9 | 5335 | gimple_stmt_iterator gsi; |
54aceb26 | 5336 | basic_block son; |
82cb4e1c | 5337 | unsigned int uid = 1; |
54aceb26 | 5338 | |
75a70cf9 | 5339 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
54aceb26 | 5340 | { |
42acab1c | 5341 | gimple *stmt = gsi_stmt (gsi); |
75a70cf9 | 5342 | gimple_set_visited (stmt, false); |
82cb4e1c | 5343 | gimple_set_uid (stmt, uid++); |
54aceb26 | 5344 | |
c07e5b8b | 5345 | if (!is_gimple_assign (stmt) |
5346 | || !can_reassociate_p (gimple_assign_lhs (stmt))) | |
5347 | continue; | |
5348 | ||
75a70cf9 | 5349 | /* Look for simple gimple subtract operations. */ |
c07e5b8b | 5350 | if (gimple_assign_rhs_code (stmt) == MINUS_EXPR) |
54aceb26 | 5351 | { |
c07e5b8b | 5352 | if (!can_reassociate_p (gimple_assign_rhs1 (stmt)) |
5353 | || !can_reassociate_p (gimple_assign_rhs2 (stmt))) | |
54aceb26 | 5354 | continue; |
5355 | ||
5356 | /* Check for a subtract used only in an addition. If this | |
5357 | is the case, transform it into add of a negate for better | |
5358 | reassociation. IE transform C = A-B into C = A + -B if C | |
5359 | is only used in an addition. */ | |
75a70cf9 | 5360 | if (should_break_up_subtract (stmt)) |
5361 | break_up_subtract (stmt, &gsi); | |
54aceb26 | 5362 | } |
c07e5b8b | 5363 | else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR |
5364 | && can_reassociate_p (gimple_assign_rhs1 (stmt))) | |
f1f41a6c | 5365 | plus_negates.safe_push (gimple_assign_lhs (stmt)); |
54aceb26 | 5366 | } |
5367 | for (son = first_dom_son (CDI_DOMINATORS, bb); | |
5368 | son; | |
5369 | son = next_dom_son (CDI_DOMINATORS, son)) | |
5370 | break_up_subtract_bb (son); | |
5371 | } | |
5372 | ||
8c5ac7f6 | 5373 | /* Used for repeated factor analysis. */ |
04009ada | 5374 | struct repeat_factor |
8c5ac7f6 | 5375 | { |
5376 | /* An SSA name that occurs in a multiply chain. */ | |
5377 | tree factor; | |
5378 | ||
5379 | /* Cached rank of the factor. */ | |
5380 | unsigned rank; | |
5381 | ||
5382 | /* Number of occurrences of the factor in the chain. */ | |
5383 | HOST_WIDE_INT count; | |
5384 | ||
5385 | /* An SSA name representing the product of this factor and | |
5386 | all factors appearing later in the repeated factor vector. */ | |
5387 | tree repr; | |
5388 | }; | |
5389 | ||
8c5ac7f6 | 5390 | |
f1f41a6c | 5391 | static vec<repeat_factor> repeat_factor_vec; |
8c5ac7f6 | 5392 | |
5393 | /* Used for sorting the repeat factor vector. Sort primarily by | |
5394 | ascending occurrence count, secondarily by descending rank. */ | |
5395 | ||
5396 | static int | |
5397 | compare_repeat_factors (const void *x1, const void *x2) | |
5398 | { | |
04009ada | 5399 | const repeat_factor *rf1 = (const repeat_factor *) x1; |
5400 | const repeat_factor *rf2 = (const repeat_factor *) x2; | |
8c5ac7f6 | 5401 | |
5402 | if (rf1->count != rf2->count) | |
5403 | return rf1->count - rf2->count; | |
5404 | ||
5405 | return rf2->rank - rf1->rank; | |
5406 | } | |
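/* For example (editorial): factors with (count, rank) of (1, 5), (2, 4)
   and (2, 9) sort as (1, 5), (2, 9), (2, 4): ascending count first,
   then descending rank within equal counts. */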
5407 | ||
8c5ac7f6 | 5408 | /* Look for repeated operands in OPS in the multiply tree rooted at |
5409 | STMT. Replace them with an optimal sequence of multiplies and powi | |
97269507 | 5410 | builtin calls, and remove the used operands from OPS. Return an |
5411 | SSA name representing the value of the replacement sequence. */ | |
8c5ac7f6 | 5412 | |
97269507 | 5413 | static tree |
04009ada | 5414 | attempt_builtin_powi (gimple *stmt, vec<operand_entry *> *ops) |
8c5ac7f6 | 5415 | { |
5416 | unsigned i, j, vec_len; | |
5417 | int ii; | |
04009ada | 5418 | operand_entry *oe; |
5419 | repeat_factor *rf1, *rf2; | |
8c5ac7f6 | 5420 | repeat_factor rfnew; |
97269507 | 5421 | tree result = NULL_TREE; |
8c5ac7f6 | 5422 | tree target_ssa, iter_result; |
5423 | tree type = TREE_TYPE (gimple_get_lhs (stmt)); | |
5424 | tree powi_fndecl = mathfn_built_in (type, BUILT_IN_POWI); | |
5425 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
42acab1c | 5426 | gimple *mul_stmt, *pow_stmt; |
8c5ac7f6 | 5427 | |
5428 | /* Nothing to do if BUILT_IN_POWI doesn't exist for this type and | |
5429 | target. */ | |
5430 | if (!powi_fndecl) | |
97269507 | 5431 | return NULL_TREE; |
8c5ac7f6 | 5432 | |
5433 | /* Allocate the repeated factor vector. */ | |
f1f41a6c | 5434 | repeat_factor_vec.create (10); |
8c5ac7f6 | 5435 | |
5436 | /* Scan the OPS vector for all SSA names in the product and build | |
5437 | up a vector of occurrence counts for each factor. */ | |
f1f41a6c | 5438 | FOR_EACH_VEC_ELT (*ops, i, oe) |
8c5ac7f6 | 5439 | { |
5440 | if (TREE_CODE (oe->op) == SSA_NAME) | |
5441 | { | |
f1f41a6c | 5442 | FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1) |
8c5ac7f6 | 5443 | { |
5444 | if (rf1->factor == oe->op) | |
5445 | { | |
5446 | rf1->count += oe->count; | |
5447 | break; | |
5448 | } | |
5449 | } | |
5450 | ||
f1f41a6c | 5451 | if (j >= repeat_factor_vec.length ()) |
8c5ac7f6 | 5452 | { |
5453 | rfnew.factor = oe->op; | |
5454 | rfnew.rank = oe->rank; | |
5455 | rfnew.count = oe->count; | |
5456 | rfnew.repr = NULL_TREE; | |
f1f41a6c | 5457 | repeat_factor_vec.safe_push (rfnew); |
8c5ac7f6 | 5458 | } |
5459 | } | |
5460 | } | |
5461 | ||
5462 | /* Sort the repeated factor vector by (a) increasing occurrence count, | |
5463 | and (b) decreasing rank. */ | |
f1f41a6c | 5464 | repeat_factor_vec.qsort (compare_repeat_factors); |
8c5ac7f6 | 5465 | |
5466 | /* It is generally best to combine as many base factors as possible | |
5467 | into a product before applying __builtin_powi to the result. | |
5468 | However, the sort order chosen for the repeated factor vector | |
5469 | allows us to cache partial results for the product of the base | |
5470 | factors for subsequent use. When we already have a cached partial | |
5471 | result from a previous iteration, it is best to make use of it | |
5472 | before looking for another __builtin_pow opportunity. | |
5473 | ||
5474 | As an example, consider x * x * y * y * y * z * z * z * z. | |
5475 | We want to first compose the product x * y * z, raise it to the | |
5476 | second power, then multiply this by y * z, and finally multiply | |
5477 | by z. This can be done in 5 multiplies provided we cache y * z | |
5478 | for use in both expressions: | |
5479 | ||
5480 | t1 = y * z | |
5481 | t2 = t1 * x | |
5482 | t3 = t2 * t2 | |
5483 | t4 = t1 * t3 | |
5484 | result = t4 * z | |
5485 | ||
5486 | If we instead ignored the cached y * z and first multiplied by | |
5487 | the __builtin_pow opportunity z * z, we would get the inferior: | |
5488 | ||
5489 | t1 = y * z | |
5490 | t2 = t1 * x | |
5491 | t3 = t2 * t2 | |
5492 | t4 = z * z | |
5493 | t5 = t3 * t4 | |
5494 | result = t5 * y */ | |
5495 | ||
f1f41a6c | 5496 | vec_len = repeat_factor_vec.length (); |
8c5ac7f6 | 5497 | |
5498 | /* Repeatedly look for opportunities to create a builtin_powi call. */ | |
5499 | while (true) | |
5500 | { | |
5501 | HOST_WIDE_INT power; | |
5502 | ||
5503 | /* First look for the largest cached product of factors from | |
5504 | preceding iterations. If found, create a builtin_powi for | |
5505 | it if the minimum occurrence count for its factors is at | |
5506 | least 2, or just use this cached product as our next | |
5507 | multiplicand if the minimum occurrence count is 1. */ | |
f1f41a6c | 5508 | FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1) |
8c5ac7f6 | 5509 | { |
5510 | if (rf1->repr && rf1->count > 0) | |
5511 | break; | |
5512 | } | |
5513 | ||
5514 | if (j < vec_len) | |
5515 | { | |
5516 | power = rf1->count; | |
5517 | ||
5518 | if (power == 1) | |
5519 | { | |
5520 | iter_result = rf1->repr; | |
5521 | ||
5522 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5523 | { | |
5524 | unsigned elt; | |
04009ada | 5525 | repeat_factor *rf; |
8c5ac7f6 | 5526 | fputs ("Multiplying by cached product ", dump_file); |
5527 | for (elt = j; elt < vec_len; elt++) | |
5528 | { | |
f1f41a6c | 5529 | rf = &repeat_factor_vec[elt]; |
1ffa4346 | 5530 | print_generic_expr (dump_file, rf->factor); |
8c5ac7f6 | 5531 | if (elt < vec_len - 1) |
5532 | fputs (" * ", dump_file); | |
5533 | } | |
5534 | fputs ("\n", dump_file); | |
5535 | } | |
5536 | } | |
5537 | else | |
5538 | { | |
03d37e4e | 5539 | iter_result = make_temp_ssa_name (type, NULL, "reassocpow"); |
8c5ac7f6 | 5540 | pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr, |
5541 | build_int_cst (integer_type_node, | |
5542 | power)); | |
5543 | gimple_call_set_lhs (pow_stmt, iter_result); | |
5544 | gimple_set_location (pow_stmt, gimple_location (stmt)); | |
0be0fe66 | 5545 | gimple_set_uid (pow_stmt, gimple_uid (stmt)); |
8c5ac7f6 | 5546 | gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT); |
5547 | ||
5548 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5549 | { | |
5550 | unsigned elt; | |
04009ada | 5551 | repeat_factor *rf; |
8c5ac7f6 | 5552 | fputs ("Building __builtin_pow call for cached product (", |
5553 | dump_file); | |
5554 | for (elt = j; elt < vec_len; elt++) | |
5555 | { | |
f1f41a6c | 5556 | rf = &repeat_factor_vec[elt]; |
1ffa4346 | 5557 | print_generic_expr (dump_file, rf->factor); |
8c5ac7f6 | 5558 | if (elt < vec_len - 1) |
5559 | fputs (" * ", dump_file); | |
5560 | } | |
f03df321 | 5561 | fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC"\n", |
8ef3b7cb | 5562 | power); |
8c5ac7f6 | 5563 | } |
5564 | } | |
5565 | } | |
5566 | else | |
5567 | { | |
5568 | /* Otherwise, find the first factor in the repeated factor | |
5569 | vector whose occurrence count is at least 2. If no such | |
5570 | factor exists, there are no builtin_powi opportunities | |
5571 | remaining. */ | |
f1f41a6c | 5572 | FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1) |
8c5ac7f6 | 5573 | { |
5574 | if (rf1->count >= 2) | |
5575 | break; | |
5576 | } | |
5577 | ||
5578 | if (j >= vec_len) | |
5579 | break; | |
5580 | ||
5581 | power = rf1->count; | |
5582 | ||
5583 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5584 | { | |
5585 | unsigned elt; | |
04009ada | 5586 | repeat_factor *rf; |
8c5ac7f6 | 5587 | fputs ("Building __builtin_pow call for (", dump_file); |
5588 | for (elt = j; elt < vec_len; elt++) | |
5589 | { | |
f1f41a6c | 5590 | rf = &repeat_factor_vec[elt]; |
1ffa4346 | 5591 | print_generic_expr (dump_file, rf->factor); |
8c5ac7f6 | 5592 | if (elt < vec_len - 1) |
5593 | fputs (" * ", dump_file); | |
5594 | } | |
f03df321 | 5595 | fprintf (dump_file, ")^" HOST_WIDE_INT_PRINT_DEC"\n", power); |
8c5ac7f6 | 5596 | } |
5597 | ||
5598 | reassociate_stats.pows_created++; | |
5599 | ||
5600 | /* Visit each element of the vector in reverse order (so that | |
5601 | high-occurrence elements are visited first, and within the | |
5602 | same occurrence count, lower-ranked elements are visited | |
5603 | first). Form a linear product of all elements in this order | |
5604 | whose occurrence count is at least that of element J. | |
5605 | Record the SSA name representing the product of each element | |
5606 | with all subsequent elements in the vector. */ | |
5607 | if (j == vec_len - 1) | |
5608 | rf1->repr = rf1->factor; | |
5609 | else | |
5610 | { | |
5611 | for (ii = vec_len - 2; ii >= (int)j; ii--) | |
5612 | { | |
5613 | tree op1, op2; | |
5614 | ||
f1f41a6c | 5615 | rf1 = &repeat_factor_vec[ii]; |
5616 | rf2 = &repeat_factor_vec[ii + 1]; | |
8c5ac7f6 | 5617 | |
5618 | /* Init the last factor's representative to be itself. */ | |
5619 | if (!rf2->repr) | |
5620 | rf2->repr = rf2->factor; | |
5621 | ||
5622 | op1 = rf1->factor; | |
5623 | op2 = rf2->repr; | |
5624 | ||
03d37e4e | 5625 | target_ssa = make_temp_ssa_name (type, NULL, "reassocpow"); |
e9cf809e | 5626 | mul_stmt = gimple_build_assign (target_ssa, MULT_EXPR, |
5627 | op1, op2); | |
8c5ac7f6 | 5628 | gimple_set_location (mul_stmt, gimple_location (stmt)); |
0be0fe66 | 5629 | gimple_set_uid (mul_stmt, gimple_uid (stmt)); |
8c5ac7f6 | 5630 | gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT); |
5631 | rf1->repr = target_ssa; | |
5632 | ||
5633 | /* Don't reprocess the multiply we just introduced. */ | |
5634 | gimple_set_visited (mul_stmt, true); | |
5635 | } | |
5636 | } | |
5637 | ||
5638 | /* Form a call to __builtin_powi for the maximum product | |
5639 | just formed, raised to the power obtained earlier. */ | |
f1f41a6c | 5640 | rf1 = &repeat_factor_vec[j]; |
03d37e4e | 5641 | iter_result = make_temp_ssa_name (type, NULL, "reassocpow"); |
8c5ac7f6 | 5642 | pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr, |
5643 | build_int_cst (integer_type_node, | |
5644 | power)); | |
5645 | gimple_call_set_lhs (pow_stmt, iter_result); | |
5646 | gimple_set_location (pow_stmt, gimple_location (stmt)); | |
0be0fe66 | 5647 | gimple_set_uid (pow_stmt, gimple_uid (stmt)); |
8c5ac7f6 | 5648 | gsi_insert_before (&gsi, pow_stmt, GSI_SAME_STMT); |
5649 | } | |
5650 | ||
97269507 | 5651 | /* If we previously formed at least one other builtin_powi call, |
5652 | form the product of this one and those others. */ | |
5653 | if (result) | |
5654 | { | |
03d37e4e | 5655 | tree new_result = make_temp_ssa_name (type, NULL, "reassocpow"); |
e9cf809e | 5656 | mul_stmt = gimple_build_assign (new_result, MULT_EXPR, |
5657 | result, iter_result); | |
97269507 | 5658 | gimple_set_location (mul_stmt, gimple_location (stmt)); |
0be0fe66 | 5659 | gimple_set_uid (mul_stmt, gimple_uid (stmt)); |
97269507 | 5660 | gsi_insert_before (&gsi, mul_stmt, GSI_SAME_STMT); |
5661 | gimple_set_visited (mul_stmt, true); | |
5662 | result = new_result; | |
5663 | } | |
5664 | else | |
5665 | result = iter_result; | |
8c5ac7f6 | 5666 | |
5667 | /* Decrement the occurrence count of each element in the product | |
5668 | by the count found above, and remove this many copies of each | |
5669 | factor from OPS. */ | |
5670 | for (i = j; i < vec_len; i++) | |
5671 | { | |
5672 | unsigned k = power; | |
5673 | unsigned n; | |
5674 | ||
f1f41a6c | 5675 | rf1 = &repeat_factor_vec[i]; |
8c5ac7f6 | 5676 | rf1->count -= power; |
5677 | ||
f1f41a6c | 5678 | FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe) |
8c5ac7f6 | 5679 | { |
5680 | if (oe->op == rf1->factor) | |
5681 | { | |
5682 | if (oe->count <= k) | |
5683 | { | |
f1f41a6c | 5684 | ops->ordered_remove (n); |
8c5ac7f6 | 5685 | k -= oe->count; |
5686 | ||
5687 | if (k == 0) | |
5688 | break; | |
5689 | } | |
5690 | else | |
5691 | { | |
5692 | oe->count -= k; | |
5693 | break; | |
5694 | } | |
5695 | } | |
5696 | } | |
5697 | } | |
5698 | } | |
5699 | ||
5700 | /* At this point all elements in the repeated factor vector have a | |
5701 | remaining occurrence count of 0 or 1, and those with a count of 1 | |
5702 | don't have cached representatives. Re-sort the ops vector and | |
5703 | clean up. */ | |
f1f41a6c | 5704 | ops->qsort (sort_by_operand_rank); |
5705 | repeat_factor_vec.release (); | |
97269507 | 5706 | |
5707 | /* Return the final product computed herein. Note that there may | |
5708 | still be some elements with single occurrence count left in OPS; | |
5709 | those will be handled by the normal reassociation logic. */ | |
5710 | return result; | |
5711 | } | |
5712 | ||
389034a4 | 5713 | /* Attempt to optimize |
5714 | CST1 * copysign (CST2, y) -> copysign (CST1 * CST2, y) if CST1 > 0, or | |
5715 | CST1 * copysign (CST2, y) -> -copysign (CST1 * CST2, y) if CST1 < 0. */ | |
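/* For instance (editorial example): 2.0 * copysign (3.0, y) becomes
   copysign (6.0, y), and -2.0 * copysign (3.0, y) becomes
   -copysign (6.0, y). */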
5716 | ||
5717 | static void | |
5718 | attempt_builtin_copysign (vec<operand_entry *> *ops) | |
5719 | { | |
5720 | operand_entry *oe; | |
5721 | unsigned int i; | |
5722 | unsigned int length = ops->length (); | |
5723 | tree cst = ops->last ()->op; | |
5724 | ||
5725 | if (length == 1 || TREE_CODE (cst) != REAL_CST) | |
5726 | return; | |
5727 | ||
5728 | FOR_EACH_VEC_ELT (*ops, i, oe) | |
5729 | { | |
5730 | if (TREE_CODE (oe->op) == SSA_NAME | |
5731 | && has_single_use (oe->op)) | |
5732 | { | |
5733 | gimple *def_stmt = SSA_NAME_DEF_STMT (oe->op); | |
390bb332 | 5734 | if (gcall *old_call = dyn_cast <gcall *> (def_stmt)) |
389034a4 | 5735 | { |
389034a4 | 5736 | tree arg0, arg1; |
390bb332 | 5737 | switch (gimple_call_combined_fn (old_call)) |
389034a4 | 5738 | { |
390bb332 | 5739 | CASE_CFN_COPYSIGN: |
8c32188e | 5740 | CASE_CFN_COPYSIGN_FN: |
390bb332 | 5741 | arg0 = gimple_call_arg (old_call, 0); |
5742 | arg1 = gimple_call_arg (old_call, 1); | |
389034a4 | 5743 | /* The first argument of copysign must be a constant, |
5744 | otherwise there's nothing to do. */ | |
5745 | if (TREE_CODE (arg0) == REAL_CST) | |
5746 | { | |
390bb332 | 5747 | tree type = TREE_TYPE (arg0); |
5748 | tree mul = const_binop (MULT_EXPR, type, cst, arg0); | |
389034a4 | 5749 | /* If we couldn't fold to a single constant, skip it. |
5750 | That happens e.g. for inexact multiplication when | |
5751 | -frounding-math. */ | |
5752 | if (mul == NULL_TREE) | |
5753 | break; | |
390bb332 | 5754 | /* Instead of adjusting OLD_CALL, let's build a new |
5755 | call to not leak the LHS and prevent keeping bogus | |
5756 | debug statements. DCE will clean up the old call. */ | |
5757 | gcall *new_call; | |
5758 | if (gimple_call_internal_p (old_call)) | |
5759 | new_call = gimple_build_call_internal | |
5760 | (IFN_COPYSIGN, 2, mul, arg1); | |
5761 | else | |
5762 | new_call = gimple_build_call | |
5763 | (gimple_call_fndecl (old_call), 2, mul, arg1); | |
5764 | tree lhs = make_ssa_name (type); | |
5765 | gimple_call_set_lhs (new_call, lhs); | |
5766 | gimple_set_location (new_call, | |
5767 | gimple_location (old_call)); | |
5768 | insert_stmt_after (new_call, old_call); | |
389034a4 | 5769 | /* We've used the constant, get rid of it. */ |
5770 | ops->pop (); | |
5771 | bool cst1_neg = real_isneg (TREE_REAL_CST_PTR (cst)); | |
5772 | /* Handle the CST1 < 0 case by negating the result. */ | |
5773 | if (cst1_neg) | |
5774 | { | |
5775 | tree negrhs = make_ssa_name (TREE_TYPE (lhs)); | |
5776 | gimple *negate_stmt | |
5777 | = gimple_build_assign (negrhs, NEGATE_EXPR, lhs); | |
390bb332 | 5778 | insert_stmt_after (negate_stmt, new_call); |
389034a4 | 5779 | oe->op = negrhs; |
5780 | } | |
5781 | else | |
5782 | oe->op = lhs; | |
5783 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
5784 | { | |
5785 | fprintf (dump_file, "Optimizing copysign: "); | |
1ffa4346 | 5786 | print_generic_expr (dump_file, cst); |
390bb332 | 5787 | fprintf (dump_file, " * COPYSIGN ("); |
1ffa4346 | 5788 | print_generic_expr (dump_file, arg0); |
389034a4 | 5789 | fprintf (dump_file, ", "); |
1ffa4346 | 5790 | print_generic_expr (dump_file, arg1); |
390bb332 | 5791 | fprintf (dump_file, ") into %sCOPYSIGN (", |
389034a4 | 5792 | cst1_neg ? "-" : ""); |
1ffa4346 | 5793 | print_generic_expr (dump_file, mul); |
389034a4 | 5794 | fprintf (dump_file, ", "); |
1ffa4346 | 5795 | print_generic_expr (dump_file, arg1); |
389034a4 | 5796 | fprintf (dump_file, "\n"); |
5797 | } | |
5798 | return; | |
5799 | } | |
5800 | break; | |
5801 | default: | |
5802 | break; | |
5803 | } | |
5804 | } | |
5805 | } | |
5806 | } | |
5807 | } | |
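
/* Illustration (an editor's sketch, not part of the pass): for a source
   expression like

     -2.5 * __builtin_copysign (3.0, y)

   the constants fold to -2.5 * 3.0 = -7.5 and, because CST1 is negative,
   the result is negated, yielding the equivalent of

     -__builtin_copysign (-7.5, y)

   which, since copysign ignores the sign of its first argument, is the
   same as -__builtin_copysign (7.5, y).  */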

/* Transform STMT at *GSI into a copy by replacing its rhs with NEW_RHS.  */

static void
transform_stmt_to_copy (gimple_stmt_iterator *gsi, gimple *stmt, tree new_rhs)
{
  tree rhs1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0);
    }

  rhs1 = gimple_assign_rhs1 (stmt);
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (stmt);
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0);
    }
}
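
/* Example (hypothetical GIMPLE, for illustration): when every operand of
   x_5 = t_3 * t_4 has been consumed by the __builtin_powi optimization,
   the statement is rewritten into the copy x_5 = powi_tmp_7, and the now
   unused chain feeding t_3 is released by remove_visited_stmt_chain.  */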

/* Transform STMT at *GSI into a multiply of RHS1 and RHS2.  */

static void
transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
                            tree rhs1, tree rhs2)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Transforming ");
      print_gimple_stmt (dump_file, stmt, 0);
    }

  gimple_assign_set_rhs_with_ops (gsi, MULT_EXPR, rhs1, rhs2);
  update_stmt (gsi_stmt (*gsi));
  remove_visited_stmt_chain (rhs1);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " into ");
      print_gimple_stmt (dump_file, stmt, 0);
    }
}
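
/* Example (hypothetical GIMPLE, for illustration): with one reassociated
   operand b_2 left over plus a __builtin_powi result, a statement such as
   x_6 = a_1 * b_2 is rewritten in place into x_6 = b_2 * powi_tmp_8.  */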

/* Reassociate expressions in basic block BB and the blocks that it
   post-dominates (its children in the post-dominator tree).

   Bubble up the return status from maybe_optimize_range_tests.  */

static bool
reassociate_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  basic_block son;
  gimple *stmt = last_stmt (bb);
  bool cfg_cleanup_needed = false;

  if (stmt && !gimple_visited_p (stmt))
    cfg_cleanup_needed |= maybe_optimize_range_tests (stmt);

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_assign (stmt)
          && !stmt_could_throw_p (cfun, stmt))
        {
          tree lhs, rhs1, rhs2;
          enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

          /* If this is not a gimple binary expression, there is
             nothing for us to do with it.  */
          if (get_gimple_rhs_class (rhs_code) != GIMPLE_BINARY_RHS)
            continue;

          /* If this was part of an already processed statement,
             we don't need to touch it again.  */
          if (gimple_visited_p (stmt))
            {
              /* This statement might have become dead because of previous
                 reassociations.  */
              if (has_zero_uses (gimple_get_lhs (stmt)))
                {
                  reassoc_remove_stmt (&gsi);
                  release_defs (stmt);
                  /* We might end up removing the last stmt above, which
                     places the iterator at the end of the sequence.
                     Reset it to the last stmt in this case, which might
                     itself be the end of the sequence if we removed the
                     last statement of the sequence; in that case we need
                     to bail out.  */
                  if (gsi_end_p (gsi))
                    {
                      gsi = gsi_last_bb (bb);
                      if (gsi_end_p (gsi))
                        break;
                    }
                }
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs1 = gimple_assign_rhs1 (stmt);
          rhs2 = gimple_assign_rhs2 (stmt);

          /* For operations other than bit operations and min/max we
             cannot reassociate all types.  Verify that here.  */
          if (rhs_code != BIT_IOR_EXPR
              && rhs_code != BIT_AND_EXPR
              && rhs_code != BIT_XOR_EXPR
              && rhs_code != MIN_EXPR
              && rhs_code != MAX_EXPR
              && (!can_reassociate_p (lhs)
                  || !can_reassociate_p (rhs1)
                  || !can_reassociate_p (rhs2)))
            continue;

          if (associative_tree_code (rhs_code))
            {
              auto_vec<operand_entry *> ops;
              tree powi_result = NULL_TREE;
              bool is_vector = VECTOR_TYPE_P (TREE_TYPE (lhs));

              /* There may be no immediate uses left by the time we
                 get here because we may have eliminated them all.  */
              if (TREE_CODE (lhs) == SSA_NAME && has_zero_uses (lhs))
                continue;

              gimple_set_visited (stmt, true);
              linearize_expr_tree (&ops, stmt, true, true);
              ops.qsort (sort_by_operand_rank);
              int orig_len = ops.length ();
              optimize_ops_list (rhs_code, &ops);
              if (undistribute_ops_list (rhs_code, &ops,
                                         loop_containing_stmt (stmt)))
                {
                  ops.qsort (sort_by_operand_rank);
                  optimize_ops_list (rhs_code, &ops);
                }

              if (rhs_code == PLUS_EXPR
                  && transform_add_to_multiply (&ops))
                ops.qsort (sort_by_operand_rank);

              if (rhs_code == BIT_IOR_EXPR || rhs_code == BIT_AND_EXPR)
                {
                  if (is_vector)
                    optimize_vec_cond_expr (rhs_code, &ops);
                  else
                    optimize_range_tests (rhs_code, &ops, NULL);
                }

              if (rhs_code == MULT_EXPR && !is_vector)
                {
                  attempt_builtin_copysign (&ops);

                  if (reassoc_insert_powi_p
                      && flag_unsafe_math_optimizations)
                    powi_result = attempt_builtin_powi (stmt, &ops);
                }

              operand_entry *last;
              bool negate_result = false;
              if (ops.length () > 1
                  && rhs_code == MULT_EXPR)
                {
                  last = ops.last ();
                  if ((integer_minus_onep (last->op)
                       || real_minus_onep (last->op))
                      && !HONOR_SNANS (TREE_TYPE (lhs))
                      && (!HONOR_SIGNED_ZEROS (TREE_TYPE (lhs))
                          || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (lhs))))
                    {
                      ops.pop ();
                      negate_result = true;
                    }
                }

              tree new_lhs = lhs;
              /* If the operand vector is now empty, all operands were
                 consumed by the __builtin_powi optimization.  */
              if (ops.length () == 0)
                transform_stmt_to_copy (&gsi, stmt, powi_result);
              else if (ops.length () == 1)
                {
                  tree last_op = ops.last ()->op;

                  /* If the stmt that defines the operand has to be
                     inserted, insert it before the use.  */
                  if (ops.last ()->stmt_to_insert)
                    insert_stmt_before_use (stmt, ops.last ()->stmt_to_insert);
                  if (powi_result)
                    transform_stmt_to_multiply (&gsi, stmt, last_op,
                                                powi_result);
                  else
                    transform_stmt_to_copy (&gsi, stmt, last_op);
                }
              else
                {
                  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
                  int ops_num = ops.length ();
                  int width = get_reassociation_width (ops_num, rhs_code,
                                                       mode);

                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file,
                             "Width = %d was chosen for reassociation\n",
                             width);

                  /* For binary bit operations, if there are at least 3
                     operands and the last operand in OPS is a constant,
                     move it to the front.  This helps ensure that we
                     generate (X & Y) & C rather than (X & C) & Y.  The
                     former will often match a canonical bit test when
                     we get to RTL.  */
                  if (ops.length () > 2
                      && (rhs_code == BIT_AND_EXPR
                          || rhs_code == BIT_IOR_EXPR
                          || rhs_code == BIT_XOR_EXPR)
                      && TREE_CODE (ops.last ()->op) == INTEGER_CST)
                    std::swap (*ops[0], *ops[ops_num - 1]);

                  if (width > 1
                      && ops.length () > 3)
                    rewrite_expr_tree_parallel (as_a <gassign *> (stmt),
                                                width, ops);
                  else
                    {
                      /* When there are three operands left, we want
                         to make sure the ones that get the double
                         binary op are chosen wisely.  */
                      int len = ops.length ();
                      if (len >= 3)
                        swap_ops_for_binary_stmt (ops, len - 3, stmt);

                      new_lhs = rewrite_expr_tree (stmt, 0, ops,
                                                   powi_result != NULL
                                                   || negate_result,
                                                   len != orig_len);
                    }

                  /* If we combined some repeated factors into a
                     __builtin_powi call, multiply that result by the
                     reassociated operands.  */
                  if (powi_result)
                    {
                      gimple *mul_stmt, *lhs_stmt = SSA_NAME_DEF_STMT (lhs);
                      tree type = TREE_TYPE (lhs);
                      tree target_ssa = make_temp_ssa_name (type, NULL,
                                                            "reassocpow");
                      gimple_set_lhs (lhs_stmt, target_ssa);
                      update_stmt (lhs_stmt);
                      if (lhs != new_lhs)
                        {
                          target_ssa = new_lhs;
                          new_lhs = lhs;
                        }
                      mul_stmt = gimple_build_assign (lhs, MULT_EXPR,
                                                      powi_result, target_ssa);
                      gimple_set_location (mul_stmt, gimple_location (stmt));
                      gimple_set_uid (mul_stmt, gimple_uid (stmt));
                      gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
                    }
                }

              if (negate_result)
                {
                  stmt = SSA_NAME_DEF_STMT (lhs);
                  tree tmp = make_ssa_name (TREE_TYPE (lhs));
                  gimple_set_lhs (stmt, tmp);
                  if (lhs != new_lhs)
                    tmp = new_lhs;
                  gassign *neg_stmt = gimple_build_assign (lhs, NEGATE_EXPR,
                                                           tmp);
                  gimple_set_uid (neg_stmt, gimple_uid (stmt));
                  gsi_insert_after (&gsi, neg_stmt, GSI_NEW_STMT);
                  update_stmt (stmt);
                }
            }
        }
    }
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    cfg_cleanup_needed |= reassociate_bb (son);

  return cfg_cleanup_needed;
}
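
/* Worked example (editor's illustration, hypothetical SSA names): given

     t1_1 = a_2 + c_3;
     t2_4 = t1_1 + b_5;
     x_6 = t2_4 + a_2;

   linearize_expr_tree collects the operand list {a, c, b, a}; after
   sorting by rank, transform_add_to_multiply can turn the repeated
   addend into a_2 * 2, and rewrite_expr_tree then emits the remaining
   operands back in rank order.  */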

/* Add jumps around shifts for range tests turned into bit tests.
   For each SSA_NAME VAR we have code like:
   VAR = ...;  // final stmt of range comparison
   // bit test here...;
   OTHERVAR = ...;  // final stmt of the bit test sequence
   RES = VAR | OTHERVAR;
   Turn the above into:
   <l1>:
   VAR = ...;
   if (VAR != 0)
     goto <l3>;
   else
     goto <l2>;
   <l2>:
   // bit test here...;
   OTHERVAR = ...;
   <l3>:
   # RES = PHI<1(l1), OTHERVAR(l2)>;  */

static void
branch_fixup (void)
{
  tree var;
  unsigned int i;

  FOR_EACH_VEC_ELT (reassoc_branch_fixups, i, var)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (var);
      gimple *use_stmt;
      use_operand_p use;
      bool ok = single_imm_use (var, &use, &use_stmt);
      gcc_assert (ok
                  && is_gimple_assign (use_stmt)
                  && gimple_assign_rhs_code (use_stmt) == BIT_IOR_EXPR
                  && gimple_bb (def_stmt) == gimple_bb (use_stmt));

      basic_block cond_bb = gimple_bb (def_stmt);
      basic_block then_bb = split_block (cond_bb, def_stmt)->dest;
      basic_block merge_bb = split_block (then_bb, use_stmt)->dest;

      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      gimple *g = gimple_build_cond (NE_EXPR, var,
                                     build_zero_cst (TREE_TYPE (var)),
                                     NULL_TREE, NULL_TREE);
      location_t loc = gimple_location (use_stmt);
      gimple_set_location (g, loc);
      gsi_insert_after (&gsi, g, GSI_NEW_STMT);

      edge etrue = make_edge (cond_bb, merge_bb, EDGE_TRUE_VALUE);
      etrue->probability = profile_probability::even ();
      edge efalse = find_edge (cond_bb, then_bb);
      efalse->flags = EDGE_FALSE_VALUE;
      efalse->probability -= etrue->probability;
      then_bb->count -= etrue->count ();

      tree othervar = NULL_TREE;
      if (gimple_assign_rhs1 (use_stmt) == var)
        othervar = gimple_assign_rhs2 (use_stmt);
      else if (gimple_assign_rhs2 (use_stmt) == var)
        othervar = gimple_assign_rhs1 (use_stmt);
      else
        gcc_unreachable ();
      tree lhs = gimple_assign_lhs (use_stmt);
      gphi *phi = create_phi_node (lhs, merge_bb);
      add_phi_arg (phi, build_one_cst (TREE_TYPE (lhs)), etrue, loc);
      add_phi_arg (phi, othervar, single_succ_edge (then_bb), loc);
      gsi = gsi_for_stmt (use_stmt);
      gsi_remove (&gsi, true);

      set_immediate_dominator (CDI_DOMINATORS, merge_bb, cond_bb);
      set_immediate_dominator (CDI_POST_DOMINATORS, cond_bb, merge_bb);
    }
  reassoc_branch_fixups.release ();
}
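
/* Note (editor's addition): the new condition is given an even 50/50
   probability estimate via profile_probability::even (), and the true
   edge's count is subtracted from THEN_BB so that block counts stay
   consistent with the newly added shortcut edge.  */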

void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
void debug_ops_vector (vec<operand_entry *> ops);

/* Dump the operand entry vector OPS to FILE.  */

void
dump_ops_vector (FILE *file, vec<operand_entry *> ops)
{
  operand_entry *oe;
  unsigned int i;

  FOR_EACH_VEC_ELT (ops, i, oe)
    {
      fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
      print_generic_expr (file, oe->op);
      fprintf (file, "\n");
    }
}

/* Dump the operand entry vector OPS to STDERR.  */

DEBUG_FUNCTION void
debug_ops_vector (vec<operand_entry *> ops)
{
  dump_ops_vector (stderr, ops);
}
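
/* Usage sketch (illustrative): debug_ops_vector is meant for interactive
   use, e.g. from gdb while stopped inside reassociate_bb:

     (gdb) call debug_ops_vector (ops)
     Op 0 -> rank: 5, tree: a_2
     Op 1 -> rank: 4, tree: b_3

   The exact output depends on the operand list; the lines above are
   made up.  */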

/* Bubble up the return status from reassociate_bb.  */

static bool
do_reassoc (void)
{
  break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  return reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}

/* Initialize the reassociation pass.  */

static void
init_reassoc (void)
{
  int i;
  long rank = 2;
  int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);

  /* Find the loops, so that we can prevent moving calculations in
     them.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  memset (&reassociate_stats, 0, sizeof (reassociate_stats));

  next_operand_entry_id = 0;

  /* Reverse post order (RPO) will give us something where
     deeper loops come later.  */
  pre_and_rev_post_order_compute (NULL, bbs, false);
  bb_rank = XCNEWVEC (long, last_basic_block_for_fn (cfun));
  operand_rank = new hash_map<tree, long>;

  /* Give each default definition a distinct rank.  This includes
     parameters and the static chain.  Walk backwards over all
     SSA names so that we get proper rank ordering according
     to tree_swap_operands_p.  */
  for (i = num_ssa_names - 1; i > 0; --i)
    {
      tree name = ssa_name (i);
      if (name && SSA_NAME_IS_DEFAULT_DEF (name))
        insert_operand_rank (name, ++rank);
    }

  /* Set up rank for each BB.  */
  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
    bb_rank[bbs[i]] = ++rank << 16;

  free (bbs);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  plus_negates = vNULL;
}

/* Cleanup after the reassociation pass, and print stats if
   requested.  */

static void
fini_reassoc (void)
{
  statistics_counter_event (cfun, "Linearized",
                            reassociate_stats.linearized);
  statistics_counter_event (cfun, "Constants eliminated",
                            reassociate_stats.constants_eliminated);
  statistics_counter_event (cfun, "Ops eliminated",
                            reassociate_stats.ops_eliminated);
  statistics_counter_event (cfun, "Statements rewritten",
                            reassociate_stats.rewritten);
  statistics_counter_event (cfun, "Built-in pow[i] calls encountered",
                            reassociate_stats.pows_encountered);
  statistics_counter_event (cfun, "Built-in powi calls created",
                            reassociate_stats.pows_created);

  delete operand_rank;
  operand_entry_pool.release ();
  free (bb_rank);
  plus_negates.release ();
  free_dominance_info (CDI_POST_DOMINATORS);
  loop_optimizer_finalize ();
}

/* Gate and execute functions for Reassociation.  If INSERT_POWI_P, enable
   insertion of __builtin_powi calls.

   Returns TODO_cleanup_cfg if a CFG cleanup pass is desired due to
   optimization of a gimple conditional.  Otherwise returns zero.  */

static unsigned int
execute_reassoc (bool insert_powi_p)
{
  reassoc_insert_powi_p = insert_powi_p;

  init_reassoc ();

  bool cfg_cleanup_needed;
  cfg_cleanup_needed = do_reassoc ();
  repropagate_negates ();
  branch_fixup ();

  fini_reassoc ();
  return cfg_cleanup_needed ? TODO_cleanup_cfg : 0;
}
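
/* Pipeline note (editor's summary): execute_reassoc runs do_reassoc
   (break up subtracts, then reassociate downward from the root of the
   post-dominator tree), repropagates negates, and finally applies the
   branch fixups queued for range tests turned into bit tests;
   TODO_cleanup_cfg is requested only when maybe_optimize_range_tests
   changed a conditional.  */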

namespace {

const pass_data pass_data_reassoc =
{
  GIMPLE_PASS, /* type */
  "reassoc", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_REASSOC, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa_only_virtuals, /* todo_flags_finish */
};

class pass_reassoc : public gimple_opt_pass
{
public:
  pass_reassoc (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_reassoc, ctxt), insert_powi_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_reassoc (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      insert_powi_p = param;
    }
  virtual bool gate (function *) { return flag_tree_reassoc != 0; }
  virtual unsigned int execute (function *)
    { return execute_reassoc (insert_powi_p); }

private:
  /* Enable insertion of __builtin_powi calls during execute_reassoc.  See
     point 3a in the pass header comment.  */
  bool insert_powi_p;
}; // class pass_reassoc

} // anon namespace

gimple_opt_pass *
make_pass_reassoc (gcc::context *ctxt)
{
  return new pass_reassoc (ctxt);
}
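
/* Usage sketch (illustrative, based on how passes.def parameterizes
   passes): the pass can be scheduled more than once, with the boolean
   parameter routed through set_pass_param selecting whether
   __builtin_powi insertion is enabled for that instantiation, e.g.

     NEXT_PASS (pass_reassoc, true);

   for a run that may insert powi calls and false for one that must
   not.  */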