Commit | Line | Data |
---|---|---|
a7e5372d | 1 | /* Loop invariant motion. |
d1e082c2 | 2 | Copyright (C) 2003-2013 Free Software Foundation, Inc. |
b8698a0f | 3 | |
a7e5372d | 4 | This file is part of GCC. |
b8698a0f | 5 | |
a7e5372d ZD | 6 | GCC is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the |
9dcd6f09 | 8 | Free Software Foundation; either version 3, or (at your option) any |
a7e5372d | 9 | later version. |
b8698a0f | 10 | |
a7e5372d ZD | 11 | GCC is distributed in the hope that it will be useful, but WITHOUT |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
b8698a0f | 15 | |
a7e5372d | 16 | You should have received a copy of the GNU General Public License |
9dcd6f09 NC | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
a7e5372d ZD | 19 | |
20 | #include "config.h" |
21 | #include "system.h" |
22 | #include "coretypes.h" |
23 | #include "tm.h" |
24 | #include "tree.h" |
a7e5372d | 25 | #include "tm_p.h" |
a7e5372d | 26 | #include "basic-block.h" |
2eb79bbb | 27 | #include "gimple-pretty-print.h" |
a7e5372d | 28 | #include "tree-flow.h" |
a7e5372d ZD | 29 | #include "cfgloop.h" |
30 | #include "domwalk.h" |
31 | #include "params.h" |
32 | #include "tree-pass.h" |
33 | #include "flags.h" |
bf190e8d | 34 | #include "hash-table.h" |
72425608 ZD | 35 | #include "tree-affine.h" |
36 | #include "pointer-set.h" |
8ded35f9 | 37 | #include "tree-ssa-propagate.h" |
a7e5372d | 38 | |
f10a6654 ZD | 39 | /* TODO: Support for predicated code motion. I.e. |
40 | |
41 | while (1) |
42 | { |
43 | if (cond) |
44 | { |
45 | a = inv; |
46 | something; |
47 | } |
48 | } |
49 | |
039496da | 50 | Where COND and INV are invariants, but evaluating INV may trap or be |
f10a6654 ZD | 51 | invalid for some other reason if !COND. This may be transformed to |
52 | |
53 | if (cond) |
54 | a = inv; |
55 | while (1) |
56 | { |
57 | if (cond) |
58 | something; |
59 | } */ |
60 | |
a7e5372d ZD | 61 | /* The auxiliary data kept for each statement. */ |
62 | |
63 | struct lim_aux_data |
64 | { |
65 | struct loop *max_loop; /* The outermost loop in which the statement |
66 | is invariant. */ |
67 | |
68 | struct loop *tgt_loop; /* The loop out of which we want to move the |
69 | invariant. */ |
70 | |
71 | struct loop *always_executed_in; |
72 | /* The outermost loop for which we are sure |
73 | the statement is executed if the loop |
74 | is entered. */ |
75 | |
a7e5372d ZD | 76 | unsigned cost; /* Cost of the computation performed by the |
77 | statement. */ |
78 | |
76421b44 RB | 79 | vec<gimple> depends; /* Vector of statements that must also be |
80 | hoisted out of the loop when this statement |
81 | is hoisted; i.e. those that define the |
82 | operands of the statement and are inside of |
83 | the MAX_LOOP loop. */ |
a7e5372d ZD | 84 | }; |
85 | |
726a989a RB | 86 | /* Maps statements to their lim_aux_data. */ |
87 | |
88 | static struct pointer_map_t *lim_aux_data_map; |
a7e5372d | 89 | |
72425608 | 90 | /* Description of a memory reference location. */ |
a7e5372d | 91 | |
72425608 | 92 | typedef struct mem_ref_loc |
a7e5372d ZD | 93 | { |
94 | tree *ref; /* The reference itself. */ |
726a989a | 95 | gimple stmt; /* The statement in which it occurs. */ |
72425608 ZD | 96 | } *mem_ref_loc_p; |
97 | |
72425608 | 98 | |
72425608 ZD | 99 | /* Description of a memory reference. */ |
100 | |
101 | typedef struct mem_ref |
01fd257a | 102 | { |
72425608 ZD | 103 | unsigned id; /* ID assigned to the memory reference |
104 | (its index in memory_accesses.refs_list) */ |
01fd257a | 105 | hashval_t hash; /* Its hash value. */ |
bdb01696 RB | 106 | |
107 | /* The memory access itself and associated caching of alias-oracle |
108 | query meta-data. */ |
109 | ao_ref mem; |
110 | |
374001cb | 111 | bitmap_head stored; /* The set of loops in which this memory location |
72425608 | 112 | is stored to. */ |
15d19bf8 | 113 | vec<vec<mem_ref_loc> > accesses_in_loop; |
72425608 ZD | 114 | /* The locations of the accesses. Vector |
115 | indexed by the loop number. */ |
72425608 ZD | 116 | |
117 | /* The following sets are computed on demand. We keep both the set and |
118 | its complement, so that we know whether the information was |
119 | already computed or not. */ |
374001cb | 120 | bitmap_head indep_loop; /* The set of loops in which the memory |
72425608 ZD | 121 | reference is independent, meaning: |
122 | If it is stored in the loop, this store |
123 | is independent of all other loads and |
124 | stores. |
125 | If it is only loaded, then it is independent |
126 | of all stores in the loop. */ |
374001cb | 127 | bitmap_head dep_loop; /* The complement of INDEP_LOOP. */ |
72425608 ZD | 128 | } *mem_ref_p; |
129 | |
c00217fc RB | 130 | /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first |
131 | to record (in)dependence against stores in the loop and its subloops, the |
132 | second to record (in)dependence against all references in the loop |
133 | and its subloops. */ |
134 | #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0)) |
72425608 | 135 | |
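As a quick illustration of the encoding (using a hypothetical loop number 3):

/* LOOP_DEP_BIT (3, false) == 2 * 3 + 0 == 6
     the bit recording (in)dependence against the stores in loop 3
     and its subloops;
   LOOP_DEP_BIT (3, true)  == 2 * 3 + 1 == 7
     the bit recording (in)dependence against all references in
     loop 3 and its subloops.

   Setting bit 6 in ref->indep_loop records that the reference is known
   to be independent of the stores of loop 3; the same bit in
   ref->dep_loop records the already-computed negative answer.  */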
bf190e8d LC |
136 | /* Mem_ref hashtable helpers. */ |
137 | ||
138 | struct mem_ref_hasher : typed_noop_remove <mem_ref> | |
139 | { | |
140 | typedef mem_ref value_type; | |
141 | typedef tree_node compare_type; | |
142 | static inline hashval_t hash (const value_type *); | |
143 | static inline bool equal (const value_type *, const compare_type *); | |
144 | }; | |
145 | ||
146 | /* A hash function for struct mem_ref object OBJ. */ | |
147 | ||
148 | inline hashval_t | |
149 | mem_ref_hasher::hash (const value_type *mem) | |
150 | { | |
151 | return mem->hash; | |
152 | } | |
153 | ||
154 | /* An equality function for struct mem_ref object MEM1 with | |
155 | memory reference OBJ2. */ | |
156 | ||
157 | inline bool | |
158 | mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2) | |
159 | { | |
160 | return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0); | |
161 | } | |
72425608 | 162 | |
72425608 ZD |
163 | |
164 | /* Description of memory accesses in loops. */ | |
165 | ||
166 | static struct | |
167 | { | |
168 | /* The hash table of memory references accessed in loops. */ | |
bf190e8d | 169 | hash_table <mem_ref_hasher> refs; |
72425608 ZD |
170 | |
171 | /* The list of memory references. */ | |
9771b263 | 172 | vec<mem_ref_p> refs_list; |
72425608 ZD |
173 | |
174 | /* The set of memory references accessed in each loop. */ | |
374001cb | 175 | vec<bitmap_head> refs_in_loop; |
72425608 | 176 | |
c00217fc | 177 | /* The set of memory references stored in each loop. */ |
374001cb | 178 | vec<bitmap_head> refs_stored_in_loop; |
c00217fc | 179 | |
c00217fc | 180 | /* The set of memory references stored in each loop, including subloops. */ |
374001cb | 181 | vec<bitmap_head> all_refs_stored_in_loop; |
72425608 ZD |
182 | |
183 | /* Cache for expanding memory addresses. */ | |
184 | struct pointer_map_t *ttae_cache; | |
185 | } memory_accesses; | |
186 | ||
3f9b14ff SB |
187 | /* Obstack for the bitmaps in the above data structures. */ |
188 | static bitmap_obstack lim_bitmap_obstack; | |
189 | ||
72425608 | 190 | static bool ref_indep_loop_p (struct loop *, mem_ref_p); |
a7e5372d ZD |
191 | |
192 | /* Minimum cost of an expensive expression. */ | |
193 | #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE)) | |
194 | ||
8a519095 | 195 | /* The outermost loop for which execution of the header guarantees that the |
a7e5372d ZD |
196 | block will be executed. */ |
197 | #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux) | |
8a519095 | 198 | #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL)) |
a7e5372d | 199 | |
e6647190 RB |
200 | /* ID of the shared unanalyzable mem. */ |
201 | #define UNANALYZABLE_MEM_ID 0 | |
202 | ||
546d314c | 203 | /* Whether the reference was analyzable. */ |
e6647190 | 204 | #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID) |
546d314c | 205 | |
726a989a RB |
206 | static struct lim_aux_data * |
207 | init_lim_data (gimple stmt) | |
208 | { | |
209 | void **p = pointer_map_insert (lim_aux_data_map, stmt); | |
210 | ||
211 | *p = XCNEW (struct lim_aux_data); | |
212 | return (struct lim_aux_data *) *p; | |
213 | } | |
214 | ||
215 | static struct lim_aux_data * | |
216 | get_lim_data (gimple stmt) | |
217 | { | |
218 | void **p = pointer_map_contains (lim_aux_data_map, stmt); | |
219 | if (!p) | |
220 | return NULL; | |
221 | ||
222 | return (struct lim_aux_data *) *p; | |
223 | } | |
224 | ||
225 | /* Releases the memory occupied by DATA. */ | |
226 | ||
227 | static void | |
228 | free_lim_aux_data (struct lim_aux_data *data) | |
229 | { | |
76421b44 | 230 | data->depends.release(); |
726a989a RB |
231 | free (data); |
232 | } | |
233 | ||
234 | static void | |
235 | clear_lim_data (gimple stmt) | |
236 | { | |
237 | void **p = pointer_map_contains (lim_aux_data_map, stmt); | |
238 | if (!p) | |
239 | return; | |
240 | ||
241 | free_lim_aux_data ((struct lim_aux_data *) *p); | |
242 | *p = NULL; | |
243 | } | |
244 | ||
a7e5372d ZD |
245 | /* Calls CBCK for each index in memory reference ADDR_P. There are two |
246 | kinds of situations handled; in each of these cases, the memory reference |
247 | and DATA are passed to the callback: | |
b8698a0f | 248 | |
a7e5372d ZD |
249 | Access to an array: ARRAY_{RANGE_}REF (base, index). In this case we also |
250 | pass the pointer to the index to the callback. | |
251 | ||
252 | Pointer dereference: INDIRECT_REF (addr). In this case we also pass the | |
253 | pointer to addr to the callback. | |
b8698a0f | 254 | |
a7e5372d ZD |
255 | If the callback returns false, the whole search stops and false is returned. |
256 | Otherwise the function returns true after traversing through the whole | |
257 | reference *ADDR_P. */ | |
258 | ||
259 | bool | |
260 | for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data) | |
261 | { | |
be35cf60 | 262 | tree *nxt, *idx; |
a7e5372d ZD |
263 | |
264 | for (; ; addr_p = nxt) | |
265 | { | |
266 | switch (TREE_CODE (*addr_p)) | |
267 | { | |
268 | case SSA_NAME: | |
269 | return cbck (*addr_p, addr_p, data); | |
270 | ||
70f34814 | 271 | case MEM_REF: |
a7e5372d ZD |
272 | nxt = &TREE_OPERAND (*addr_p, 0); |
273 | return cbck (*addr_p, nxt, data); | |
274 | ||
275 | case BIT_FIELD_REF: | |
a7e5372d | 276 | case VIEW_CONVERT_EXPR: |
8b11a64c ZD |
277 | case REALPART_EXPR: |
278 | case IMAGPART_EXPR: | |
a7e5372d ZD |
279 | nxt = &TREE_OPERAND (*addr_p, 0); |
280 | break; | |
281 | ||
be35cf60 ZD |
282 | case COMPONENT_REF: |
283 | /* If the component has varying offset, it behaves like index | |
284 | as well. */ | |
285 | idx = &TREE_OPERAND (*addr_p, 2); | |
286 | if (*idx | |
287 | && !cbck (*addr_p, idx, data)) | |
288 | return false; | |
289 | ||
290 | nxt = &TREE_OPERAND (*addr_p, 0); | |
291 | break; | |
292 | ||
a7e5372d | 293 | case ARRAY_REF: |
61c25908 | 294 | case ARRAY_RANGE_REF: |
a7e5372d ZD |
295 | nxt = &TREE_OPERAND (*addr_p, 0); |
296 | if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data)) | |
297 | return false; | |
298 | break; | |
299 | ||
300 | case VAR_DECL: | |
301 | case PARM_DECL: | |
a2ce7808 | 302 | case CONST_DECL: |
a7e5372d ZD |
303 | case STRING_CST: |
304 | case RESULT_DECL: | |
60407f7a | 305 | case VECTOR_CST: |
33674347 | 306 | case COMPLEX_CST: |
e2889823 PB |
307 | case INTEGER_CST: |
308 | case REAL_CST: | |
325217ed | 309 | case FIXED_CST: |
bb0c55f6 | 310 | case CONSTRUCTOR: |
a7e5372d ZD |
311 | return true; |
312 | ||
3d45dd59 RG |
313 | case ADDR_EXPR: |
314 | gcc_assert (is_gimple_min_invariant (*addr_p)); | |
315 | return true; | |
316 | ||
ac182688 ZD |
317 | case TARGET_MEM_REF: |
318 | idx = &TMR_BASE (*addr_p); | |
319 | if (*idx | |
320 | && !cbck (*addr_p, idx, data)) | |
321 | return false; | |
322 | idx = &TMR_INDEX (*addr_p); | |
4d948885 RG |
323 | if (*idx |
324 | && !cbck (*addr_p, idx, data)) | |
325 | return false; | |
326 | idx = &TMR_INDEX2 (*addr_p); | |
ac182688 ZD |
327 | if (*idx |
328 | && !cbck (*addr_p, idx, data)) | |
329 | return false; | |
330 | return true; | |
331 | ||
a7e5372d | 332 | default: |
1e128c5f | 333 | gcc_unreachable (); |
a7e5372d ZD |
334 | } |
335 | } | |
336 | } | |
337 | ||
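A minimal usage sketch follows; the callback note_index and the vector it
fills are hypothetical, shown only to illustrate the CBCK contract described
above (may_move_till and force_move_till below are the callbacks this file
actually uses):

/* Collect every index that for_each_index reports for a reference.  */

static bool
note_index (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
{
  vec<tree> *indices = (vec<tree> *) data;
  indices->safe_push (*index);
  /* Returning false here would stop the walk and make for_each_index
     return false as well.  */
  return true;
}

/* Called as: for_each_index (&memref, note_index, &indices);  */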
338 | /* If it is possible to hoist the statement STMT unconditionally, | |
339 | returns MOVE_POSSIBLE. | |
340 | If it is possible to hoist the statement STMT, but we must avoid making | |
341 | it executed if it would not be executed in the original program (e.g. | |
342 | because it may trap), return MOVE_PRESERVE_EXECUTION. | |
343 | Otherwise return MOVE_IMPOSSIBLE. */ | |
344 | ||
40923b20 | 345 | enum move_pos |
726a989a | 346 | movement_possibility (gimple stmt) |
a7e5372d | 347 | { |
726a989a RB |
348 | tree lhs; |
349 | enum move_pos ret = MOVE_POSSIBLE; | |
a7e5372d ZD |
350 | |
351 | if (flag_unswitch_loops | |
726a989a | 352 | && gimple_code (stmt) == GIMPLE_COND) |
a7e5372d ZD |
353 | { |
354 | /* If we perform unswitching, force the operands of the invariant | |
355 | condition to be moved out of the loop. */ | |
a7e5372d ZD |
356 | return MOVE_POSSIBLE; |
357 | } | |
358 | ||
e3bdfed6 RG |
359 | if (gimple_code (stmt) == GIMPLE_PHI |
360 | && gimple_phi_num_args (stmt) <= 2 | |
ea057359 | 361 | && !virtual_operand_p (gimple_phi_result (stmt)) |
e3bdfed6 RG |
362 | && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt))) |
363 | return MOVE_POSSIBLE; | |
364 | ||
726a989a | 365 | if (gimple_get_lhs (stmt) == NULL_TREE) |
a7e5372d ZD |
366 | return MOVE_IMPOSSIBLE; |
367 | ||
5006671f | 368 | if (gimple_vdef (stmt)) |
72425608 ZD |
369 | return MOVE_IMPOSSIBLE; |
370 | ||
726a989a RB |
371 | if (stmt_ends_bb_p (stmt) |
372 | || gimple_has_volatile_ops (stmt) | |
373 | || gimple_has_side_effects (stmt) | |
374 | || stmt_could_throw_p (stmt)) | |
a7e5372d ZD |
375 | return MOVE_IMPOSSIBLE; |
376 | ||
726a989a | 377 | if (is_gimple_call (stmt)) |
f10a6654 ZD |
378 | { |
379 | /* While a pure or const call is guaranteed to have no side effects, we |
380 | cannot move it arbitrarily. Consider code like | |
381 | ||
382 | char *s = something (); | |
383 | ||
384 | while (1) | |
385 | { | |
386 | if (s) | |
387 | t = strlen (s); | |
388 | else | |
389 | t = 0; | |
390 | } | |
391 | ||
392 | Here the strlen call cannot be moved out of the loop, even though | |
393 | s is invariant. In addition to possibly creating a call with | |
394 | invalid arguments, moving out a function call that is not executed | |
395 | may cause performance regressions in case the call is costly and | |
396 | not executed at all. */ | |
726a989a RB |
397 | ret = MOVE_PRESERVE_EXECUTION; |
398 | lhs = gimple_call_lhs (stmt); | |
f10a6654 | 399 | } |
726a989a RB |
400 | else if (is_gimple_assign (stmt)) |
401 | lhs = gimple_assign_lhs (stmt); | |
402 | else | |
403 | return MOVE_IMPOSSIBLE; | |
404 | ||
405 | if (TREE_CODE (lhs) == SSA_NAME | |
406 | && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) | |
407 | return MOVE_IMPOSSIBLE; | |
408 | ||
409 | if (TREE_CODE (lhs) != SSA_NAME | |
410 | || gimple_could_trap_p (stmt)) | |
411 | return MOVE_PRESERVE_EXECUTION; | |
412 | ||
19c0d7df AH |
413 | /* Non local loads in a transaction cannot be hoisted out. Well, |
414 | unless the load happens on every path out of the loop, but we | |
415 | don't take this into account yet. */ | |
416 | if (flag_tm | |
417 | && gimple_in_transaction (stmt) | |
418 | && gimple_assign_single_p (stmt)) | |
419 | { | |
420 | tree rhs = gimple_assign_rhs1 (stmt); | |
421 | if (DECL_P (rhs) && is_global_var (rhs)) | |
422 | { | |
423 | if (dump_file) | |
424 | { | |
425 | fprintf (dump_file, "Cannot hoist conditional load of "); | |
426 | print_generic_expr (dump_file, rhs, TDF_SLIM); | |
427 | fprintf (dump_file, " because it is in a transaction.\n"); | |
428 | } | |
429 | return MOVE_IMPOSSIBLE; | |
430 | } | |
431 | } | |
432 | ||
726a989a | 433 | return ret; |
a7e5372d ZD |
434 | } |
435 | ||
436 | /* Suppose that operand DEF is used inside the LOOP. Returns the outermost | |
2a7e31df | 437 | loop to which we could move the expression using DEF if it did not have |
a7e5372d ZD | 438 | other operands, i.e. the outermost loop enclosing LOOP in which the value |
439 | of DEF is invariant. */ | |
440 | ||
441 | static struct loop * | |
442 | outermost_invariant_loop (tree def, struct loop *loop) | |
443 | { | |
726a989a | 444 | gimple def_stmt; |
a7e5372d ZD |
445 | basic_block def_bb; |
446 | struct loop *max_loop; | |
726a989a | 447 | struct lim_aux_data *lim_data; |
a7e5372d | 448 | |
726a989a | 449 | if (!def) |
a7e5372d ZD |
450 | return superloop_at_depth (loop, 1); |
451 | ||
726a989a RB |
452 | if (TREE_CODE (def) != SSA_NAME) |
453 | { | |
454 | gcc_assert (is_gimple_min_invariant (def)); | |
455 | return superloop_at_depth (loop, 1); | |
456 | } | |
457 | ||
a7e5372d | 458 | def_stmt = SSA_NAME_DEF_STMT (def); |
726a989a | 459 | def_bb = gimple_bb (def_stmt); |
a7e5372d ZD |
460 | if (!def_bb) |
461 | return superloop_at_depth (loop, 1); | |
462 | ||
463 | max_loop = find_common_loop (loop, def_bb->loop_father); | |
464 | ||
726a989a RB |
465 | lim_data = get_lim_data (def_stmt); |
466 | if (lim_data != NULL && lim_data->max_loop != NULL) | |
a7e5372d | 467 | max_loop = find_common_loop (max_loop, |
726a989a | 468 | loop_outer (lim_data->max_loop)); |
a7e5372d ZD |
469 | if (max_loop == loop) |
470 | return NULL; | |
9ba025a2 | 471 | max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1); |
a7e5372d ZD |
472 | |
473 | return max_loop; | |
474 | } | |
475 | ||
a7e5372d ZD |
476 | /* DATA is a structure containing information associated with a statement |
477 | inside LOOP. DEF is one of the operands of this statement. | |
b8698a0f | 478 | |
a7e5372d ZD | 479 | Find the outermost loop enclosing LOOP in which the value of DEF is invariant |
480 | and record this in the DATA->max_loop field. If DEF itself is defined inside |
481 | this loop as well (i.e. we need to hoist it out of the loop if we want |
482 | to hoist the statement represented by DATA), record the statement in which |
483 | DEF is defined in the DATA->depends list. Additionally if ADD_COST is true, |
484 | add the cost of the computation of DEF to the DATA->cost. | |
b8698a0f | 485 | |
a7e5372d ZD |
486 | If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */ |
487 | ||
488 | static bool | |
489 | add_dependency (tree def, struct lim_aux_data *data, struct loop *loop, | |
490 | bool add_cost) | |
491 | { | |
726a989a RB |
492 | gimple def_stmt = SSA_NAME_DEF_STMT (def); |
493 | basic_block def_bb = gimple_bb (def_stmt); | |
a7e5372d | 494 | struct loop *max_loop; |
726a989a | 495 | struct lim_aux_data *def_data; |
a7e5372d ZD |
496 | |
497 | if (!def_bb) | |
498 | return true; | |
499 | ||
500 | max_loop = outermost_invariant_loop (def, loop); | |
501 | if (!max_loop) | |
502 | return false; | |
503 | ||
504 | if (flow_loop_nested_p (data->max_loop, max_loop)) | |
505 | data->max_loop = max_loop; | |
506 | ||
726a989a RB |
507 | def_data = get_lim_data (def_stmt); |
508 | if (!def_data) | |
a7e5372d ZD |
509 | return true; |
510 | ||
511 | if (add_cost | |
512 | /* Only add the cost if the statement defining DEF is inside LOOP, | |
513 | i.e. if it is likely that by moving the invariants dependent | |
514 | on it, we will be able to avoid creating a new register for | |
515 | it (since it will be only used in these dependent invariants). */ | |
516 | && def_bb->loop_father == loop) | |
726a989a | 517 | data->cost += def_data->cost; |
a7e5372d | 518 | |
76421b44 | 519 | data->depends.safe_push (def_stmt); |
a7e5372d ZD |
520 | |
521 | return true; | |
522 | } | |
523 | ||
546d314c RG |
524 | /* Returns an estimate for a cost of statement STMT. The values here |
525 | are just ad-hoc constants, similar to costs for inlining. */ | |
a7e5372d ZD |
526 | |
527 | static unsigned | |
726a989a | 528 | stmt_cost (gimple stmt) |
a7e5372d | 529 | { |
a7e5372d | 530 | /* Always try to create possibilities for unswitching. */ |
e3bdfed6 RG |
531 | if (gimple_code (stmt) == GIMPLE_COND |
532 | || gimple_code (stmt) == GIMPLE_PHI) | |
a7e5372d ZD |
533 | return LIM_EXPENSIVE; |
534 | ||
546d314c | 535 | /* We should be hoisting calls if possible. */ |
726a989a | 536 | if (is_gimple_call (stmt)) |
a7e5372d | 537 | { |
546d314c | 538 | tree fndecl; |
a7e5372d ZD |
539 | |
540 | /* Unless the call is a builtin_constant_p; this always folds to a | |
541 | constant, so moving it is useless. */ | |
726a989a RB |
542 | fndecl = gimple_call_fndecl (stmt); |
543 | if (fndecl | |
544 | && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL | |
545 | && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P) | |
a7e5372d ZD |
546 | return 0; |
547 | ||
546d314c | 548 | return LIM_EXPENSIVE; |
726a989a RB |
549 | } |
550 | ||
546d314c RG |
551 | /* Hoisting memory references out should almost surely be a win. */ |
552 | if (gimple_references_memory_p (stmt)) | |
553 | return LIM_EXPENSIVE; | |
554 | ||
726a989a | 555 | if (gimple_code (stmt) != GIMPLE_ASSIGN) |
546d314c | 556 | return 1; |
a7e5372d | 557 | |
726a989a RB |
558 | switch (gimple_assign_rhs_code (stmt)) |
559 | { | |
a7e5372d | 560 | case MULT_EXPR: |
67af611e RG |
561 | case WIDEN_MULT_EXPR: |
562 | case WIDEN_MULT_PLUS_EXPR: | |
563 | case WIDEN_MULT_MINUS_EXPR: | |
564 | case DOT_PROD_EXPR: | |
565 | case FMA_EXPR: | |
a7e5372d ZD |
566 | case TRUNC_DIV_EXPR: |
567 | case CEIL_DIV_EXPR: | |
568 | case FLOOR_DIV_EXPR: | |
569 | case ROUND_DIV_EXPR: | |
570 | case EXACT_DIV_EXPR: | |
571 | case CEIL_MOD_EXPR: | |
572 | case FLOOR_MOD_EXPR: | |
573 | case ROUND_MOD_EXPR: | |
574 | case TRUNC_MOD_EXPR: | |
b4852851 | 575 | case RDIV_EXPR: |
a7e5372d | 576 | /* Division and multiplication are usually expensive. */ |
546d314c | 577 | return LIM_EXPENSIVE; |
a7e5372d | 578 | |
e0a60731 RG |
579 | case LSHIFT_EXPR: |
580 | case RSHIFT_EXPR: | |
67af611e RG |
581 | case WIDEN_LSHIFT_EXPR: |
582 | case LROTATE_EXPR: | |
583 | case RROTATE_EXPR: | |
546d314c RG |
584 | /* Shifts and rotates are usually expensive. */ |
585 | return LIM_EXPENSIVE; | |
586 | ||
587 | case CONSTRUCTOR: | |
588 | /* Make vector construction cost proportional to the number | |
589 | of elements. */ | |
590 | return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt)); | |
591 | ||
592 | case SSA_NAME: | |
593 | case PAREN_EXPR: | |
594 | /* Whether or not something is wrapped inside a PAREN_EXPR | |
595 | should not change move cost. Nor should an intermediate | |
596 | unpropagated SSA name copy. */ | |
597 | return 0; | |
e0a60731 | 598 | |
a7e5372d | 599 | default: |
546d314c | 600 | return 1; |
a7e5372d | 601 | } |
a7e5372d ZD |
602 | } |
603 | ||
72425608 ZD | 604 | /* Finds the outermost loop between OUTER and LOOP in which the memory reference |
605 | REF is independent. If REF is not independent in LOOP, NULL is returned | |
606 | instead. */ | |
607 | ||
608 | static struct loop * | |
609 | outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref) | |
610 | { | |
611 | struct loop *aloop; | |
612 | ||
374001cb | 613 | if (bitmap_bit_p (&ref->stored, loop->num)) |
72425608 ZD |
614 | return NULL; |
615 | ||
616 | for (aloop = outer; | |
617 | aloop != loop; | |
618 | aloop = superloop_at_depth (loop, loop_depth (aloop) + 1)) | |
374001cb | 619 | if (!bitmap_bit_p (&ref->stored, aloop->num) |
72425608 ZD |
620 | && ref_indep_loop_p (aloop, ref)) |
621 | return aloop; | |
622 | ||
623 | if (ref_indep_loop_p (loop, ref)) | |
624 | return loop; | |
625 | else | |
626 | return NULL; | |
627 | } | |
628 | ||
629 | /* If there is a simple load or store to a memory reference in STMT, returns | |
fa10beec | 630 | the location of the memory reference, and sets IS_STORE according to whether |
72425608 ZD |
631 | it is a store or load. Otherwise, returns NULL. */ |
632 | ||
633 | static tree * | |
726a989a | 634 | simple_mem_ref_in_stmt (gimple stmt, bool *is_store) |
72425608 | 635 | { |
1fc1ef37 | 636 | tree *lhs, *rhs; |
72425608 | 637 | |
1fc1ef37 EB |
638 | /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */ |
639 | if (!gimple_assign_single_p (stmt)) | |
72425608 ZD |
640 | return NULL; |
641 | ||
726a989a | 642 | lhs = gimple_assign_lhs_ptr (stmt); |
1fc1ef37 | 643 | rhs = gimple_assign_rhs1_ptr (stmt); |
72425608 | 644 | |
1fc1ef37 | 645 | if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt)) |
72425608 | 646 | { |
72425608 | 647 | *is_store = false; |
1fc1ef37 | 648 | return rhs; |
72425608 | 649 | } |
1fc1ef37 EB |
650 | else if (gimple_vdef (stmt) |
651 | && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs))) | |
72425608 ZD |
652 | { |
653 | *is_store = true; | |
654 | return lhs; | |
655 | } | |
656 | else | |
657 | return NULL; | |
658 | } | |
659 | ||
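A few illustrative statement shapes (the SSA names are made up) showing
what the routine above accepts and rejects:

/* x_1 = a[i_2];      simple load:  returns the rhs location, *IS_STORE = false
   a[i_2] = x_1;      simple store: returns the lhs location, *IS_STORE = true
   x_1 = y_2 + z_3;   not a single load or store: returns NULL  */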
660 | /* Returns the memory reference contained in STMT. */ | |
661 | ||
662 | static mem_ref_p | |
726a989a | 663 | mem_ref_in_stmt (gimple stmt) |
72425608 ZD |
664 | { |
665 | bool store; | |
666 | tree *mem = simple_mem_ref_in_stmt (stmt, &store); | |
667 | hashval_t hash; | |
668 | mem_ref_p ref; | |
669 | ||
670 | if (!mem) | |
671 | return NULL; | |
672 | gcc_assert (!store); | |
673 | ||
674 | hash = iterative_hash_expr (*mem, 0); | |
bf190e8d | 675 | ref = memory_accesses.refs.find_with_hash (*mem, hash); |
72425608 ZD |
676 | |
677 | gcc_assert (ref != NULL); | |
678 | return ref; | |
679 | } | |
680 | ||
e3bdfed6 RG |
681 | /* From a controlling predicate in DOM determine the arguments from |
682 | the PHI node PHI that are chosen if the predicate evaluates to | |
683 | true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if | |
684 | they are non-NULL. Returns true if the arguments can be determined, | |
685 | else return false. */ | |
686 | ||
687 | static bool | |
688 | extract_true_false_args_from_phi (basic_block dom, gimple phi, | |
689 | tree *true_arg_p, tree *false_arg_p) | |
690 | { | |
691 | basic_block bb = gimple_bb (phi); | |
692 | edge true_edge, false_edge, tem; | |
693 | tree arg0 = NULL_TREE, arg1 = NULL_TREE; | |
694 | ||
695 | /* We have to verify that one edge into the PHI node is dominated | |
696 | by the true edge of the predicate block and the other edge | |
697 | dominated by the false edge. This ensures that the PHI argument | |
698 | we are going to take is completely determined by the path we | |
12d80acc RG |
699 | take from the predicate block. |
700 | We can only use BB dominance checks below if the destination of | |
701 | the true/false edges are dominated by their edge, thus only | |
702 | have a single predecessor. */ | |
e3bdfed6 RG |
703 | extract_true_false_edges_from_block (dom, &true_edge, &false_edge); |
704 | tem = EDGE_PRED (bb, 0); | |
705 | if (tem == true_edge | |
12d80acc RG |
706 | || (single_pred_p (true_edge->dest) |
707 | && (tem->src == true_edge->dest | |
708 | || dominated_by_p (CDI_DOMINATORS, | |
709 | tem->src, true_edge->dest)))) | |
e3bdfed6 RG |
710 | arg0 = PHI_ARG_DEF (phi, tem->dest_idx); |
711 | else if (tem == false_edge | |
12d80acc RG |
712 | || (single_pred_p (false_edge->dest) |
713 | && (tem->src == false_edge->dest | |
714 | || dominated_by_p (CDI_DOMINATORS, | |
715 | tem->src, false_edge->dest)))) | |
e3bdfed6 RG |
716 | arg1 = PHI_ARG_DEF (phi, tem->dest_idx); |
717 | else | |
718 | return false; | |
719 | tem = EDGE_PRED (bb, 1); | |
720 | if (tem == true_edge | |
12d80acc RG |
721 | || (single_pred_p (true_edge->dest) |
722 | && (tem->src == true_edge->dest | |
723 | || dominated_by_p (CDI_DOMINATORS, | |
724 | tem->src, true_edge->dest)))) | |
e3bdfed6 RG |
725 | arg0 = PHI_ARG_DEF (phi, tem->dest_idx); |
726 | else if (tem == false_edge | |
12d80acc RG |
727 | || (single_pred_p (false_edge->dest) |
728 | && (tem->src == false_edge->dest | |
729 | || dominated_by_p (CDI_DOMINATORS, | |
730 | tem->src, false_edge->dest)))) | |
e3bdfed6 RG |
731 | arg1 = PHI_ARG_DEF (phi, tem->dest_idx); |
732 | else | |
733 | return false; | |
734 | if (!arg0 || !arg1) | |
735 | return false; | |
736 | ||
737 | if (true_arg_p) | |
738 | *true_arg_p = arg0; | |
739 | if (false_arg_p) | |
740 | *false_arg_p = arg1; | |
741 | ||
742 | return true; | |
743 | } | |
744 | ||
a7e5372d ZD | 745 | /* Determine the outermost loop to which it is possible to hoist a statement |
746 | STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine |
747 | the outermost loop in which the value computed by STMT is invariant. |
748 | If MUST_PRESERVE_EXEC is true, additionally choose such a loop that | |
749 | we preserve the fact whether STMT is executed. It also fills other related | |
750 | information to LIM_DATA (STMT). | |
b8698a0f | 751 | |
a7e5372d ZD |
752 | The function returns false if STMT cannot be hoisted outside of the loop it |
753 | is defined in, and true otherwise. */ | |
754 | ||
755 | static bool | |
726a989a | 756 | determine_max_movement (gimple stmt, bool must_preserve_exec) |
a7e5372d | 757 | { |
726a989a | 758 | basic_block bb = gimple_bb (stmt); |
a7e5372d ZD |
759 | struct loop *loop = bb->loop_father; |
760 | struct loop *level; | |
726a989a | 761 | struct lim_aux_data *lim_data = get_lim_data (stmt); |
4c124b4c AM |
762 | tree val; |
763 | ssa_op_iter iter; | |
b8698a0f | 764 | |
a7e5372d ZD |
765 | if (must_preserve_exec) |
766 | level = ALWAYS_EXECUTED_IN (bb); | |
767 | else | |
768 | level = superloop_at_depth (loop, 1); | |
769 | lim_data->max_loop = level; | |
770 | ||
e3bdfed6 RG |
771 | if (gimple_code (stmt) == GIMPLE_PHI) |
772 | { | |
773 | use_operand_p use_p; | |
774 | unsigned min_cost = UINT_MAX; | |
775 | unsigned total_cost = 0; | |
776 | struct lim_aux_data *def_data; | |
777 | ||
778 | /* We will end up promoting dependencies to be unconditionally | |
779 | evaluated. For this reason the PHI cost (and thus the | |
780 | cost we remove from the loop by doing the invariant motion) | |
781 | is that of the cheapest PHI argument dependency chain. */ | |
782 | FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE) | |
783 | { | |
784 | val = USE_FROM_PTR (use_p); | |
785 | if (TREE_CODE (val) != SSA_NAME) | |
786 | continue; | |
787 | if (!add_dependency (val, lim_data, loop, false)) | |
788 | return false; | |
789 | def_data = get_lim_data (SSA_NAME_DEF_STMT (val)); | |
790 | if (def_data) | |
791 | { | |
792 | min_cost = MIN (min_cost, def_data->cost); | |
793 | total_cost += def_data->cost; | |
794 | } | |
795 | } | |
796 | ||
797 | lim_data->cost += min_cost; | |
798 | ||
799 | if (gimple_phi_num_args (stmt) > 1) | |
800 | { | |
801 | basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb); | |
802 | gimple cond; | |
803 | if (gsi_end_p (gsi_last_bb (dom))) | |
804 | return false; | |
805 | cond = gsi_stmt (gsi_last_bb (dom)); | |
806 | if (gimple_code (cond) != GIMPLE_COND) | |
807 | return false; | |
808 | /* Verify that this is an extended form of a diamond and | |
809 | the PHI arguments are completely controlled by the | |
810 | predicate in DOM. */ | |
811 | if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL)) | |
812 | return false; | |
813 | ||
814 | /* Fold in dependencies and cost of the condition. */ | |
815 | FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE) | |
816 | { | |
817 | if (!add_dependency (val, lim_data, loop, false)) | |
818 | return false; | |
819 | def_data = get_lim_data (SSA_NAME_DEF_STMT (val)); | |
820 | if (def_data) | |
821 | total_cost += def_data->cost; | |
822 | } | |
823 | ||
824 | /* We want to avoid unconditionally executing very expensive | |
825 | operations. As costs for our dependencies cannot be | |
826 | negative, just claim we are not invariant for this case. |
827 | We are also not sure whether the control-flow inside the |
828 | loop will vanish. */ | |
829 | if (total_cost - min_cost >= 2 * LIM_EXPENSIVE | |
830 | && !(min_cost != 0 | |
831 | && total_cost / min_cost <= 2)) | |
832 | return false; | |
833 | ||
834 | /* Assume that the control-flow in the loop will vanish. | |
835 | ??? We should verify this and not artificially increase | |
836 | the cost if that is not the case. */ | |
837 | lim_data->cost += stmt_cost (stmt); | |
838 | } | |
839 | ||
840 | return true; | |
841 | } | |
842 | else | |
843 | FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE) | |
844 | if (!add_dependency (val, lim_data, loop, true)) | |
845 | return false; | |
a7e5372d | 846 | |
5006671f | 847 | if (gimple_vuse (stmt)) |
72425608 ZD |
848 | { |
849 | mem_ref_p ref = mem_ref_in_stmt (stmt); | |
850 | ||
851 | if (ref) | |
852 | { | |
853 | lim_data->max_loop | |
854 | = outermost_indep_loop (lim_data->max_loop, loop, ref); | |
855 | if (!lim_data->max_loop) | |
856 | return false; | |
857 | } | |
858 | else | |
859 | { | |
5006671f | 860 | if ((val = gimple_vuse (stmt)) != NULL_TREE) |
72425608 ZD |
861 | { |
862 | if (!add_dependency (val, lim_data, loop, false)) | |
863 | return false; | |
864 | } | |
865 | } | |
866 | } | |
a7e5372d ZD |
867 | |
868 | lim_data->cost += stmt_cost (stmt); | |
869 | ||
870 | return true; | |
871 | } | |
872 | ||
873 | /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL, | |
874 | and that one of the operands of this statement is computed by STMT. | |
875 | Ensure that STMT (together with all the statements that define its | |
876 | operands) is hoisted at least out of the loop LEVEL. */ | |
877 | ||
878 | static void | |
726a989a | 879 | set_level (gimple stmt, struct loop *orig_loop, struct loop *level) |
a7e5372d | 880 | { |
726a989a | 881 | struct loop *stmt_loop = gimple_bb (stmt)->loop_father; |
726a989a | 882 | struct lim_aux_data *lim_data; |
76421b44 RB |
883 | gimple dep_stmt; |
884 | unsigned i; | |
a7e5372d ZD |
885 | |
886 | stmt_loop = find_common_loop (orig_loop, stmt_loop); | |
726a989a RB |
887 | lim_data = get_lim_data (stmt); |
888 | if (lim_data != NULL && lim_data->tgt_loop != NULL) | |
a7e5372d | 889 | stmt_loop = find_common_loop (stmt_loop, |
726a989a | 890 | loop_outer (lim_data->tgt_loop)); |
a7e5372d ZD |
891 | if (flow_loop_nested_p (stmt_loop, level)) |
892 | return; | |
893 | ||
726a989a RB |
894 | gcc_assert (level == lim_data->max_loop |
895 | || flow_loop_nested_p (lim_data->max_loop, level)); | |
a7e5372d | 896 | |
726a989a | 897 | lim_data->tgt_loop = level; |
76421b44 RB |
898 | FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt) |
899 | set_level (dep_stmt, orig_loop, level); | |
a7e5372d ZD |
900 | } |
901 | ||
902 | /* Determines an outermost loop from which we want to hoist the statement STMT. |
903 | For now we choose the outermost possible loop. TODO -- use profiling |
904 | information to set it more sanely. */ | |
905 | ||
906 | static void | |
726a989a | 907 | set_profitable_level (gimple stmt) |
a7e5372d | 908 | { |
726a989a | 909 | set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop); |
a7e5372d ZD |
910 | } |
911 | ||
726a989a | 912 | /* Returns true if STMT is a call that has side effects. */ |
a7e5372d ZD |
913 | |
914 | static bool | |
726a989a | 915 | nonpure_call_p (gimple stmt) |
a7e5372d | 916 | { |
726a989a | 917 | if (gimple_code (stmt) != GIMPLE_CALL) |
a7e5372d ZD |
918 | return false; |
919 | ||
726a989a | 920 | return gimple_has_side_effects (stmt); |
a7e5372d ZD |
921 | } |
922 | ||
e0a60731 RG |
923 | /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */ |
924 | ||
726a989a RB |
925 | static gimple |
926 | rewrite_reciprocal (gimple_stmt_iterator *bsi) | |
e0a60731 | 927 | { |
726a989a | 928 | gimple stmt, stmt1, stmt2; |
83d5977e | 929 | tree name, lhs, type; |
f50d67f6 | 930 | tree real_one; |
0ca5af51 | 931 | gimple_stmt_iterator gsi; |
e0a60731 | 932 | |
726a989a RB |
933 | stmt = gsi_stmt (*bsi); |
934 | lhs = gimple_assign_lhs (stmt); | |
935 | type = TREE_TYPE (lhs); | |
e0a60731 | 936 | |
8e8e423f | 937 | real_one = build_one_cst (type); |
e0a60731 | 938 | |
83d5977e RG |
939 | name = make_temp_ssa_name (type, NULL, "reciptmp"); |
940 | stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one, | |
941 | gimple_assign_rhs2 (stmt)); | |
726a989a RB |
942 | |
943 | stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name, | |
944 | gimple_assign_rhs1 (stmt)); | |
e0a60731 RG |
945 | |
946 | /* Replace division stmt with reciprocal and multiply stmts. | |
947 | The multiply stmt is not invariant, so update iterator | |
948 | and avoid rescanning. */ | |
0ca5af51 AO |
949 | gsi = *bsi; |
950 | gsi_insert_before (bsi, stmt1, GSI_NEW_STMT); | |
951 | gsi_replace (&gsi, stmt2, true); | |
e0a60731 RG |
952 | |
953 | /* Continue processing with invariant reciprocal statement. */ | |
954 | return stmt1; | |
955 | } | |
956 | ||
957 | /* Check if the pattern at *BSI is a bittest of the form | |
958 | (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */ | |
959 | ||
726a989a RB |
960 | static gimple |
961 | rewrite_bittest (gimple_stmt_iterator *bsi) | |
e0a60731 | 962 | { |
726a989a | 963 | gimple stmt, use_stmt, stmt1, stmt2; |
83d5977e | 964 | tree lhs, name, t, a, b; |
e0a60731 RG |
965 | use_operand_p use; |
966 | ||
726a989a RB |
967 | stmt = gsi_stmt (*bsi); |
968 | lhs = gimple_assign_lhs (stmt); | |
e0a60731 RG |
969 | |
970 | /* Verify that the single use of lhs is a comparison against zero. */ | |
971 | if (TREE_CODE (lhs) != SSA_NAME | |
5c7ec4f0 | 972 | || !single_imm_use (lhs, &use, &use_stmt) |
726a989a | 973 | || gimple_code (use_stmt) != GIMPLE_COND) |
e0a60731 | 974 | return stmt; |
726a989a RB |
975 | if (gimple_cond_lhs (use_stmt) != lhs |
976 | || (gimple_cond_code (use_stmt) != NE_EXPR | |
977 | && gimple_cond_code (use_stmt) != EQ_EXPR) | |
978 | || !integer_zerop (gimple_cond_rhs (use_stmt))) | |
e0a60731 RG |
979 | return stmt; |
980 | ||
981 | /* Get at the operands of the shift. The rhs is TMP1 & 1. */ | |
726a989a RB |
982 | stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); |
983 | if (gimple_code (stmt1) != GIMPLE_ASSIGN) | |
e0a60731 RG |
984 | return stmt; |
985 | ||
0d52bcc1 | 986 | /* There is a conversion in between possibly inserted by fold. */ |
1a87cf0c | 987 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1))) |
e0a60731 | 988 | { |
726a989a | 989 | t = gimple_assign_rhs1 (stmt1); |
e0a60731 RG |
990 | if (TREE_CODE (t) != SSA_NAME |
991 | || !has_single_use (t)) | |
992 | return stmt; | |
993 | stmt1 = SSA_NAME_DEF_STMT (t); | |
726a989a | 994 | if (gimple_code (stmt1) != GIMPLE_ASSIGN) |
e0a60731 | 995 | return stmt; |
e0a60731 RG |
996 | } |
997 | ||
998 | /* Verify that B is loop invariant but A is not. Verify that with | |
999 | all the stmt walking we are still in the same loop. */ | |
726a989a RB |
1000 | if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR |
1001 | || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt)) | |
1002 | return stmt; | |
e0a60731 | 1003 | |
726a989a RB |
1004 | a = gimple_assign_rhs1 (stmt1); |
1005 | b = gimple_assign_rhs2 (stmt1); | |
1006 | ||
1007 | if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL | |
1008 | && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL) | |
1009 | { | |
0ca5af51 AO |
1010 | gimple_stmt_iterator rsi; |
1011 | ||
e0a60731 | 1012 | /* 1 << B */ |
e0a60731 RG |
1013 | t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a), |
1014 | build_int_cst (TREE_TYPE (a), 1), b); | |
83d5977e RG |
1015 | name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp"); |
1016 | stmt1 = gimple_build_assign (name, t); | |
e0a60731 RG |
1017 | |
1018 | /* A & (1 << B) */ | |
1019 | t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name); | |
83d5977e RG |
1020 | name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp"); |
1021 | stmt2 = gimple_build_assign (name, t); | |
a6e2d112 UB |
1022 | |
1023 | /* Replace the SSA_NAME we compare against zero. Adjust | |
1024 | the type of zero accordingly. */ | |
5c7ec4f0 | 1025 | SET_USE (use, name); |
726a989a | 1026 | gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0)); |
e0a60731 | 1027 | |
0ca5af51 AO |
1028 | /* Don't use gsi_replace here, none of the new assignments sets |
1029 | the variable originally set in stmt. Move bsi to stmt1, and | |
1030 | then remove the original stmt, so that we get a chance to | |
1031 | retain debug info for it. */ | |
1032 | rsi = *bsi; | |
1033 | gsi_insert_before (bsi, stmt1, GSI_NEW_STMT); | |
1034 | gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT); | |
1035 | gsi_remove (&rsi, true); | |
e0a60731 RG |
1036 | |
1037 | return stmt1; | |
1038 | } | |
1039 | ||
1040 | return stmt; | |
1041 | } | |
1042 | ||
1043 | ||
a7e5372d ZD | 1044 | /* Determine the outermost loops in which statements in basic block BB are |
1045 | invariant, and record them to the LIM_DATA associated with the statements. | |
1046 | Callback for walk_dominator_tree. */ | |
1047 | ||
1048 | static void | |
1049 | determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED, | |
1050 | basic_block bb) | |
1051 | { | |
1052 | enum move_pos pos; | |
726a989a RB |
1053 | gimple_stmt_iterator bsi; |
1054 | gimple stmt; | |
a7e5372d ZD |
1055 | bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL; |
1056 | struct loop *outermost = ALWAYS_EXECUTED_IN (bb); | |
726a989a | 1057 | struct lim_aux_data *lim_data; |
a7e5372d | 1058 | |
9ba025a2 | 1059 | if (!loop_outer (bb->loop_father)) |
a7e5372d ZD |
1060 | return; |
1061 | ||
1062 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1063 | fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n", | |
9ba025a2 | 1064 | bb->index, bb->loop_father->num, loop_depth (bb->loop_father)); |
a7e5372d | 1065 | |
e3bdfed6 RG |
1066 | /* Look at PHI nodes, but only if there is at most two. |
1067 | ??? We could relax this further by post-processing the inserted | |
1068 | code and transforming adjacent cond-exprs with the same predicate | |
1069 | to control flow again. */ | |
1070 | bsi = gsi_start_phis (bb); | |
1071 | if (!gsi_end_p (bsi) | |
1072 | && ((gsi_next (&bsi), gsi_end_p (bsi)) | |
1073 | || (gsi_next (&bsi), gsi_end_p (bsi)))) | |
1074 | for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi)) | |
1075 | { | |
1076 | stmt = gsi_stmt (bsi); | |
1077 | ||
1078 | pos = movement_possibility (stmt); | |
1079 | if (pos == MOVE_IMPOSSIBLE) | |
1080 | continue; | |
1081 | ||
1082 | lim_data = init_lim_data (stmt); | |
1083 | lim_data->always_executed_in = outermost; | |
1084 | ||
1085 | if (!determine_max_movement (stmt, false)) | |
1086 | { | |
1087 | lim_data->max_loop = NULL; | |
1088 | continue; | |
1089 | } | |
1090 | ||
1091 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1092 | { | |
1093 | print_gimple_stmt (dump_file, stmt, 2, 0); | |
1094 | fprintf (dump_file, " invariant up to level %d, cost %d.\n\n", | |
1095 | loop_depth (lim_data->max_loop), | |
1096 | lim_data->cost); | |
1097 | } | |
1098 | ||
1099 | if (lim_data->cost >= LIM_EXPENSIVE) | |
1100 | set_profitable_level (stmt); | |
1101 | } | |
1102 | ||
726a989a | 1103 | for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) |
a7e5372d | 1104 | { |
726a989a | 1105 | stmt = gsi_stmt (bsi); |
a7e5372d ZD |
1106 | |
1107 | pos = movement_possibility (stmt); | |
1108 | if (pos == MOVE_IMPOSSIBLE) | |
1109 | { | |
1110 | if (nonpure_call_p (stmt)) | |
1111 | { | |
1112 | maybe_never = true; | |
1113 | outermost = NULL; | |
1114 | } | |
8ded35f9 RG |
1115 | /* Make sure to note always_executed_in for stores to make |
1116 | store-motion work. */ | |
1117 | else if (stmt_makes_single_store (stmt)) | |
1118 | { | |
726a989a RB |
1119 | struct lim_aux_data *lim_data = init_lim_data (stmt); |
1120 | lim_data->always_executed_in = outermost; | |
8ded35f9 | 1121 | } |
a7e5372d ZD |
1122 | continue; |
1123 | } | |
1124 | ||
726a989a RB |
1125 | if (is_gimple_assign (stmt) |
1126 | && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) | |
1127 | == GIMPLE_BINARY_RHS)) | |
a9b77cd1 | 1128 | { |
726a989a RB |
1129 | tree op0 = gimple_assign_rhs1 (stmt); |
1130 | tree op1 = gimple_assign_rhs2 (stmt); | |
1131 | struct loop *ol1 = outermost_invariant_loop (op1, | |
1132 | loop_containing_stmt (stmt)); | |
a9b77cd1 ZD |
1133 | |
1134 | /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal | |
1135 | to be hoisted out of loop, saving expensive divide. */ | |
1136 | if (pos == MOVE_POSSIBLE | |
726a989a | 1137 | && gimple_assign_rhs_code (stmt) == RDIV_EXPR |
a9b77cd1 ZD |
1138 | && flag_unsafe_math_optimizations |
1139 | && !flag_trapping_math | |
726a989a RB |
1140 | && ol1 != NULL |
1141 | && outermost_invariant_loop (op0, ol1) == NULL) | |
a9b77cd1 ZD |
1142 | stmt = rewrite_reciprocal (&bsi); |
1143 | ||
1144 | /* If the shift count is invariant, convert (A >> B) & 1 to | |
1145 | A & (1 << B) allowing the bit mask to be hoisted out of the loop | |
1146 | saving an expensive shift. */ | |
1147 | if (pos == MOVE_POSSIBLE | |
726a989a RB |
1148 | && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR |
1149 | && integer_onep (op1) | |
1150 | && TREE_CODE (op0) == SSA_NAME | |
1151 | && has_single_use (op0)) | |
a9b77cd1 ZD |
1152 | stmt = rewrite_bittest (&bsi); |
1153 | } | |
37cca405 | 1154 | |
726a989a RB |
1155 | lim_data = init_lim_data (stmt); |
1156 | lim_data->always_executed_in = outermost; | |
a7e5372d ZD |
1157 | |
1158 | if (maybe_never && pos == MOVE_PRESERVE_EXECUTION) | |
1159 | continue; | |
1160 | ||
1161 | if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION)) | |
1162 | { | |
726a989a | 1163 | lim_data->max_loop = NULL; |
a7e5372d ZD |
1164 | continue; |
1165 | } | |
1166 | ||
1167 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1168 | { | |
726a989a | 1169 | print_gimple_stmt (dump_file, stmt, 2, 0); |
a7e5372d | 1170 | fprintf (dump_file, " invariant up to level %d, cost %d.\n\n", |
726a989a RB |
1171 | loop_depth (lim_data->max_loop), |
1172 | lim_data->cost); | |
a7e5372d ZD |
1173 | } |
1174 | ||
726a989a | 1175 | if (lim_data->cost >= LIM_EXPENSIVE) |
a7e5372d ZD |
1176 | set_profitable_level (stmt); |
1177 | } | |
1178 | } | |
1179 | ||
1180 | /* For each statement determines the outermost loop in which it is invariant, |
1181 | statements on whose motion it depends and the cost of the computation. | |
1182 | This information is stored to the LIM_DATA structure associated with | |
1183 | each statement. */ | |
1184 | ||
1185 | static void | |
1186 | determine_invariantness (void) | |
1187 | { | |
1188 | struct dom_walk_data walk_data; | |
1189 | ||
1190 | memset (&walk_data, 0, sizeof (struct dom_walk_data)); | |
2b28c07a | 1191 | walk_data.dom_direction = CDI_DOMINATORS; |
ccf5c864 | 1192 | walk_data.before_dom_children = determine_invariantness_stmt; |
a7e5372d ZD |
1193 | |
1194 | init_walk_dominator_tree (&walk_data); | |
1195 | walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); | |
1196 | fini_walk_dominator_tree (&walk_data); | |
1197 | } | |
1198 | ||
a7e5372d | 1199 | /* Hoist the statements in basic block BB out of the loops prescribed by |
2a7e31df | 1200 | data stored in LIM_DATA structures associated with each statement. Callback |
a7e5372d ZD |
1201 | for walk_dominator_tree. */ |
1202 | ||
1203 | static void | |
e3bdfed6 | 1204 | move_computations_stmt (struct dom_walk_data *dw_data, |
a7e5372d ZD |
1205 | basic_block bb) |
1206 | { | |
1207 | struct loop *level; | |
726a989a RB |
1208 | gimple_stmt_iterator bsi; |
1209 | gimple stmt; | |
a7e5372d | 1210 | unsigned cost = 0; |
726a989a | 1211 | struct lim_aux_data *lim_data; |
a7e5372d | 1212 | |
9ba025a2 | 1213 | if (!loop_outer (bb->loop_father)) |
a7e5372d ZD |
1214 | return; |
1215 | ||
e3bdfed6 RG |
1216 | for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); ) |
1217 | { | |
1218 | gimple new_stmt; | |
1219 | stmt = gsi_stmt (bsi); | |
1220 | ||
1221 | lim_data = get_lim_data (stmt); | |
1222 | if (lim_data == NULL) | |
1223 | { | |
1224 | gsi_next (&bsi); | |
1225 | continue; | |
1226 | } | |
1227 | ||
1228 | cost = lim_data->cost; | |
1229 | level = lim_data->tgt_loop; | |
1230 | clear_lim_data (stmt); | |
1231 | ||
1232 | if (!level) | |
1233 | { | |
1234 | gsi_next (&bsi); | |
1235 | continue; | |
1236 | } | |
1237 | ||
1238 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1239 | { | |
1240 | fprintf (dump_file, "Moving PHI node\n"); | |
1241 | print_gimple_stmt (dump_file, stmt, 0, 0); | |
1242 | fprintf (dump_file, "(cost %u) out of loop %d.\n\n", | |
1243 | cost, level->num); | |
1244 | } | |
1245 | ||
1246 | if (gimple_phi_num_args (stmt) == 1) | |
1247 | { | |
1248 | tree arg = PHI_ARG_DEF (stmt, 0); | |
1249 | new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg), | |
1250 | gimple_phi_result (stmt), | |
1251 | arg, NULL_TREE); | |
1252 | SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt; | |
1253 | } | |
1254 | else | |
1255 | { | |
1256 | basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb); | |
1257 | gimple cond = gsi_stmt (gsi_last_bb (dom)); | |
1258 | tree arg0 = NULL_TREE, arg1 = NULL_TREE, t; | |
1259 | /* Get the PHI arguments corresponding to the true and false | |
1260 | edges of COND. */ | |
1261 | extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1); | |
1262 | gcc_assert (arg0 && arg1); | |
1263 | t = build2 (gimple_cond_code (cond), boolean_type_node, | |
1264 | gimple_cond_lhs (cond), gimple_cond_rhs (cond)); | |
73804b12 RG |
1265 | new_stmt = gimple_build_assign_with_ops (COND_EXPR, |
1266 | gimple_phi_result (stmt), | |
1267 | t, arg0, arg1); | |
e3bdfed6 RG |
1268 | SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt; |
1269 | *((unsigned int *)(dw_data->global_data)) |= TODO_cleanup_cfg; | |
1270 | } | |
1271 | gsi_insert_on_edge (loop_preheader_edge (level), new_stmt); | |
1272 | remove_phi_node (&bsi, false); | |
1273 | } | |
1274 | ||
726a989a | 1275 | for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); ) |
a7e5372d | 1276 | { |
13714310 RG |
1277 | edge e; |
1278 | ||
726a989a | 1279 | stmt = gsi_stmt (bsi); |
a7e5372d | 1280 | |
726a989a RB |
1281 | lim_data = get_lim_data (stmt); |
1282 | if (lim_data == NULL) | |
a7e5372d | 1283 | { |
726a989a | 1284 | gsi_next (&bsi); |
a7e5372d ZD |
1285 | continue; |
1286 | } | |
1287 | ||
726a989a RB |
1288 | cost = lim_data->cost; |
1289 | level = lim_data->tgt_loop; | |
1290 | clear_lim_data (stmt); | |
a7e5372d ZD |
1291 | |
1292 | if (!level) | |
1293 | { | |
726a989a | 1294 | gsi_next (&bsi); |
a7e5372d ZD |
1295 | continue; |
1296 | } | |
1297 | ||
1298 | /* We do not really want to move conditionals out of the loop; we just | |
1299 | placed it here to force its operands to be moved if necessary. */ | |
726a989a | 1300 | if (gimple_code (stmt) == GIMPLE_COND) |
a7e5372d ZD |
1301 | continue; |
1302 | ||
1303 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1304 | { | |
1305 | fprintf (dump_file, "Moving statement\n"); | |
726a989a | 1306 | print_gimple_stmt (dump_file, stmt, 0, 0); |
a7e5372d ZD |
1307 | fprintf (dump_file, "(cost %u) out of loop %d.\n\n", |
1308 | cost, level->num); | |
1309 | } | |
72425608 | 1310 | |
13714310 RG |
1311 | e = loop_preheader_edge (level); |
1312 | gcc_assert (!gimple_vdef (stmt)); | |
1313 | if (gimple_vuse (stmt)) | |
1314 | { | |
1315 | /* The new VUSE is the one from the virtual PHI in the loop | |
1316 | header or the one already present. */ | |
1317 | gimple_stmt_iterator gsi2; | |
1318 | for (gsi2 = gsi_start_phis (e->dest); | |
1319 | !gsi_end_p (gsi2); gsi_next (&gsi2)) | |
1320 | { | |
1321 | gimple phi = gsi_stmt (gsi2); | |
ea057359 | 1322 | if (virtual_operand_p (gimple_phi_result (phi))) |
13714310 RG |
1323 | { |
1324 | gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e)); | |
1325 | break; | |
1326 | } | |
1327 | } | |
1328 | } | |
726a989a | 1329 | gsi_remove (&bsi, false); |
13714310 | 1330 | gsi_insert_on_edge (e, stmt); |
a7e5372d ZD |
1331 | } |
1332 | } | |
1333 | ||
1334 | /* Hoist the statements out of the loops prescribed by data stored in | |
2a7e31df | 1335 | LIM_DATA structures associated with each statement. */ |
a7e5372d | 1336 | |
e3bdfed6 | 1337 | static unsigned int |
a7e5372d ZD |
1338 | move_computations (void) |
1339 | { | |
1340 | struct dom_walk_data walk_data; | |
e3bdfed6 | 1341 | unsigned int todo = 0; |
a7e5372d ZD |
1342 | |
1343 | memset (&walk_data, 0, sizeof (struct dom_walk_data)); | |
e3bdfed6 | 1344 | walk_data.global_data = &todo; |
2b28c07a | 1345 | walk_data.dom_direction = CDI_DOMINATORS; |
ccf5c864 | 1346 | walk_data.before_dom_children = move_computations_stmt; |
a7e5372d ZD |
1347 | |
1348 | init_walk_dominator_tree (&walk_data); | |
1349 | walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); | |
1350 | fini_walk_dominator_tree (&walk_data); | |
1351 | ||
726a989a | 1352 | gsi_commit_edge_inserts (); |
5006671f | 1353 | if (need_ssa_update_p (cfun)) |
84d65814 | 1354 | rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); |
e3bdfed6 RG |
1355 | |
1356 | return todo; | |
a7e5372d ZD |
1357 | } |
1358 | ||
1359 | /* Checks whether the statement defining variable *INDEX can be hoisted | |
1360 | out of the loop passed in DATA. Callback for for_each_index. */ | |
1361 | ||
1362 | static bool | |
1363 | may_move_till (tree ref, tree *index, void *data) | |
1364 | { | |
726a989a | 1365 | struct loop *loop = (struct loop *) data, *max_loop; |
a7e5372d ZD |
1366 | |
1367 | /* If REF is an array reference, check also that the step and the lower | |
1368 | bound is invariant in LOOP. */ | |
1369 | if (TREE_CODE (ref) == ARRAY_REF) | |
1370 | { | |
726a989a RB |
1371 | tree step = TREE_OPERAND (ref, 3); |
1372 | tree lbound = TREE_OPERAND (ref, 2); | |
a7e5372d | 1373 | |
726a989a | 1374 | max_loop = outermost_invariant_loop (step, loop); |
a7e5372d ZD |
1375 | if (!max_loop) |
1376 | return false; | |
1377 | ||
726a989a | 1378 | max_loop = outermost_invariant_loop (lbound, loop); |
a7e5372d ZD |
1379 | if (!max_loop) |
1380 | return false; | |
1381 | } | |
1382 | ||
1383 | max_loop = outermost_invariant_loop (*index, loop); | |
1384 | if (!max_loop) | |
1385 | return false; | |
1386 | ||
1387 | return true; | |
1388 | } | |
1389 | ||
726a989a | 1390 | /* If OP is an SSA_NAME, force the statement that defines it to be |
b4042a03 | 1391 | moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */ |
a7e5372d ZD |
1392 | |
1393 | static void | |
726a989a | 1394 | force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop) |
a7e5372d | 1395 | { |
726a989a | 1396 | gimple stmt; |
a7e5372d | 1397 | |
726a989a RB |
1398 | if (!op |
1399 | || is_gimple_min_invariant (op)) | |
1400 | return; | |
a7e5372d | 1401 | |
726a989a | 1402 | gcc_assert (TREE_CODE (op) == SSA_NAME); |
b8698a0f | 1403 | |
726a989a RB |
1404 | stmt = SSA_NAME_DEF_STMT (op); |
1405 | if (gimple_nop_p (stmt)) | |
a7e5372d ZD |
1406 | return; |
1407 | ||
726a989a | 1408 | set_level (stmt, orig_loop, loop); |
a7e5372d ZD |
1409 | } |
1410 | ||
1411 | /* Forces statement defining invariants in REF (and *INDEX) to be moved out of | |
b4042a03 ZD |
1412 | the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for |
1413 | for_each_index. */ | |
1414 | ||
1415 | struct fmt_data | |
1416 | { | |
1417 | struct loop *loop; | |
1418 | struct loop *orig_loop; | |
1419 | }; | |
a7e5372d ZD |
1420 | |
1421 | static bool | |
1422 | force_move_till (tree ref, tree *index, void *data) | |
1423 | { | |
c22940cd | 1424 | struct fmt_data *fmt_data = (struct fmt_data *) data; |
a7e5372d ZD |
1425 | |
1426 | if (TREE_CODE (ref) == ARRAY_REF) | |
1427 | { | |
726a989a RB |
1428 | tree step = TREE_OPERAND (ref, 3); |
1429 | tree lbound = TREE_OPERAND (ref, 2); | |
a7e5372d | 1430 | |
726a989a RB |
1431 | force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop); |
1432 | force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop); | |
a7e5372d ZD |
1433 | } |
1434 | ||
726a989a | 1435 | force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop); |
a7e5372d ZD |
1436 | |
1437 | return true; | |
1438 | } | |
1439 | ||
72425608 ZD |
1440 | /* A function to free the mem_ref object MEM. */ |
1441 | ||
1442 | static void | |
f5843d08 | 1443 | memref_free (struct mem_ref *mem) |
72425608 | 1444 | { |
72425608 | 1445 | unsigned i; |
15d19bf8 | 1446 | vec<mem_ref_loc> *accs; |
72425608 | 1447 | |
9771b263 | 1448 | FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs) |
15d19bf8 | 1449 | accs->release (); |
9771b263 | 1450 | mem->accesses_in_loop.release (); |
72425608 | 1451 | |
72425608 ZD |
1452 | free (mem); |
1453 | } | |
1454 | ||
1455 | /* Allocates and returns a memory reference description for MEM whose hash | |
1456 | value is HASH and id is ID. */ | |
1457 | ||
1458 | static mem_ref_p | |
1459 | mem_ref_alloc (tree mem, unsigned hash, unsigned id) | |
1460 | { | |
1461 | mem_ref_p ref = XNEW (struct mem_ref); | |
bdb01696 | 1462 | ao_ref_init (&ref->mem, mem); |
72425608 ZD |
1463 | ref->id = id; |
1464 | ref->hash = hash; | |
374001cb RB |
1465 | bitmap_initialize (&ref->stored, &lim_bitmap_obstack); |
1466 | bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack); | |
1467 | bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack); | |
9771b263 | 1468 | ref->accesses_in_loop.create (0); |
72425608 ZD |
1469 | |
1470 | return ref; | |
1471 | } | |
1472 | ||
72425608 ZD |
1473 | /* Records memory reference location *LOC in LOOP to the memory reference |
1474 | description REF. The reference occurs in statement STMT. */ | |
a7e5372d ZD |
1475 | |
1476 | static void | |
726a989a | 1477 | record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc) |
a7e5372d | 1478 | { |
15d19bf8 | 1479 | mem_ref_loc aref; |
72425608 | 1480 | |
9771b263 | 1481 | if (ref->accesses_in_loop.length () |
72425608 | 1482 | <= (unsigned) loop->num) |
9771b263 | 1483 | ref->accesses_in_loop.safe_grow_cleared (loop->num + 1); |
a7e5372d | 1484 | |
15d19bf8 RB |
1485 | aref.stmt = stmt; |
1486 | aref.ref = loc; | |
1487 | ref->accesses_in_loop[loop->num].safe_push (aref); | |
a7e5372d ZD |
1488 | } |
1489 | ||
72425608 | 1490 | /* Marks reference REF as stored in LOOP. */ |
a7e5372d ZD |
1491 | |
1492 | static void | |
72425608 | 1493 | mark_ref_stored (mem_ref_p ref, struct loop *loop) |
a7e5372d | 1494 | { |
374001cb RB |
1495 | while (loop != current_loops->tree_root |
1496 | && bitmap_set_bit (&ref->stored, loop->num)) | |
1497 | loop = loop_outer (loop); | |
72425608 ZD |
1498 | } |
1499 | ||
1500 | /* Gathers memory references in statement STMT in LOOP, storing the | |
1501 | information about them in the memory_accesses structure. Marks | |
1502 | the vops accessed through unrecognized statements there as | |
1503 | well. */ | |
1504 | ||
1505 | static void | |
726a989a | 1506 | gather_mem_refs_stmt (struct loop *loop, gimple stmt) |
72425608 ZD |
1507 | { |
1508 | tree *mem = NULL; | |
1509 | hashval_t hash; | |
bf190e8d | 1510 | mem_ref **slot; |
72425608 | 1511 | mem_ref_p ref; |
72425608 | 1512 | bool is_stored; |
72425608 | 1513 | unsigned id; |
a7e5372d | 1514 | |
5006671f | 1515 | if (!gimple_vuse (stmt)) |
72425608 ZD |
1516 | return; |
1517 | ||
1518 | mem = simple_mem_ref_in_stmt (stmt, &is_stored); | |
1519 | if (!mem) | |
546d314c | 1520 | { |
e6647190 RB |
1521 | /* We use the shared mem_ref for all unanalyzable refs. */ |
1522 | id = UNANALYZABLE_MEM_ID; | |
1523 | ref = memory_accesses.refs_list[id]; | |
546d314c RG |
1524 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1525 | { | |
1526 | fprintf (dump_file, "Unanalyzed memory reference %u: ", id); | |
1527 | print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); | |
1528 | } | |
e6647190 | 1529 | is_stored = gimple_vdef (stmt); |
72425608 ZD |
1530 | } |
1531 | else | |
a7e5372d | 1532 | { |
e6647190 | 1533 | hash = iterative_hash_expr (*mem, 0); |
bf190e8d | 1534 | slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT); |
e6647190 | 1535 | if (*slot) |
72425608 | 1536 | { |
e6647190 RB |
1537 | ref = (mem_ref_p) *slot; |
1538 | id = ref->id; | |
72425608 | 1539 | } |
e6647190 RB |
1540 | else |
1541 | { | |
1542 | id = memory_accesses.refs_list.length (); | |
1543 | ref = mem_ref_alloc (*mem, hash, id); | |
1544 | memory_accesses.refs_list.safe_push (ref); | |
1545 | *slot = ref; | |
039496da | 1546 | |
e6647190 RB |
1547 | if (dump_file && (dump_flags & TDF_DETAILS)) |
1548 | { | |
1549 | fprintf (dump_file, "Memory reference %u: ", id); | |
1550 | print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM); | |
1551 | fprintf (dump_file, "\n"); | |
1552 | } | |
1553 | } | |
1554 | ||
1555 | record_mem_ref_loc (ref, loop, stmt, mem); | |
1556 | } | |
374001cb | 1557 | bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id); |
72425608 | 1558 | if (is_stored) |
c00217fc | 1559 | { |
374001cb | 1560 | bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id); |
c00217fc RB |
1561 | mark_ref_stored (ref, loop); |
1562 | } | |
72425608 | 1563 | return; |
a7e5372d ZD |
1564 | } |
1565 | ||
5a2d2a79 RB |
1566 | static unsigned *bb_loop_postorder; |
1567 | ||
1568 | /* qsort comparison function to sort blocks by their loop fathers' postorder. */ |
1569 | ||
1570 | static int | |
1571 | sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_) | |
1572 | { | |
1573 | basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_); | |
1574 | basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_); | |
1575 | struct loop *loop1 = bb1->loop_father; | |
1576 | struct loop *loop2 = bb2->loop_father; | |
1577 | if (loop1->num == loop2->num) | |
1578 | return 0; | |
1579 | return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1; | |
1580 | } | |
1581 | ||
72425608 | 1582 | /* Gathers memory references in loops. */ |
a7e5372d ZD |
1583 | |
1584 | static void | |
374001cb | 1585 | analyze_memory_references (void) |
a7e5372d | 1586 | { |
726a989a | 1587 | gimple_stmt_iterator bsi; |
5a2d2a79 | 1588 | basic_block bb, *bbs; |
c00217fc | 1589 | struct loop *loop, *outer; |
72425608 | 1590 | loop_iterator li; |
5a2d2a79 | 1591 | unsigned i, n; |
72425608 | 1592 | |
5a2d2a79 RB |
1593 | /* Initialize bb_loop_postorder with a mapping from loop->num to |
1594 | its postorder index. */ | |
1595 | i = 0; | |
0fc822d0 | 1596 | bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun)); |
5a2d2a79 RB |
1597 | FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) |
1598 | bb_loop_postorder[loop->num] = i++; | |
1599 | /* Collect all basic-blocks in loops and sort them by their | |
1600 | loops' postorder. */ | |
1601 | i = 0; | |
1602 | bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS); | |
72425608 | 1603 | FOR_EACH_BB (bb) |
5a2d2a79 RB |
1604 | if (bb->loop_father != current_loops->tree_root) |
1605 | bbs[i++] = bb; | |
1606 | n = i; | |
1607 | qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp); | |
1608 | free (bb_loop_postorder); | |
1609 | ||
1610 | /* Visit blocks in loop postorder and assign mem-ref IDs in that order. | |
1611 | That results in better locality for all the bitmaps. */ | |
1612 | for (i = 0; i < n; ++i) | |
72425608 | 1613 | { |
5a2d2a79 | 1614 | basic_block bb = bbs[i]; |
726a989a | 1615 | for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) |
5a2d2a79 | 1616 | gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi)); |
72425608 ZD |
1617 | } |
1618 | ||
5a2d2a79 RB |
1619 | free (bbs); |
1620 | ||
546d314c RG |
1621 | /* Propagate the information about accessed memory references up |
1622 | the loop hierarchy. */ | |
72425608 ZD |
1623 | FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) |
1624 | { | |
c00217fc | 1625 | /* Finalize the overall touched references (including subloops). */ |
374001cb RB |
1626 | bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num], |
1627 | &memory_accesses.refs_stored_in_loop[loop->num]); | |
c00217fc RB |
1628 | |
1629 | /* Propagate the information about accessed memory references up | |
1630 | the loop hierarchy. */ | |
1631 | outer = loop_outer (loop); | |
1632 | if (outer == current_loops->tree_root) | |
72425608 ZD |
1633 | continue; |
1634 | ||
374001cb RB |
1635 | bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num], |
1636 | &memory_accesses.all_refs_stored_in_loop[loop->num]); | |
72425608 | 1637 | } |
72425608 ZD |
1638 | } |
1639 | ||
72425608 ZD |
1640 | /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in |
1641 | tree_to_aff_combination_expand. */ | |
1642 | ||
1643 | static bool | |
bdb01696 RB |
1644 | mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, |
1645 | struct pointer_map_t **ttae_cache) | |
72425608 ZD |
1646 | { |
1647 | /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same | |
1648 | object and their offsets differ in such a way that the locations cannot |
1649 | overlap, then they cannot alias. */ | |
72425608 | 1650 | double_int size1, size2; |
1842e4d4 | 1651 | aff_tree off1, off2; |
72425608 | 1652 | |
1842e4d4 | 1653 | /* Perform basic offset and type-based disambiguation. */ |
bdb01696 | 1654 | if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true)) |
72425608 | 1655 | return false; |
a7e5372d | 1656 | |
72425608 ZD |
1657 | /* The expansion of addresses may be a bit expensive, thus we only do |
1658 | the check at -O2 and higher optimization levels. */ | |
1659 | if (optimize < 2) | |
1660 | return true; | |
1661 | ||
bdb01696 RB |
1662 | get_inner_reference_aff (mem1->mem.ref, &off1, &size1); |
1663 | get_inner_reference_aff (mem2->mem.ref, &off2, &size2); | |
72425608 ZD |
1664 | aff_combination_expand (&off1, ttae_cache); |
1665 | aff_combination_expand (&off2, ttae_cache); | |
1666 | aff_combination_scale (&off1, double_int_minus_one); | |
1667 | aff_combination_add (&off2, &off1); | |
1668 | ||
02f5d6c5 | 1669 | if (aff_comb_cannot_overlap_p (&off2, size1, size2)) |
72425608 ZD |
1670 | return false; |
1671 | ||
1672 | return true; | |
1673 | } | |
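/* Illustrative example (an editorial sketch, not from the original sources):
   for the accesses a[i] and a[i + 1] the expanded affine offsets differ by
   exactly one element size, so aff_comb_cannot_overlap_p reports that the
   two locations cannot overlap even though the symbolic indices alone do
   not allow the base oracle to disambiguate them.  */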
1674 | ||
15d19bf8 RB |
1675 | /* Iterates over all locations of REF in LOOP and its subloops calling |
1676 | fn.operator() with the location as argument. When that operator | |
1677 | returns true the iteration is stopped and true is returned. | |
1678 | Otherwise false is returned. */ | |
72425608 | 1679 | |
15d19bf8 RB |
1680 | template <typename FN> |
1681 | static bool | |
1682 | for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn) | |
72425608 | 1683 | { |
72425608 ZD |
1684 | unsigned i; |
1685 | mem_ref_loc_p loc; | |
72425608 ZD |
1686 | struct loop *subloop; |
1687 | ||
15d19bf8 RB |
1688 | if (ref->accesses_in_loop.length () > (unsigned) loop->num) |
1689 | FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc) | |
1690 | if (fn (loc)) | |
1691 | return true; | |
72425608 ZD |
1692 | |
1693 | for (subloop = loop->inner; subloop != NULL; subloop = subloop->next) | |
15d19bf8 RB |
1694 | if (for_all_locs_in_loop (subloop, ref, fn)) |
1695 | return true; | |
1696 | ||
1697 | return false; | |
1698 | } | |
1699 | ||
1700 | /* Rewrites location LOC by TMP_VAR. */ | |
1701 | ||
1702 | struct rewrite_mem_ref_loc | |
1703 | { | |
1704 | rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {} | |
1705 | bool operator()(mem_ref_loc_p loc); | |
1706 | tree tmp_var; | |
1707 | }; | |
1708 | ||
1709 | bool | |
1710 | rewrite_mem_ref_loc::operator()(mem_ref_loc_p loc) | |
1711 | { | |
1712 | *loc->ref = tmp_var; | |
1713 | update_stmt (loc->stmt); | |
1714 | return false; | |
72425608 ZD |
1715 | } |
1716 | ||
1717 | /* Rewrites all references to REF in LOOP by variable TMP_VAR. */ | |
1718 | ||
1719 | static void | |
1720 | rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var) | |
1721 | { | |
15d19bf8 | 1722 | for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var)); |
a7e5372d ZD |
1723 | } |
1724 | ||
9fd9ccf7 RB |
1725 | /* Stores the first reference location in LOCP. */ |
1726 | ||
1727 | struct first_mem_ref_loc_1 | |
1728 | { | |
1729 | first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {} | |
1730 | bool operator()(mem_ref_loc_p loc); | |
1731 | mem_ref_loc_p *locp; | |
1732 | }; | |
1733 | ||
1734 | bool | |
1735 | first_mem_ref_loc_1::operator()(mem_ref_loc_p loc) | |
1736 | { | |
1737 | *locp = loc; | |
1738 | return true; | |
1739 | } | |
1740 | ||
1741 | /* Returns the first reference location to REF in LOOP. */ | |
1742 | ||
1743 | static mem_ref_loc_p | |
1744 | first_mem_ref_loc (struct loop *loop, mem_ref_p ref) | |
1745 | { | |
1746 | mem_ref_loc_p locp = NULL; | |
1747 | for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp)); | |
1748 | return locp; | |
1749 | } | |
1750 | ||
d28cbb07 ZD |
1751 | /* The name and the length of the currently generated variable |
1752 | for lsm. */ | |
1753 | #define MAX_LSM_NAME_LENGTH 40 | |
1754 | static char lsm_tmp_name[MAX_LSM_NAME_LENGTH + 1]; | |
1755 | static int lsm_tmp_name_length; | |
1756 | ||
1757 | /* Adds S to lsm_tmp_name. */ | |
1758 | ||
1759 | static void | |
1760 | lsm_tmp_name_add (const char *s) | |
1761 | { | |
1762 | int l = strlen (s) + lsm_tmp_name_length; | |
1763 | if (l > MAX_LSM_NAME_LENGTH) | |
1764 | return; | |
1765 | ||
1766 | strcpy (lsm_tmp_name + lsm_tmp_name_length, s); | |
1767 | lsm_tmp_name_length = l; | |
1768 | } | |
1769 | ||
1770 | /* Stores the name for the temporary variable that replaces REF to |
1771 | lsm_tmp_name. */ | |
1772 | ||
1773 | static void | |
1774 | gen_lsm_tmp_name (tree ref) | |
1775 | { | |
1776 | const char *name; | |
1777 | ||
1778 | switch (TREE_CODE (ref)) | |
1779 | { | |
70f34814 | 1780 | case MEM_REF: |
d5fed62d | 1781 | case TARGET_MEM_REF: |
d28cbb07 ZD |
1782 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); |
1783 | lsm_tmp_name_add ("_"); | |
1784 | break; | |
1785 | ||
70f34814 RG |
1786 | case ADDR_EXPR: |
1787 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1788 | break; | |
1789 | ||
d28cbb07 ZD |
1790 | case BIT_FIELD_REF: |
1791 | case VIEW_CONVERT_EXPR: | |
1792 | case ARRAY_RANGE_REF: | |
1793 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1794 | break; | |
1795 | ||
1796 | case REALPART_EXPR: | |
1797 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1798 | lsm_tmp_name_add ("_RE"); | |
1799 | break; | |
b8698a0f | 1800 | |
d28cbb07 ZD |
1801 | case IMAGPART_EXPR: |
1802 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1803 | lsm_tmp_name_add ("_IM"); | |
1804 | break; | |
1805 | ||
1806 | case COMPONENT_REF: | |
1807 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1808 | lsm_tmp_name_add ("_"); | |
1809 | name = get_name (TREE_OPERAND (ref, 1)); | |
1810 | if (!name) | |
1811 | name = "F"; | |
d28cbb07 | 1812 | lsm_tmp_name_add (name); |
cbe80ff8 | 1813 | break; |
d28cbb07 ZD |
1814 | |
1815 | case ARRAY_REF: | |
1816 | gen_lsm_tmp_name (TREE_OPERAND (ref, 0)); | |
1817 | lsm_tmp_name_add ("_I"); | |
1818 | break; | |
1819 | ||
1820 | case SSA_NAME: | |
d28cbb07 ZD |
1821 | case VAR_DECL: |
1822 | case PARM_DECL: | |
1823 | name = get_name (ref); | |
1824 | if (!name) | |
1825 | name = "D"; | |
1826 | lsm_tmp_name_add (name); | |
1827 | break; | |
1828 | ||
1829 | case STRING_CST: | |
1830 | lsm_tmp_name_add ("S"); | |
1831 | break; | |
1832 | ||
1833 | case RESULT_DECL: | |
1834 | lsm_tmp_name_add ("R"); | |
1835 | break; | |
1836 | ||
150e3929 RG |
1837 | case INTEGER_CST: |
1838 | /* Nothing. */ | |
1839 | break; | |
1840 | ||
d28cbb07 ZD |
1841 | default: |
1842 | gcc_unreachable (); | |
1843 | } | |
1844 | } | |
1845 | ||
1846 | /* Determines the name for the temporary variable that replaces REF. |
bbc8a8dc ZD |
1847 | The name is accumulated into the lsm_tmp_name variable. |
1848 | N is added to the name of the temporary. */ | |
d28cbb07 | 1849 | |
bbc8a8dc ZD |
1850 | char * |
1851 | get_lsm_tmp_name (tree ref, unsigned n) | |
d28cbb07 | 1852 | { |
bbc8a8dc ZD |
1853 | char ns[2]; |
1854 | ||
d28cbb07 ZD |
1855 | lsm_tmp_name_length = 0; |
1856 | gen_lsm_tmp_name (ref); | |
1857 | lsm_tmp_name_add ("_lsm"); | |
bbc8a8dc ZD |
1858 | if (n < 10) |
1859 | { | |
1860 | ns[0] = '0' + n; | |
1861 | ns[1] = 0; | |
1862 | lsm_tmp_name_add (ns); | |
1863 | } | |
d28cbb07 ZD |
1864 | return lsm_tmp_name; |
1865 | } | |
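/* Hypothetical example of the naming scheme above (names made up for
   illustration): for a reference s.data[i] and N == 0 the generated name
   is "s_data_I_lsm0": the field name is appended after "_", "_I" marks
   the array index and "_lsm" tags the temporary created by store motion.  */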
1866 | ||
039496da AH |
1867 | struct prev_flag_edges { |
1868 | /* Edge to insert new flag comparison code. */ | |
1869 | edge append_cond_position; | |
1870 | ||
1871 | /* Edge for fall through from previous flag comparison. */ | |
1872 | edge last_cond_fallthru; | |
1873 | }; | |
1874 | ||
1875 | /* Helper function for execute_sm. Emit code to store TMP_VAR into | |
1876 | MEM along edge EX. | |
1877 | ||
1878 | The store is only done if MEM has changed. We do this so no | |
1879 | changes to MEM occur on code paths that did not originally store | |
1880 | into it. | |
1881 | ||
1882 | The common case for execute_sm will transform: | |
1883 | ||
1884 | for (...) { | |
1885 | if (foo) | |
1886 | stuff; | |
1887 | else | |
1888 | MEM = TMP_VAR; | |
1889 | } | |
1890 | ||
1891 | into: | |
1892 | ||
1893 | lsm = MEM; | |
1894 | for (...) { | |
1895 | if (foo) | |
1896 | stuff; | |
1897 | else | |
1898 | lsm = TMP_VAR; | |
1899 | } | |
1900 | MEM = lsm; | |
1901 | ||
1902 | This function will generate: | |
1903 | ||
1904 | lsm = MEM; | |
1905 | ||
1906 | lsm_flag = false; | |
1907 | ... | |
1908 | for (...) { | |
1909 | if (foo) | |
1910 | stuff; | |
1911 | else { | |
1912 | lsm = TMP_VAR; | |
1913 | lsm_flag = true; | |
1914 | } | |
1915 | } | |
1916 | if (lsm_flag) <-- | |
1917 | MEM = lsm; <-- | |
1918 | */ | |
1919 | ||
1920 | static void | |
1921 | execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag) | |
1922 | { | |
1923 | basic_block new_bb, then_bb, old_dest; | |
1924 | bool loop_has_only_one_exit; | |
1925 | edge then_old_edge, orig_ex = ex; | |
1926 | gimple_stmt_iterator gsi; | |
1927 | gimple stmt; | |
1928 | struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux; | |
1929 | ||
1930 | /* ?? Insert store after previous store if applicable. See note | |
1931 | below. */ | |
1932 | if (prev_edges) | |
1933 | ex = prev_edges->append_cond_position; | |
1934 | ||
1935 | loop_has_only_one_exit = single_pred_p (ex->dest); | |
1936 | ||
1937 | if (loop_has_only_one_exit) | |
1938 | ex = split_block_after_labels (ex->dest); | |
1939 | ||
1940 | old_dest = ex->dest; | |
1941 | new_bb = split_edge (ex); | |
1942 | then_bb = create_empty_bb (new_bb); | |
1943 | if (current_loops && new_bb->loop_father) | |
1944 | add_bb_to_loop (then_bb, new_bb->loop_father); | |
1945 | ||
1946 | gsi = gsi_start_bb (new_bb); | |
1947 | stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node, | |
1948 | NULL_TREE, NULL_TREE); | |
1949 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
1950 | ||
1951 | gsi = gsi_start_bb (then_bb); | |
1952 | /* Insert actual store. */ | |
1953 | stmt = gimple_build_assign (unshare_expr (mem), tmp_var); | |
1954 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
1955 | ||
1956 | make_edge (new_bb, then_bb, EDGE_TRUE_VALUE); | |
1957 | make_edge (new_bb, old_dest, EDGE_FALSE_VALUE); | |
1958 | then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU); | |
1959 | ||
1960 | set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb); | |
1961 | ||
1962 | if (prev_edges) | |
1963 | { | |
1964 | basic_block prevbb = prev_edges->last_cond_fallthru->src; | |
1965 | redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb); | |
1966 | set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb); | |
1967 | set_immediate_dominator (CDI_DOMINATORS, old_dest, | |
1968 | recompute_dominator (CDI_DOMINATORS, old_dest)); | |
1969 | } | |
1970 | ||
1971 | /* ?? Because stores may alias, they must happen in the exact | |
1972 | sequence they originally happened. Save the position right after | |
1973 | the (_lsm) store we just created so we can continue appending after | |
1974 | it and maintain the original order. */ | |
1975 | { | |
1976 | struct prev_flag_edges *p; | |
1977 | ||
1978 | if (orig_ex->aux) | |
1979 | orig_ex->aux = NULL; | |
1980 | alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges)); | |
1981 | p = (struct prev_flag_edges *) orig_ex->aux; | |
1982 | p->append_cond_position = then_old_edge; | |
1983 | p->last_cond_fallthru = find_edge (new_bb, old_dest); | |
1984 | orig_ex->aux = (void *) p; | |
1985 | } | |
1986 | ||
1987 | if (!loop_has_only_one_exit) | |
1988 | for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi)) | |
1989 | { | |
1990 | gimple phi = gsi_stmt (gsi); | |
1991 | unsigned i; | |
1992 | ||
1993 | for (i = 0; i < gimple_phi_num_args (phi); i++) | |
1994 | if (gimple_phi_arg_edge (phi, i)->src == new_bb) | |
1995 | { | |
1996 | tree arg = gimple_phi_arg_def (phi, i); | |
9e227d60 | 1997 | add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION); |
039496da AH |
1998 | update_stmt (phi); |
1999 | } | |
2000 | } | |
2001 | /* Remove the original fall through edge. This was the | |
2002 | single_succ_edge (new_bb). */ | |
2003 | EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU; | |
2004 | } | |
2005 | ||
15d19bf8 RB |
2006 | /* When the location LOC is a store to REF, insert code setting FLAG after it. */ |
2007 | ||
2008 | struct sm_set_flag_if_changed | |
2009 | { | |
2010 | sm_set_flag_if_changed (tree flag_) : flag (flag_) {} | |
2011 | bool operator()(mem_ref_loc_p loc); | |
2012 | tree flag; | |
2013 | }; | |
2014 | ||
2015 | bool | |
2016 | sm_set_flag_if_changed::operator()(mem_ref_loc_p loc) | |
2017 | { | |
2018 | /* Only set the flag for writes. */ | |
2019 | if (is_gimple_assign (loc->stmt) | |
2020 | && gimple_assign_lhs_ptr (loc->stmt) == loc->ref) | |
2021 | { | |
2022 | gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt); | |
2023 | gimple stmt = gimple_build_assign (flag, boolean_true_node); | |
2024 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2025 | } | |
2026 | return false; | |
2027 | } | |
2028 | ||
039496da AH |
2029 | /* Helper function for execute_sm. On every location where REF is |
2030 | set, set an appropriate flag indicating the store. */ | |
2031 | ||
2032 | static tree | |
2033 | execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref) | |
2034 | { | |
039496da | 2035 | tree flag; |
bdb01696 | 2036 | char *str = get_lsm_tmp_name (ref->mem.ref, ~0); |
039496da | 2037 | lsm_tmp_name_add ("_flag"); |
7cc434a3 | 2038 | flag = create_tmp_reg (boolean_type_node, str); |
15d19bf8 | 2039 | for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag)); |
039496da AH |
2040 | return flag; |
2041 | } | |
2042 | ||
72425608 | 2043 | /* Executes store motion of memory reference REF from LOOP. |
ca83d385 ZD |
2044 | Exits from the LOOP are stored in EXITS. The initialization of the |
2045 | temporary variable is put in the preheader of the loop, and assignments |
2046 | to the reference from the temporary variable are emitted on the exits. */ |
a7e5372d ZD |
2047 | |
2048 | static void | |
9771b263 | 2049 | execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref) |
a7e5372d | 2050 | { |
039496da | 2051 | tree tmp_var, store_flag; |
a7e5372d | 2052 | unsigned i; |
039496da | 2053 | gimple load; |
b4042a03 | 2054 | struct fmt_data fmt_data; |
9fd9ccf7 | 2055 | edge ex; |
726a989a | 2056 | struct lim_aux_data *lim_data; |
039496da | 2057 | bool multi_threaded_model_p = false; |
9fd9ccf7 | 2058 | gimple_stmt_iterator gsi; |
a7e5372d | 2059 | |
a3631d97 ZD |
2060 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2061 | { | |
2062 | fprintf (dump_file, "Executing store motion of "); | |
bdb01696 | 2063 | print_generic_expr (dump_file, ref->mem.ref, 0); |
a3631d97 ZD |
2064 | fprintf (dump_file, " from loop %d\n", loop->num); |
2065 | } | |
2066 | ||
bdb01696 RB |
2067 | tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref), |
2068 | get_lsm_tmp_name (ref->mem.ref, ~0)); | |
a7e5372d | 2069 | |
b4042a03 ZD |
2070 | fmt_data.loop = loop; |
2071 | fmt_data.orig_loop = loop; | |
bdb01696 | 2072 | for_each_index (&ref->mem.ref, force_move_till, &fmt_data); |
a7e5372d | 2073 | |
874a3589 | 2074 | if (block_in_transaction (loop_preheader_edge (loop)->src) |
039496da AH |
2075 | || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES)) |
2076 | multi_threaded_model_p = true; | |
2077 | ||
2078 | if (multi_threaded_model_p) | |
2079 | store_flag = execute_sm_if_changed_flag_set (loop, ref); | |
2080 | ||
72425608 | 2081 | rewrite_mem_refs (loop, ref, tmp_var); |
a7e5372d | 2082 | |
9fd9ccf7 RB |
2083 | /* Emit the load code before the first location of REF in the loop, so |
2084 | that we are sure it will be processed by move_computations after |
2085 | all its dependencies and hoisted out of the loop. */ |
2086 | gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt); | |
039496da AH |
2087 | |
2088 | /* FIXME/TODO: For the multi-threaded variant, we could avoid this | |
2089 | load altogether, since the store is predicated by a flag. We | |
2090 | could do the load only if it was originally in the loop. */ |
bdb01696 | 2091 | load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref)); |
726a989a RB |
2092 | lim_data = init_lim_data (load); |
2093 | lim_data->max_loop = loop; | |
2094 | lim_data->tgt_loop = loop; | |
9fd9ccf7 | 2095 | gsi_insert_before (&gsi, load, GSI_SAME_STMT); |
a7e5372d | 2096 | |
039496da | 2097 | if (multi_threaded_model_p) |
a7e5372d | 2098 | { |
039496da AH |
2099 | load = gimple_build_assign (store_flag, boolean_false_node); |
2100 | lim_data = init_lim_data (load); | |
2101 | lim_data->max_loop = loop; | |
2102 | lim_data->tgt_loop = loop; | |
9fd9ccf7 | 2103 | gsi_insert_before (&gsi, load, GSI_SAME_STMT); |
a7e5372d | 2104 | } |
039496da AH |
2105 | |
2106 | /* Sink the store to every exit from the loop. */ | |
9771b263 | 2107 | FOR_EACH_VEC_ELT (exits, i, ex) |
039496da AH |
2108 | if (!multi_threaded_model_p) |
2109 | { | |
2110 | gimple store; | |
bdb01696 | 2111 | store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var); |
039496da AH |
2112 | gsi_insert_on_edge (ex, store); |
2113 | } | |
2114 | else | |
bdb01696 | 2115 | execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag); |
a7e5372d ZD |
2116 | } |
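/* A sketch of the transformation performed above in the single-threaded
   model (illustrative only):

     for (...)
       MEM = MEM + x;

   becomes

     lsm = MEM;
     for (...)
       lsm = lsm + x;
     MEM = lsm;      <-- emitted on every exit edge

   In the multi-threaded model the exit stores are instead guarded by the
   flag set up by execute_sm_if_changed_flag_set and emitted through
   execute_sm_if_changed.  */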
2117 | ||
72425608 ZD |
2118 | /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit |
2119 | edges of the LOOP. */ | |
a7e5372d ZD |
2120 | |
2121 | static void | |
72425608 | 2122 | hoist_memory_references (struct loop *loop, bitmap mem_refs, |
9771b263 | 2123 | vec<edge> exits) |
a7e5372d | 2124 | { |
72425608 ZD |
2125 | mem_ref_p ref; |
2126 | unsigned i; | |
2127 | bitmap_iterator bi; | |
a3631d97 | 2128 | |
72425608 | 2129 | EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi) |
a7e5372d | 2130 | { |
9771b263 | 2131 | ref = memory_accesses.refs_list[i]; |
72425608 | 2132 | execute_sm (loop, exits, ref); |
a7e5372d | 2133 | } |
01fd257a ZD |
2134 | } |
2135 | ||
15d19bf8 RB |
2136 | struct ref_always_accessed |
2137 | { | |
2138 | ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_) | |
2139 | : loop (loop_), base (base_), stored_p (stored_p_) {} | |
2140 | bool operator()(mem_ref_loc_p loc); | |
2141 | struct loop *loop; | |
2142 | tree base; | |
2143 | bool stored_p; | |
2144 | }; | |
a7e5372d | 2145 | |
15d19bf8 RB |
2146 | bool |
2147 | ref_always_accessed::operator()(mem_ref_loc_p loc) | |
a7e5372d | 2148 | { |
72425608 | 2149 | struct loop *must_exec; |
58adb739 | 2150 | |
15d19bf8 RB |
2151 | if (!get_lim_data (loc->stmt)) |
2152 | return false; | |
a7e5372d | 2153 | |
15d19bf8 RB |
2154 | /* If we require an always executed store, make sure the statement |
2155 | stores to the reference. */ | |
2156 | if (stored_p) | |
72425608 | 2157 | { |
15d19bf8 RB |
2158 | tree lhs; |
2159 | if (!gimple_get_lhs (loc->stmt)) | |
2160 | return false; | |
2161 | lhs = get_base_address (gimple_get_lhs (loc->stmt)); | |
2162 | if (!lhs) | |
2163 | return false; | |
2164 | if (INDIRECT_REF_P (lhs) | |
2165 | || TREE_CODE (lhs) == MEM_REF) | |
2166 | lhs = TREE_OPERAND (lhs, 0); | |
2167 | if (lhs != base) | |
2168 | return false; | |
2169 | } | |
a7e5372d | 2170 | |
15d19bf8 RB |
2171 | must_exec = get_lim_data (loc->stmt)->always_executed_in; |
2172 | if (!must_exec) | |
2173 | return false; | |
58adb739 | 2174 | |
15d19bf8 RB |
2175 | if (must_exec == loop |
2176 | || flow_loop_nested_p (must_exec, loop)) | |
2177 | return true; | |
a7e5372d | 2178 | |
15d19bf8 RB |
2179 | return false; |
2180 | } | |
01fd257a | 2181 | |
15d19bf8 RB |
2182 | /* Returns true if REF is always accessed in LOOP. If STORED_P is true |
2183 | make sure REF is always stored to in LOOP. */ | |
2184 | ||
2185 | static bool | |
2186 | ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p) | |
2187 | { | |
2188 | tree base = ao_ref_base (&ref->mem); | |
2189 | if (TREE_CODE (base) == MEM_REF) | |
2190 | base = TREE_OPERAND (base, 0); | |
2191 | ||
2192 | return for_all_locs_in_loop (loop, ref, | |
2193 | ref_always_accessed (loop, base, stored_p)); | |
01fd257a ZD |
2194 | } |
2195 | ||
72425608 | 2196 | /* Returns true if REF1 and REF2 are independent. */ |
01fd257a | 2197 | |
72425608 ZD |
2198 | static bool |
2199 | refs_independent_p (mem_ref_p ref1, mem_ref_p ref2) | |
01fd257a | 2200 | { |
bdb01696 | 2201 | if (ref1 == ref2) |
72425608 | 2202 | return true; |
bdb01696 | 2203 | |
72425608 ZD |
2204 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2205 | fprintf (dump_file, "Querying dependency of refs %u and %u: ", | |
2206 | ref1->id, ref2->id); | |
2207 | ||
bdb01696 | 2208 | if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache)) |
72425608 | 2209 | { |
72425608 ZD |
2210 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2211 | fprintf (dump_file, "dependent.\n"); | |
2212 | return false; | |
2213 | } | |
2214 | else | |
2215 | { | |
72425608 ZD |
2216 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2217 | fprintf (dump_file, "independent.\n"); | |
2218 | return true; | |
2219 | } | |
01fd257a ZD |
2220 | } |
2221 | ||
c00217fc RB |
2222 | /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP |
2223 | and its super-loops. */ | |
01fd257a ZD |
2224 | |
2225 | static void | |
c00217fc | 2226 | record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p) |
01fd257a | 2227 | { |
c00217fc RB |
2228 | /* We can propagate dependent-in-loop bits up the loop |
2229 | hierarchy to all outer loops. */ | |
2230 | while (loop != current_loops->tree_root | |
374001cb | 2231 | && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p))) |
c00217fc | 2232 | loop = loop_outer (loop); |
72425608 | 2233 | } |
01fd257a | 2234 | |
72425608 ZD |
2235 | /* Returns true if REF is independent of all other memory references in |
2236 | LOOP. */ | |
01fd257a | 2237 | |
72425608 | 2238 | static bool |
c00217fc | 2239 | ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p) |
72425608 | 2240 | { |
546d314c | 2241 | bitmap refs_to_check; |
72425608 ZD |
2242 | unsigned i; |
2243 | bitmap_iterator bi; | |
72425608 ZD |
2244 | mem_ref_p aref; |
2245 | ||
c00217fc | 2246 | if (stored_p) |
374001cb | 2247 | refs_to_check = &memory_accesses.refs_in_loop[loop->num]; |
546d314c | 2248 | else |
374001cb | 2249 | refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num]; |
01fd257a | 2250 | |
e6647190 RB |
2251 | if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID)) |
2252 | return false; | |
2253 | ||
72425608 | 2254 | EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi) |
01fd257a | 2255 | { |
9771b263 | 2256 | aref = memory_accesses.refs_list[i]; |
e6647190 | 2257 | if (!refs_independent_p (ref, aref)) |
c00217fc | 2258 | return false; |
01fd257a | 2259 | } |
01fd257a | 2260 | |
c00217fc | 2261 | return true; |
01fd257a ZD |
2262 | } |
2263 | ||
72425608 ZD |
2264 | /* Returns true if REF is independent of all other memory references in |
2265 | LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */ | |
01fd257a | 2266 | |
72425608 | 2267 | static bool |
c00217fc | 2268 | ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p) |
01fd257a | 2269 | { |
374001cb | 2270 | stored_p |= bitmap_bit_p (&ref->stored, loop->num); |
e6647190 | 2271 | |
374001cb | 2272 | if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))) |
72425608 | 2273 | return true; |
374001cb | 2274 | if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p))) |
72425608 | 2275 | return false; |
01fd257a | 2276 | |
c00217fc RB |
2277 | struct loop *inner = loop->inner; |
2278 | while (inner) | |
2279 | { | |
2280 | if (!ref_indep_loop_p_2 (inner, ref, stored_p)) | |
2281 | return false; | |
2282 | inner = inner->next; | |
2283 | } | |
2284 | ||
2285 | bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p); | |
ed9c043b | 2286 | |
72425608 ZD |
2287 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2288 | fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n", | |
c00217fc | 2289 | ref->id, loop->num, indep_p ? "independent" : "dependent"); |
72425608 | 2290 | |
c00217fc RB |
2291 | /* Record the computed result in the cache. */ |
2292 | if (indep_p) | |
2293 | { | |
374001cb | 2294 | if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)) |
c00217fc RB |
2295 | && stored_p) |
2296 | { | |
2297 | /* If it's independent of all refs then it's independent | |
2298 | of stores, too. */ | |
374001cb | 2299 | bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false)); |
c00217fc RB |
2300 | } |
2301 | } | |
2302 | else | |
2303 | { | |
2304 | record_dep_loop (loop, ref, stored_p); | |
2305 | if (!stored_p) | |
2306 | { | |
2307 | /* If it's dependent on stores it's dependent on | |
2308 | all refs, too. */ | |
2309 | record_dep_loop (loop, ref, true); | |
2310 | } | |
2311 | } | |
72425608 | 2312 | |
c00217fc RB |
2313 | return indep_p; |
2314 | } | |
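/* Note summarizing the caching scheme above (an editorial summary, not from
   the original sources): results are keyed on the pair (LOOP, STORED_P), so
   independence of all references and independence of stores only are cached
   separately; a positive answer for all references also records the weaker
   stores-only answer, and dependence on the stores alone also records
   dependence on all references.  */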
2315 | ||
2316 | /* Returns true if REF is independent of all other memory references in |
2317 | LOOP. */ | |
2318 | ||
2319 | static bool | |
2320 | ref_indep_loop_p (struct loop *loop, mem_ref_p ref) | |
2321 | { | |
2322 | gcc_checking_assert (MEM_ANALYZABLE (ref)); | |
2323 | ||
2324 | return ref_indep_loop_p_2 (loop, ref, false); | |
01fd257a ZD |
2325 | } |
2326 | ||
72425608 | 2327 | /* Returns true if we can perform store motion of REF from LOOP. */ |
01fd257a | 2328 | |
72425608 ZD |
2329 | static bool |
2330 | can_sm_ref_p (struct loop *loop, mem_ref_p ref) | |
01fd257a | 2331 | { |
58adb739 RG |
2332 | tree base; |
2333 | ||
546d314c RG |
2334 | /* Can't hoist unanalyzable refs. */ |
2335 | if (!MEM_ANALYZABLE (ref)) | |
2336 | return false; | |
2337 | ||
72425608 | 2338 | /* It should be movable. */ |
bdb01696 RB |
2339 | if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref)) |
2340 | || TREE_THIS_VOLATILE (ref->mem.ref) | |
2341 | || !for_each_index (&ref->mem.ref, may_move_till, loop)) | |
72425608 | 2342 | return false; |
ed9c043b | 2343 | |
9939e416 | 2344 | /* If it can throw, fail; we do not properly update EH info. */ |
bdb01696 | 2345 | if (tree_could_throw_p (ref->mem.ref)) |
9939e416 RG |
2346 | return false; |
2347 | ||
58adb739 RG |
2348 | /* If it can trap, it must be always executed in LOOP. |
2349 | Readonly memory locations may trap when storing to them, but | |
2350 | tree_could_trap_p is a predicate for rvalues, so check that | |
2351 | explicitly. */ | |
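/* (Illustrative example: a reference that is only stored conditionally,
   such as "if (p) *p = ...;", must not be promoted here, because the
   hoisted load and the unconditional stores on the exits could trap even
   though the original program never dereferences a null P.)  */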
bdb01696 RB |
2352 | base = get_base_address (ref->mem.ref); |
2353 | if ((tree_could_trap_p (ref->mem.ref) | |
58adb739 RG |
2354 | || (DECL_P (base) && TREE_READONLY (base))) |
2355 | && !ref_always_accessed_p (loop, ref, true)) | |
72425608 | 2356 | return false; |
ed9c043b | 2357 | |
72425608 ZD |
2358 | /* And it must be independent of all other memory references |
2359 | in LOOP. */ | |
2360 | if (!ref_indep_loop_p (loop, ref)) | |
2361 | return false; | |
ed9c043b | 2362 | |
72425608 | 2363 | return true; |
ed9c043b ZD |
2364 | } |
2365 | ||
72425608 ZD |
2366 | /* Marks the references in LOOP for which store motion should be performed |
2367 | in REFS_TO_SM. SM_EXECUTED is the set of references for which store |
2368 | motion was performed in one of the outer loops. */ | |
ed9c043b ZD |
2369 | |
2370 | static void | |
72425608 | 2371 | find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm) |
01fd257a | 2372 | { |
374001cb | 2373 | bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num]; |
72425608 ZD |
2374 | unsigned i; |
2375 | bitmap_iterator bi; | |
2376 | mem_ref_p ref; | |
2377 | ||
2378 | EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi) | |
2379 | { | |
9771b263 | 2380 | ref = memory_accesses.refs_list[i]; |
72425608 ZD |
2381 | if (can_sm_ref_p (loop, ref)) |
2382 | bitmap_set_bit (refs_to_sm, i); | |
2383 | } | |
ed9c043b | 2384 | } |
01fd257a | 2385 | |
72425608 ZD |
2386 | /* Checks whether LOOP (with exits stored in EXITS array) is suitable |
2387 | for a store motion optimization (i.e. whether we can insert statements |
2388 | on its exits). */ | |
ed9c043b | 2389 | |
72425608 ZD |
2390 | static bool |
2391 | loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED, | |
9771b263 | 2392 | vec<edge> exits) |
ed9c043b | 2393 | { |
72425608 ZD |
2394 | unsigned i; |
2395 | edge ex; | |
01fd257a | 2396 | |
9771b263 | 2397 | FOR_EACH_VEC_ELT (exits, i, ex) |
6391db68 | 2398 | if (ex->flags & (EDGE_ABNORMAL | EDGE_EH)) |
72425608 ZD |
2399 | return false; |
2400 | ||
2401 | return true; | |
01fd257a ZD |
2402 | } |
2403 | ||
a7e5372d | 2404 | /* Try to perform store motion for all memory references modified inside |
72425608 ZD |
2405 | LOOP. SM_EXECUTED is the bitmap of the memory references for which |
2406 | store motion was executed in one of the outer loops. */ | |
a7e5372d ZD |
2407 | |
2408 | static void | |
72425608 | 2409 | store_motion_loop (struct loop *loop, bitmap sm_executed) |
a7e5372d | 2410 | { |
9771b263 | 2411 | vec<edge> exits = get_loop_exit_edges (loop); |
72425608 | 2412 | struct loop *subloop; |
cad1735b | 2413 | bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack); |
a7e5372d | 2414 | |
72425608 | 2415 | if (loop_suitable_for_sm (loop, exits)) |
a7e5372d | 2416 | { |
72425608 ZD |
2417 | find_refs_for_sm (loop, sm_executed, sm_in_loop); |
2418 | hoist_memory_references (loop, sm_in_loop, exits); | |
a7e5372d | 2419 | } |
9771b263 | 2420 | exits.release (); |
72425608 ZD |
2421 | |
2422 | bitmap_ior_into (sm_executed, sm_in_loop); | |
2423 | for (subloop = loop->inner; subloop != NULL; subloop = subloop->next) | |
2424 | store_motion_loop (subloop, sm_executed); | |
2425 | bitmap_and_compl_into (sm_executed, sm_in_loop); | |
2426 | BITMAP_FREE (sm_in_loop); | |
a7e5372d ZD |
2427 | } |
2428 | ||
2429 | /* Try to perform store motion for all memory references modified inside | |
d73be268 | 2430 | loops. */ |
a7e5372d ZD |
2431 | |
2432 | static void | |
72425608 | 2433 | store_motion (void) |
a7e5372d ZD |
2434 | { |
2435 | struct loop *loop; | |
cad1735b | 2436 | bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack); |
d16464bb | 2437 | |
72425608 ZD |
2438 | for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next) |
2439 | store_motion_loop (loop, sm_executed); | |
42fd6772 | 2440 | |
72425608 | 2441 | BITMAP_FREE (sm_executed); |
726a989a | 2442 | gsi_commit_edge_inserts (); |
a7e5372d ZD |
2443 | } |
2444 | ||
2445 | /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e. | |
2446 | for each such basic block bb records the outermost loop for which execution |
2447 | of its header implies execution of bb. CONTAINS_CALL is the bitmap of | |
2448 | blocks that contain a nonpure call. */ | |
2449 | ||
2450 | static void | |
374001cb | 2451 | fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call) |
a7e5372d ZD |
2452 | { |
2453 | basic_block bb = NULL, *bbs, last = NULL; | |
2454 | unsigned i; | |
2455 | edge e; | |
2456 | struct loop *inn_loop = loop; | |
2457 | ||
8a519095 | 2458 | if (ALWAYS_EXECUTED_IN (loop->header) == NULL) |
a7e5372d ZD |
2459 | { |
2460 | bbs = get_loop_body_in_dom_order (loop); | |
2461 | ||
2462 | for (i = 0; i < loop->num_nodes; i++) | |
2463 | { | |
628f6a4e | 2464 | edge_iterator ei; |
a7e5372d ZD |
2465 | bb = bbs[i]; |
2466 | ||
2467 | if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) | |
2468 | last = bb; | |
2469 | ||
d7c028c0 | 2470 | if (bitmap_bit_p (contains_call, bb->index)) |
a7e5372d ZD |
2471 | break; |
2472 | ||
628f6a4e | 2473 | FOR_EACH_EDGE (e, ei, bb->succs) |
a7e5372d ZD |
2474 | if (!flow_bb_inside_loop_p (loop, e->dest)) |
2475 | break; | |
2476 | if (e) | |
2477 | break; | |
2478 | ||
2479 | /* A loop might be infinite (TODO use simple loop analysis | |
2480 | to disprove this if possible). */ | |
2481 | if (bb->flags & BB_IRREDUCIBLE_LOOP) | |
2482 | break; | |
2483 | ||
2484 | if (!flow_bb_inside_loop_p (inn_loop, bb)) | |
2485 | break; | |
2486 | ||
2487 | if (bb->loop_father->header == bb) | |
2488 | { | |
2489 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) | |
2490 | break; | |
2491 | ||
2492 | /* In a loop that is always entered we may proceed anyway. | |
2493 | But record that we entered it and stop once we leave it. */ | |
2494 | inn_loop = bb->loop_father; | |
2495 | } | |
2496 | } | |
2497 | ||
2498 | while (1) | |
2499 | { | |
8a519095 | 2500 | SET_ALWAYS_EXECUTED_IN (last, loop); |
a7e5372d ZD |
2501 | if (last == loop->header) |
2502 | break; | |
2503 | last = get_immediate_dominator (CDI_DOMINATORS, last); | |
2504 | } | |
2505 | ||
2506 | free (bbs); | |
2507 | } | |
2508 | ||
2509 | for (loop = loop->inner; loop; loop = loop->next) | |
374001cb | 2510 | fill_always_executed_in_1 (loop, contains_call); |
a7e5372d ZD |
2511 | } |
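/* Illustrative example (editorial, not from the original sources): for

     while (1) { a; if (cond) break; b; }

   only the header block containing A and the test of COND is marked as
   always executed in the loop; the block containing B is not, because the
   walk above stops at the block with the conditional exit edge.  */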
2512 | ||
374001cb RB |
2513 | /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e. |
2514 | for each such basic block bb records the outermost loop for which execution |
2515 | of its header implies execution of bb. */ | |
a7e5372d ZD |
2516 | |
2517 | static void | |
374001cb | 2518 | fill_always_executed_in (void) |
a7e5372d ZD |
2519 | { |
2520 | sbitmap contains_call = sbitmap_alloc (last_basic_block); | |
a7e5372d | 2521 | basic_block bb; |
374001cb | 2522 | struct loop *loop; |
3f9b14ff | 2523 | |
f61e445a | 2524 | bitmap_clear (contains_call); |
a7e5372d ZD |
2525 | FOR_EACH_BB (bb) |
2526 | { | |
374001cb RB |
2527 | gimple_stmt_iterator gsi; |
2528 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
a7e5372d | 2529 | { |
374001cb | 2530 | if (nonpure_call_p (gsi_stmt (gsi))) |
a7e5372d ZD |
2531 | break; |
2532 | } | |
2533 | ||
374001cb | 2534 | if (!gsi_end_p (gsi)) |
d7c028c0 | 2535 | bitmap_set_bit (contains_call, bb->index); |
a7e5372d ZD |
2536 | } |
2537 | ||
d73be268 | 2538 | for (loop = current_loops->tree_root->inner; loop; loop = loop->next) |
374001cb | 2539 | fill_always_executed_in_1 (loop, contains_call); |
a7e5372d ZD |
2540 | |
2541 | sbitmap_free (contains_call); | |
374001cb RB |
2542 | } |
2543 | ||
2544 | ||
2545 | /* Compute the global information needed by the loop invariant motion pass. */ | |
726a989a | 2546 | |
374001cb RB |
2547 | static void |
2548 | tree_ssa_lim_initialize (void) | |
2549 | { | |
2550 | unsigned i; | |
2551 | ||
2552 | bitmap_obstack_initialize (&lim_bitmap_obstack); | |
726a989a | 2553 | lim_aux_data_map = pointer_map_create (); |
19c0d7df AH |
2554 | |
2555 | if (flag_tm) | |
2556 | compute_transaction_bits (); | |
039496da AH |
2557 | |
2558 | alloc_aux_for_edges (0); | |
374001cb | 2559 | |
bf190e8d | 2560 | memory_accesses.refs.create (100); |
374001cb RB |
2561 | memory_accesses.refs_list.create (100); |
2562 | /* Allocate a special, unanalyzable mem-ref with ID zero. */ | |
2563 | memory_accesses.refs_list.quick_push | |
2564 | (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID)); | |
2565 | ||
0fc822d0 RB |
2566 | memory_accesses.refs_in_loop.create (number_of_loops (cfun)); |
2567 | memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun)); | |
2568 | memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun)); | |
2569 | memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun)); | |
2570 | memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun)); | |
2571 | memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun)); | |
374001cb | 2572 | |
0fc822d0 | 2573 | for (i = 0; i < number_of_loops (cfun); i++) |
374001cb RB |
2574 | { |
2575 | bitmap_initialize (&memory_accesses.refs_in_loop[i], | |
2576 | &lim_bitmap_obstack); | |
2577 | bitmap_initialize (&memory_accesses.refs_stored_in_loop[i], | |
2578 | &lim_bitmap_obstack); | |
2579 | bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i], | |
2580 | &lim_bitmap_obstack); | |
2581 | } | |
2582 | ||
2583 | memory_accesses.ttae_cache = NULL; | |
a7e5372d ZD |
2584 | } |
2585 | ||
2586 | /* Cleans up after the invariant motion pass. */ | |
2587 | ||
2588 | static void | |
2589 | tree_ssa_lim_finalize (void) | |
2590 | { | |
2591 | basic_block bb; | |
72425608 | 2592 | unsigned i; |
f5843d08 | 2593 | mem_ref_p ref; |
a7e5372d | 2594 | |
039496da AH |
2595 | free_aux_for_edges (); |
2596 | ||
a7e5372d | 2597 | FOR_EACH_BB (bb) |
8a519095 | 2598 | SET_ALWAYS_EXECUTED_IN (bb, NULL); |
72425608 | 2599 | |
3f9b14ff | 2600 | bitmap_obstack_release (&lim_bitmap_obstack); |
726a989a RB |
2601 | pointer_map_destroy (lim_aux_data_map); |
2602 | ||
bf190e8d | 2603 | memory_accesses.refs.dispose (); |
72425608 | 2604 | |
9771b263 | 2605 | FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref) |
f5843d08 | 2606 | memref_free (ref); |
9771b263 | 2607 | memory_accesses.refs_list.release (); |
f5843d08 | 2608 | |
9771b263 | 2609 | memory_accesses.refs_in_loop.release (); |
c00217fc | 2610 | memory_accesses.refs_stored_in_loop.release (); |
9771b263 | 2611 | memory_accesses.all_refs_stored_in_loop.release (); |
72425608 ZD |
2612 | |
2613 | if (memory_accesses.ttae_cache) | |
b6db991c | 2614 | free_affine_expand_cache (&memory_accesses.ttae_cache); |
a7e5372d ZD |
2615 | } |
2616 | ||
d73be268 | 2617 | /* Moves invariants from loops. Only "expensive" invariants are moved out -- |
a7e5372d ZD |
2618 | i.e. those that are likely to be a win regardless of the register pressure. */ |
2619 | ||
e3bdfed6 | 2620 | unsigned int |
d73be268 | 2621 | tree_ssa_lim (void) |
a7e5372d | 2622 | { |
e3bdfed6 RG |
2623 | unsigned int todo; |
2624 | ||
d73be268 | 2625 | tree_ssa_lim_initialize (); |
a7e5372d | 2626 | |
72425608 ZD |
2627 | /* Gathers information about memory accesses in the loops. */ |
2628 | analyze_memory_references (); | |
2629 | ||
374001cb RB |
2630 | /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */ |
2631 | fill_always_executed_in (); | |
2632 | ||
a7e5372d ZD |
2633 | /* For each statement determine the outermost loop in which it is |
2634 | invariant and the cost of computing the invariant. */ |
2635 | determine_invariantness (); | |
2636 | ||
72425608 ZD |
2637 | /* Execute store motion. Force the necessary invariants to be moved |
2638 | out of the loops as well. */ | |
2639 | store_motion (); | |
a7e5372d ZD |
2640 | |
2641 | /* Move the expressions that are expensive enough. */ | |
e3bdfed6 | 2642 | todo = move_computations (); |
a7e5372d ZD |
2643 | |
2644 | tree_ssa_lim_finalize (); | |
e3bdfed6 RG |
2645 | |
2646 | return todo; | |
a7e5372d | 2647 | } |