1 /* Loop invariant motion.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "gimple.h"
29 #include "gimple-ssa.h"
30 #include "tree-cfg.h"
31 #include "tree-phinodes.h"
32 #include "ssa-iterators.h"
33 #include "tree-ssanames.h"
34 #include "tree-ssa-loop.h"
35 #include "tree-into-ssa.h"
36 #include "cfgloop.h"
37 #include "domwalk.h"
38 #include "params.h"
39 #include "tree-pass.h"
40 #include "flags.h"
41 #include "hash-table.h"
42 #include "tree-affine.h"
43 #include "pointer-set.h"
44 #include "tree-ssa-propagate.h"
45
46 /* TODO: Support for predicated code motion. I.e.
47
48 while (1)
49 {
50 if (cond)
51 {
52 a = inv;
53 something;
54 }
55 }
56
57 Where COND and INV are invariants, but evaluating INV may trap or be
58 invalid for some other reason if !COND. This may be transformed to
59
60 if (cond)
61 a = inv;
62 while (1)
63 {
64 if (cond)
65 something;
66 } */
67
68 /* The auxiliary data kept for each statement. */
69
70 struct lim_aux_data
71 {
72 struct loop *max_loop; /* The outermost loop in which the statement
73 is invariant. */
74
75 struct loop *tgt_loop; /* The loop out of which we want to move the
76 invariant. */
77
78 struct loop *always_executed_in;
79 /* The outermost loop for which we are sure
80 the statement is executed if the loop
81 is entered. */
82
83 unsigned cost; /* Cost of the computation performed by the
84 statement. */
85
86 vec<gimple> depends; /* Vector of statements that must be also
87 hoisted out of the loop when this statement
88 is hoisted; i.e. those that define the
89 operands of the statement and are inside of
90 the MAX_LOOP loop. */
91 };
92
93 /* Maps statements to their lim_aux_data. */
94
95 static struct pointer_map_t *lim_aux_data_map;
96
97 /* Description of a memory reference location. */
98
99 typedef struct mem_ref_loc
100 {
101 tree *ref; /* The reference itself. */
102 gimple stmt; /* The statement in which it occurs. */
103 } *mem_ref_loc_p;
104
105
106 /* Description of a memory reference. */
107
108 typedef struct mem_ref
109 {
110 unsigned id; /* ID assigned to the memory reference
111 (its index in memory_accesses.refs_list) */
112 hashval_t hash; /* Its hash value. */
113
114 /* The memory access itself and associated caching of alias-oracle
115 query meta-data. */
116 ao_ref mem;
117
118 bitmap_head stored; /* The set of loops in which this memory location
119 is stored to. */
120 vec<vec<mem_ref_loc> > accesses_in_loop;
121 /* The locations of the accesses. Vector
122 indexed by the loop number. */
123
124 /* The following sets are computed on demand. We keep both set and
125 its complement, so that we know whether the information was
126 already computed or not. */
127 bitmap_head indep_loop; /* The set of loops in which the memory
128 reference is independent, meaning:
129 If it is stored in the loop, this store
130 is independent of all other loads and
131 stores.
132 If it is only loaded, then it is independent
133 of all stores in the loop. */
134 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
135 } *mem_ref_p;
136
137 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
138 to record (in)dependence against stores in the loop and its subloops, the
139 second to record (in)dependence against all references in the loop
140 and its subloops. */
141 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
142
143 /* Mem_ref hashtable helpers. */
144
145 struct mem_ref_hasher : typed_noop_remove <mem_ref>
146 {
147 typedef mem_ref value_type;
148 typedef tree_node compare_type;
149 static inline hashval_t hash (const value_type *);
150 static inline bool equal (const value_type *, const compare_type *);
151 };
152
153 /* A hash function for struct mem_ref object OBJ. */
154
155 inline hashval_t
156 mem_ref_hasher::hash (const value_type *mem)
157 {
158 return mem->hash;
159 }
160
161 /* An equality function for struct mem_ref object MEM1 with
162 memory reference OBJ2. */
163
164 inline bool
165 mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
166 {
167 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
168 }
169
170
171 /* Description of memory accesses in loops. */
172
173 static struct
174 {
175 /* The hash table of memory references accessed in loops. */
176 hash_table <mem_ref_hasher> refs;
177
178 /* The list of memory references. */
179 vec<mem_ref_p> refs_list;
180
181 /* The set of memory references accessed in each loop. */
182 vec<bitmap_head> refs_in_loop;
183
184 /* The set of memory references stored in each loop. */
185 vec<bitmap_head> refs_stored_in_loop;
186
187 /* The set of memory references stored in each loop, including subloops. */
188 vec<bitmap_head> all_refs_stored_in_loop;
189
190 /* Cache for expanding memory addresses. */
191 struct pointer_map_t *ttae_cache;
192 } memory_accesses;
193
194 /* Obstack for the bitmaps in the above data structures. */
195 static bitmap_obstack lim_bitmap_obstack;
196
197 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
198
199 /* Minimum cost of an expensive expression. */
200 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
201
202 /* The outermost loop for which execution of the header guarantees that the
203 block will be executed. */
204 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
205 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
206
207 /* ID of the shared unanalyzable mem. */
208 #define UNANALYZABLE_MEM_ID 0
209
210 /* Whether the reference was analyzable. */
211 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
212
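/* Allocates and returns a zero-initialized lim_aux_data for statement STMT,
   recording it in LIM_AUX_DATA_MAP. */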
213 static struct lim_aux_data *
214 init_lim_data (gimple stmt)
215 {
216 void **p = pointer_map_insert (lim_aux_data_map, stmt);
217
218 *p = XCNEW (struct lim_aux_data);
219 return (struct lim_aux_data *) *p;
220 }
221
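/* Returns the lim_aux_data recorded for statement STMT, or NULL if none
   has been recorded. */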
222 static struct lim_aux_data *
223 get_lim_data (gimple stmt)
224 {
225 void **p = pointer_map_contains (lim_aux_data_map, stmt);
226 if (!p)
227 return NULL;
228
229 return (struct lim_aux_data *) *p;
230 }
231
232 /* Releases the memory occupied by DATA. */
233
234 static void
235 free_lim_aux_data (struct lim_aux_data *data)
236 {
237 data->depends.release ();
238 free (data);
239 }
240
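/* Releases and removes the lim_aux_data recorded for statement STMT, if any. */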
241 static void
242 clear_lim_data (gimple stmt)
243 {
244 void **p = pointer_map_contains (lim_aux_data_map, stmt);
245 if (!p)
246 return;
247
248 free_lim_aux_data ((struct lim_aux_data *) *p);
249 *p = NULL;
250 }
251
252
253 /* The possibilities of statement movement. */
254 enum move_pos
255 {
256 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
257 MOVE_PRESERVE_EXECUTION, /* Must not cause a non-executed statement to
258 become executed -- memory accesses, ... */
259 MOVE_POSSIBLE /* Unlimited movement. */
260 };
261
262
263 /* If it is possible to hoist the statement STMT unconditionally,
264 returns MOVE_POSSIBLE.
265 If it is possible to hoist the statement STMT, but we must avoid making
266 it executed if it would not be executed in the original program (e.g.
267 because it may trap), return MOVE_PRESERVE_EXECUTION.
268 Otherwise return MOVE_IMPOSSIBLE. */
269
270 enum move_pos
271 movement_possibility (gimple stmt)
272 {
273 tree lhs;
274 enum move_pos ret = MOVE_POSSIBLE;
275
276 if (flag_unswitch_loops
277 && gimple_code (stmt) == GIMPLE_COND)
278 {
279 /* If we perform unswitching, force the operands of the invariant
280 condition to be moved out of the loop. */
281 return MOVE_POSSIBLE;
282 }
283
284 if (gimple_code (stmt) == GIMPLE_PHI
285 && gimple_phi_num_args (stmt) <= 2
286 && !virtual_operand_p (gimple_phi_result (stmt))
287 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
288 return MOVE_POSSIBLE;
289
290 if (gimple_get_lhs (stmt) == NULL_TREE)
291 return MOVE_IMPOSSIBLE;
292
293 if (gimple_vdef (stmt))
294 return MOVE_IMPOSSIBLE;
295
296 if (stmt_ends_bb_p (stmt)
297 || gimple_has_volatile_ops (stmt)
298 || gimple_has_side_effects (stmt)
299 || stmt_could_throw_p (stmt))
300 return MOVE_IMPOSSIBLE;
301
302 if (is_gimple_call (stmt))
303 {
304 /* While a pure or const call is guaranteed to have no side effects, we
305 cannot move it arbitrarily. Consider code like
306
307 char *s = something ();
308
309 while (1)
310 {
311 if (s)
312 t = strlen (s);
313 else
314 t = 0;
315 }
316
317 Here the strlen call cannot be moved out of the loop, even though
318 s is invariant. In addition to possibly creating a call with
319 invalid arguments, moving out a function call that is not executed
320 may cause performance regressions in case the call is costly and
321 not executed at all. */
322 ret = MOVE_PRESERVE_EXECUTION;
323 lhs = gimple_call_lhs (stmt);
324 }
325 else if (is_gimple_assign (stmt))
326 lhs = gimple_assign_lhs (stmt);
327 else
328 return MOVE_IMPOSSIBLE;
329
330 if (TREE_CODE (lhs) == SSA_NAME
331 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
332 return MOVE_IMPOSSIBLE;
333
334 if (TREE_CODE (lhs) != SSA_NAME
335 || gimple_could_trap_p (stmt))
336 return MOVE_PRESERVE_EXECUTION;
337
338 /* Non-local loads in a transaction cannot be hoisted out. Well,
339 unless the load happens on every path out of the loop, but we
340 don't take this into account yet. */
341 if (flag_tm
342 && gimple_in_transaction (stmt)
343 && gimple_assign_single_p (stmt))
344 {
345 tree rhs = gimple_assign_rhs1 (stmt);
346 if (DECL_P (rhs) && is_global_var (rhs))
347 {
348 if (dump_file)
349 {
350 fprintf (dump_file, "Cannot hoist conditional load of ");
351 print_generic_expr (dump_file, rhs, TDF_SLIM);
352 fprintf (dump_file, " because it is in a transaction.\n");
353 }
354 return MOVE_IMPOSSIBLE;
355 }
356 }
357
358 return ret;
359 }
360
361 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
362 loop to which we could move the expression using DEF if it did not have
363 other operands, i.e. the outermost loop enclosing LOOP in which the value
364 of DEF is invariant. */
365
366 static struct loop *
367 outermost_invariant_loop (tree def, struct loop *loop)
368 {
369 gimple def_stmt;
370 basic_block def_bb;
371 struct loop *max_loop;
372 struct lim_aux_data *lim_data;
373
374 if (!def)
375 return superloop_at_depth (loop, 1);
376
377 if (TREE_CODE (def) != SSA_NAME)
378 {
379 gcc_assert (is_gimple_min_invariant (def));
380 return superloop_at_depth (loop, 1);
381 }
382
383 def_stmt = SSA_NAME_DEF_STMT (def);
384 def_bb = gimple_bb (def_stmt);
385 if (!def_bb)
386 return superloop_at_depth (loop, 1);
387
388 max_loop = find_common_loop (loop, def_bb->loop_father);
389
390 lim_data = get_lim_data (def_stmt);
391 if (lim_data != NULL && lim_data->max_loop != NULL)
392 max_loop = find_common_loop (max_loop,
393 loop_outer (lim_data->max_loop));
394 if (max_loop == loop)
395 return NULL;
396 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
397
398 return max_loop;
399 }
400
401 /* DATA is a structure containing information associated with a statement
402 inside LOOP. DEF is one of the operands of this statement.
403
404 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
405 and record this in the DATA->max_loop field. If DEF itself is defined inside
406 this loop as well (i.e. we need to hoist it out of the loop if we want
407 to hoist the statement represented by DATA), record the statement in which
408 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
409 add the cost of the computation of DEF to the DATA->cost.
410
411 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
412
413 static bool
414 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
415 bool add_cost)
416 {
417 gimple def_stmt = SSA_NAME_DEF_STMT (def);
418 basic_block def_bb = gimple_bb (def_stmt);
419 struct loop *max_loop;
420 struct lim_aux_data *def_data;
421
422 if (!def_bb)
423 return true;
424
425 max_loop = outermost_invariant_loop (def, loop);
426 if (!max_loop)
427 return false;
428
429 if (flow_loop_nested_p (data->max_loop, max_loop))
430 data->max_loop = max_loop;
431
432 def_data = get_lim_data (def_stmt);
433 if (!def_data)
434 return true;
435
436 if (add_cost
437 /* Only add the cost if the statement defining DEF is inside LOOP,
438 i.e. if it is likely that by moving the invariants dependent
439 on it, we will be able to avoid creating a new register for
440 it (since it will be only used in these dependent invariants). */
441 && def_bb->loop_father == loop)
442 data->cost += def_data->cost;
443
444 data->depends.safe_push (def_stmt);
445
446 return true;
447 }
448
449 /* Returns an estimate for a cost of statement STMT. The values here
450 are just ad-hoc constants, similar to costs for inlining. */
451
452 static unsigned
453 stmt_cost (gimple stmt)
454 {
455 /* Always try to create possibilities for unswitching. */
456 if (gimple_code (stmt) == GIMPLE_COND
457 || gimple_code (stmt) == GIMPLE_PHI)
458 return LIM_EXPENSIVE;
459
460 /* We should be hoisting calls if possible. */
461 if (is_gimple_call (stmt))
462 {
463 tree fndecl;
464
465 /* Unless the call is a builtin_constant_p; this always folds to a
466 constant, so moving it is useless. */
467 fndecl = gimple_call_fndecl (stmt);
468 if (fndecl
469 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
470 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
471 return 0;
472
473 return LIM_EXPENSIVE;
474 }
475
476 /* Hoisting memory references out should almost surely be a win. */
477 if (gimple_references_memory_p (stmt))
478 return LIM_EXPENSIVE;
479
480 if (gimple_code (stmt) != GIMPLE_ASSIGN)
481 return 1;
482
483 switch (gimple_assign_rhs_code (stmt))
484 {
485 case MULT_EXPR:
486 case WIDEN_MULT_EXPR:
487 case WIDEN_MULT_PLUS_EXPR:
488 case WIDEN_MULT_MINUS_EXPR:
489 case DOT_PROD_EXPR:
490 case FMA_EXPR:
491 case TRUNC_DIV_EXPR:
492 case CEIL_DIV_EXPR:
493 case FLOOR_DIV_EXPR:
494 case ROUND_DIV_EXPR:
495 case EXACT_DIV_EXPR:
496 case CEIL_MOD_EXPR:
497 case FLOOR_MOD_EXPR:
498 case ROUND_MOD_EXPR:
499 case TRUNC_MOD_EXPR:
500 case RDIV_EXPR:
501 /* Division and multiplication are usually expensive. */
502 return LIM_EXPENSIVE;
503
504 case LSHIFT_EXPR:
505 case RSHIFT_EXPR:
506 case WIDEN_LSHIFT_EXPR:
507 case LROTATE_EXPR:
508 case RROTATE_EXPR:
509 /* Shifts and rotates are usually expensive. */
510 return LIM_EXPENSIVE;
511
512 case CONSTRUCTOR:
513 /* Make vector construction cost proportional to the number
514 of elements. */
515 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
516
517 case SSA_NAME:
518 case PAREN_EXPR:
519 /* Whether or not something is wrapped inside a PAREN_EXPR
520 should not change move cost. Nor should an intermediate
521 unpropagated SSA name copy. */
522 return 0;
523
524 default:
525 return 1;
526 }
527 }
528
529 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
530 REF is independent. If REF is not independent in LOOP, NULL is returned
531 instead. */
532
533 static struct loop *
534 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
535 {
536 struct loop *aloop;
537
538 if (bitmap_bit_p (&ref->stored, loop->num))
539 return NULL;
540
541 for (aloop = outer;
542 aloop != loop;
543 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
544 if (!bitmap_bit_p (&ref->stored, aloop->num)
545 && ref_indep_loop_p (aloop, ref))
546 return aloop;
547
548 if (ref_indep_loop_p (loop, ref))
549 return loop;
550 else
551 return NULL;
552 }
553
554 /* If there is a simple load or store to a memory reference in STMT, returns
555 the location of the memory reference, and sets IS_STORE according to whether
556 it is a store or load. Otherwise, returns NULL. */
557
558 static tree *
559 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
560 {
561 tree *lhs, *rhs;
562
563 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
564 if (!gimple_assign_single_p (stmt))
565 return NULL;
566
567 lhs = gimple_assign_lhs_ptr (stmt);
568 rhs = gimple_assign_rhs1_ptr (stmt);
569
570 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
571 {
572 *is_store = false;
573 return rhs;
574 }
575 else if (gimple_vdef (stmt)
576 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
577 {
578 *is_store = true;
579 return lhs;
580 }
581 else
582 return NULL;
583 }
584
585 /* Returns the memory reference contained in STMT. */
586
587 static mem_ref_p
588 mem_ref_in_stmt (gimple stmt)
589 {
590 bool store;
591 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
592 hashval_t hash;
593 mem_ref_p ref;
594
595 if (!mem)
596 return NULL;
597 gcc_assert (!store);
598
599 hash = iterative_hash_expr (*mem, 0);
600 ref = memory_accesses.refs.find_with_hash (*mem, hash);
601
602 gcc_assert (ref != NULL);
603 return ref;
604 }
605
606 /* From a controlling predicate in DOM determine the arguments from
607 the PHI node PHI that are chosen if the predicate evaluates to
608 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
609 they are non-NULL. Returns true if the arguments can be determined,
610 else return false. */
611
612 static bool
613 extract_true_false_args_from_phi (basic_block dom, gimple phi,
614 tree *true_arg_p, tree *false_arg_p)
615 {
616 basic_block bb = gimple_bb (phi);
617 edge true_edge, false_edge, tem;
618 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
619
620 /* We have to verify that one edge into the PHI node is dominated
621 by the true edge of the predicate block and the other edge
622 dominated by the false edge. This ensures that the PHI argument
623 we are going to take is completely determined by the path we
624 take from the predicate block.
625 We can only use BB dominance checks below if the destination of
626 the true/false edges are dominated by their edge, thus only
627 have a single predecessor. */
628 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
629 tem = EDGE_PRED (bb, 0);
630 if (tem == true_edge
631 || (single_pred_p (true_edge->dest)
632 && (tem->src == true_edge->dest
633 || dominated_by_p (CDI_DOMINATORS,
634 tem->src, true_edge->dest))))
635 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
636 else if (tem == false_edge
637 || (single_pred_p (false_edge->dest)
638 && (tem->src == false_edge->dest
639 || dominated_by_p (CDI_DOMINATORS,
640 tem->src, false_edge->dest))))
641 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
642 else
643 return false;
644 tem = EDGE_PRED (bb, 1);
645 if (tem == true_edge
646 || (single_pred_p (true_edge->dest)
647 && (tem->src == true_edge->dest
648 || dominated_by_p (CDI_DOMINATORS,
649 tem->src, true_edge->dest))))
650 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
651 else if (tem == false_edge
652 || (single_pred_p (false_edge->dest)
653 && (tem->src == false_edge->dest
654 || dominated_by_p (CDI_DOMINATORS,
655 tem->src, false_edge->dest))))
656 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
657 else
658 return false;
659 if (!arg0 || !arg1)
660 return false;
661
662 if (true_arg_p)
663 *true_arg_p = arg0;
664 if (false_arg_p)
665 *false_arg_p = arg1;
666
667 return true;
668 }
669
670 /* Determine the outermost loop to which it is possible to hoist a statement
671 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
672 the outermost loop in which the value computed by STMT is invariant.
673 If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
674 we preserve whether STMT is executed. It also fills in other related
675 information in LIM_DATA (STMT).
676
677 The function returns false if STMT cannot be hoisted outside of the loop it
678 is defined in, and true otherwise. */
679
680 static bool
681 determine_max_movement (gimple stmt, bool must_preserve_exec)
682 {
683 basic_block bb = gimple_bb (stmt);
684 struct loop *loop = bb->loop_father;
685 struct loop *level;
686 struct lim_aux_data *lim_data = get_lim_data (stmt);
687 tree val;
688 ssa_op_iter iter;
689
690 if (must_preserve_exec)
691 level = ALWAYS_EXECUTED_IN (bb);
692 else
693 level = superloop_at_depth (loop, 1);
694 lim_data->max_loop = level;
695
696 if (gimple_code (stmt) == GIMPLE_PHI)
697 {
698 use_operand_p use_p;
699 unsigned min_cost = UINT_MAX;
700 unsigned total_cost = 0;
701 struct lim_aux_data *def_data;
702
703 /* We will end up promoting dependencies to be unconditionally
704 evaluated. For this reason the PHI cost (and thus the
705 cost we remove from the loop by doing the invariant motion)
706 is that of the cheapest PHI argument dependency chain. */
707 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
708 {
709 val = USE_FROM_PTR (use_p);
710 if (TREE_CODE (val) != SSA_NAME)
711 continue;
712 if (!add_dependency (val, lim_data, loop, false))
713 return false;
714 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
715 if (def_data)
716 {
717 min_cost = MIN (min_cost, def_data->cost);
718 total_cost += def_data->cost;
719 }
720 }
721
722 lim_data->cost += min_cost;
723
724 if (gimple_phi_num_args (stmt) > 1)
725 {
726 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
727 gimple cond;
728 if (gsi_end_p (gsi_last_bb (dom)))
729 return false;
730 cond = gsi_stmt (gsi_last_bb (dom));
731 if (gimple_code (cond) != GIMPLE_COND)
732 return false;
733 /* Verify that this is an extended form of a diamond and
734 the PHI arguments are completely controlled by the
735 predicate in DOM. */
736 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
737 return false;
738
739 /* Fold in dependencies and cost of the condition. */
740 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
741 {
742 if (!add_dependency (val, lim_data, loop, false))
743 return false;
744 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
745 if (def_data)
746 total_cost += def_data->cost;
747 }
748
749 /* We want to avoid unconditionally executing very expensive
750 operations. As costs for our dependencies cannot be
751 negative, just claim we are not invariant for this case.
752 We also are not sure whether the control-flow inside the
753 loop will vanish. */
754 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
755 && !(min_cost != 0
756 && total_cost / min_cost <= 2))
757 return false;
758
759 /* Assume that the control-flow in the loop will vanish.
760 ??? We should verify this and not artificially increase
761 the cost if that is not the case. */
762 lim_data->cost += stmt_cost (stmt);
763 }
764
765 return true;
766 }
767 else
768 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
769 if (!add_dependency (val, lim_data, loop, true))
770 return false;
771
772 if (gimple_vuse (stmt))
773 {
774 mem_ref_p ref = mem_ref_in_stmt (stmt);
775
776 if (ref)
777 {
778 lim_data->max_loop
779 = outermost_indep_loop (lim_data->max_loop, loop, ref);
780 if (!lim_data->max_loop)
781 return false;
782 }
783 else
784 {
785 if ((val = gimple_vuse (stmt)) != NULL_TREE)
786 {
787 if (!add_dependency (val, lim_data, loop, false))
788 return false;
789 }
790 }
791 }
792
793 lim_data->cost += stmt_cost (stmt);
794
795 return true;
796 }
797
798 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
799 and that one of the operands of this statement is computed by STMT.
800 Ensure that STMT (together with all the statements that define its
801 operands) is hoisted at least out of the loop LEVEL. */
802
803 static void
804 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
805 {
806 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
807 struct lim_aux_data *lim_data;
808 gimple dep_stmt;
809 unsigned i;
810
811 stmt_loop = find_common_loop (orig_loop, stmt_loop);
812 lim_data = get_lim_data (stmt);
813 if (lim_data != NULL && lim_data->tgt_loop != NULL)
814 stmt_loop = find_common_loop (stmt_loop,
815 loop_outer (lim_data->tgt_loop));
816 if (flow_loop_nested_p (stmt_loop, level))
817 return;
818
819 gcc_assert (level == lim_data->max_loop
820 || flow_loop_nested_p (lim_data->max_loop, level));
821
822 lim_data->tgt_loop = level;
823 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
824 set_level (dep_stmt, orig_loop, level);
825 }
826
827 /* Determines an outermost loop from which we want to hoist the statement STMT.
828 For now we choose the outermost possible loop. TODO -- use profiling
829 information to set it more sanely. */
830
831 static void
832 set_profitable_level (gimple stmt)
833 {
834 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
835 }
836
837 /* Returns true if STMT is a call that has side effects. */
838
839 static bool
840 nonpure_call_p (gimple stmt)
841 {
842 if (gimple_code (stmt) != GIMPLE_CALL)
843 return false;
844
845 return gimple_has_side_effects (stmt);
846 }
847
848 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
849
850 static gimple
851 rewrite_reciprocal (gimple_stmt_iterator *bsi)
852 {
853 gimple stmt, stmt1, stmt2;
854 tree name, lhs, type;
855 tree real_one;
856 gimple_stmt_iterator gsi;
857
858 stmt = gsi_stmt (*bsi);
859 lhs = gimple_assign_lhs (stmt);
860 type = TREE_TYPE (lhs);
861
862 real_one = build_one_cst (type);
863
864 name = make_temp_ssa_name (type, NULL, "reciptmp");
865 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
866 gimple_assign_rhs2 (stmt));
867
868 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
869 gimple_assign_rhs1 (stmt));
870
871 /* Replace division stmt with reciprocal and multiply stmts.
872 The multiply stmt is not invariant, so update iterator
873 and avoid rescanning. */
874 gsi = *bsi;
875 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
876 gsi_replace (&gsi, stmt2, true);
877
878 /* Continue processing with invariant reciprocal statement. */
879 return stmt1;
880 }
881
882 /* Check if the pattern at *BSI is a bittest of the form
883 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
884
885 static gimple
886 rewrite_bittest (gimple_stmt_iterator *bsi)
887 {
888 gimple stmt, use_stmt, stmt1, stmt2;
889 tree lhs, name, t, a, b;
890 use_operand_p use;
891
892 stmt = gsi_stmt (*bsi);
893 lhs = gimple_assign_lhs (stmt);
894
895 /* Verify that the single use of lhs is a comparison against zero. */
896 if (TREE_CODE (lhs) != SSA_NAME
897 || !single_imm_use (lhs, &use, &use_stmt)
898 || gimple_code (use_stmt) != GIMPLE_COND)
899 return stmt;
900 if (gimple_cond_lhs (use_stmt) != lhs
901 || (gimple_cond_code (use_stmt) != NE_EXPR
902 && gimple_cond_code (use_stmt) != EQ_EXPR)
903 || !integer_zerop (gimple_cond_rhs (use_stmt)))
904 return stmt;
905
906 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
907 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
908 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
909 return stmt;
910
911 /* There is a conversion in between possibly inserted by fold. */
912 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
913 {
914 t = gimple_assign_rhs1 (stmt1);
915 if (TREE_CODE (t) != SSA_NAME
916 || !has_single_use (t))
917 return stmt;
918 stmt1 = SSA_NAME_DEF_STMT (t);
919 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
920 return stmt;
921 }
922
923 /* Verify that B is loop invariant but A is not. Verify that with
924 all the stmt walking we are still in the same loop. */
925 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
926 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
927 return stmt;
928
929 a = gimple_assign_rhs1 (stmt1);
930 b = gimple_assign_rhs2 (stmt1);
931
932 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
933 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
934 {
935 gimple_stmt_iterator rsi;
936
937 /* 1 << B */
938 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
939 build_int_cst (TREE_TYPE (a), 1), b);
940 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
941 stmt1 = gimple_build_assign (name, t);
942
943 /* A & (1 << B) */
944 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
945 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
946 stmt2 = gimple_build_assign (name, t);
947
948 /* Replace the SSA_NAME we compare against zero. Adjust
949 the type of zero accordingly. */
950 SET_USE (use, name);
951 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
952
953 /* Don't use gsi_replace here, none of the new assignments sets
954 the variable originally set in stmt. Move bsi to stmt1, and
955 then remove the original stmt, so that we get a chance to
956 retain debug info for it. */
957 rsi = *bsi;
958 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
959 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
960 gsi_remove (&rsi, true);
961
962 return stmt1;
963 }
964
965 return stmt;
966 }
967
968 /* For each statement determines the outermost loop in which it is invariant,
969 statements on whose motion it depends and the cost of the computation.
970 This information is stored to the LIM_DATA structure associated with
971 each statement. */
972 class invariantness_dom_walker : public dom_walker
973 {
974 public:
975 invariantness_dom_walker (cdi_direction direction)
976 : dom_walker (direction) {}
977
978 virtual void before_dom_children (basic_block);
979 };
980
981 /* Determine the outermost loops in which statements in basic block BB are
982 invariant, and record them to the LIM_DATA associated with the statements.
983 Callback for dom_walker. */
984
985 void
986 invariantness_dom_walker::before_dom_children (basic_block bb)
987 {
988 enum move_pos pos;
989 gimple_stmt_iterator bsi;
990 gimple stmt;
991 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
992 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
993 struct lim_aux_data *lim_data;
994
995 if (!loop_outer (bb->loop_father))
996 return;
997
998 if (dump_file && (dump_flags & TDF_DETAILS))
999 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1000 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1001
1002 /* Look at PHI nodes, but only if there are at most two.
1003 ??? We could relax this further by post-processing the inserted
1004 code and transforming adjacent cond-exprs with the same predicate
1005 to control flow again. */
1006 bsi = gsi_start_phis (bb);
1007 if (!gsi_end_p (bsi)
1008 && ((gsi_next (&bsi), gsi_end_p (bsi))
1009 || (gsi_next (&bsi), gsi_end_p (bsi))))
1010 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1011 {
1012 stmt = gsi_stmt (bsi);
1013
1014 pos = movement_possibility (stmt);
1015 if (pos == MOVE_IMPOSSIBLE)
1016 continue;
1017
1018 lim_data = init_lim_data (stmt);
1019 lim_data->always_executed_in = outermost;
1020
1021 if (!determine_max_movement (stmt, false))
1022 {
1023 lim_data->max_loop = NULL;
1024 continue;
1025 }
1026
1027 if (dump_file && (dump_flags & TDF_DETAILS))
1028 {
1029 print_gimple_stmt (dump_file, stmt, 2, 0);
1030 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1031 loop_depth (lim_data->max_loop),
1032 lim_data->cost);
1033 }
1034
1035 if (lim_data->cost >= LIM_EXPENSIVE)
1036 set_profitable_level (stmt);
1037 }
1038
1039 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1040 {
1041 stmt = gsi_stmt (bsi);
1042
1043 pos = movement_possibility (stmt);
1044 if (pos == MOVE_IMPOSSIBLE)
1045 {
1046 if (nonpure_call_p (stmt))
1047 {
1048 maybe_never = true;
1049 outermost = NULL;
1050 }
1051 /* Make sure to note always_executed_in for stores to make
1052 store-motion work. */
1053 else if (stmt_makes_single_store (stmt))
1054 {
1055 struct lim_aux_data *lim_data = init_lim_data (stmt);
1056 lim_data->always_executed_in = outermost;
1057 }
1058 continue;
1059 }
1060
1061 if (is_gimple_assign (stmt)
1062 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1063 == GIMPLE_BINARY_RHS))
1064 {
1065 tree op0 = gimple_assign_rhs1 (stmt);
1066 tree op1 = gimple_assign_rhs2 (stmt);
1067 struct loop *ol1 = outermost_invariant_loop (op1,
1068 loop_containing_stmt (stmt));
1069
1070 /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
1071 reciprocal to be hoisted out of the loop, saving an expensive divide. */
1072 if (pos == MOVE_POSSIBLE
1073 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1074 && flag_unsafe_math_optimizations
1075 && !flag_trapping_math
1076 && ol1 != NULL
1077 && outermost_invariant_loop (op0, ol1) == NULL)
1078 stmt = rewrite_reciprocal (&bsi);
1079
1080 /* If the shift count is invariant, convert (A >> B) & 1 to
1081 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1082 saving an expensive shift. */
1083 if (pos == MOVE_POSSIBLE
1084 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1085 && integer_onep (op1)
1086 && TREE_CODE (op0) == SSA_NAME
1087 && has_single_use (op0))
1088 stmt = rewrite_bittest (&bsi);
1089 }
1090
1091 lim_data = init_lim_data (stmt);
1092 lim_data->always_executed_in = outermost;
1093
1094 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1095 continue;
1096
1097 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1098 {
1099 lim_data->max_loop = NULL;
1100 continue;
1101 }
1102
1103 if (dump_file && (dump_flags & TDF_DETAILS))
1104 {
1105 print_gimple_stmt (dump_file, stmt, 2, 0);
1106 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1107 loop_depth (lim_data->max_loop),
1108 lim_data->cost);
1109 }
1110
1111 if (lim_data->cost >= LIM_EXPENSIVE)
1112 set_profitable_level (stmt);
1113 }
1114 }
1115
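/* Dominator-tree walker that performs the actual code motion, moving each
   statement to the target loop recorded in its lim_aux_data. TODO flags to
   be returned by the pass are accumulated in todo_. */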
1116 class move_computations_dom_walker : public dom_walker
1117 {
1118 public:
1119 move_computations_dom_walker (cdi_direction direction)
1120 : dom_walker (direction), todo_ (0) {}
1121
1122 virtual void before_dom_children (basic_block);
1123
1124 unsigned int todo_;
1125 };
1126
1127 /* Return true if CODE is an operation that when operating on signed
1128 integer types involves undefined behavior on overflow and the
1129 operation can be expressed with unsigned arithmetic. */
1130
1131 static bool
1132 arith_code_with_undefined_signed_overflow (tree_code code)
1133 {
1134 switch (code)
1135 {
1136 case PLUS_EXPR:
1137 case MINUS_EXPR:
1138 case MULT_EXPR:
1139 case NEGATE_EXPR:
1140 case POINTER_PLUS_EXPR:
1141 return true;
1142 default:
1143 return false;
1144 }
1145 }
1146
1147 /* Rewrite STMT, an assignment with a signed integer or pointer arithmetic
1148 operation that can be transformed to unsigned arithmetic by converting
1149 its operands, carrying out the operation in the corresponding unsigned
1150 type and converting the result back to the original type.
1151
1152 Returns a sequence of statements that replace STMT and also contain
1153 a modified form of STMT itself. */
1154
1155 static gimple_seq
1156 rewrite_to_defined_overflow (gimple stmt)
1157 {
1158 if (dump_file && (dump_flags & TDF_DETAILS))
1159 {
1160 fprintf (dump_file, "rewriting stmt with undefined signed "
1161 "overflow ");
1162 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1163 }
1164
1165 tree lhs = gimple_assign_lhs (stmt);
1166 tree type = unsigned_type_for (TREE_TYPE (lhs));
1167 gimple_seq stmts = NULL;
1168 for (unsigned i = 1; i < gimple_num_ops (stmt); ++i)
1169 {
1170 gimple_seq stmts2 = NULL;
1171 gimple_set_op (stmt, i,
1172 force_gimple_operand (fold_convert (type,
1173 gimple_op (stmt, i)),
1174 &stmts2, true, NULL_TREE));
1175 gimple_seq_add_seq (&stmts, stmts2);
1176 }
1177 gimple_assign_set_lhs (stmt, make_ssa_name (type, stmt));
1178 if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
1179 gimple_assign_set_rhs_code (stmt, PLUS_EXPR);
1180 gimple_seq_add_stmt (&stmts, stmt);
1181 gimple cvt = gimple_build_assign_with_ops
1182 (NOP_EXPR, lhs, gimple_assign_lhs (stmt), NULL_TREE);
1183 gimple_seq_add_stmt (&stmts, cvt);
1184
1185 return stmts;
1186 }
1187
1188 /* Hoist the statements in basic block BB out of the loops prescribed by
1189 data stored in LIM_DATA structures associated with each statement. Callback
1190 for walk_dominator_tree. */
1191
1192 void
1193 move_computations_dom_walker::before_dom_children (basic_block bb)
1194 {
1195 struct loop *level;
1196 gimple_stmt_iterator bsi;
1197 gimple stmt;
1198 unsigned cost = 0;
1199 struct lim_aux_data *lim_data;
1200
1201 if (!loop_outer (bb->loop_father))
1202 return;
1203
1204 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1205 {
1206 gimple new_stmt;
1207 stmt = gsi_stmt (bsi);
1208
1209 lim_data = get_lim_data (stmt);
1210 if (lim_data == NULL)
1211 {
1212 gsi_next (&bsi);
1213 continue;
1214 }
1215
1216 cost = lim_data->cost;
1217 level = lim_data->tgt_loop;
1218 clear_lim_data (stmt);
1219
1220 if (!level)
1221 {
1222 gsi_next (&bsi);
1223 continue;
1224 }
1225
1226 if (dump_file && (dump_flags & TDF_DETAILS))
1227 {
1228 fprintf (dump_file, "Moving PHI node\n");
1229 print_gimple_stmt (dump_file, stmt, 0, 0);
1230 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1231 cost, level->num);
1232 }
1233
1234 if (gimple_phi_num_args (stmt) == 1)
1235 {
1236 tree arg = PHI_ARG_DEF (stmt, 0);
1237 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1238 gimple_phi_result (stmt),
1239 arg, NULL_TREE);
1240 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1241 }
1242 else
1243 {
1244 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1245 gimple cond = gsi_stmt (gsi_last_bb (dom));
1246 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1247 /* Get the PHI arguments corresponding to the true and false
1248 edges of COND. */
1249 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1250 gcc_assert (arg0 && arg1);
1251 t = build2 (gimple_cond_code (cond), boolean_type_node,
1252 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1253 new_stmt = gimple_build_assign_with_ops (COND_EXPR,
1254 gimple_phi_result (stmt),
1255 t, arg0, arg1);
1256 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1257 todo_ |= TODO_cleanup_cfg;
1258 }
1259 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1260 remove_phi_node (&bsi, false);
1261 }
1262
1263 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1264 {
1265 edge e;
1266
1267 stmt = gsi_stmt (bsi);
1268
1269 lim_data = get_lim_data (stmt);
1270 if (lim_data == NULL)
1271 {
1272 gsi_next (&bsi);
1273 continue;
1274 }
1275
1276 cost = lim_data->cost;
1277 level = lim_data->tgt_loop;
1278 clear_lim_data (stmt);
1279
1280 if (!level)
1281 {
1282 gsi_next (&bsi);
1283 continue;
1284 }
1285
1286 /* We do not really want to move conditionals out of the loop; we just
1287 placed it here to force its operands to be moved if necessary. */
1288 if (gimple_code (stmt) == GIMPLE_COND)
1289 continue;
1290
1291 if (dump_file && (dump_flags & TDF_DETAILS))
1292 {
1293 fprintf (dump_file, "Moving statement\n");
1294 print_gimple_stmt (dump_file, stmt, 0, 0);
1295 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1296 cost, level->num);
1297 }
1298
1299 e = loop_preheader_edge (level);
1300 gcc_assert (!gimple_vdef (stmt));
1301 if (gimple_vuse (stmt))
1302 {
1303 /* The new VUSE is the one from the virtual PHI in the loop
1304 header or the one already present. */
1305 gimple_stmt_iterator gsi2;
1306 for (gsi2 = gsi_start_phis (e->dest);
1307 !gsi_end_p (gsi2); gsi_next (&gsi2))
1308 {
1309 gimple phi = gsi_stmt (gsi2);
1310 if (virtual_operand_p (gimple_phi_result (phi)))
1311 {
1312 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1313 break;
1314 }
1315 }
1316 }
1317 gsi_remove (&bsi, false);
1318 /* In case this is a stmt that is not unconditionally executed
1319 when the target loop header is executed and the stmt may
1320 invoke undefined integer or pointer overflow, rewrite it to
1321 unsigned arithmetic. */
1322 if (is_gimple_assign (stmt)
1323 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1324 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1325 && arith_code_with_undefined_signed_overflow
1326 (gimple_assign_rhs_code (stmt))
1327 && (!ALWAYS_EXECUTED_IN (bb)
1328 || !(ALWAYS_EXECUTED_IN (bb) == level
1329 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1330 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1331 else
1332 gsi_insert_on_edge (e, stmt);
1333 }
1334 }
1335
1336 /* Hoist the statements out of the loops prescribed by data stored in
1337 LIM_DATA structures associated with each statement. */
1338
1339 static unsigned int
1340 move_computations (void)
1341 {
1342 move_computations_dom_walker walker (CDI_DOMINATORS);
1343 walker.walk (cfun->cfg->x_entry_block_ptr);
1344
1345 gsi_commit_edge_inserts ();
1346 if (need_ssa_update_p (cfun))
1347 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1348
1349 return walker.todo_;
1350 }
1351
1352 /* Checks whether the statement defining variable *INDEX can be hoisted
1353 out of the loop passed in DATA. Callback for for_each_index. */
1354
1355 static bool
1356 may_move_till (tree ref, tree *index, void *data)
1357 {
1358 struct loop *loop = (struct loop *) data, *max_loop;
1359
1360 /* If REF is an array reference, check also that the step and the lower
1361 bound is invariant in LOOP. */
1362 if (TREE_CODE (ref) == ARRAY_REF)
1363 {
1364 tree step = TREE_OPERAND (ref, 3);
1365 tree lbound = TREE_OPERAND (ref, 2);
1366
1367 max_loop = outermost_invariant_loop (step, loop);
1368 if (!max_loop)
1369 return false;
1370
1371 max_loop = outermost_invariant_loop (lbound, loop);
1372 if (!max_loop)
1373 return false;
1374 }
1375
1376 max_loop = outermost_invariant_loop (*index, loop);
1377 if (!max_loop)
1378 return false;
1379
1380 return true;
1381 }
1382
1383 /* If OP is an SSA_NAME, force the statement that defines it to be
1384 moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */
1385
1386 static void
1387 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1388 {
1389 gimple stmt;
1390
1391 if (!op
1392 || is_gimple_min_invariant (op))
1393 return;
1394
1395 gcc_assert (TREE_CODE (op) == SSA_NAME);
1396
1397 stmt = SSA_NAME_DEF_STMT (op);
1398 if (gimple_nop_p (stmt))
1399 return;
1400
1401 set_level (stmt, orig_loop, loop);
1402 }
1403
1404 /* Forces statements defining invariants in REF (and *INDEX) to be moved out of
1405 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1406 for_each_index. */
1407
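/* Data passed from force_move_till to force_move_till_op via for_each_index:
   LOOP is the loop out of which the invariants are forced and ORIG_LOOP the
   loop in which the reference occurs. */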
1408 struct fmt_data
1409 {
1410 struct loop *loop;
1411 struct loop *orig_loop;
1412 };
1413
1414 static bool
1415 force_move_till (tree ref, tree *index, void *data)
1416 {
1417 struct fmt_data *fmt_data = (struct fmt_data *) data;
1418
1419 if (TREE_CODE (ref) == ARRAY_REF)
1420 {
1421 tree step = TREE_OPERAND (ref, 3);
1422 tree lbound = TREE_OPERAND (ref, 2);
1423
1424 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1425 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1426 }
1427
1428 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1429
1430 return true;
1431 }
1432
1433 /* A function to free the mem_ref object OBJ. */
1434
1435 static void
1436 memref_free (struct mem_ref *mem)
1437 {
1438 unsigned i;
1439 vec<mem_ref_loc> *accs;
1440
1441 FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
1442 accs->release ();
1443 mem->accesses_in_loop.release ();
1444
1445 free (mem);
1446 }
1447
1448 /* Allocates and returns a memory reference description for MEM whose hash
1449 value is HASH and id is ID. */
1450
1451 static mem_ref_p
1452 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1453 {
1454 mem_ref_p ref = XNEW (struct mem_ref);
1455 ao_ref_init (&ref->mem, mem);
1456 ref->id = id;
1457 ref->hash = hash;
1458 bitmap_initialize (&ref->stored, &lim_bitmap_obstack);
1459 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1460 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1461 ref->accesses_in_loop.create (0);
1462
1463 return ref;
1464 }
1465
1466 /* Records memory reference location *LOC in LOOP to the memory reference
1467 description REF. The reference occurs in statement STMT. */
1468
1469 static void
1470 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1471 {
1472 mem_ref_loc aref;
1473
1474 if (ref->accesses_in_loop.length ()
1475 <= (unsigned) loop->num)
1476 ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
1477
1478 aref.stmt = stmt;
1479 aref.ref = loc;
1480 ref->accesses_in_loop[loop->num].safe_push (aref);
1481 }
1482
1483 /* Marks reference REF as stored in LOOP. */
1484
1485 static void
1486 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1487 {
1488 while (loop != current_loops->tree_root
1489 && bitmap_set_bit (&ref->stored, loop->num))
1490 loop = loop_outer (loop);
1491 }
1492
1493 /* Gathers memory references in statement STMT in LOOP, storing the
1494 information about them in the memory_accesses structure. Marks
1495 the vops accessed through unrecognized statements there as
1496 well. */
1497
1498 static void
1499 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1500 {
1501 tree *mem = NULL;
1502 hashval_t hash;
1503 mem_ref **slot;
1504 mem_ref_p ref;
1505 bool is_stored;
1506 unsigned id;
1507
1508 if (!gimple_vuse (stmt))
1509 return;
1510
1511 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1512 if (!mem)
1513 {
1514 /* We use the shared mem_ref for all unanalyzable refs. */
1515 id = UNANALYZABLE_MEM_ID;
1516 ref = memory_accesses.refs_list[id];
1517 if (dump_file && (dump_flags & TDF_DETAILS))
1518 {
1519 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1520 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1521 }
1522 is_stored = gimple_vdef (stmt);
1523 }
1524 else
1525 {
1526 hash = iterative_hash_expr (*mem, 0);
1527 slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
1528 if (*slot)
1529 {
1530 ref = (mem_ref_p) *slot;
1531 id = ref->id;
1532 }
1533 else
1534 {
1535 id = memory_accesses.refs_list.length ();
1536 ref = mem_ref_alloc (*mem, hash, id);
1537 memory_accesses.refs_list.safe_push (ref);
1538 *slot = ref;
1539
1540 if (dump_file && (dump_flags & TDF_DETAILS))
1541 {
1542 fprintf (dump_file, "Memory reference %u: ", id);
1543 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1544 fprintf (dump_file, "\n");
1545 }
1546 }
1547
1548 record_mem_ref_loc (ref, loop, stmt, mem);
1549 }
1550 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1551 if (is_stored)
1552 {
1553 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1554 mark_ref_stored (ref, loop);
1555 }
1556 return;
1557 }
1558
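/* Mapping from loop->num to the loop's postorder index, set up temporarily
   by analyze_memory_references and used when sorting basic blocks by loop. */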
1559 static unsigned *bb_loop_postorder;
1560
1561 /* qsort comparison function to sort blocks by their loop fathers' postorder. */
1562
1563 static int
1564 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1565 {
1566 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1567 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1568 struct loop *loop1 = bb1->loop_father;
1569 struct loop *loop2 = bb2->loop_father;
1570 if (loop1->num == loop2->num)
1571 return 0;
1572 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1573 }
1574
1575 /* Gathers memory references in loops. */
1576
1577 static void
1578 analyze_memory_references (void)
1579 {
1580 gimple_stmt_iterator bsi;
1581 basic_block bb, *bbs;
1582 struct loop *loop, *outer;
1583 loop_iterator li;
1584 unsigned i, n;
1585
1586 /* Initialize bb_loop_postorder with a mapping from loop->num to
1587 its postorder index. */
1588 i = 0;
1589 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
1590 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1591 bb_loop_postorder[loop->num] = i++;
1592 /* Collect all basic-blocks in loops and sort them by their
1593 loops' postorder. */
1594 i = 0;
1595 bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
1596 FOR_EACH_BB (bb)
1597 if (bb->loop_father != current_loops->tree_root)
1598 bbs[i++] = bb;
1599 n = i;
1600 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1601 free (bb_loop_postorder);
1602
1603 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1604 That results in better locality for all the bitmaps. */
1605 for (i = 0; i < n; ++i)
1606 {
1607 basic_block bb = bbs[i];
1608 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1609 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1610 }
1611
1612 free (bbs);
1613
1614 /* Propagate the information about accessed memory references up
1615 the loop hierarchy. */
1616 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1617 {
1618 /* Finalize the overall touched references (including subloops). */
1619 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1620 &memory_accesses.refs_stored_in_loop[loop->num]);
1621
1622 /* Propagate the information about accessed memory references up
1623 the loop hierarchy. */
1624 outer = loop_outer (loop);
1625 if (outer == current_loops->tree_root)
1626 continue;
1627
1628 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1629 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1630 }
1631 }
1632
1633 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1634 tree_to_aff_combination_expand. */
1635
1636 static bool
1637 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1638 struct pointer_map_t **ttae_cache)
1639 {
1640 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1641 object and their offsets differ in such a way that the locations cannot
1642 overlap, then they cannot alias. */
1643 double_int size1, size2;
1644 aff_tree off1, off2;
1645
1646 /* Perform basic offset and type-based disambiguation. */
1647 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1648 return false;
1649
1650 /* The expansion of addresses may be a bit expensive, thus we only do
1651 the check at -O2 and higher optimization levels. */
1652 if (optimize < 2)
1653 return true;
1654
1655 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1656 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1657 aff_combination_expand (&off1, ttae_cache);
1658 aff_combination_expand (&off2, ttae_cache);
1659 aff_combination_scale (&off1, double_int_minus_one);
1660 aff_combination_add (&off2, &off1);
1661
1662 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1663 return false;
1664
1665 return true;
1666 }
1667
1668 /* Iterates over all locations of REF in LOOP and its subloops calling
1669 fn.operator() with the location as argument. When that operator
1670 returns true the iteration is stopped and true is returned.
1671 Otherwise false is returned. */
1672
1673 template <typename FN>
1674 static bool
1675 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1676 {
1677 unsigned i;
1678 mem_ref_loc_p loc;
1679 struct loop *subloop;
1680
1681 if (ref->accesses_in_loop.length () > (unsigned) loop->num)
1682 FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc)
1683 if (fn (loc))
1684 return true;
1685
1686 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1687 if (for_all_locs_in_loop (subloop, ref, fn))
1688 return true;
1689
1690 return false;
1691 }
1692
1693 /* Rewrites location LOC by TMP_VAR. */
1694
1695 struct rewrite_mem_ref_loc
1696 {
1697 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1698 bool operator () (mem_ref_loc_p loc);
1699 tree tmp_var;
1700 };
1701
1702 bool
1703 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1704 {
1705 *loc->ref = tmp_var;
1706 update_stmt (loc->stmt);
1707 return false;
1708 }
1709
1710 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1711
1712 static void
1713 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1714 {
1715 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1716 }
1717
1718 /* Stores the first reference location in LOCP. */
1719
1720 struct first_mem_ref_loc_1
1721 {
1722 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1723 bool operator () (mem_ref_loc_p loc);
1724 mem_ref_loc_p *locp;
1725 };
1726
1727 bool
1728 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1729 {
1730 *locp = loc;
1731 return true;
1732 }
1733
1734 /* Returns the first reference location to REF in LOOP. */
1735
1736 static mem_ref_loc_p
1737 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1738 {
1739 mem_ref_loc_p locp = NULL;
1740 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1741 return locp;
1742 }
1743
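/* Bookkeeping kept in edge->aux by execute_sm_if_changed so that flag-guarded
   stores emitted for later references are appended after previously emitted
   ones, preserving the original store order. */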
1744 struct prev_flag_edges {
1745 /* Edge to insert new flag comparison code. */
1746 edge append_cond_position;
1747
1748 /* Edge for fall through from previous flag comparison. */
1749 edge last_cond_fallthru;
1750 };
1751
1752 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1753 MEM along edge EX.
1754
1755 The store is only done if MEM has changed. We do this so no
1756 changes to MEM occur on code paths that did not originally store
1757 into it.
1758
1759 The common case for execute_sm will transform:
1760
1761 for (...) {
1762 if (foo)
1763 stuff;
1764 else
1765 MEM = TMP_VAR;
1766 }
1767
1768 into:
1769
1770 lsm = MEM;
1771 for (...) {
1772 if (foo)
1773 stuff;
1774 else
1775 lsm = TMP_VAR;
1776 }
1777 MEM = lsm;
1778
1779 This function will generate:
1780
1781 lsm = MEM;
1782
1783 lsm_flag = false;
1784 ...
1785 for (...) {
1786 if (foo)
1787 stuff;
1788 else {
1789 lsm = TMP_VAR;
1790 lsm_flag = true;
1791 }
1792 }
1793 if (lsm_flag) <--
1794 MEM = lsm; <--
1795 */
1796
1797 static void
1798 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1799 {
1800 basic_block new_bb, then_bb, old_dest;
1801 bool loop_has_only_one_exit;
1802 edge then_old_edge, orig_ex = ex;
1803 gimple_stmt_iterator gsi;
1804 gimple stmt;
1805 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1806
1807 /* ?? Insert store after previous store if applicable. See note
1808 below. */
1809 if (prev_edges)
1810 ex = prev_edges->append_cond_position;
1811
1812 loop_has_only_one_exit = single_pred_p (ex->dest);
1813
1814 if (loop_has_only_one_exit)
1815 ex = split_block_after_labels (ex->dest);
1816
1817 old_dest = ex->dest;
1818 new_bb = split_edge (ex);
1819 then_bb = create_empty_bb (new_bb);
1820 if (current_loops && new_bb->loop_father)
1821 add_bb_to_loop (then_bb, new_bb->loop_father);
1822
1823 gsi = gsi_start_bb (new_bb);
1824 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1825 NULL_TREE, NULL_TREE);
1826 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1827
1828 gsi = gsi_start_bb (then_bb);
1829 /* Insert actual store. */
1830 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1831 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1832
1833 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
1834 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
1835 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
1836
1837 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1838
1839 if (prev_edges)
1840 {
1841 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1842 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1843 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1844 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1845 recompute_dominator (CDI_DOMINATORS, old_dest));
1846 }
1847
1848 /* ?? Because stores may alias, they must happen in the exact
1849 sequence they originally happened. Save the position right after
1850 the (_lsm) store we just created so we can continue appending after
1851 it and maintain the original order. */
1852 {
1853 struct prev_flag_edges *p;
1854
1855 if (orig_ex->aux)
1856 orig_ex->aux = NULL;
1857 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1858 p = (struct prev_flag_edges *) orig_ex->aux;
1859 p->append_cond_position = then_old_edge;
1860 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1861 orig_ex->aux = (void *) p;
1862 }
1863
1864 if (!loop_has_only_one_exit)
1865 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
1866 {
1867 gimple phi = gsi_stmt (gsi);
1868 unsigned i;
1869
1870 for (i = 0; i < gimple_phi_num_args (phi); i++)
1871 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1872 {
1873 tree arg = gimple_phi_arg_def (phi, i);
1874 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1875 update_stmt (phi);
1876 }
1877 }
1878 /* Remove the original fall through edge. This was the
1879 single_succ_edge (new_bb). */
1880 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1881 }
1882
1883 /* When the location is a store to REF, set FLAG to indicate the store. */
1884
1885 struct sm_set_flag_if_changed
1886 {
1887 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1888 bool operator () (mem_ref_loc_p loc);
1889 tree flag;
1890 };
1891
1892 bool
1893 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1894 {
1895 /* Only set the flag for writes. */
1896 if (is_gimple_assign (loc->stmt)
1897 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1898 {
1899 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1900 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1901 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1902 }
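  /* Continue iterating over the remaining locations of REF.  */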
1903 return false;
1904 }
1905
1906 /* Helper function for execute_sm. At every location where REF is
1907 stored, set an appropriate flag indicating the store. */
1908
1909 static tree
1910 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1911 {
1912 tree flag;
1913 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1914 flag = create_tmp_reg (boolean_type_node, str);
1915 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1916 return flag;
1917 }
1918
1919 /* Executes store motion of memory reference REF from LOOP.
1920 Exits from the LOOP are stored in EXITS. The initialization of the
1921 temporary variable is put into the preheader of the loop, and assignments
1922 to the reference from the temporary variable are emitted on the exit edges. */
1923
1924 static void
1925 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1926 {
1927 tree tmp_var, store_flag;
1928 unsigned i;
1929 gimple load;
1930 struct fmt_data fmt_data;
1931 edge ex;
1932 struct lim_aux_data *lim_data;
1933 bool multi_threaded_model_p = false;
1934 gimple_stmt_iterator gsi;
1935
1936 if (dump_file && (dump_flags & TDF_DETAILS))
1937 {
1938 fprintf (dump_file, "Executing store motion of ");
1939 print_generic_expr (dump_file, ref->mem.ref, 0);
1940 fprintf (dump_file, " from loop %d\n", loop->num);
1941 }
1942
1943 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1944 get_lsm_tmp_name (ref->mem.ref, ~0));
1945
1946 fmt_data.loop = loop;
1947 fmt_data.orig_loop = loop;
1948 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1949
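  /* If the preheader is inside a transaction, or if store data races are
     disallowed, we may not introduce a store on exit paths that did not
     store originally, so guard the sunk store with a flag.  */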
1950 if (block_in_transaction (loop_preheader_edge (loop)->src)
1951 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1952 multi_threaded_model_p = true;
1953
1954 if (multi_threaded_model_p)
1955 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1956
1957 rewrite_mem_refs (loop, ref, tmp_var);
1958
1959 /* Emit the load code at the position of the first occurrence of the
1960 memory reference in the loop, so that we are sure it will be processed
1961 by move_computations after all dependencies. */
1962 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1963
1964 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1965 load altogether, since the store is predicated by a flag. We
1966 could do the load only if it was originally in the loop. */
1967 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1968 lim_data = init_lim_data (load);
1969 lim_data->max_loop = loop;
1970 lim_data->tgt_loop = loop;
1971 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1972
1973 if (multi_threaded_model_p)
1974 {
1975 load = gimple_build_assign (store_flag, boolean_false_node);
1976 lim_data = init_lim_data (load);
1977 lim_data->max_loop = loop;
1978 lim_data->tgt_loop = loop;
1979 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1980 }
1981
1982 /* Sink the store to every exit from the loop. */
1983 FOR_EACH_VEC_ELT (exits, i, ex)
1984 if (!multi_threaded_model_p)
1985 {
1986 gimple store;
1987 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
1988 gsi_insert_on_edge (ex, store);
1989 }
1990 else
1991 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
1992 }
1993
1994 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
1995 edges of the LOOP. */
1996
1997 static void
1998 hoist_memory_references (struct loop *loop, bitmap mem_refs,
1999 vec<edge> exits)
2000 {
2001 mem_ref_p ref;
2002 unsigned i;
2003 bitmap_iterator bi;
2004
2005 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2006 {
2007 ref = memory_accesses.refs_list[i];
2008 execute_sm (loop, exits, ref);
2009 }
2010 }
2011
2012 struct ref_always_accessed
2013 {
2014 ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_)
2015 : loop (loop_), base (base_), stored_p (stored_p_) {}
2016 bool operator () (mem_ref_loc_p loc);
2017 struct loop *loop;
2018 tree base;
2019 bool stored_p;
2020 };
2021
2022 bool
2023 ref_always_accessed::operator () (mem_ref_loc_p loc)
2024 {
2025 struct loop *must_exec;
2026
2027 if (!get_lim_data (loc->stmt))
2028 return false;
2029
2030 /* If we require an always executed store, make sure the statement
2031 stores to the reference. */
2032 if (stored_p)
2033 {
2034 tree lhs;
2035 if (!gimple_get_lhs (loc->stmt))
2036 return false;
2037 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2038 if (!lhs)
2039 return false;
2040 if (INDIRECT_REF_P (lhs)
2041 || TREE_CODE (lhs) == MEM_REF)
2042 lhs = TREE_OPERAND (lhs, 0);
2043 if (lhs != base)
2044 return false;
2045 }
2046
2047 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2048 if (!must_exec)
2049 return false;
2050
2051 if (must_exec == loop
2052 || flow_loop_nested_p (must_exec, loop))
2053 return true;
2054
2055 return false;
2056 }
2057
2058 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2059 require that REF is always stored to in LOOP. */
2060
2061 static bool
2062 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2063 {
2064 tree base = ao_ref_base (&ref->mem);
2065 if (TREE_CODE (base) == MEM_REF)
2066 base = TREE_OPERAND (base, 0);
2067
2068 return for_all_locs_in_loop (loop, ref,
2069 ref_always_accessed (loop, base, stored_p));
2070 }
2071
2072 /* Returns true if REF1 and REF2 are independent. */
2073
2074 static bool
2075 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2076 {
2077 if (ref1 == ref2)
2078 return true;
2079
2080 if (dump_file && (dump_flags & TDF_DETAILS))
2081 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2082 ref1->id, ref2->id);
2083
2084 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2085 {
2086 if (dump_file && (dump_flags & TDF_DETAILS))
2087 fprintf (dump_file, "dependent.\n");
2088 return false;
2089 }
2090 else
2091 {
2092 if (dump_file && (dump_flags & TDF_DETAILS))
2093 fprintf (dump_file, "independent.\n");
2094 return true;
2095 }
2096 }
2097
2098 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2099 and its super-loops. */
2100
2101 static void
2102 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2103 {
2104 /* We can propagate dependent-in-loop bits up the loop
2105 hierarchy to all outer loops. */
2106 while (loop != current_loops->tree_root
2107 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2108 loop = loop_outer (loop);
2109 }
2110
2111 /* Returns true if REF is independent of all other memory references in
2112 LOOP. */
2113
2114 static bool
2115 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2116 {
2117 bitmap refs_to_check;
2118 unsigned i;
2119 bitmap_iterator bi;
2120 mem_ref_p aref;
2121
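  /* If REF is itself stored in LOOP it may conflict with any reference in
     the loop; a reference that is only read can only conflict with stores.  */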
2122 if (stored_p)
2123 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2124 else
2125 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2126
2127 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2128 return false;
2129
2130 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2131 {
2132 aref = memory_accesses.refs_list[i];
2133 if (!refs_independent_p (ref, aref))
2134 return false;
2135 }
2136
2137 return true;
2138 }
2139
2140 /* Returns true if REF is independent of all other memory references in
2141 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2142
2143 static bool
2144 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2145 {
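  /* If REF itself is stored in LOOP, answer the stronger query against all
     references, not just the stores.  */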
2146 stored_p |= bitmap_bit_p (&ref->stored, loop->num);
2147
2148 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2149 return true;
2150 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2151 return false;
2152
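  /* REF can only be independent of LOOP if it is also independent of all
     of LOOP's inner loops.  */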
2153 struct loop *inner = loop->inner;
2154 while (inner)
2155 {
2156 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2157 return false;
2158 inner = inner->next;
2159 }
2160
2161 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2162
2163 if (dump_file && (dump_flags & TDF_DETAILS))
2164 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2165 ref->id, loop->num, indep_p ? "independent" : "dependent");
2166
2167 /* Record the computed result in the cache. */
2168 if (indep_p)
2169 {
2170 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2171 && stored_p)
2172 {
2173 /* If it's independent of all refs then it's independent of
2174 stores, too. */
2175 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2176 }
2177 }
2178 else
2179 {
2180 record_dep_loop (loop, ref, stored_p);
2181 if (!stored_p)
2182 {
2183 /* If it's dependent on stores it's dependent on
2184 all refs, too. */
2185 record_dep_loop (loop, ref, true);
2186 }
2187 }
2188
2189 return indep_p;
2190 }
2191
2192 /* Returns true if REF is independent of all other memory references in
2193 LOOP. */
2194
2195 static bool
2196 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2197 {
2198 gcc_checking_assert (MEM_ANALYZABLE (ref));
2199
2200 return ref_indep_loop_p_2 (loop, ref, false);
2201 }
2202
2203 /* Returns true if we can perform store motion of REF from LOOP. */
2204
2205 static bool
2206 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2207 {
2208 tree base;
2209
2210 /* Can't hoist unanalyzable refs. */
2211 if (!MEM_ANALYZABLE (ref))
2212 return false;
2213
2214 /* It should be movable. */
2215 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2216 || TREE_THIS_VOLATILE (ref->mem.ref)
2217 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2218 return false;
2219
2220 /* If it can throw, fail; we do not properly update EH info. */
2221 if (tree_could_throw_p (ref->mem.ref))
2222 return false;
2223
2224 /* If it can trap, it must always be executed in LOOP.
2225 Readonly memory locations may trap when storing to them, but
2226 tree_could_trap_p is a predicate for rvalues, so check that
2227 explicitly. */
2228 base = get_base_address (ref->mem.ref);
2229 if ((tree_could_trap_p (ref->mem.ref)
2230 || (DECL_P (base) && TREE_READONLY (base)))
2231 && !ref_always_accessed_p (loop, ref, true))
2232 return false;
2233
2234 /* And it must be independent of all other memory references
2235 in LOOP. */
2236 if (!ref_indep_loop_p (loop, ref))
2237 return false;
2238
2239 return true;
2240 }
2241
2242 /* Marks the references in LOOP for which store motion should be performed
2243 in REFS_TO_SM. SM_EXECUTED is the set of references for which store
2244 motion was performed in one of the outer loops. */
2245
2246 static void
2247 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2248 {
2249 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2250 unsigned i;
2251 bitmap_iterator bi;
2252 mem_ref_p ref;
2253
2254 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2255 {
2256 ref = memory_accesses.refs_list[i];
2257 if (can_sm_ref_p (loop, ref))
2258 bitmap_set_bit (refs_to_sm, i);
2259 }
2260 }
2261
2262 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2263 for a store motion optimization (i.e. whether we can insert statements
2264 on its exits). */
2265
2266 static bool
2267 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2268 vec<edge> exits)
2269 {
2270 unsigned i;
2271 edge ex;
2272
2273 FOR_EACH_VEC_ELT (exits, i, ex)
2274 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2275 return false;
2276
2277 return true;
2278 }
2279
2280 /* Try to perform store motion for all memory references modified inside
2281 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2282 store motion was executed in one of the outer loops. */
2283
2284 static void
2285 store_motion_loop (struct loop *loop, bitmap sm_executed)
2286 {
2287 vec<edge> exits = get_loop_exit_edges (loop);
2288 struct loop *subloop;
2289 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2290
2291 if (loop_suitable_for_sm (loop, exits))
2292 {
2293 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2294 hoist_memory_references (loop, sm_in_loop, exits);
2295 }
2296 exits.release ();
2297
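  /* Let the subloops skip the references already moved out of this loop,
     then restore SM_EXECUTED for the sibling loops afterwards.  */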
2298 bitmap_ior_into (sm_executed, sm_in_loop);
2299 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2300 store_motion_loop (subloop, sm_executed);
2301 bitmap_and_compl_into (sm_executed, sm_in_loop);
2302 BITMAP_FREE (sm_in_loop);
2303 }
2304
2305 /* Try to perform store motion for all memory references modified inside
2306 loops. */
2307
2308 static void
2309 store_motion (void)
2310 {
2311 struct loop *loop;
2312 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2313
2314 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2315 store_motion_loop (loop, sm_executed);
2316
2317 BITMAP_FREE (sm_executed);
2318 gsi_commit_edge_inserts ();
2319 }
2320
2321 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2322 for each such basic block bb records the outermost loop for which execution
2323 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2324 blocks that contain a nonpure call. */
2325
2326 static void
2327 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2328 {
2329 basic_block bb = NULL, *bbs, last = NULL;
2330 unsigned i;
2331 edge e;
2332 struct loop *inn_loop = loop;
2333
2334 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2335 {
2336 bbs = get_loop_body_in_dom_order (loop);
2337
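  /* Walk the body in dominance order, remembering in LAST the last block
     seen that dominates the latch.  Stop as soon as control may fail to
     reach the latch, e.g. at a nonpure call, an exit edge, an irreducible
     region or an inner loop that is not always entered.  */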
2338 for (i = 0; i < loop->num_nodes; i++)
2339 {
2340 edge_iterator ei;
2341 bb = bbs[i];
2342
2343 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2344 last = bb;
2345
2346 if (bitmap_bit_p (contains_call, bb->index))
2347 break;
2348
2349 FOR_EACH_EDGE (e, ei, bb->succs)
2350 if (!flow_bb_inside_loop_p (loop, e->dest))
2351 break;
2352 if (e)
2353 break;
2354
2355 /* A loop might be infinite (TODO use simple loop analysis
2356 to disprove this if possible). */
2357 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2358 break;
2359
2360 if (!flow_bb_inside_loop_p (inn_loop, bb))
2361 break;
2362
2363 if (bb->loop_father->header == bb)
2364 {
2365 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2366 break;
2367
2368 /* In a loop that is always entered we may proceed anyway.
2369 But record that we entered it and stop once we leave it. */
2370 inn_loop = bb->loop_father;
2371 }
2372 }
2373
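  /* LAST and all blocks on its dominator chain up to the loop header are
     executed whenever the header is; record them.  */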
2374 while (1)
2375 {
2376 SET_ALWAYS_EXECUTED_IN (last, loop);
2377 if (last == loop->header)
2378 break;
2379 last = get_immediate_dominator (CDI_DOMINATORS, last);
2380 }
2381
2382 free (bbs);
2383 }
2384
2385 for (loop = loop->inner; loop; loop = loop->next)
2386 fill_always_executed_in_1 (loop, contains_call);
2387 }
2388
2389 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2390 for each such basic block bb records the outermost loop for which execution
2391 of its header implies execution of bb. */
2392
2393 static void
2394 fill_always_executed_in (void)
2395 {
2396 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2397 basic_block bb;
2398 struct loop *loop;
2399
2400 bitmap_clear (contains_call);
2401 FOR_EACH_BB (bb)
2402 {
2403 gimple_stmt_iterator gsi;
2404 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2405 {
2406 if (nonpure_call_p (gsi_stmt (gsi)))
2407 break;
2408 }
2409
2410 if (!gsi_end_p (gsi))
2411 bitmap_set_bit (contains_call, bb->index);
2412 }
2413
2414 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2415 fill_always_executed_in_1 (loop, contains_call);
2416
2417 sbitmap_free (contains_call);
2418 }
2419
2420
2421 /* Compute the global information needed by the loop invariant motion pass. */
2422
2423 static void
2424 tree_ssa_lim_initialize (void)
2425 {
2426 unsigned i;
2427
2428 bitmap_obstack_initialize (&lim_bitmap_obstack);
2429 lim_aux_data_map = pointer_map_create ();
2430
2431 if (flag_tm)
2432 compute_transaction_bits ();
2433
2434 alloc_aux_for_edges (0);
2435
2436 memory_accesses.refs.create (100);
2437 memory_accesses.refs_list.create (100);
2438 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2439 memory_accesses.refs_list.quick_push
2440 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2441
2442 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2443 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2444 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2445 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2446 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2447 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2448
2449 for (i = 0; i < number_of_loops (cfun); i++)
2450 {
2451 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2452 &lim_bitmap_obstack);
2453 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2454 &lim_bitmap_obstack);
2455 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2456 &lim_bitmap_obstack);
2457 }
2458
2459 memory_accesses.ttae_cache = NULL;
2460 }
2461
2462 /* Cleans up after the invariant motion pass. */
2463
2464 static void
2465 tree_ssa_lim_finalize (void)
2466 {
2467 basic_block bb;
2468 unsigned i;
2469 mem_ref_p ref;
2470
2471 free_aux_for_edges ();
2472
2473 FOR_EACH_BB (bb)
2474 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2475
2476 bitmap_obstack_release (&lim_bitmap_obstack);
2477 pointer_map_destroy (lim_aux_data_map);
2478
2479 memory_accesses.refs.dispose ();
2480
2481 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2482 memref_free (ref);
2483 memory_accesses.refs_list.release ();
2484
2485 memory_accesses.refs_in_loop.release ();
2486 memory_accesses.refs_stored_in_loop.release ();
2487 memory_accesses.all_refs_stored_in_loop.release ();
2488
2489 if (memory_accesses.ttae_cache)
2490 free_affine_expand_cache (&memory_accesses.ttae_cache);
2491 }
2492
2493 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2494 i.e. those that are likely to be a win regardless of the register pressure. */
2495
2496 unsigned int
2497 tree_ssa_lim (void)
2498 {
2499 unsigned int todo;
2500
2501 tree_ssa_lim_initialize ();
2502
2503 /* Gathers information about memory accesses in the loops. */
2504 analyze_memory_references ();
2505
2506 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2507 fill_always_executed_in ();
2508
2509 /* For each statement determine the outermost loop in which it is
2510 invariant and the cost of computing the invariant. */
2511 invariantness_dom_walker (CDI_DOMINATORS)
2512 .walk (cfun->cfg->x_entry_block_ptr);
2513
2514 /* Execute store motion. Force the necessary invariants to be moved
2515 out of the loops as well. */
2516 store_motion ();
2517
2518 /* Move the expressions that are expensive enough. */
2519 todo = move_computations ();
2520
2521 tree_ssa_lim_finalize ();
2522
2523 return todo;
2524 }
2525
2526 /* Loop invariant motion pass. */
2527
2528 static unsigned int
2529 tree_ssa_loop_im (void)
2530 {
2531 if (number_of_loops (cfun) <= 1)
2532 return 0;
2533
2534 return tree_ssa_lim ();
2535 }
2536
2537 static bool
2538 gate_tree_ssa_loop_im (void)
2539 {
2540 return flag_tree_loop_im != 0;
2541 }
2542
2543 namespace {
2544
2545 const pass_data pass_data_lim =
2546 {
2547 GIMPLE_PASS, /* type */
2548 "lim", /* name */
2549 OPTGROUP_LOOP, /* optinfo_flags */
2550 true, /* has_gate */
2551 true, /* has_execute */
2552 TV_LIM, /* tv_id */
2553 PROP_cfg, /* properties_required */
2554 0, /* properties_provided */
2555 0, /* properties_destroyed */
2556 0, /* todo_flags_start */
2557 0, /* todo_flags_finish */
2558 };
2559
2560 class pass_lim : public gimple_opt_pass
2561 {
2562 public:
2563 pass_lim (gcc::context *ctxt)
2564 : gimple_opt_pass (pass_data_lim, ctxt)
2565 {}
2566
2567 /* opt_pass methods: */
2568 opt_pass * clone () { return new pass_lim (m_ctxt); }
2569 bool gate () { return gate_tree_ssa_loop_im (); }
2570 unsigned int execute () { return tree_ssa_loop_im (); }
2571
2572 }; // class pass_lim
2573
2574 } // anon namespace
2575
2576 gimple_opt_pass *
2577 make_pass_lim (gcc::context *ctxt)
2578 {
2579 return new pass_lim (ctxt);
2580 }
2581
2582