1 /* Loop invariant motion.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "tm_p.h"
27 #include "basic-block.h"
28 #include "gimple-pretty-print.h"
29 #include "tree-flow.h"
30 #include "cfgloop.h"
31 #include "domwalk.h"
32 #include "params.h"
33 #include "tree-pass.h"
34 #include "flags.h"
35 #include "hashtab.h"
36 #include "tree-affine.h"
37 #include "pointer-set.h"
38 #include "tree-ssa-propagate.h"
39
40 /* TODO: Support for predicated code motion. I.e.
41
42 while (1)
43 {
44 if (cond)
45 {
46 a = inv;
47 something;
48 }
49 }
50
51 Where COND and INV are invariants, but evaluating INV may trap or be
52    invalid for some other reason if !COND.  This may be transformed to
53
54 if (cond)
55 a = inv;
56 while (1)
57 {
58 if (cond)
59 something;
60 } */
61
62 /* A type for the list of statements that have to be moved in order to be able
63 to hoist an invariant computation. */
64
65 struct depend
66 {
67 gimple stmt;
68 struct depend *next;
69 };
70
71 /* The auxiliary data kept for each statement. */
72
73 struct lim_aux_data
74 {
75   struct loop *max_loop;	/* The outermost loop in which the statement
76 				   is invariant.  */
77
78   struct loop *tgt_loop;	/* The loop out of which we want to move the
79 				   invariant.  */
80
81 struct loop *always_executed_in;
82 				/* The outermost loop for which we are sure
83 the statement is executed if the loop
84 is entered. */
85
86 unsigned cost; /* Cost of the computation performed by the
87 statement. */
88
89   struct depend *depends;	/* List of statements that must also be hoisted
90 				   out of the loop when this statement is
91 				   hoisted; i.e. those that define the operands
92 				   of the statement and are inside the
93 				   MAX_LOOP loop.  */
94 };
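
/* For instance, if a loop body contains the invariant computations
   a_1 = b_2 + c_3 and d_4 = a_1 * 2, the lim_aux_data of the statement
   defining d_4 records the statement defining a_1 in DEPENDS, so hoisting
   d_4 also drags the computation of a_1 out of the loop (see set_level
   below).  This is only an illustrative sketch of how the fields are
   used.  */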
95
96 /* Maps statements to their lim_aux_data. */
97
98 static struct pointer_map_t *lim_aux_data_map;
99
100 /* Description of a memory reference location. */
101
102 typedef struct mem_ref_loc
103 {
104 tree *ref; /* The reference itself. */
105   gimple stmt;			/* The statement in which it occurs.  */
106 } *mem_ref_loc_p;
107
108 DEF_VEC_P(mem_ref_loc_p);
109 DEF_VEC_ALLOC_P(mem_ref_loc_p, heap);
110
111 /* The list of memory reference locations in a loop. */
112
113 typedef struct mem_ref_locs
114 {
115 VEC (mem_ref_loc_p, heap) *locs;
116 } *mem_ref_locs_p;
117
118 DEF_VEC_P(mem_ref_locs_p);
119 DEF_VEC_ALLOC_P(mem_ref_locs_p, heap);
120
121 /* Description of a memory reference. */
122
123 typedef struct mem_ref
124 {
125 tree mem; /* The memory itself. */
126 unsigned id; /* ID assigned to the memory reference
127 (its index in memory_accesses.refs_list) */
128 hashval_t hash; /* Its hash value. */
129   bitmap stored;		/* The set of loops in which this memory location
130 				   is stored to.  */
131 VEC (mem_ref_locs_p, heap) *accesses_in_loop;
132 /* The locations of the accesses. Vector
133 indexed by the loop number. */
134
135 /* The following sets are computed on demand. We keep both set and
136 its complement, so that we know whether the information was
137 already computed or not. */
138   bitmap indep_loop;		/* The set of loops in which the memory
139 				   reference is independent, meaning:
140 				   If it is stored in the loop, this store
141 				     is independent of all other loads and
142 				     stores.
143 				   If it is only loaded, then it is independent
144 				     of all stores in the loop.  */
145 bitmap dep_loop; /* The complement of INDEP_LOOP. */
146
147   bitmap indep_ref;		/* The set of memory references that this
148 				   reference is independent of.  */
149 bitmap dep_ref; /* The complement of INDEP_REF. */
150 } *mem_ref_p;
151
152 DEF_VEC_P(mem_ref_p);
153 DEF_VEC_ALLOC_P(mem_ref_p, heap);
154
155 DEF_VEC_P(bitmap);
156 DEF_VEC_ALLOC_P(bitmap, heap);
157
158 DEF_VEC_P(htab_t);
159 DEF_VEC_ALLOC_P(htab_t, heap);
160
161 /* Description of memory accesses in loops. */
162
163 static struct
164 {
165 /* The hash table of memory references accessed in loops. */
166 htab_t refs;
167
168 /* The list of memory references. */
169 VEC (mem_ref_p, heap) *refs_list;
170
171 /* The set of memory references accessed in each loop. */
172 VEC (bitmap, heap) *refs_in_loop;
173
174 /* The set of memory references accessed in each loop, including
175 subloops. */
176 VEC (bitmap, heap) *all_refs_in_loop;
177
178 /* The set of memory references stored in each loop, including
179 subloops. */
180 VEC (bitmap, heap) *all_refs_stored_in_loop;
181
182 /* Cache for expanding memory addresses. */
183 struct pointer_map_t *ttae_cache;
184 } memory_accesses;
185
186 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
187
188 /* Minimum cost of an expensive expression. */
189 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
190
191 /* The outermost loop for which execution of the header guarantees that the
192 block will be executed. */
193 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
194 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
195
196 /* Whether the reference was analyzable. */
197 #define MEM_ANALYZABLE(REF) ((REF)->mem != error_mark_node)
198
199 static struct lim_aux_data *
200 init_lim_data (gimple stmt)
201 {
202 void **p = pointer_map_insert (lim_aux_data_map, stmt);
203
204 *p = XCNEW (struct lim_aux_data);
205 return (struct lim_aux_data *) *p;
206 }
207
208 static struct lim_aux_data *
209 get_lim_data (gimple stmt)
210 {
211 void **p = pointer_map_contains (lim_aux_data_map, stmt);
212 if (!p)
213 return NULL;
214
215 return (struct lim_aux_data *) *p;
216 }
217
218 /* Releases the memory occupied by DATA. */
219
220 static void
221 free_lim_aux_data (struct lim_aux_data *data)
222 {
223 struct depend *dep, *next;
224
225 for (dep = data->depends; dep; dep = next)
226 {
227 next = dep->next;
228 free (dep);
229 }
230 free (data);
231 }
232
233 static void
234 clear_lim_data (gimple stmt)
235 {
236 void **p = pointer_map_contains (lim_aux_data_map, stmt);
237 if (!p)
238 return;
239
240 free_lim_aux_data ((struct lim_aux_data *) *p);
241 *p = NULL;
242 }
243
244 /* Calls CBCK for each index in memory reference ADDR_P. There are two
245    kinds of situations handled; in each of these cases, the memory reference
246 and DATA are passed to the callback:
247
248 Access to an array: ARRAY_{RANGE_}REF (base, index). In this case we also
249 pass the pointer to the index to the callback.
250
251    Pointer dereference: MEM_REF (addr).  In this case we also pass the
252 pointer to addr to the callback.
253
254 If the callback returns false, the whole search stops and false is returned.
255 Otherwise the function returns true after traversing through the whole
256 reference *ADDR_P. */
257
258 bool
259 for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
260 {
261 tree *nxt, *idx;
262
263 for (; ; addr_p = nxt)
264 {
265 switch (TREE_CODE (*addr_p))
266 {
267 case SSA_NAME:
268 return cbck (*addr_p, addr_p, data);
269
270 case MEM_REF:
271 nxt = &TREE_OPERAND (*addr_p, 0);
272 return cbck (*addr_p, nxt, data);
273
274 case BIT_FIELD_REF:
275 case VIEW_CONVERT_EXPR:
276 case REALPART_EXPR:
277 case IMAGPART_EXPR:
278 nxt = &TREE_OPERAND (*addr_p, 0);
279 break;
280
281 case COMPONENT_REF:
282 /* If the component has varying offset, it behaves like index
283 as well. */
284 idx = &TREE_OPERAND (*addr_p, 2);
285 if (*idx
286 && !cbck (*addr_p, idx, data))
287 return false;
288
289 nxt = &TREE_OPERAND (*addr_p, 0);
290 break;
291
292 case ARRAY_REF:
293 case ARRAY_RANGE_REF:
294 nxt = &TREE_OPERAND (*addr_p, 0);
295 if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
296 return false;
297 break;
298
299 case VAR_DECL:
300 case PARM_DECL:
301 case STRING_CST:
302 case RESULT_DECL:
303 case VECTOR_CST:
304 case COMPLEX_CST:
305 case INTEGER_CST:
306 case REAL_CST:
307 case FIXED_CST:
308 case CONSTRUCTOR:
309 return true;
310
311 case ADDR_EXPR:
312 gcc_assert (is_gimple_min_invariant (*addr_p));
313 return true;
314
315 case TARGET_MEM_REF:
316 idx = &TMR_BASE (*addr_p);
317 if (*idx
318 && !cbck (*addr_p, idx, data))
319 return false;
320 idx = &TMR_INDEX (*addr_p);
321 if (*idx
322 && !cbck (*addr_p, idx, data))
323 return false;
324 idx = &TMR_INDEX2 (*addr_p);
325 if (*idx
326 && !cbck (*addr_p, idx, data))
327 return false;
328 return true;
329
330 default:
331 gcc_unreachable ();
332 }
333 }
334 }
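
/* As an illustration of the walk above: for a reference like a[i].f the
   loop visits the COMPONENT_REF and then the ARRAY_REF, invoking CBCK on
   the index I, and stops at the VAR_DECL A; for a plain dereference *p_1
   (a MEM_REF) CBCK is invoked on the pointer P_1 itself.  */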
335
336 /* If it is possible to hoist the statement STMT unconditionally,
337 returns MOVE_POSSIBLE.
338 If it is possible to hoist the statement STMT, but we must avoid making
339 it executed if it would not be executed in the original program (e.g.
340 because it may trap), return MOVE_PRESERVE_EXECUTION.
341 Otherwise return MOVE_IMPOSSIBLE. */
342
343 enum move_pos
344 movement_possibility (gimple stmt)
345 {
346 tree lhs;
347 enum move_pos ret = MOVE_POSSIBLE;
348
349 if (flag_unswitch_loops
350 && gimple_code (stmt) == GIMPLE_COND)
351 {
352 /* If we perform unswitching, force the operands of the invariant
353 condition to be moved out of the loop. */
354 return MOVE_POSSIBLE;
355 }
356
357 if (gimple_code (stmt) == GIMPLE_PHI
358 && gimple_phi_num_args (stmt) <= 2
359 && is_gimple_reg (gimple_phi_result (stmt))
360 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
361 return MOVE_POSSIBLE;
362
363 if (gimple_get_lhs (stmt) == NULL_TREE)
364 return MOVE_IMPOSSIBLE;
365
366 if (gimple_vdef (stmt))
367 return MOVE_IMPOSSIBLE;
368
369 if (stmt_ends_bb_p (stmt)
370 || gimple_has_volatile_ops (stmt)
371 || gimple_has_side_effects (stmt)
372 || stmt_could_throw_p (stmt))
373 return MOVE_IMPOSSIBLE;
374
375 if (is_gimple_call (stmt))
376 {
377       /* While a pure or const call is guaranteed to have no side effects, we
378 cannot move it arbitrarily. Consider code like
379
380 char *s = something ();
381
382 while (1)
383 {
384 if (s)
385 t = strlen (s);
386 else
387 t = 0;
388 }
389
390 Here the strlen call cannot be moved out of the loop, even though
391 s is invariant. In addition to possibly creating a call with
392 invalid arguments, moving out a function call that is not executed
393 may cause performance regressions in case the call is costly and
394 not executed at all. */
395 ret = MOVE_PRESERVE_EXECUTION;
396 lhs = gimple_call_lhs (stmt);
397 }
398 else if (is_gimple_assign (stmt))
399 lhs = gimple_assign_lhs (stmt);
400 else
401 return MOVE_IMPOSSIBLE;
402
403 if (TREE_CODE (lhs) == SSA_NAME
404 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
405 return MOVE_IMPOSSIBLE;
406
407 if (TREE_CODE (lhs) != SSA_NAME
408 || gimple_could_trap_p (stmt))
409 return MOVE_PRESERVE_EXECUTION;
410
411 /* Non local loads in a transaction cannot be hoisted out. Well,
412 unless the load happens on every path out of the loop, but we
413 don't take this into account yet. */
414 if (flag_tm
415 && gimple_in_transaction (stmt)
416 && gimple_assign_single_p (stmt))
417 {
418 tree rhs = gimple_assign_rhs1 (stmt);
419 if (DECL_P (rhs) && is_global_var (rhs))
420 {
421 if (dump_file)
422 {
423 fprintf (dump_file, "Cannot hoist conditional load of ");
424 print_generic_expr (dump_file, rhs, TDF_SLIM);
425 fprintf (dump_file, " because it is in a transaction.\n");
426 }
427 return MOVE_IMPOSSIBLE;
428 }
429 }
430
431 return ret;
432 }
433
434 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
435    loop to which we could move the expression using DEF if it did not have
436    other operands, i.e. the outermost loop enclosing LOOP in which the value
437    of DEF is invariant.  */
438
439 static struct loop *
440 outermost_invariant_loop (tree def, struct loop *loop)
441 {
442 gimple def_stmt;
443 basic_block def_bb;
444 struct loop *max_loop;
445 struct lim_aux_data *lim_data;
446
447 if (!def)
448 return superloop_at_depth (loop, 1);
449
450 if (TREE_CODE (def) != SSA_NAME)
451 {
452 gcc_assert (is_gimple_min_invariant (def));
453 return superloop_at_depth (loop, 1);
454 }
455
456 def_stmt = SSA_NAME_DEF_STMT (def);
457 def_bb = gimple_bb (def_stmt);
458 if (!def_bb)
459 return superloop_at_depth (loop, 1);
460
461 max_loop = find_common_loop (loop, def_bb->loop_father);
462
463 lim_data = get_lim_data (def_stmt);
464 if (lim_data != NULL && lim_data->max_loop != NULL)
465 max_loop = find_common_loop (max_loop,
466 loop_outer (lim_data->max_loop));
467 if (max_loop == loop)
468 return NULL;
469 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
470
471 return max_loop;
472 }
473
474 /* DATA is a structure containing information associated with a statement
475 inside LOOP. DEF is one of the operands of this statement.
476
477    Find the outermost loop enclosing LOOP in which the value of DEF is invariant
478    and record this in the DATA->max_loop field.  If DEF itself is defined inside
479    this loop as well (i.e. we need to hoist it out of the loop if we want
480    to hoist the statement represented by DATA), record the statement in which
481    DEF is defined in the DATA->depends list.  Additionally, if ADD_COST is true,
482    add the cost of computing DEF to DATA->cost.
483 
484    If DEF is not invariant in LOOP, return false.  Otherwise return true.  */
485
486 static bool
487 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
488 bool add_cost)
489 {
490 gimple def_stmt = SSA_NAME_DEF_STMT (def);
491 basic_block def_bb = gimple_bb (def_stmt);
492 struct loop *max_loop;
493 struct depend *dep;
494 struct lim_aux_data *def_data;
495
496 if (!def_bb)
497 return true;
498
499 max_loop = outermost_invariant_loop (def, loop);
500 if (!max_loop)
501 return false;
502
503 if (flow_loop_nested_p (data->max_loop, max_loop))
504 data->max_loop = max_loop;
505
506 def_data = get_lim_data (def_stmt);
507 if (!def_data)
508 return true;
509
510 if (add_cost
511 /* Only add the cost if the statement defining DEF is inside LOOP,
512 i.e. if it is likely that by moving the invariants dependent
513 on it, we will be able to avoid creating a new register for
514 it (since it will be only used in these dependent invariants). */
515 && def_bb->loop_father == loop)
516 data->cost += def_data->cost;
517
518 dep = XNEW (struct depend);
519 dep->stmt = def_stmt;
520 dep->next = data->depends;
521 data->depends = dep;
522
523 return true;
524 }
525
526 /* Returns an estimate of the cost of statement STMT.  The values here
527 are just ad-hoc constants, similar to costs for inlining. */
528
529 static unsigned
530 stmt_cost (gimple stmt)
531 {
532 /* Always try to create possibilities for unswitching. */
533 if (gimple_code (stmt) == GIMPLE_COND
534 || gimple_code (stmt) == GIMPLE_PHI)
535 return LIM_EXPENSIVE;
536
537 /* We should be hoisting calls if possible. */
538 if (is_gimple_call (stmt))
539 {
540 tree fndecl;
541
542 /* Unless the call is a builtin_constant_p; this always folds to a
543 constant, so moving it is useless. */
544 fndecl = gimple_call_fndecl (stmt);
545 if (fndecl
546 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
547 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
548 return 0;
549
550 return LIM_EXPENSIVE;
551 }
552
553 /* Hoisting memory references out should almost surely be a win. */
554 if (gimple_references_memory_p (stmt))
555 return LIM_EXPENSIVE;
556
557 if (gimple_code (stmt) != GIMPLE_ASSIGN)
558 return 1;
559
560 switch (gimple_assign_rhs_code (stmt))
561 {
562 case MULT_EXPR:
563 case WIDEN_MULT_EXPR:
564 case WIDEN_MULT_PLUS_EXPR:
565 case WIDEN_MULT_MINUS_EXPR:
566 case DOT_PROD_EXPR:
567 case FMA_EXPR:
568 case TRUNC_DIV_EXPR:
569 case CEIL_DIV_EXPR:
570 case FLOOR_DIV_EXPR:
571 case ROUND_DIV_EXPR:
572 case EXACT_DIV_EXPR:
573 case CEIL_MOD_EXPR:
574 case FLOOR_MOD_EXPR:
575 case ROUND_MOD_EXPR:
576 case TRUNC_MOD_EXPR:
577 case RDIV_EXPR:
578 /* Division and multiplication are usually expensive. */
579 return LIM_EXPENSIVE;
580
581 case LSHIFT_EXPR:
582 case RSHIFT_EXPR:
583 case WIDEN_LSHIFT_EXPR:
584 case LROTATE_EXPR:
585 case RROTATE_EXPR:
586 /* Shifts and rotates are usually expensive. */
587 return LIM_EXPENSIVE;
588
589 case CONSTRUCTOR:
590 /* Make vector construction cost proportional to the number
591 of elements. */
592 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
593
594 case SSA_NAME:
595 case PAREN_EXPR:
596 /* Whether or not something is wrapped inside a PAREN_EXPR
597 should not change move cost. Nor should an intermediate
598 unpropagated SSA name copy. */
599 return 0;
600
601 default:
602 return 1;
603 }
604 }
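
/* As a rough illustration: with these costs a statement like
   x_1 = a_2 * b_3 is rated LIM_EXPENSIVE and thus gets a profitable
   hoisting level of its own (see set_profitable_level below), whereas a
   cheap statement like y_4 = a_2 + 1 (cost 1) is typically moved only
   when a more expensive statement depends on it.  */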
605
606 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
607 REF is independent. If REF is not independent in LOOP, NULL is returned
608 instead. */
609
610 static struct loop *
611 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
612 {
613 struct loop *aloop;
614
615 if (bitmap_bit_p (ref->stored, loop->num))
616 return NULL;
617
618 for (aloop = outer;
619 aloop != loop;
620 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
621 if (!bitmap_bit_p (ref->stored, aloop->num)
622 && ref_indep_loop_p (aloop, ref))
623 return aloop;
624
625 if (ref_indep_loop_p (loop, ref))
626 return loop;
627 else
628 return NULL;
629 }
630
631 /* If there is a simple load or store to a memory reference in STMT, returns
632 the location of the memory reference, and sets IS_STORE according to whether
633 it is a store or load. Otherwise, returns NULL. */
634
635 static tree *
636 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
637 {
638 tree *lhs;
639 enum tree_code code;
640
641 /* Recognize MEM = (SSA_NAME | invariant) and SSA_NAME = MEM patterns. */
642 if (gimple_code (stmt) != GIMPLE_ASSIGN)
643 return NULL;
644
645 code = gimple_assign_rhs_code (stmt);
646
647 lhs = gimple_assign_lhs_ptr (stmt);
648
649 if (TREE_CODE (*lhs) == SSA_NAME)
650 {
651 if (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
652 || !is_gimple_addressable (gimple_assign_rhs1 (stmt)))
653 return NULL;
654
655 *is_store = false;
656 return gimple_assign_rhs1_ptr (stmt);
657 }
658 else if (code == SSA_NAME
659 || (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
660 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
661 {
662 *is_store = true;
663 return lhs;
664 }
665 else
666 return NULL;
667 }
668
669 /* Returns the memory reference contained in STMT. */
670
671 static mem_ref_p
672 mem_ref_in_stmt (gimple stmt)
673 {
674 bool store;
675 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
676 hashval_t hash;
677 mem_ref_p ref;
678
679 if (!mem)
680 return NULL;
681 gcc_assert (!store);
682
683 hash = iterative_hash_expr (*mem, 0);
684 ref = (mem_ref_p) htab_find_with_hash (memory_accesses.refs, *mem, hash);
685
686 gcc_assert (ref != NULL);
687 return ref;
688 }
689
690 /* From a controlling predicate in DOM determine the arguments from
691 the PHI node PHI that are chosen if the predicate evaluates to
692 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
693 they are non-NULL. Returns true if the arguments can be determined,
694    they are non-NULL.  Returns true if the arguments can be determined,
695
696 static bool
697 extract_true_false_args_from_phi (basic_block dom, gimple phi,
698 tree *true_arg_p, tree *false_arg_p)
699 {
700 basic_block bb = gimple_bb (phi);
701 edge true_edge, false_edge, tem;
702 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
703
704 /* We have to verify that one edge into the PHI node is dominated
705 by the true edge of the predicate block and the other edge
706 dominated by the false edge. This ensures that the PHI argument
707 we are going to take is completely determined by the path we
708 take from the predicate block.
709 We can only use BB dominance checks below if the destination of
710 the true/false edges are dominated by their edge, thus only
711 have a single predecessor. */
712 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
713 tem = EDGE_PRED (bb, 0);
714 if (tem == true_edge
715 || (single_pred_p (true_edge->dest)
716 && (tem->src == true_edge->dest
717 || dominated_by_p (CDI_DOMINATORS,
718 tem->src, true_edge->dest))))
719 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
720 else if (tem == false_edge
721 || (single_pred_p (false_edge->dest)
722 && (tem->src == false_edge->dest
723 || dominated_by_p (CDI_DOMINATORS,
724 tem->src, false_edge->dest))))
725 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
726 else
727 return false;
728 tem = EDGE_PRED (bb, 1);
729 if (tem == true_edge
730 || (single_pred_p (true_edge->dest)
731 && (tem->src == true_edge->dest
732 || dominated_by_p (CDI_DOMINATORS,
733 tem->src, true_edge->dest))))
734 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
735 else if (tem == false_edge
736 || (single_pred_p (false_edge->dest)
737 && (tem->src == false_edge->dest
738 || dominated_by_p (CDI_DOMINATORS,
739 tem->src, false_edge->dest))))
740 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
741 else
742 return false;
743 if (!arg0 || !arg1)
744 return false;
745
746 if (true_arg_p)
747 *true_arg_p = arg0;
748 if (false_arg_p)
749 *false_arg_p = arg1;
750
751 return true;
752 }
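
/* For example, given the diamond

     if (cond_1)        <-- last statement of DOM
       goto bb_t;
     else
       goto bb_f;
     ...
     x_4 = PHI <a_2(bb_t), b_3(bb_f)>;   <-- PHI

   the function stores a_2 in *TRUE_ARG_P and b_3 in *FALSE_ARG_P and
   returns true.  */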
753
754 /* Determine the outermost loop to which it is possible to hoist a statement
755    STMT and store it in LIM_DATA (STMT)->max_loop.  To do this we determine
756    the outermost loop in which the value computed by STMT is invariant.
757    If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
758    we preserve whether STMT is executed.  The function also fills in other
759    related information in LIM_DATA (STMT).
760
761 The function returns false if STMT cannot be hoisted outside of the loop it
762 is defined in, and true otherwise. */
763
764 static bool
765 determine_max_movement (gimple stmt, bool must_preserve_exec)
766 {
767 basic_block bb = gimple_bb (stmt);
768 struct loop *loop = bb->loop_father;
769 struct loop *level;
770 struct lim_aux_data *lim_data = get_lim_data (stmt);
771 tree val;
772 ssa_op_iter iter;
773
774 if (must_preserve_exec)
775 level = ALWAYS_EXECUTED_IN (bb);
776 else
777 level = superloop_at_depth (loop, 1);
778 lim_data->max_loop = level;
779
780 if (gimple_code (stmt) == GIMPLE_PHI)
781 {
782 use_operand_p use_p;
783 unsigned min_cost = UINT_MAX;
784 unsigned total_cost = 0;
785 struct lim_aux_data *def_data;
786
787 /* We will end up promoting dependencies to be unconditionally
788 evaluated. For this reason the PHI cost (and thus the
789 cost we remove from the loop by doing the invariant motion)
790 is that of the cheapest PHI argument dependency chain. */
791 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
792 {
793 val = USE_FROM_PTR (use_p);
794 if (TREE_CODE (val) != SSA_NAME)
795 continue;
796 if (!add_dependency (val, lim_data, loop, false))
797 return false;
798 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
799 if (def_data)
800 {
801 min_cost = MIN (min_cost, def_data->cost);
802 total_cost += def_data->cost;
803 }
804 }
805
806 lim_data->cost += min_cost;
807
808 if (gimple_phi_num_args (stmt) > 1)
809 {
810 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
811 gimple cond;
812 if (gsi_end_p (gsi_last_bb (dom)))
813 return false;
814 cond = gsi_stmt (gsi_last_bb (dom));
815 if (gimple_code (cond) != GIMPLE_COND)
816 return false;
817 /* Verify that this is an extended form of a diamond and
818 the PHI arguments are completely controlled by the
819 predicate in DOM. */
820 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
821 return false;
822
823 /* Fold in dependencies and cost of the condition. */
824 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
825 {
826 if (!add_dependency (val, lim_data, loop, false))
827 return false;
828 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
829 if (def_data)
830 total_cost += def_data->cost;
831 }
832
833 /* We want to avoid unconditionally executing very expensive
834 operations. As costs for our dependencies cannot be
835 	     negative, just claim we are not invariant in this case.
836 We also are not sure whether the control-flow inside the
837 loop will vanish. */
838 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
839 && !(min_cost != 0
840 && total_cost / min_cost <= 2))
841 return false;
842
843 /* Assume that the control-flow in the loop will vanish.
844 ??? We should verify this and not artificially increase
845 the cost if that is not the case. */
846 lim_data->cost += stmt_cost (stmt);
847 }
848
849 return true;
850 }
851 else
852 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
853 if (!add_dependency (val, lim_data, loop, true))
854 return false;
855
856 if (gimple_vuse (stmt))
857 {
858 mem_ref_p ref = mem_ref_in_stmt (stmt);
859
860 if (ref)
861 {
862 lim_data->max_loop
863 = outermost_indep_loop (lim_data->max_loop, loop, ref);
864 if (!lim_data->max_loop)
865 return false;
866 }
867 else
868 {
869 if ((val = gimple_vuse (stmt)) != NULL_TREE)
870 {
871 if (!add_dependency (val, lim_data, loop, false))
872 return false;
873 }
874 }
875 }
876
877 lim_data->cost += stmt_cost (stmt);
878
879 return true;
880 }
881
882 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
883 and that one of the operands of this statement is computed by STMT.
884 Ensure that STMT (together with all the statements that define its
885 operands) is hoisted at least out of the loop LEVEL. */
886
887 static void
888 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
889 {
890 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
891 struct depend *dep;
892 struct lim_aux_data *lim_data;
893
894 stmt_loop = find_common_loop (orig_loop, stmt_loop);
895 lim_data = get_lim_data (stmt);
896 if (lim_data != NULL && lim_data->tgt_loop != NULL)
897 stmt_loop = find_common_loop (stmt_loop,
898 loop_outer (lim_data->tgt_loop));
899 if (flow_loop_nested_p (stmt_loop, level))
900 return;
901
902 gcc_assert (level == lim_data->max_loop
903 || flow_loop_nested_p (lim_data->max_loop, level));
904
905 lim_data->tgt_loop = level;
906 for (dep = lim_data->depends; dep; dep = dep->next)
907 set_level (dep->stmt, orig_loop, level);
908 }
909
910 /* Determines the outermost loop from which we want to hoist the statement STMT.
911    For now we choose the outermost possible loop.  TODO -- use profiling
912 information to set it more sanely. */
913
914 static void
915 set_profitable_level (gimple stmt)
916 {
917 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
918 }
919
920 /* Returns true if STMT is a call that has side effects. */
921
922 static bool
923 nonpure_call_p (gimple stmt)
924 {
925 if (gimple_code (stmt) != GIMPLE_CALL)
926 return false;
927
928 return gimple_has_side_effects (stmt);
929 }
930
931 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
932
933 static gimple
934 rewrite_reciprocal (gimple_stmt_iterator *bsi)
935 {
936 gimple stmt, stmt1, stmt2;
937 tree var, name, lhs, type;
938 tree real_one;
939 gimple_stmt_iterator gsi;
940
941 stmt = gsi_stmt (*bsi);
942 lhs = gimple_assign_lhs (stmt);
943 type = TREE_TYPE (lhs);
944
945 var = create_tmp_reg (type, "reciptmp");
946
947 real_one = build_one_cst (type);
948
949 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR,
950 var, real_one, gimple_assign_rhs2 (stmt));
951 name = make_ssa_name (var, stmt1);
952 gimple_assign_set_lhs (stmt1, name);
953
954 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
955 gimple_assign_rhs1 (stmt));
956
957 /* Replace division stmt with reciprocal and multiply stmts.
958 The multiply stmt is not invariant, so update iterator
959 and avoid rescanning. */
960 gsi = *bsi;
961 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
962 gsi_replace (&gsi, stmt2, true);
963
964 /* Continue processing with invariant reciprocal statement. */
965 return stmt1;
966 }
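
/* For example, a division x_1 = a_2 / b_3 inside the loop, with b_3
   invariant, is rewritten above into

     reciptmp_4 = 1.0 / b_3;
     x_1 = a_2 * reciptmp_4;

   and the (invariant) reciprocal statement is returned so that the caller
   can hoist it out of the loop.  */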
967
968 /* Check if the pattern at *BSI is a bittest of the form
969 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
970
971 static gimple
972 rewrite_bittest (gimple_stmt_iterator *bsi)
973 {
974 gimple stmt, use_stmt, stmt1, stmt2;
975 tree lhs, var, name, t, a, b;
976 use_operand_p use;
977
978 stmt = gsi_stmt (*bsi);
979 lhs = gimple_assign_lhs (stmt);
980
981 /* Verify that the single use of lhs is a comparison against zero. */
982 if (TREE_CODE (lhs) != SSA_NAME
983 || !single_imm_use (lhs, &use, &use_stmt)
984 || gimple_code (use_stmt) != GIMPLE_COND)
985 return stmt;
986 if (gimple_cond_lhs (use_stmt) != lhs
987 || (gimple_cond_code (use_stmt) != NE_EXPR
988 && gimple_cond_code (use_stmt) != EQ_EXPR)
989 || !integer_zerop (gimple_cond_rhs (use_stmt)))
990 return stmt;
991
992 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
993 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
994 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
995 return stmt;
996
997   /* There may be a conversion in between, possibly inserted by fold.  */
998 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
999 {
1000 t = gimple_assign_rhs1 (stmt1);
1001 if (TREE_CODE (t) != SSA_NAME
1002 || !has_single_use (t))
1003 return stmt;
1004 stmt1 = SSA_NAME_DEF_STMT (t);
1005 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
1006 return stmt;
1007 }
1008
1009 /* Verify that B is loop invariant but A is not. Verify that with
1010 all the stmt walking we are still in the same loop. */
1011 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
1012 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
1013 return stmt;
1014
1015 a = gimple_assign_rhs1 (stmt1);
1016 b = gimple_assign_rhs2 (stmt1);
1017
1018 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
1019 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
1020 {
1021 gimple_stmt_iterator rsi;
1022
1023 /* 1 << B */
1024 var = create_tmp_var (TREE_TYPE (a), "shifttmp");
1025 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
1026 build_int_cst (TREE_TYPE (a), 1), b);
1027 stmt1 = gimple_build_assign (var, t);
1028 name = make_ssa_name (var, stmt1);
1029 gimple_assign_set_lhs (stmt1, name);
1030
1031 /* A & (1 << B) */
1032 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
1033 stmt2 = gimple_build_assign (var, t);
1034 name = make_ssa_name (var, stmt2);
1035 gimple_assign_set_lhs (stmt2, name);
1036
1037 /* Replace the SSA_NAME we compare against zero. Adjust
1038 the type of zero accordingly. */
1039 SET_USE (use, name);
1040 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
1041
1042 /* Don't use gsi_replace here, none of the new assignments sets
1043 the variable originally set in stmt. Move bsi to stmt1, and
1044 then remove the original stmt, so that we get a chance to
1045 retain debug info for it. */
1046 rsi = *bsi;
1047 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
1048 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
1049 gsi_remove (&rsi, true);
1050
1051 return stmt1;
1052 }
1053
1054 return stmt;
1055 }
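
/* For example, a loop test of the form

     tmp_1 = a_2 >> b_3;
     tmp_4 = tmp_1 & 1;
     if (tmp_4 != 0) ...

   with b_3 invariant but a_2 varying is rewritten above into

     shifttmp_5 = 1 << b_3;
     tmp_6 = a_2 & shifttmp_5;
     if (tmp_6 != 0) ...

   so that the mask computation 1 << b_3 becomes hoistable.  */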
1056
1057
1058 /* Determine the outermost loops in which statements in basic block BB are
1059    invariant, and record them in the LIM_DATA associated with the statements.
1060 Callback for walk_dominator_tree. */
1061
1062 static void
1063 determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
1064 basic_block bb)
1065 {
1066 enum move_pos pos;
1067 gimple_stmt_iterator bsi;
1068 gimple stmt;
1069 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
1070 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
1071 struct lim_aux_data *lim_data;
1072
1073 if (!loop_outer (bb->loop_father))
1074 return;
1075
1076 if (dump_file && (dump_flags & TDF_DETAILS))
1077 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1078 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1079
1080   /* Look at PHI nodes, but only if there are at most two.
1081 ??? We could relax this further by post-processing the inserted
1082 code and transforming adjacent cond-exprs with the same predicate
1083 to control flow again. */
1084 bsi = gsi_start_phis (bb);
1085 if (!gsi_end_p (bsi)
1086 && ((gsi_next (&bsi), gsi_end_p (bsi))
1087 || (gsi_next (&bsi), gsi_end_p (bsi))))
1088 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1089 {
1090 stmt = gsi_stmt (bsi);
1091
1092 pos = movement_possibility (stmt);
1093 if (pos == MOVE_IMPOSSIBLE)
1094 continue;
1095
1096 lim_data = init_lim_data (stmt);
1097 lim_data->always_executed_in = outermost;
1098
1099 if (!determine_max_movement (stmt, false))
1100 {
1101 lim_data->max_loop = NULL;
1102 continue;
1103 }
1104
1105 if (dump_file && (dump_flags & TDF_DETAILS))
1106 {
1107 print_gimple_stmt (dump_file, stmt, 2, 0);
1108 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1109 loop_depth (lim_data->max_loop),
1110 lim_data->cost);
1111 }
1112
1113 if (lim_data->cost >= LIM_EXPENSIVE)
1114 set_profitable_level (stmt);
1115 }
1116
1117 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1118 {
1119 stmt = gsi_stmt (bsi);
1120
1121 pos = movement_possibility (stmt);
1122 if (pos == MOVE_IMPOSSIBLE)
1123 {
1124 if (nonpure_call_p (stmt))
1125 {
1126 maybe_never = true;
1127 outermost = NULL;
1128 }
1129 /* Make sure to note always_executed_in for stores to make
1130 store-motion work. */
1131 else if (stmt_makes_single_store (stmt))
1132 {
1133 struct lim_aux_data *lim_data = init_lim_data (stmt);
1134 lim_data->always_executed_in = outermost;
1135 }
1136 continue;
1137 }
1138
1139 if (is_gimple_assign (stmt)
1140 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1141 == GIMPLE_BINARY_RHS))
1142 {
1143 tree op0 = gimple_assign_rhs1 (stmt);
1144 tree op1 = gimple_assign_rhs2 (stmt);
1145 struct loop *ol1 = outermost_invariant_loop (op1,
1146 loop_containing_stmt (stmt));
1147
1148 /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1149 to be hoisted out of loop, saving expensive divide. */
1150 if (pos == MOVE_POSSIBLE
1151 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1152 && flag_unsafe_math_optimizations
1153 && !flag_trapping_math
1154 && ol1 != NULL
1155 && outermost_invariant_loop (op0, ol1) == NULL)
1156 stmt = rewrite_reciprocal (&bsi);
1157
1158 /* If the shift count is invariant, convert (A >> B) & 1 to
1159 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1160 saving an expensive shift. */
1161 if (pos == MOVE_POSSIBLE
1162 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1163 && integer_onep (op1)
1164 && TREE_CODE (op0) == SSA_NAME
1165 && has_single_use (op0))
1166 stmt = rewrite_bittest (&bsi);
1167 }
1168
1169 lim_data = init_lim_data (stmt);
1170 lim_data->always_executed_in = outermost;
1171
1172 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1173 continue;
1174
1175 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1176 {
1177 lim_data->max_loop = NULL;
1178 continue;
1179 }
1180
1181 if (dump_file && (dump_flags & TDF_DETAILS))
1182 {
1183 print_gimple_stmt (dump_file, stmt, 2, 0);
1184 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1185 loop_depth (lim_data->max_loop),
1186 lim_data->cost);
1187 }
1188
1189 if (lim_data->cost >= LIM_EXPENSIVE)
1190 set_profitable_level (stmt);
1191 }
1192 }
1193
1194 /* For each statement, determine the outermost loop in which it is invariant,
1195    the statements on whose motion it depends, and the cost of the computation.
1196    This information is stored in the LIM_DATA structure associated with
1197 each statement. */
1198
1199 static void
1200 determine_invariantness (void)
1201 {
1202 struct dom_walk_data walk_data;
1203
1204 memset (&walk_data, 0, sizeof (struct dom_walk_data));
1205 walk_data.dom_direction = CDI_DOMINATORS;
1206 walk_data.before_dom_children = determine_invariantness_stmt;
1207
1208 init_walk_dominator_tree (&walk_data);
1209 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1210 fini_walk_dominator_tree (&walk_data);
1211 }
1212
1213 /* Hoist the statements in basic block BB out of the loops prescribed by
1214 data stored in LIM_DATA structures associated with each statement. Callback
1215 for walk_dominator_tree. */
1216
1217 static void
1218 move_computations_stmt (struct dom_walk_data *dw_data,
1219 basic_block bb)
1220 {
1221 struct loop *level;
1222 gimple_stmt_iterator bsi;
1223 gimple stmt;
1224 unsigned cost = 0;
1225 struct lim_aux_data *lim_data;
1226
1227 if (!loop_outer (bb->loop_father))
1228 return;
1229
1230 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1231 {
1232 gimple new_stmt;
1233 stmt = gsi_stmt (bsi);
1234
1235 lim_data = get_lim_data (stmt);
1236 if (lim_data == NULL)
1237 {
1238 gsi_next (&bsi);
1239 continue;
1240 }
1241
1242 cost = lim_data->cost;
1243 level = lim_data->tgt_loop;
1244 clear_lim_data (stmt);
1245
1246 if (!level)
1247 {
1248 gsi_next (&bsi);
1249 continue;
1250 }
1251
1252 if (dump_file && (dump_flags & TDF_DETAILS))
1253 {
1254 fprintf (dump_file, "Moving PHI node\n");
1255 print_gimple_stmt (dump_file, stmt, 0, 0);
1256 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1257 cost, level->num);
1258 }
1259
1260 if (gimple_phi_num_args (stmt) == 1)
1261 {
1262 tree arg = PHI_ARG_DEF (stmt, 0);
1263 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1264 gimple_phi_result (stmt),
1265 arg, NULL_TREE);
1266 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1267 }
1268 else
1269 {
1270 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1271 gimple cond = gsi_stmt (gsi_last_bb (dom));
1272 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1273 /* Get the PHI arguments corresponding to the true and false
1274 edges of COND. */
1275 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1276 gcc_assert (arg0 && arg1);
1277 t = build2 (gimple_cond_code (cond), boolean_type_node,
1278 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1279 new_stmt = gimple_build_assign_with_ops3 (COND_EXPR,
1280 gimple_phi_result (stmt),
1281 t, arg0, arg1);
1282 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1283 *((unsigned int *)(dw_data->global_data)) |= TODO_cleanup_cfg;
1284 }
1285 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1286 remove_phi_node (&bsi, false);
1287 }
1288
1289 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1290 {
1291 edge e;
1292
1293 stmt = gsi_stmt (bsi);
1294
1295 lim_data = get_lim_data (stmt);
1296 if (lim_data == NULL)
1297 {
1298 gsi_next (&bsi);
1299 continue;
1300 }
1301
1302 cost = lim_data->cost;
1303 level = lim_data->tgt_loop;
1304 clear_lim_data (stmt);
1305
1306 if (!level)
1307 {
1308 gsi_next (&bsi);
1309 continue;
1310 }
1311
1312 /* We do not really want to move conditionals out of the loop; we just
1313 	 placed them here to force their operands to be moved if necessary.  */
1314 if (gimple_code (stmt) == GIMPLE_COND)
1315 continue;
1316
1317 if (dump_file && (dump_flags & TDF_DETAILS))
1318 {
1319 fprintf (dump_file, "Moving statement\n");
1320 print_gimple_stmt (dump_file, stmt, 0, 0);
1321 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1322 cost, level->num);
1323 }
1324
1325 e = loop_preheader_edge (level);
1326 gcc_assert (!gimple_vdef (stmt));
1327 if (gimple_vuse (stmt))
1328 {
1329 /* The new VUSE is the one from the virtual PHI in the loop
1330 header or the one already present. */
1331 gimple_stmt_iterator gsi2;
1332 for (gsi2 = gsi_start_phis (e->dest);
1333 !gsi_end_p (gsi2); gsi_next (&gsi2))
1334 {
1335 gimple phi = gsi_stmt (gsi2);
1336 if (!is_gimple_reg (gimple_phi_result (phi)))
1337 {
1338 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1339 break;
1340 }
1341 }
1342 }
1343 gsi_remove (&bsi, false);
1344 gsi_insert_on_edge (e, stmt);
1345 }
1346 }
1347
1348 /* Hoist the statements out of the loops prescribed by data stored in
1349    LIM_DATA structures associated with each statement.  */
1350
1351 static unsigned int
1352 move_computations (void)
1353 {
1354 struct dom_walk_data walk_data;
1355 unsigned int todo = 0;
1356
1357 memset (&walk_data, 0, sizeof (struct dom_walk_data));
1358 walk_data.global_data = &todo;
1359 walk_data.dom_direction = CDI_DOMINATORS;
1360 walk_data.before_dom_children = move_computations_stmt;
1361
1362 init_walk_dominator_tree (&walk_data);
1363 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1364 fini_walk_dominator_tree (&walk_data);
1365
1366 gsi_commit_edge_inserts ();
1367 if (need_ssa_update_p (cfun))
1368 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1369
1370 return todo;
1371 }
1372
1373 /* Checks whether the statement defining variable *INDEX can be hoisted
1374 out of the loop passed in DATA. Callback for for_each_index. */
1375
1376 static bool
1377 may_move_till (tree ref, tree *index, void *data)
1378 {
1379 struct loop *loop = (struct loop *) data, *max_loop;
1380
1381 /* If REF is an array reference, check also that the step and the lower
1382 bound is invariant in LOOP. */
1383 if (TREE_CODE (ref) == ARRAY_REF)
1384 {
1385 tree step = TREE_OPERAND (ref, 3);
1386 tree lbound = TREE_OPERAND (ref, 2);
1387
1388 max_loop = outermost_invariant_loop (step, loop);
1389 if (!max_loop)
1390 return false;
1391
1392 max_loop = outermost_invariant_loop (lbound, loop);
1393 if (!max_loop)
1394 return false;
1395 }
1396
1397 max_loop = outermost_invariant_loop (*index, loop);
1398 if (!max_loop)
1399 return false;
1400
1401 return true;
1402 }
1403
1404 /* If OP is an SSA name, force the statement that defines it to be
1405    moved out of LOOP.  ORIG_LOOP is the loop in which OP is used.  */
1406
1407 static void
1408 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1409 {
1410 gimple stmt;
1411
1412 if (!op
1413 || is_gimple_min_invariant (op))
1414 return;
1415
1416 gcc_assert (TREE_CODE (op) == SSA_NAME);
1417
1418 stmt = SSA_NAME_DEF_STMT (op);
1419 if (gimple_nop_p (stmt))
1420 return;
1421
1422 set_level (stmt, orig_loop, loop);
1423 }
1424
1425 /* Forces statements defining invariants in REF (and *INDEX) to be moved out of
1426    LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback for
1427 for_each_index. */
1428
1429 struct fmt_data
1430 {
1431 struct loop *loop;
1432 struct loop *orig_loop;
1433 };
1434
1435 static bool
1436 force_move_till (tree ref, tree *index, void *data)
1437 {
1438 struct fmt_data *fmt_data = (struct fmt_data *) data;
1439
1440 if (TREE_CODE (ref) == ARRAY_REF)
1441 {
1442 tree step = TREE_OPERAND (ref, 3);
1443 tree lbound = TREE_OPERAND (ref, 2);
1444
1445 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1446 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1447 }
1448
1449 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1450
1451 return true;
1452 }
1453
1454 /* A hash function for struct mem_ref object OBJ. */
1455
1456 static hashval_t
1457 memref_hash (const void *obj)
1458 {
1459 const struct mem_ref *const mem = (const struct mem_ref *) obj;
1460
1461 return mem->hash;
1462 }
1463
1464 /* An equality function for struct mem_ref object OBJ1 with
1465 memory reference OBJ2. */
1466
1467 static int
1468 memref_eq (const void *obj1, const void *obj2)
1469 {
1470 const struct mem_ref *const mem1 = (const struct mem_ref *) obj1;
1471
1472 return operand_equal_p (mem1->mem, (const_tree) obj2, 0);
1473 }
1474
1475 /* Releases list of memory reference locations ACCS. */
1476
1477 static void
1478 free_mem_ref_locs (mem_ref_locs_p accs)
1479 {
1480 unsigned i;
1481 mem_ref_loc_p loc;
1482
1483 if (!accs)
1484 return;
1485
1486 FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
1487 free (loc);
1488 VEC_free (mem_ref_loc_p, heap, accs->locs);
1489 free (accs);
1490 }
1491
1492 /* A function to free the mem_ref object OBJ. */
1493
1494 static void
1495 memref_free (void *obj)
1496 {
1497 struct mem_ref *const mem = (struct mem_ref *) obj;
1498 unsigned i;
1499 mem_ref_locs_p accs;
1500
1501 BITMAP_FREE (mem->stored);
1502 BITMAP_FREE (mem->indep_loop);
1503 BITMAP_FREE (mem->dep_loop);
1504 BITMAP_FREE (mem->indep_ref);
1505 BITMAP_FREE (mem->dep_ref);
1506
1507 FOR_EACH_VEC_ELT (mem_ref_locs_p, mem->accesses_in_loop, i, accs)
1508 free_mem_ref_locs (accs);
1509 VEC_free (mem_ref_locs_p, heap, mem->accesses_in_loop);
1510
1511 free (mem);
1512 }
1513
1514 /* Allocates and returns a memory reference description for MEM whose hash
1515 value is HASH and id is ID. */
1516
1517 static mem_ref_p
1518 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1519 {
1520 mem_ref_p ref = XNEW (struct mem_ref);
1521 ref->mem = mem;
1522 ref->id = id;
1523 ref->hash = hash;
1524 ref->stored = BITMAP_ALLOC (NULL);
1525 ref->indep_loop = BITMAP_ALLOC (NULL);
1526 ref->dep_loop = BITMAP_ALLOC (NULL);
1527 ref->indep_ref = BITMAP_ALLOC (NULL);
1528 ref->dep_ref = BITMAP_ALLOC (NULL);
1529 ref->accesses_in_loop = NULL;
1530
1531 return ref;
1532 }
1533
1534 /* Allocates and returns the new list of locations. */
1535
1536 static mem_ref_locs_p
1537 mem_ref_locs_alloc (void)
1538 {
1539 mem_ref_locs_p accs = XNEW (struct mem_ref_locs);
1540 accs->locs = NULL;
1541 return accs;
1542 }
1543
1544 /* Records memory reference location *LOC in LOOP to the memory reference
1545 description REF. The reference occurs in statement STMT. */
1546
1547 static void
1548 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1549 {
1550 mem_ref_loc_p aref = XNEW (struct mem_ref_loc);
1551 mem_ref_locs_p accs;
1552 bitmap ril = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1553
1554 if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
1555 <= (unsigned) loop->num)
1556 VEC_safe_grow_cleared (mem_ref_locs_p, heap, ref->accesses_in_loop,
1557 loop->num + 1);
1558 accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
1559 if (!accs)
1560 {
1561 accs = mem_ref_locs_alloc ();
1562 VEC_replace (mem_ref_locs_p, ref->accesses_in_loop, loop->num, accs);
1563 }
1564
1565 aref->stmt = stmt;
1566 aref->ref = loc;
1567
1568 VEC_safe_push (mem_ref_loc_p, heap, accs->locs, aref);
1569 bitmap_set_bit (ril, ref->id);
1570 }
1571
1572 /* Marks reference REF as stored in LOOP. */
1573
1574 static void
1575 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1576 {
1577 for (;
1578 loop != current_loops->tree_root
1579 && !bitmap_bit_p (ref->stored, loop->num);
1580 loop = loop_outer (loop))
1581 bitmap_set_bit (ref->stored, loop->num);
1582 }
1583
1584 /* Gathers memory references in statement STMT in LOOP, storing the
1585 information about them in the memory_accesses structure. Marks
1586 the vops accessed through unrecognized statements there as
1587 well. */
1588
1589 static void
1590 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1591 {
1592 tree *mem = NULL;
1593 hashval_t hash;
1594 PTR *slot;
1595 mem_ref_p ref;
1596 bool is_stored;
1597 unsigned id;
1598
1599 if (!gimple_vuse (stmt))
1600 return;
1601
1602 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1603 if (!mem)
1604 {
1605 id = VEC_length (mem_ref_p, memory_accesses.refs_list);
1606 ref = mem_ref_alloc (error_mark_node, 0, id);
1607 VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
1608 if (dump_file && (dump_flags & TDF_DETAILS))
1609 {
1610 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1611 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1612 }
1613 if (gimple_vdef (stmt))
1614 mark_ref_stored (ref, loop);
1615 record_mem_ref_loc (ref, loop, stmt, mem);
1616 return;
1617 }
1618
1619 hash = iterative_hash_expr (*mem, 0);
1620 slot = htab_find_slot_with_hash (memory_accesses.refs, *mem, hash, INSERT);
1621
1622 if (*slot)
1623 {
1624 ref = (mem_ref_p) *slot;
1625 id = ref->id;
1626 }
1627 else
1628 {
1629 id = VEC_length (mem_ref_p, memory_accesses.refs_list);
1630 ref = mem_ref_alloc (*mem, hash, id);
1631 VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
1632 *slot = ref;
1633
1634 if (dump_file && (dump_flags & TDF_DETAILS))
1635 {
1636 fprintf (dump_file, "Memory reference %u: ", id);
1637 print_generic_expr (dump_file, ref->mem, TDF_SLIM);
1638 fprintf (dump_file, "\n");
1639 }
1640 }
1641
1642 if (is_stored)
1643 mark_ref_stored (ref, loop);
1644
1645 record_mem_ref_loc (ref, loop, stmt, mem);
1646 return;
1647 }
1648
1649 /* Gathers memory references in loops. */
1650
1651 static void
1652 gather_mem_refs_in_loops (void)
1653 {
1654 gimple_stmt_iterator bsi;
1655 basic_block bb;
1656 struct loop *loop;
1657 loop_iterator li;
1658 bitmap lrefs, alrefs, alrefso;
1659
1660 FOR_EACH_BB (bb)
1661 {
1662 loop = bb->loop_father;
1663 if (loop == current_loops->tree_root)
1664 continue;
1665
1666 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1667 gather_mem_refs_stmt (loop, gsi_stmt (bsi));
1668 }
1669
1670 /* Propagate the information about accessed memory references up
1671 the loop hierarchy. */
1672 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1673 {
1674 lrefs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1675 alrefs = VEC_index (bitmap, memory_accesses.all_refs_in_loop, loop->num);
1676 bitmap_ior_into (alrefs, lrefs);
1677
1678 if (loop_outer (loop) == current_loops->tree_root)
1679 continue;
1680
1681 alrefso = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
1682 loop_outer (loop)->num);
1683 bitmap_ior_into (alrefso, alrefs);
1684 }
1685 }
1686
1687 /* Create a mapping from virtual operands to references that touch them
1688 in LOOP. */
1689
1690 static void
1691 create_vop_ref_mapping_loop (struct loop *loop)
1692 {
1693 bitmap refs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1694 struct loop *sloop;
1695 bitmap_iterator bi;
1696 unsigned i;
1697 mem_ref_p ref;
1698
1699 EXECUTE_IF_SET_IN_BITMAP (refs, 0, i, bi)
1700 {
1701 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
1702 for (sloop = loop; sloop != current_loops->tree_root;
1703 sloop = loop_outer (sloop))
1704 if (bitmap_bit_p (ref->stored, loop->num))
1705 {
1706 bitmap refs_stored
1707 = VEC_index (bitmap, memory_accesses.all_refs_stored_in_loop,
1708 sloop->num);
1709 bitmap_set_bit (refs_stored, ref->id);
1710 }
1711 }
1712 }
1713
1714 /* For each non-clobbered virtual operand and each loop, record the memory
1715 references in this loop that touch the operand. */
1716
1717 static void
1718 create_vop_ref_mapping (void)
1719 {
1720 loop_iterator li;
1721 struct loop *loop;
1722
1723 FOR_EACH_LOOP (li, loop, 0)
1724 {
1725 create_vop_ref_mapping_loop (loop);
1726 }
1727 }
1728
1729 /* Gathers information about memory accesses in the loops. */
1730
1731 static void
1732 analyze_memory_references (void)
1733 {
1734 unsigned i;
1735 bitmap empty;
1736
1737 memory_accesses.refs
1738 = htab_create (100, memref_hash, memref_eq, memref_free);
1739 memory_accesses.refs_list = NULL;
1740 memory_accesses.refs_in_loop = VEC_alloc (bitmap, heap,
1741 number_of_loops ());
1742 memory_accesses.all_refs_in_loop = VEC_alloc (bitmap, heap,
1743 number_of_loops ());
1744 memory_accesses.all_refs_stored_in_loop = VEC_alloc (bitmap, heap,
1745 number_of_loops ());
1746
1747 for (i = 0; i < number_of_loops (); i++)
1748 {
1749 empty = BITMAP_ALLOC (NULL);
1750 VEC_quick_push (bitmap, memory_accesses.refs_in_loop, empty);
1751 empty = BITMAP_ALLOC (NULL);
1752 VEC_quick_push (bitmap, memory_accesses.all_refs_in_loop, empty);
1753 empty = BITMAP_ALLOC (NULL);
1754 VEC_quick_push (bitmap, memory_accesses.all_refs_stored_in_loop, empty);
1755 }
1756
1757 memory_accesses.ttae_cache = NULL;
1758
1759 gather_mem_refs_in_loops ();
1760 create_vop_ref_mapping ();
1761 }
1762
1763 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1764 tree_to_aff_combination_expand. */
1765
1766 static bool
1767 mem_refs_may_alias_p (tree mem1, tree mem2, struct pointer_map_t **ttae_cache)
1768 {
1769 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1770      object and their offsets differ in such a way that the locations cannot
1771 overlap, then they cannot alias. */
1772 double_int size1, size2;
1773 aff_tree off1, off2;
1774
1775 /* Perform basic offset and type-based disambiguation. */
1776 if (!refs_may_alias_p (mem1, mem2))
1777 return false;
1778
1779 /* The expansion of addresses may be a bit expensive, thus we only do
1780 the check at -O2 and higher optimization levels. */
1781 if (optimize < 2)
1782 return true;
1783
1784 get_inner_reference_aff (mem1, &off1, &size1);
1785 get_inner_reference_aff (mem2, &off2, &size2);
1786 aff_combination_expand (&off1, ttae_cache);
1787 aff_combination_expand (&off2, ttae_cache);
1788 aff_combination_scale (&off1, double_int_minus_one);
1789 aff_combination_add (&off2, &off1);
1790
1791 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1792 return false;
1793
1794 return true;
1795 }
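
/* As an illustrative sketch of the BASE + OFFSET reasoning: accesses
   a[i_1] and a[i_1 + 2] share the same base, and their expanded affine
   offsets differ by two whole elements, so the check above can conclude
   that the locations cannot overlap even when type-based disambiguation
   cannot tell them apart.  */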
1796
1797 /* Rewrites the reference at location LOC to TMP_VAR.  */
1798
1799 static void
1800 rewrite_mem_ref_loc (mem_ref_loc_p loc, tree tmp_var)
1801 {
1802 *loc->ref = tmp_var;
1803 update_stmt (loc->stmt);
1804 }
1805
1806 /* Adds all locations of REF in LOOP and its subloops to LOCS. */
1807
1808 static void
1809 get_all_locs_in_loop (struct loop *loop, mem_ref_p ref,
1810 VEC (mem_ref_loc_p, heap) **locs)
1811 {
1812 mem_ref_locs_p accs;
1813 unsigned i;
1814 mem_ref_loc_p loc;
1815 bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
1816 loop->num);
1817 struct loop *subloop;
1818
1819 if (!bitmap_bit_p (refs, ref->id))
1820 return;
1821
1822 if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
1823 > (unsigned) loop->num)
1824 {
1825 accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
1826 if (accs)
1827 {
1828 FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
1829 VEC_safe_push (mem_ref_loc_p, heap, *locs, loc);
1830 }
1831 }
1832
1833 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1834 get_all_locs_in_loop (subloop, ref, locs);
1835 }
1836
1837 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1838
1839 static void
1840 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1841 {
1842 unsigned i;
1843 mem_ref_loc_p loc;
1844 VEC (mem_ref_loc_p, heap) *locs = NULL;
1845
1846 get_all_locs_in_loop (loop, ref, &locs);
1847 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
1848 rewrite_mem_ref_loc (loc, tmp_var);
1849 VEC_free (mem_ref_loc_p, heap, locs);
1850 }
1851
1852 /* The name and the length of the currently generated variable
1853 for lsm. */
1854 #define MAX_LSM_NAME_LENGTH 40
1855 static char lsm_tmp_name[MAX_LSM_NAME_LENGTH + 1];
1856 static int lsm_tmp_name_length;
1857
1858 /* Adds S to lsm_tmp_name. */
1859
1860 static void
1861 lsm_tmp_name_add (const char *s)
1862 {
1863 int l = strlen (s) + lsm_tmp_name_length;
1864 if (l > MAX_LSM_NAME_LENGTH)
1865 return;
1866
1867 strcpy (lsm_tmp_name + lsm_tmp_name_length, s);
1868 lsm_tmp_name_length = l;
1869 }
1870
1871 /* Stores the name for the temporary variable that replaces REF into
1872    lsm_tmp_name.  */
1873
1874 static void
1875 gen_lsm_tmp_name (tree ref)
1876 {
1877 const char *name;
1878
1879 switch (TREE_CODE (ref))
1880 {
1881 case MEM_REF:
1882 case TARGET_MEM_REF:
1883 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1884 lsm_tmp_name_add ("_");
1885 break;
1886
1887 case ADDR_EXPR:
1888 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1889 break;
1890
1891 case BIT_FIELD_REF:
1892 case VIEW_CONVERT_EXPR:
1893 case ARRAY_RANGE_REF:
1894 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1895 break;
1896
1897 case REALPART_EXPR:
1898 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1899 lsm_tmp_name_add ("_RE");
1900 break;
1901
1902 case IMAGPART_EXPR:
1903 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1904 lsm_tmp_name_add ("_IM");
1905 break;
1906
1907 case COMPONENT_REF:
1908 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1909 lsm_tmp_name_add ("_");
1910 name = get_name (TREE_OPERAND (ref, 1));
1911 if (!name)
1912 name = "F";
1913 lsm_tmp_name_add (name);
1914 break;
1915
1916 case ARRAY_REF:
1917 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1918 lsm_tmp_name_add ("_I");
1919 break;
1920
1921 case SSA_NAME:
1922 ref = SSA_NAME_VAR (ref);
1923 /* Fallthru. */
1924
1925 case VAR_DECL:
1926 case PARM_DECL:
1927 name = get_name (ref);
1928 if (!name)
1929 name = "D";
1930 lsm_tmp_name_add (name);
1931 break;
1932
1933 case STRING_CST:
1934 lsm_tmp_name_add ("S");
1935 break;
1936
1937 case RESULT_DECL:
1938 lsm_tmp_name_add ("R");
1939 break;
1940
1941 case INTEGER_CST:
1942 /* Nothing. */
1943 break;
1944
1945 default:
1946 gcc_unreachable ();
1947 }
1948 }
1949
1950 /* Determines the name for the temporary variable that replaces REF.
1951 The name is accumulated into the lsm_tmp_name variable.
1952 N is appended to the name of the temporary when it is below 10. */
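/* For instance, a reference like p->f typically yields a name along the
   lines of "p__f_lsm"; the exact spelling is illustrative only and depends
   on the names that get_name returns for the individual components. */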
1953
1954 char *
1955 get_lsm_tmp_name (tree ref, unsigned n)
1956 {
1957 char ns[2];
1958
1959 lsm_tmp_name_length = 0;
1960 gen_lsm_tmp_name (ref);
1961 lsm_tmp_name_add ("_lsm");
1962 if (n < 10)
1963 {
1964 ns[0] = '0' + n;
1965 ns[1] = 0;
1966 lsm_tmp_name_add (ns);
1967 }
1968 return lsm_tmp_name;
1969 }
1970
1971 struct prev_flag_edges {
1972 /* Edge to insert new flag comparison code. */
1973 edge append_cond_position;
1974
1975 /* Edge for fall through from previous flag comparison. */
1976 edge last_cond_fallthru;
1977 };
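/* When several references are store-motioned out through the same exit
   edge, the flag tests are chained on that edge in the original store
   order, schematically:

     if (lsm_flag_1)
       MEM1 = lsm_1;
     if (lsm_flag_2)
       MEM2 = lsm_2;

   APPEND_COND_POSITION and LAST_COND_FALLTHRU remember where the next
   test has to be appended to keep this order. */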
1978
1979 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1980 MEM along edge EX.
1981
1982 The store is only done if MEM has changed. We do this so no
1983 changes to MEM occur on code paths that did not originally store
1984 into it.
1985
1986 The common case for execute_sm will transform:
1987
1988 for (...) {
1989 if (foo)
1990 stuff;
1991 else
1992 MEM = TMP_VAR;
1993 }
1994
1995 into:
1996
1997 lsm = MEM;
1998 for (...) {
1999 if (foo)
2000 stuff;
2001 else
2002 lsm = TMP_VAR;
2003 }
2004 MEM = lsm;
2005
2006 This function will generate:
2007
2008 lsm = MEM;
2009
2010 lsm_flag = false;
2011 ...
2012 for (...) {
2013 if (foo)
2014 stuff;
2015 else {
2016 lsm = TMP_VAR;
2017 lsm_flag = true;
2018 }
2019 }
2020 if (lsm_flag) <--
2021 MEM = lsm; <--
2022 */
2023
2024 static void
2025 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
2026 {
2027 basic_block new_bb, then_bb, old_dest;
2028 bool loop_has_only_one_exit;
2029 edge then_old_edge, orig_ex = ex;
2030 gimple_stmt_iterator gsi;
2031 gimple stmt;
2032 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
2033
2034 /* ?? Insert store after previous store if applicable. See note
2035 below. */
2036 if (prev_edges)
2037 ex = prev_edges->append_cond_position;
2038
2039 loop_has_only_one_exit = single_pred_p (ex->dest);
2040
2041 if (loop_has_only_one_exit)
2042 ex = split_block_after_labels (ex->dest);
2043
2044 old_dest = ex->dest;
2045 new_bb = split_edge (ex);
2046 then_bb = create_empty_bb (new_bb);
2047 if (current_loops && new_bb->loop_father)
2048 add_bb_to_loop (then_bb, new_bb->loop_father);
2049
2050 gsi = gsi_start_bb (new_bb);
2051 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
2052 NULL_TREE, NULL_TREE);
2053 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2054
2055 gsi = gsi_start_bb (then_bb);
2056 /* Insert actual store. */
2057 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
2058 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2059
2060 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
2061 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
2062 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
2063
2064 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
2065
2066 if (prev_edges)
2067 {
2068 basic_block prevbb = prev_edges->last_cond_fallthru->src;
2069 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
2070 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
2071 set_immediate_dominator (CDI_DOMINATORS, old_dest,
2072 recompute_dominator (CDI_DOMINATORS, old_dest));
2073 }
2074
2075 /* ?? Because stores may alias, they must happen in the exact order
2076 in which they originally happened. Save the position right after
2077 the (_lsm) store we just created so we can continue appending after
2078 it and maintain the original order. */
2079 {
2080 struct prev_flag_edges *p;
2081
2082 if (orig_ex->aux)
2083 orig_ex->aux = NULL;
2084 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
2085 p = (struct prev_flag_edges *) orig_ex->aux;
2086 p->append_cond_position = then_old_edge;
2087 p->last_cond_fallthru = find_edge (new_bb, old_dest);
2088 orig_ex->aux = (void *) p;
2089 }
2090
2091 if (!loop_has_only_one_exit)
2092 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
2093 {
2094 gimple phi = gsi_stmt (gsi);
2095 unsigned i;
2096
2097 for (i = 0; i < gimple_phi_num_args (phi); i++)
2098 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2099 {
2100 tree arg = gimple_phi_arg_def (phi, i);
2101 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2102 update_stmt (phi);
2103 }
2104 }
2105 /* NEW_BB now ends in a condition; clear EDGE_FALLTHRU on its edge to
2106 OLD_DEST, which was the single_succ_edge (new_bb). */
2107 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
2108 }
2109
2110 /* Helper function for execute_sm. At every location where REF is stored,
2111 emit a statement setting a flag indicating the store; return the flag. */
2112
2113 static tree
2114 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
2115 {
2116 unsigned i;
2117 mem_ref_loc_p loc;
2118 tree flag;
2119 VEC (mem_ref_loc_p, heap) *locs = NULL;
2120 char *str = get_lsm_tmp_name (ref->mem, ~0);
2121
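/* STR points to the static lsm_tmp_name buffer, so appending "_flag"
   below extends the name that make_rename_temp will use. */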
2122 lsm_tmp_name_add ("_flag");
2123 flag = make_rename_temp (boolean_type_node, str);
2124 get_all_locs_in_loop (loop, ref, &locs);
2125 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
2126 {
2127 gimple_stmt_iterator gsi;
2128 gimple stmt;
2129
2130 gsi = gsi_for_stmt (loc->stmt);
2131 stmt = gimple_build_assign (flag, boolean_true_node);
2132 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2133 }
2134 VEC_free (mem_ref_loc_p, heap, locs);
2135 return flag;
2136 }
2137
2138 /* Executes store motion of memory reference REF from LOOP.
2139 Exits from the LOOP are stored in EXITS. The initialization of the
2140 temporary variable is placed in the preheader of the loop, and the
2141 stores back to the reference are emitted on the exit edges. */
2142
2143 static void
2144 execute_sm (struct loop *loop, VEC (edge, heap) *exits, mem_ref_p ref)
2145 {
2146 tree tmp_var, store_flag;
2147 unsigned i;
2148 gimple load;
2149 struct fmt_data fmt_data;
2150 edge ex, latch_edge;
2151 struct lim_aux_data *lim_data;
2152 bool multi_threaded_model_p = false;
2153
2154 if (dump_file && (dump_flags & TDF_DETAILS))
2155 {
2156 fprintf (dump_file, "Executing store motion of ");
2157 print_generic_expr (dump_file, ref->mem, 0);
2158 fprintf (dump_file, " from loop %d\n", loop->num);
2159 }
2160
2161 tmp_var = make_rename_temp (TREE_TYPE (ref->mem),
2162 get_lsm_tmp_name (ref->mem, ~0));
2163
2164 fmt_data.loop = loop;
2165 fmt_data.orig_loop = loop;
2166 for_each_index (&ref->mem, force_move_till, &fmt_data);
2167
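/* Use the flag-guarded store (see execute_sm_if_changed) when the loop is
   inside a transaction, or when introducing stores on paths that did not
   originally store would be a forbidden data race. */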
2168 if (block_in_transaction (loop_preheader_edge (loop)->src)
2169 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
2170 multi_threaded_model_p = true;
2171
2172 if (multi_threaded_model_p)
2173 store_flag = execute_sm_if_changed_flag_set (loop, ref);
2174
2175 rewrite_mem_refs (loop, ref, tmp_var);
2176
2177 /* Emit the load code on the latch edge, so that we are sure it will
2178 be processed after all its dependencies. */
2179 latch_edge = loop_latch_edge (loop);
2180
2181 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2182 load altogether, since the store is predicated by a flag. We
2183 could do the load only if it was originally in the loop. */
2184 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem));
2185 lim_data = init_lim_data (load);
2186 lim_data->max_loop = loop;
2187 lim_data->tgt_loop = loop;
2188 gsi_insert_on_edge (latch_edge, load);
2189
2190 if (multi_threaded_model_p)
2191 {
2192 load = gimple_build_assign (store_flag, boolean_false_node);
2193 lim_data = init_lim_data (load);
2194 lim_data->max_loop = loop;
2195 lim_data->tgt_loop = loop;
2196 gsi_insert_on_edge (latch_edge, load);
2197 }
2198
2199 /* Sink the store to every exit from the loop. */
2200 FOR_EACH_VEC_ELT (edge, exits, i, ex)
2201 if (!multi_threaded_model_p)
2202 {
2203 gimple store;
2204 store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
2205 gsi_insert_on_edge (ex, store);
2206 }
2207 else
2208 execute_sm_if_changed (ex, ref->mem, tmp_var, store_flag);
2209 }
2210
2211 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2212 edges of the LOOP. */
2213
2214 static void
2215 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2216 VEC (edge, heap) *exits)
2217 {
2218 mem_ref_p ref;
2219 unsigned i;
2220 bitmap_iterator bi;
2221
2222 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2223 {
2224 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2225 execute_sm (loop, exits, ref);
2226 }
2227 }
2228
2229 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2230 make sure REF is always stored to in LOOP. */
2231
2232 static bool
2233 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2234 {
2235 VEC (mem_ref_loc_p, heap) *locs = NULL;
2236 unsigned i;
2237 mem_ref_loc_p loc;
2238 bool ret = false;
2239 struct loop *must_exec;
2240 tree base;
2241
2242 base = get_base_address (ref->mem);
2243 if (INDIRECT_REF_P (base)
2244 || TREE_CODE (base) == MEM_REF)
2245 base = TREE_OPERAND (base, 0);
2246
2247 get_all_locs_in_loop (loop, ref, &locs);
2248 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
2249 {
2250 if (!get_lim_data (loc->stmt))
2251 continue;
2252
2253 /* If we require an always-executed store, make sure the statement
2254 stores to the reference. */
2255 if (stored_p)
2256 {
2257 tree lhs;
2258 if (!gimple_get_lhs (loc->stmt))
2259 continue;
2260 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2261 if (!lhs)
2262 continue;
2263 if (INDIRECT_REF_P (lhs)
2264 || TREE_CODE (lhs) == MEM_REF)
2265 lhs = TREE_OPERAND (lhs, 0);
2266 if (lhs != base)
2267 continue;
2268 }
2269
2270 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2271 if (!must_exec)
2272 continue;
2273
2274 if (must_exec == loop
2275 || flow_loop_nested_p (must_exec, loop))
2276 {
2277 ret = true;
2278 break;
2279 }
2280 }
2281 VEC_free (mem_ref_loc_p, heap, locs);
2282
2283 return ret;
2284 }
2285
2286 /* Returns true if REF1 and REF2 are independent. */
2287
2288 static bool
2289 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2290 {
2291 if (ref1 == ref2
2292 || bitmap_bit_p (ref1->indep_ref, ref2->id))
2293 return true;
2294 if (bitmap_bit_p (ref1->dep_ref, ref2->id))
2295 return false;
2296 if (!MEM_ANALYZABLE (ref1)
2297 || !MEM_ANALYZABLE (ref2))
2298 return false;
2299
2300 if (dump_file && (dump_flags & TDF_DETAILS))
2301 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2302 ref1->id, ref2->id);
2303
2304 if (mem_refs_may_alias_p (ref1->mem, ref2->mem,
2305 &memory_accesses.ttae_cache))
2306 {
2307 bitmap_set_bit (ref1->dep_ref, ref2->id);
2308 bitmap_set_bit (ref2->dep_ref, ref1->id);
2309 if (dump_file && (dump_flags & TDF_DETAILS))
2310 fprintf (dump_file, "dependent.\n");
2311 return false;
2312 }
2313 else
2314 {
2315 bitmap_set_bit (ref1->indep_ref, ref2->id);
2316 bitmap_set_bit (ref2->indep_ref, ref1->id);
2317 if (dump_file && (dump_flags & TDF_DETAILS))
2318 fprintf (dump_file, "independent.\n");
2319 return true;
2320 }
2321 }
2322
2323 /* Records whether REF is independent of all other memory references in
2324 LOOP (according to INDEP). */
2325
2326 static void
2327 record_indep_loop (struct loop *loop, mem_ref_p ref, bool indep)
2328 {
2329 if (indep)
2330 bitmap_set_bit (ref->indep_loop, loop->num);
2331 else
2332 bitmap_set_bit (ref->dep_loop, loop->num);
2333 }
2334
2335 /* Returns true if REF is independent of all other memory references in
2336 LOOP. */
2337
2338 static bool
2339 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref)
2340 {
2341 bitmap refs_to_check;
2342 unsigned i;
2343 bitmap_iterator bi;
2344 bool ret = true, stored = bitmap_bit_p (ref->stored, loop->num);
2345 mem_ref_p aref;
2346
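/* A reference that is stored in LOOP may conflict with any other access
   to the same memory; a reference that is only read can only conflict
   with stores. Pick the set of references to check accordingly. */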
2347 if (stored)
2348 refs_to_check = VEC_index (bitmap,
2349 memory_accesses.all_refs_in_loop, loop->num);
2350 else
2351 refs_to_check = VEC_index (bitmap,
2352 memory_accesses.all_refs_stored_in_loop,
2353 loop->num);
2354
2355 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2356 {
2357 aref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2358 if (!MEM_ANALYZABLE (aref)
2359 || !refs_independent_p (ref, aref))
2360 {
2361 ret = false;
2362 record_indep_loop (loop, aref, false);
2363 break;
2364 }
2365 }
2366
2367 return ret;
2368 }
2369
2370 /* Returns true if REF is independent of all other memory references in
2371 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2372
2373 static bool
2374 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2375 {
2376 bool ret;
2377
2378 if (bitmap_bit_p (ref->indep_loop, loop->num))
2379 return true;
2380 if (bitmap_bit_p (ref->dep_loop, loop->num))
2381 return false;
2382
2383 ret = ref_indep_loop_p_1 (loop, ref);
2384
2385 if (dump_file && (dump_flags & TDF_DETAILS))
2386 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2387 ref->id, loop->num, ret ? "independent" : "dependent");
2388
2389 record_indep_loop (loop, ref, ret);
2390
2391 return ret;
2392 }
2393
2394 /* Returns true if we can perform store motion of REF from LOOP. */
2395
2396 static bool
2397 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2398 {
2399 tree base;
2400
2401 /* Can't hoist unanalyzable refs. */
2402 if (!MEM_ANALYZABLE (ref))
2403 return false;
2404
2405 /* Unless the reference is stored in the loop, there is nothing to do. */
2406 if (!bitmap_bit_p (ref->stored, loop->num))
2407 return false;
2408
2409 /* It should be movable. */
2410 if (!is_gimple_reg_type (TREE_TYPE (ref->mem))
2411 || TREE_THIS_VOLATILE (ref->mem)
2412 || !for_each_index (&ref->mem, may_move_till, loop))
2413 return false;
2414
2415 /* If it can throw, fail; we do not properly update EH info. */
2416 if (tree_could_throw_p (ref->mem))
2417 return false;
2418
2419 /* If it can trap, it must always be executed in LOOP.
2420 Readonly memory locations may trap when storing to them, but
2421 tree_could_trap_p is a predicate for rvalues, so check that
2422 explicitly. */
2423 base = get_base_address (ref->mem);
2424 if ((tree_could_trap_p (ref->mem)
2425 || (DECL_P (base) && TREE_READONLY (base)))
2426 && !ref_always_accessed_p (loop, ref, true))
2427 return false;
2428
2429 /* And it must be independent of all other memory references
2430 in LOOP. */
2431 if (!ref_indep_loop_p (loop, ref))
2432 return false;
2433
2434 return true;
2435 }
2436
2437 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2438 should be performed. SM_EXECUTED is the set of references for which
2439 store motion was already performed in one of the outer loops. */
2440
2441 static void
2442 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2443 {
2444 bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
2445 loop->num);
2446 unsigned i;
2447 bitmap_iterator bi;
2448 mem_ref_p ref;
2449
2450 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2451 {
2452 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2453 if (can_sm_ref_p (loop, ref))
2454 bitmap_set_bit (refs_to_sm, i);
2455 }
2456 }
2457
2458 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2459 for a store motion optimization (i.e. whether we can insert statements
2460 on its exit edges). */
2461
2462 static bool
2463 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2464 VEC (edge, heap) *exits)
2465 {
2466 unsigned i;
2467 edge ex;
2468
2469 FOR_EACH_VEC_ELT (edge, exits, i, ex)
2470 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2471 return false;
2472
2473 return true;
2474 }
2475
2476 /* Try to perform store motion for all memory references modified inside
2477 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2478 store motion was already executed in one of the outer loops. */
2479
2480 static void
2481 store_motion_loop (struct loop *loop, bitmap sm_executed)
2482 {
2483 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2484 struct loop *subloop;
2485 bitmap sm_in_loop = BITMAP_ALLOC (NULL);
2486
2487 if (loop_suitable_for_sm (loop, exits))
2488 {
2489 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2490 hoist_memory_references (loop, sm_in_loop, exits);
2491 }
2492 VEC_free (edge, heap, exits);
2493
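/* References on which store motion was performed in this loop must not be
   considered again in its subloops; add them to SM_EXECUTED for the
   recursion and remove them again afterwards. */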
2494 bitmap_ior_into (sm_executed, sm_in_loop);
2495 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2496 store_motion_loop (subloop, sm_executed);
2497 bitmap_and_compl_into (sm_executed, sm_in_loop);
2498 BITMAP_FREE (sm_in_loop);
2499 }
2500
2501 /* Try to perform store motion for all memory references modified inside
2502 loops. */
2503
2504 static void
2505 store_motion (void)
2506 {
2507 struct loop *loop;
2508 bitmap sm_executed = BITMAP_ALLOC (NULL);
2509
2510 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2511 store_motion_loop (loop, sm_executed);
2512
2513 BITMAP_FREE (sm_executed);
2514 gsi_commit_edge_inserts ();
2515 }
2516
2517 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2518 for each such basic block bb records the outermost loop for which
2519 execution of its header implies execution of bb. CONTAINS_CALL is the
2520 bitmap of blocks that contain a nonpure call. */
2521
2522 static void
2523 fill_always_executed_in (struct loop *loop, sbitmap contains_call)
2524 {
2525 basic_block bb = NULL, *bbs, last = NULL;
2526 unsigned i;
2527 edge e;
2528 struct loop *inn_loop = loop;
2529
2530 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2531 {
2532 bbs = get_loop_body_in_dom_order (loop);
2533
2534 for (i = 0; i < loop->num_nodes; i++)
2535 {
2536 edge_iterator ei;
2537 bb = bbs[i];
2538
2539 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2540 last = bb;
2541
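/* A nonpure call might not return (it may loop forever, exit or throw),
   so blocks after it are not guaranteed to be executed. */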
2542 if (TEST_BIT (contains_call, bb->index))
2543 break;
2544
2545 FOR_EACH_EDGE (e, ei, bb->succs)
2546 if (!flow_bb_inside_loop_p (loop, e->dest))
2547 break;
2548 if (e)
2549 break;
2550
2551 /* A loop might be infinite (TODO use simple loop analysis
2552 to disprove this if possible). */
2553 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2554 break;
2555
2556 if (!flow_bb_inside_loop_p (inn_loop, bb))
2557 break;
2558
2559 if (bb->loop_father->header == bb)
2560 {
2561 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2562 break;
2563
2564 /* In a loop that is always entered we may proceed anyway.
2565 But record that we entered it and stop once we leave it. */
2566 inn_loop = bb->loop_father;
2567 }
2568 }
2569
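/* Mark LAST and all of its dominators up to the loop header as always
   executed in LOOP. */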
2570 while (1)
2571 {
2572 SET_ALWAYS_EXECUTED_IN (last, loop);
2573 if (last == loop->header)
2574 break;
2575 last = get_immediate_dominator (CDI_DOMINATORS, last);
2576 }
2577
2578 free (bbs);
2579 }
2580
2581 for (loop = loop->inner; loop; loop = loop->next)
2582 fill_always_executed_in (loop, contains_call);
2583 }
2584
2585 /* Compute the global information needed by the loop invariant motion pass. */
2586
2587 static void
2588 tree_ssa_lim_initialize (void)
2589 {
2590 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2591 gimple_stmt_iterator bsi;
2592 struct loop *loop;
2593 basic_block bb;
2594
2595 sbitmap_zero (contains_call);
2596 FOR_EACH_BB (bb)
2597 {
2598 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2599 {
2600 if (nonpure_call_p (gsi_stmt (bsi)))
2601 break;
2602 }
2603
2604 if (!gsi_end_p (bsi))
2605 SET_BIT (contains_call, bb->index);
2606 }
2607
2608 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2609 fill_always_executed_in (loop, contains_call);
2610
2611 sbitmap_free (contains_call);
2612
2613 lim_aux_data_map = pointer_map_create ();
2614
2615 if (flag_tm)
2616 compute_transaction_bits ();
2617
2618 alloc_aux_for_edges (0);
2619 }
2620
2621 /* Cleans up after the invariant motion pass. */
2622
2623 static void
2624 tree_ssa_lim_finalize (void)
2625 {
2626 basic_block bb;
2627 unsigned i;
2628 bitmap b;
2629
2630 free_aux_for_edges ();
2631
2632 FOR_EACH_BB (bb)
2633 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2634
2635 pointer_map_destroy (lim_aux_data_map);
2636
2637 VEC_free (mem_ref_p, heap, memory_accesses.refs_list);
2638 htab_delete (memory_accesses.refs);
2639
2640 FOR_EACH_VEC_ELT (bitmap, memory_accesses.refs_in_loop, i, b)
2641 BITMAP_FREE (b);
2642 VEC_free (bitmap, heap, memory_accesses.refs_in_loop);
2643
2644 FOR_EACH_VEC_ELT (bitmap, memory_accesses.all_refs_in_loop, i, b)
2645 BITMAP_FREE (b);
2646 VEC_free (bitmap, heap, memory_accesses.all_refs_in_loop);
2647
2648 FOR_EACH_VEC_ELT (bitmap, memory_accesses.all_refs_stored_in_loop, i, b)
2649 BITMAP_FREE (b);
2650 VEC_free (bitmap, heap, memory_accesses.all_refs_stored_in_loop);
2651
2652 if (memory_accesses.ttae_cache)
2653 pointer_map_destroy (memory_accesses.ttae_cache);
2654 }
2655
2656 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2657 i.e. those that are likely to be a win regardless of register pressure. */
2658
2659 unsigned int
2660 tree_ssa_lim (void)
2661 {
2662 unsigned int todo;
2663
2664 tree_ssa_lim_initialize ();
2665
2666 /* Gathers information about memory accesses in the loops. */
2667 analyze_memory_references ();
2668
2669 /* For each statement determine the outermost loop in which it is
2670 invariant and the cost of computing the invariant. */
2671 determine_invariantness ();
2672
2673 /* Execute store motion. Force the necessary invariants to be moved
2674 out of the loops as well. */
2675 store_motion ();
2676
2677 /* Move the expressions that are expensive enough. */
2678 todo = move_computations ();
2679
2680 tree_ssa_lim_finalize ();
2681
2682 return todo;
2683 }