1 /* Loop invariant motion.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "tm_p.h"
27 #include "basic-block.h"
28 #include "tree-pretty-print.h"
29 #include "gimple-pretty-print.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
32 #include "timevar.h"
33 #include "cfgloop.h"
34 #include "domwalk.h"
35 #include "params.h"
36 #include "tree-pass.h"
37 #include "flags.h"
38 #include "hashtab.h"
39 #include "tree-affine.h"
40 #include "pointer-set.h"
41 #include "tree-ssa-propagate.h"
42
43 /* TODO: Support for predicated code motion. I.e.
44
45 while (1)
46 {
47 if (cond)
48 {
49 a = inv;
50 something;
51 }
52 }
53
 54    where COND and INV are invariants, but evaluating INV may trap or be
 55    invalid for some other reason if !COND.  This may be transformed to
56
57 if (cond)
58 a = inv;
59 while (1)
60 {
61 if (cond)
62 something;
63 } */
64
65 /* A type for the list of statements that have to be moved in order to be able
66 to hoist an invariant computation. */
67
68 struct depend
69 {
70 gimple stmt;
71 struct depend *next;
72 };
73
74 /* The auxiliary data kept for each statement. */
75
76 struct lim_aux_data
77 {
 78   struct loop *max_loop;        /* The outermost loop in which the statement
79 is invariant. */
80
 81   struct loop *tgt_loop;        /* The loop out of which we want to move the
82 invariant. */
83
84 struct loop *always_executed_in;
 85                                 /* The outermost loop for which we are sure
86 the statement is executed if the loop
87 is entered. */
88
89 unsigned cost; /* Cost of the computation performed by the
90 statement. */
91
92 struct depend *depends; /* List of statements that must be also hoisted
93 out of the loop when this statement is
94 hoisted; i.e. those that define the operands
95 of the statement and are inside of the
96 MAX_LOOP loop. */
97 };
98
99 /* Maps statements to their lim_aux_data. */
100
101 static struct pointer_map_t *lim_aux_data_map;
102
103 /* Description of a memory reference location. */
104
105 typedef struct mem_ref_loc
106 {
107 tree *ref; /* The reference itself. */
 108   gimple stmt;                  /* The statement in which it occurs.  */
109 } *mem_ref_loc_p;
110
111 DEF_VEC_P(mem_ref_loc_p);
112 DEF_VEC_ALLOC_P(mem_ref_loc_p, heap);
113
114 /* The list of memory reference locations in a loop. */
115
116 typedef struct mem_ref_locs
117 {
118 VEC (mem_ref_loc_p, heap) *locs;
119 } *mem_ref_locs_p;
120
121 DEF_VEC_P(mem_ref_locs_p);
122 DEF_VEC_ALLOC_P(mem_ref_locs_p, heap);
123
124 /* Description of a memory reference. */
125
126 typedef struct mem_ref
127 {
128 tree mem; /* The memory itself. */
129 unsigned id; /* ID assigned to the memory reference
130 (its index in memory_accesses.refs_list) */
131 hashval_t hash; /* Its hash value. */
 132   bitmap stored;                /* The set of loops in which this memory location
133 is stored to. */
134 VEC (mem_ref_locs_p, heap) *accesses_in_loop;
135 /* The locations of the accesses. Vector
136 indexed by the loop number. */
137
138 /* The following sets are computed on demand. We keep both set and
139 its complement, so that we know whether the information was
140 already computed or not. */
 141   bitmap indep_loop;            /* The set of loops in which the memory
 142                                    reference is independent, meaning:
 143                                    If it is stored in the loop, this store
 144                                      is independent of all other loads and
 145                                      stores.
 146                                    If it is only loaded, then it is independent
 147                                      of all stores in the loop.  */
148 bitmap dep_loop; /* The complement of INDEP_LOOP. */
149
 150   bitmap indep_ref;             /* The set of memory references that this
 151                                    reference is independent of.  */
152 bitmap dep_ref; /* The complement of INDEP_REF. */
153 } *mem_ref_p;
154
155 DEF_VEC_P(mem_ref_p);
156 DEF_VEC_ALLOC_P(mem_ref_p, heap);
157
158 DEF_VEC_P(bitmap);
159 DEF_VEC_ALLOC_P(bitmap, heap);
160
161 DEF_VEC_P(htab_t);
162 DEF_VEC_ALLOC_P(htab_t, heap);
163
164 /* Description of memory accesses in loops. */
165
166 static struct
167 {
168 /* The hash table of memory references accessed in loops. */
169 htab_t refs;
170
171 /* The list of memory references. */
172 VEC (mem_ref_p, heap) *refs_list;
173
174 /* The set of memory references accessed in each loop. */
175 VEC (bitmap, heap) *refs_in_loop;
176
177 /* The set of memory references accessed in each loop, including
178 subloops. */
179 VEC (bitmap, heap) *all_refs_in_loop;
180
181 /* The set of memory references stored in each loop, including
182 subloops. */
183 VEC (bitmap, heap) *all_refs_stored_in_loop;
184
185 /* Cache for expanding memory addresses. */
186 struct pointer_map_t *ttae_cache;
187 } memory_accesses;
188
189 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
190
191 /* Minimum cost of an expensive expression. */
192 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
193
194 /* The outermost loop for which execution of the header guarantees that the
195 block will be executed. */
196 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
197 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
198
199 /* Whether the reference was analyzable. */
200 #define MEM_ANALYZABLE(REF) ((REF)->mem != error_mark_node)
201
202 static struct lim_aux_data *
203 init_lim_data (gimple stmt)
204 {
205 void **p = pointer_map_insert (lim_aux_data_map, stmt);
206
207 *p = XCNEW (struct lim_aux_data);
208 return (struct lim_aux_data *) *p;
209 }
210
211 static struct lim_aux_data *
212 get_lim_data (gimple stmt)
213 {
214 void **p = pointer_map_contains (lim_aux_data_map, stmt);
215 if (!p)
216 return NULL;
217
218 return (struct lim_aux_data *) *p;
219 }
220
221 /* Releases the memory occupied by DATA. */
222
223 static void
224 free_lim_aux_data (struct lim_aux_data *data)
225 {
226 struct depend *dep, *next;
227
228 for (dep = data->depends; dep; dep = next)
229 {
230 next = dep->next;
231 free (dep);
232 }
233 free (data);
234 }
235
236 static void
237 clear_lim_data (gimple stmt)
238 {
239 void **p = pointer_map_contains (lim_aux_data_map, stmt);
240 if (!p)
241 return;
242
243 free_lim_aux_data ((struct lim_aux_data *) *p);
244 *p = NULL;
245 }
246
247 /* Calls CBCK for each index in memory reference ADDR_P. There are two
 248    kinds of situations handled; in each of these cases, the memory reference
249 and DATA are passed to the callback:
250
251 Access to an array: ARRAY_{RANGE_}REF (base, index). In this case we also
252 pass the pointer to the index to the callback.
253
254 Pointer dereference: INDIRECT_REF (addr). In this case we also pass the
255 pointer to addr to the callback.
256
257 If the callback returns false, the whole search stops and false is returned.
258 Otherwise the function returns true after traversing through the whole
259 reference *ADDR_P. */
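/* Example (illustrative; the SSA names are made up): for a reference like
   a[i].f -- a COMPONENT_REF around an ARRAY_REF -- the callback is invoked
   once with a pointer to the index I; for a pointer dereference *p_1
   (a MEM_REF) it is invoked once with a pointer to the base pointer P_1.
   The callbacks may_move_till and force_move_till below use this walk to
   check or force invariance of every index in a reference.  */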
260
261 bool
262 for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
263 {
264 tree *nxt, *idx;
265
266 for (; ; addr_p = nxt)
267 {
268 switch (TREE_CODE (*addr_p))
269 {
270 case SSA_NAME:
271 return cbck (*addr_p, addr_p, data);
272
273 case MEM_REF:
274 nxt = &TREE_OPERAND (*addr_p, 0);
275 return cbck (*addr_p, nxt, data);
276
277 case BIT_FIELD_REF:
278 case VIEW_CONVERT_EXPR:
279 case REALPART_EXPR:
280 case IMAGPART_EXPR:
281 nxt = &TREE_OPERAND (*addr_p, 0);
282 break;
283
284 case COMPONENT_REF:
 285         /* If the component has a varying offset, it behaves like an index
286 as well. */
287 idx = &TREE_OPERAND (*addr_p, 2);
288 if (*idx
289 && !cbck (*addr_p, idx, data))
290 return false;
291
292 nxt = &TREE_OPERAND (*addr_p, 0);
293 break;
294
295 case ARRAY_REF:
296 case ARRAY_RANGE_REF:
297 nxt = &TREE_OPERAND (*addr_p, 0);
298 if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
299 return false;
300 break;
301
302 case VAR_DECL:
303 case PARM_DECL:
304 case STRING_CST:
305 case RESULT_DECL:
306 case VECTOR_CST:
307 case COMPLEX_CST:
308 case INTEGER_CST:
309 case REAL_CST:
310 case FIXED_CST:
311 case CONSTRUCTOR:
312 return true;
313
314 case ADDR_EXPR:
315 gcc_assert (is_gimple_min_invariant (*addr_p));
316 return true;
317
318 case TARGET_MEM_REF:
319 idx = &TMR_BASE (*addr_p);
320 if (*idx
321 && !cbck (*addr_p, idx, data))
322 return false;
323 idx = &TMR_INDEX (*addr_p);
324 if (*idx
325 && !cbck (*addr_p, idx, data))
326 return false;
327 idx = &TMR_INDEX2 (*addr_p);
328 if (*idx
329 && !cbck (*addr_p, idx, data))
330 return false;
331 return true;
332
333 default:
334 gcc_unreachable ();
335 }
336 }
337 }
338
339 /* If it is possible to hoist the statement STMT unconditionally,
340 returns MOVE_POSSIBLE.
 341    If it is possible to hoist the statement STMT, but we must avoid executing
 342    it when it would not be executed in the original program (e.g.
343 because it may trap), return MOVE_PRESERVE_EXECUTION.
344 Otherwise return MOVE_IMPOSSIBLE. */
345
346 enum move_pos
347 movement_possibility (gimple stmt)
348 {
349 tree lhs;
350 enum move_pos ret = MOVE_POSSIBLE;
351
352 if (flag_unswitch_loops
353 && gimple_code (stmt) == GIMPLE_COND)
354 {
355 /* If we perform unswitching, force the operands of the invariant
356 condition to be moved out of the loop. */
357 return MOVE_POSSIBLE;
358 }
359
360 if (gimple_code (stmt) == GIMPLE_PHI
361 && gimple_phi_num_args (stmt) <= 2
362 && is_gimple_reg (gimple_phi_result (stmt))
363 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
364 return MOVE_POSSIBLE;
365
366 if (gimple_get_lhs (stmt) == NULL_TREE)
367 return MOVE_IMPOSSIBLE;
368
369 if (gimple_vdef (stmt))
370 return MOVE_IMPOSSIBLE;
371
372 if (stmt_ends_bb_p (stmt)
373 || gimple_has_volatile_ops (stmt)
374 || gimple_has_side_effects (stmt)
375 || stmt_could_throw_p (stmt))
376 return MOVE_IMPOSSIBLE;
377
378 if (is_gimple_call (stmt))
379 {
 380       /* While a pure or const call is guaranteed to have no side effects, we
381 cannot move it arbitrarily. Consider code like
382
383 char *s = something ();
384
385 while (1)
386 {
387 if (s)
388 t = strlen (s);
389 else
390 t = 0;
391 }
392
393 Here the strlen call cannot be moved out of the loop, even though
394 s is invariant. In addition to possibly creating a call with
395 invalid arguments, moving out a function call that is not executed
396 may cause performance regressions in case the call is costly and
397 not executed at all. */
398 ret = MOVE_PRESERVE_EXECUTION;
399 lhs = gimple_call_lhs (stmt);
400 }
401 else if (is_gimple_assign (stmt))
402 lhs = gimple_assign_lhs (stmt);
403 else
404 return MOVE_IMPOSSIBLE;
405
406 if (TREE_CODE (lhs) == SSA_NAME
407 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
408 return MOVE_IMPOSSIBLE;
409
410 if (TREE_CODE (lhs) != SSA_NAME
411 || gimple_could_trap_p (stmt))
412 return MOVE_PRESERVE_EXECUTION;
413
414 /* Non local loads in a transaction cannot be hoisted out. Well,
415 unless the load happens on every path out of the loop, but we
416 don't take this into account yet. */
417 if (flag_tm
418 && gimple_in_transaction (stmt)
419 && gimple_assign_single_p (stmt))
420 {
421 tree rhs = gimple_assign_rhs1 (stmt);
422 if (DECL_P (rhs) && is_global_var (rhs))
423 {
424 if (dump_file)
425 {
426 fprintf (dump_file, "Cannot hoist conditional load of ");
427 print_generic_expr (dump_file, rhs, TDF_SLIM);
428 fprintf (dump_file, " because it is in a transaction.\n");
429 }
430 return MOVE_IMPOSSIBLE;
431 }
432 }
433
434 return ret;
435 }
436
437 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
 438    loop to which we could move the expression using DEF if it did not have
 439    other operands, i.e. the outermost loop enclosing LOOP in which the value
440 of DEF is invariant. */
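/* A sketch (illustrative; loop numbers and SSA names are made up):

     for (i = ...)            <-- loop 1
       for (j = ...)          <-- loop 2
         x_1 = a_2 + b_3;

   If a_2 is defined before loop 1, outermost_invariant_loop (a_2, loop 2)
   is loop 1, i.e. as far as a_2 is concerned the addition could be moved
   in front of both loops.  If a_2 were defined inside loop 2 itself, the
   function would return NULL.  */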
441
442 static struct loop *
443 outermost_invariant_loop (tree def, struct loop *loop)
444 {
445 gimple def_stmt;
446 basic_block def_bb;
447 struct loop *max_loop;
448 struct lim_aux_data *lim_data;
449
450 if (!def)
451 return superloop_at_depth (loop, 1);
452
453 if (TREE_CODE (def) != SSA_NAME)
454 {
455 gcc_assert (is_gimple_min_invariant (def));
456 return superloop_at_depth (loop, 1);
457 }
458
459 def_stmt = SSA_NAME_DEF_STMT (def);
460 def_bb = gimple_bb (def_stmt);
461 if (!def_bb)
462 return superloop_at_depth (loop, 1);
463
464 max_loop = find_common_loop (loop, def_bb->loop_father);
465
466 lim_data = get_lim_data (def_stmt);
467 if (lim_data != NULL && lim_data->max_loop != NULL)
468 max_loop = find_common_loop (max_loop,
469 loop_outer (lim_data->max_loop));
470 if (max_loop == loop)
471 return NULL;
472 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
473
474 return max_loop;
475 }
476
477 /* DATA is a structure containing information associated with a statement
478 inside LOOP. DEF is one of the operands of this statement.
479
 480    Find the outermost loop enclosing LOOP in which the value of DEF is invariant
 481    and record this in the DATA->max_loop field.  If DEF itself is defined inside
 482    this loop as well (i.e. we need to hoist it out of the loop if we want
 483    to hoist the statement represented by DATA), record the statement that
 484    defines DEF in the DATA->depends list.  Additionally, if ADD_COST is true,
485 add the cost of the computation of DEF to the DATA->cost.
486
487 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
488
489 static bool
490 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
491 bool add_cost)
492 {
493 gimple def_stmt = SSA_NAME_DEF_STMT (def);
494 basic_block def_bb = gimple_bb (def_stmt);
495 struct loop *max_loop;
496 struct depend *dep;
497 struct lim_aux_data *def_data;
498
499 if (!def_bb)
500 return true;
501
502 max_loop = outermost_invariant_loop (def, loop);
503 if (!max_loop)
504 return false;
505
506 if (flow_loop_nested_p (data->max_loop, max_loop))
507 data->max_loop = max_loop;
508
509 def_data = get_lim_data (def_stmt);
510 if (!def_data)
511 return true;
512
513 if (add_cost
514 /* Only add the cost if the statement defining DEF is inside LOOP,
515 i.e. if it is likely that by moving the invariants dependent
516 on it, we will be able to avoid creating a new register for
517 it (since it will be only used in these dependent invariants). */
518 && def_bb->loop_father == loop)
519 data->cost += def_data->cost;
520
521 dep = XNEW (struct depend);
522 dep->stmt = def_stmt;
523 dep->next = data->depends;
524 data->depends = dep;
525
526 return true;
527 }
528
 529 /* Returns an estimate of the cost of statement STMT.  The values here
530 are just ad-hoc constants, similar to costs for inlining. */
531
532 static unsigned
533 stmt_cost (gimple stmt)
534 {
535 /* Always try to create possibilities for unswitching. */
536 if (gimple_code (stmt) == GIMPLE_COND
537 || gimple_code (stmt) == GIMPLE_PHI)
538 return LIM_EXPENSIVE;
539
540 /* We should be hoisting calls if possible. */
541 if (is_gimple_call (stmt))
542 {
543 tree fndecl;
544
 545       /* Unless the call is to builtin_constant_p; this always folds to a
546 constant, so moving it is useless. */
547 fndecl = gimple_call_fndecl (stmt);
548 if (fndecl
549 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
550 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
551 return 0;
552
553 return LIM_EXPENSIVE;
554 }
555
556 /* Hoisting memory references out should almost surely be a win. */
557 if (gimple_references_memory_p (stmt))
558 return LIM_EXPENSIVE;
559
560 if (gimple_code (stmt) != GIMPLE_ASSIGN)
561 return 1;
562
563 switch (gimple_assign_rhs_code (stmt))
564 {
565 case MULT_EXPR:
566 case WIDEN_MULT_EXPR:
567 case WIDEN_MULT_PLUS_EXPR:
568 case WIDEN_MULT_MINUS_EXPR:
569 case DOT_PROD_EXPR:
570 case FMA_EXPR:
571 case TRUNC_DIV_EXPR:
572 case CEIL_DIV_EXPR:
573 case FLOOR_DIV_EXPR:
574 case ROUND_DIV_EXPR:
575 case EXACT_DIV_EXPR:
576 case CEIL_MOD_EXPR:
577 case FLOOR_MOD_EXPR:
578 case ROUND_MOD_EXPR:
579 case TRUNC_MOD_EXPR:
580 case RDIV_EXPR:
581 /* Division and multiplication are usually expensive. */
582 return LIM_EXPENSIVE;
583
584 case LSHIFT_EXPR:
585 case RSHIFT_EXPR:
586 case WIDEN_LSHIFT_EXPR:
587 case LROTATE_EXPR:
588 case RROTATE_EXPR:
589 /* Shifts and rotates are usually expensive. */
590 return LIM_EXPENSIVE;
591
592 case CONSTRUCTOR:
593 /* Make vector construction cost proportional to the number
594 of elements. */
595 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
596
597 case SSA_NAME:
598 case PAREN_EXPR:
599 /* Whether or not something is wrapped inside a PAREN_EXPR
600 should not change move cost. Nor should an intermediate
601 unpropagated SSA name copy. */
602 return 0;
603
604 default:
605 return 1;
606 }
607 }
608
 609 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
610 REF is independent. If REF is not independent in LOOP, NULL is returned
611 instead. */
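/* An illustrative sketch (array names and loop numbers are made up):

     for (i = ...)            <-- loop 1
       {
         a[0] = ...;
         for (j = ...)        <-- loop 2
           x_1 = b[5];
       }

   If b[5] cannot alias a[0], the load of b[5] is independent in both loops
   and outermost_indep_loop (loop 1, loop 2, ref of b[5]) returns loop 1.
   If the two locations may alias, the store in loop 1 makes the reference
   dependent there and only loop 2 is returned.  */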
612
613 static struct loop *
614 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
615 {
616 struct loop *aloop;
617
618 if (bitmap_bit_p (ref->stored, loop->num))
619 return NULL;
620
621 for (aloop = outer;
622 aloop != loop;
623 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
624 if (!bitmap_bit_p (ref->stored, aloop->num)
625 && ref_indep_loop_p (aloop, ref))
626 return aloop;
627
628 if (ref_indep_loop_p (loop, ref))
629 return loop;
630 else
631 return NULL;
632 }
633
634 /* If there is a simple load or store to a memory reference in STMT, returns
635 the location of the memory reference, and sets IS_STORE according to whether
636 it is a store or load. Otherwise, returns NULL. */
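/* For illustration (hypothetical statements): "x_1 = a.f" is recognized as
   a simple load (*IS_STORE set to false, the returned location pointing to
   a.f), while "a.f = x_1" or "a.f = 0" are recognized as simple stores.
   An aggregate copy such as "a = b", where both sides are memory, is not
   simple in this sense and NULL is returned.  */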
637
638 static tree *
639 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
640 {
641 tree *lhs;
642 enum tree_code code;
643
644 /* Recognize MEM = (SSA_NAME | invariant) and SSA_NAME = MEM patterns. */
645 if (gimple_code (stmt) != GIMPLE_ASSIGN)
646 return NULL;
647
648 code = gimple_assign_rhs_code (stmt);
649
650 lhs = gimple_assign_lhs_ptr (stmt);
651
652 if (TREE_CODE (*lhs) == SSA_NAME)
653 {
654 if (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
655 || !is_gimple_addressable (gimple_assign_rhs1 (stmt)))
656 return NULL;
657
658 *is_store = false;
659 return gimple_assign_rhs1_ptr (stmt);
660 }
661 else if (code == SSA_NAME
662 || (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
663 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
664 {
665 *is_store = true;
666 return lhs;
667 }
668 else
669 return NULL;
670 }
671
672 /* Returns the memory reference contained in STMT. */
673
674 static mem_ref_p
675 mem_ref_in_stmt (gimple stmt)
676 {
677 bool store;
678 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
679 hashval_t hash;
680 mem_ref_p ref;
681
682 if (!mem)
683 return NULL;
684 gcc_assert (!store);
685
686 hash = iterative_hash_expr (*mem, 0);
687 ref = (mem_ref_p) htab_find_with_hash (memory_accesses.refs, *mem, hash);
688
689 gcc_assert (ref != NULL);
690 return ref;
691 }
692
693 /* From a controlling predicate in DOM determine the arguments from
694 the PHI node PHI that are chosen if the predicate evaluates to
695 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
696 they are non-NULL. Returns true if the arguments can be determined,
 697    otherwise false.  */
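/* Schematically (illustrative block and SSA names):

        DOM:  if (cond)
              /        \
        then_bb       else_bb
              \        /
         BB:  x = PHI <a_1, b_2>

   If the edge coming from then_bb is dominated by the true edge of DOM and
   the edge coming from else_bb by the false edge, *TRUE_ARG_P is set to a_1
   and *FALSE_ARG_P to b_2.  */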
698
699 static bool
700 extract_true_false_args_from_phi (basic_block dom, gimple phi,
701 tree *true_arg_p, tree *false_arg_p)
702 {
703 basic_block bb = gimple_bb (phi);
704 edge true_edge, false_edge, tem;
705 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
706
707 /* We have to verify that one edge into the PHI node is dominated
708 by the true edge of the predicate block and the other edge
709 dominated by the false edge. This ensures that the PHI argument
710 we are going to take is completely determined by the path we
711 take from the predicate block.
712 We can only use BB dominance checks below if the destination of
713 the true/false edges are dominated by their edge, thus only
714 have a single predecessor. */
715 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
716 tem = EDGE_PRED (bb, 0);
717 if (tem == true_edge
718 || (single_pred_p (true_edge->dest)
719 && (tem->src == true_edge->dest
720 || dominated_by_p (CDI_DOMINATORS,
721 tem->src, true_edge->dest))))
722 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
723 else if (tem == false_edge
724 || (single_pred_p (false_edge->dest)
725 && (tem->src == false_edge->dest
726 || dominated_by_p (CDI_DOMINATORS,
727 tem->src, false_edge->dest))))
728 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
729 else
730 return false;
731 tem = EDGE_PRED (bb, 1);
732 if (tem == true_edge
733 || (single_pred_p (true_edge->dest)
734 && (tem->src == true_edge->dest
735 || dominated_by_p (CDI_DOMINATORS,
736 tem->src, true_edge->dest))))
737 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
738 else if (tem == false_edge
739 || (single_pred_p (false_edge->dest)
740 && (tem->src == false_edge->dest
741 || dominated_by_p (CDI_DOMINATORS,
742 tem->src, false_edge->dest))))
743 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
744 else
745 return false;
746 if (!arg0 || !arg1)
747 return false;
748
749 if (true_arg_p)
750 *true_arg_p = arg0;
751 if (false_arg_p)
752 *false_arg_p = arg1;
753
754 return true;
755 }
756
 757 /* Determine the outermost loop to which it is possible to hoist a statement
 758    STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
 759    the outermost loop in which the value computed by STMT is invariant.
 760    If MUST_PRESERVE_EXEC is true, additionally choose the loop so that
 761    we preserve whether STMT is executed.  The function also fills other related
 762    information into LIM_DATA (STMT).
763
764 The function returns false if STMT cannot be hoisted outside of the loop it
765 is defined in, and true otherwise. */
766
767 static bool
768 determine_max_movement (gimple stmt, bool must_preserve_exec)
769 {
770 basic_block bb = gimple_bb (stmt);
771 struct loop *loop = bb->loop_father;
772 struct loop *level;
773 struct lim_aux_data *lim_data = get_lim_data (stmt);
774 tree val;
775 ssa_op_iter iter;
776
777 if (must_preserve_exec)
778 level = ALWAYS_EXECUTED_IN (bb);
779 else
780 level = superloop_at_depth (loop, 1);
781 lim_data->max_loop = level;
782
783 if (gimple_code (stmt) == GIMPLE_PHI)
784 {
785 use_operand_p use_p;
786 unsigned min_cost = UINT_MAX;
787 unsigned total_cost = 0;
788 struct lim_aux_data *def_data;
789
790 /* We will end up promoting dependencies to be unconditionally
791 evaluated. For this reason the PHI cost (and thus the
792 cost we remove from the loop by doing the invariant motion)
793 is that of the cheapest PHI argument dependency chain. */
794 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
795 {
796 val = USE_FROM_PTR (use_p);
797 if (TREE_CODE (val) != SSA_NAME)
798 continue;
799 if (!add_dependency (val, lim_data, loop, false))
800 return false;
801 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
802 if (def_data)
803 {
804 min_cost = MIN (min_cost, def_data->cost);
805 total_cost += def_data->cost;
806 }
807 }
808
809 lim_data->cost += min_cost;
810
811 if (gimple_phi_num_args (stmt) > 1)
812 {
813 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
814 gimple cond;
815 if (gsi_end_p (gsi_last_bb (dom)))
816 return false;
817 cond = gsi_stmt (gsi_last_bb (dom));
818 if (gimple_code (cond) != GIMPLE_COND)
819 return false;
820 /* Verify that this is an extended form of a diamond and
821 the PHI arguments are completely controlled by the
822 predicate in DOM. */
823 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
824 return false;
825
826 /* Fold in dependencies and cost of the condition. */
827 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
828 {
829 if (!add_dependency (val, lim_data, loop, false))
830 return false;
831 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
832 if (def_data)
833 total_cost += def_data->cost;
834 }
835
836 /* We want to avoid unconditionally executing very expensive
837 operations. As costs for our dependencies cannot be
 838          negative, just claim we are not invariant in this case.
839 We also are not sure whether the control-flow inside the
840 loop will vanish. */
841 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
842 && !(min_cost != 0
843 && total_cost / min_cost <= 2))
844 return false;
845
846 /* Assume that the control-flow in the loop will vanish.
847 ??? We should verify this and not artificially increase
848 the cost if that is not the case. */
849 lim_data->cost += stmt_cost (stmt);
850 }
851
852 return true;
853 }
854 else
855 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
856 if (!add_dependency (val, lim_data, loop, true))
857 return false;
858
859 if (gimple_vuse (stmt))
860 {
861 mem_ref_p ref = mem_ref_in_stmt (stmt);
862
863 if (ref)
864 {
865 lim_data->max_loop
866 = outermost_indep_loop (lim_data->max_loop, loop, ref);
867 if (!lim_data->max_loop)
868 return false;
869 }
870 else
871 {
872 if ((val = gimple_vuse (stmt)) != NULL_TREE)
873 {
874 if (!add_dependency (val, lim_data, loop, false))
875 return false;
876 }
877 }
878 }
879
880 lim_data->cost += stmt_cost (stmt);
881
882 return true;
883 }
884
885 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
886 and that one of the operands of this statement is computed by STMT.
887 Ensure that STMT (together with all the statements that define its
888 operands) is hoisted at least out of the loop LEVEL. */
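/* For example (illustrative): if a statement using a_2 gets LEVEL as its
   target loop, and a_2 is computed by a statement inside LEVEL, set_level
   forces the same target on that defining statement (recorded in the
   DEPENDS list), and so on recursively through its own dependencies.  */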
889
890 static void
891 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
892 {
893 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
894 struct depend *dep;
895 struct lim_aux_data *lim_data;
896
897 stmt_loop = find_common_loop (orig_loop, stmt_loop);
898 lim_data = get_lim_data (stmt);
899 if (lim_data != NULL && lim_data->tgt_loop != NULL)
900 stmt_loop = find_common_loop (stmt_loop,
901 loop_outer (lim_data->tgt_loop));
902 if (flow_loop_nested_p (stmt_loop, level))
903 return;
904
905 gcc_assert (level == lim_data->max_loop
906 || flow_loop_nested_p (lim_data->max_loop, level));
907
908 lim_data->tgt_loop = level;
909 for (dep = lim_data->depends; dep; dep = dep->next)
910 set_level (dep->stmt, orig_loop, level);
911 }
912
 913 /* Determines the outermost loop from which we want to hoist the statement STMT.
 914    For now we choose the outermost possible loop.  TODO -- use profiling
915 information to set it more sanely. */
916
917 static void
918 set_profitable_level (gimple stmt)
919 {
920 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
921 }
922
923 /* Returns true if STMT is a call that has side effects. */
924
925 static bool
926 nonpure_call_p (gimple stmt)
927 {
928 if (gimple_code (stmt) != GIMPLE_CALL)
929 return false;
930
931 return gimple_has_side_effects (stmt);
932 }
933
934 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
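/* E.g. (illustrative; SSA names are made up), for a division with an
   invariant divisor b_2 inside a loop:

     x_1 = a_3 / b_2;

   is rewritten in place to

     reciptmp_4 = 1.0 / b_2;     <-- returned, hoistable since b_2 is invariant
     x_1 = a_3 * reciptmp_4;
 */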
935
936 static gimple
937 rewrite_reciprocal (gimple_stmt_iterator *bsi)
938 {
939 gimple stmt, stmt1, stmt2;
940 tree var, name, lhs, type;
941 tree real_one;
942 gimple_stmt_iterator gsi;
943
944 stmt = gsi_stmt (*bsi);
945 lhs = gimple_assign_lhs (stmt);
946 type = TREE_TYPE (lhs);
947
948 var = create_tmp_var (type, "reciptmp");
949 add_referenced_var (var);
950 DECL_GIMPLE_REG_P (var) = 1;
951
952 real_one = build_one_cst (type);
953
954 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR,
955 var, real_one, gimple_assign_rhs2 (stmt));
956 name = make_ssa_name (var, stmt1);
957 gimple_assign_set_lhs (stmt1, name);
958
959 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
960 gimple_assign_rhs1 (stmt));
961
962 /* Replace division stmt with reciprocal and multiply stmts.
963 The multiply stmt is not invariant, so update iterator
964 and avoid rescanning. */
965 gsi = *bsi;
966 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
967 gsi_replace (&gsi, stmt2, true);
968
969 /* Continue processing with invariant reciprocal statement. */
970 return stmt1;
971 }
972
973 /* Check if the pattern at *BSI is a bittest of the form
974 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
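/* E.g. (illustrative; SSA names are made up), with B invariant in the loop
   and A not:

     t_1 = a_2 >> b_3;
     c_4 = t_1 & 1;
     if (c_4 != 0) ...

   becomes

     shifttmp_5 = 1 << b_3;      <-- invariant, hoistable
     t_6 = a_2 & shifttmp_5;
     if (t_6 != 0) ...
 */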
975
976 static gimple
977 rewrite_bittest (gimple_stmt_iterator *bsi)
978 {
979 gimple stmt, use_stmt, stmt1, stmt2;
980 tree lhs, var, name, t, a, b;
981 use_operand_p use;
982
983 stmt = gsi_stmt (*bsi);
984 lhs = gimple_assign_lhs (stmt);
985
986 /* Verify that the single use of lhs is a comparison against zero. */
987 if (TREE_CODE (lhs) != SSA_NAME
988 || !single_imm_use (lhs, &use, &use_stmt)
989 || gimple_code (use_stmt) != GIMPLE_COND)
990 return stmt;
991 if (gimple_cond_lhs (use_stmt) != lhs
992 || (gimple_cond_code (use_stmt) != NE_EXPR
993 && gimple_cond_code (use_stmt) != EQ_EXPR)
994 || !integer_zerop (gimple_cond_rhs (use_stmt)))
995 return stmt;
996
997 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
998 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
999 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
1000 return stmt;
1001
1002 /* There is a conversion in between possibly inserted by fold. */
1003 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
1004 {
1005 t = gimple_assign_rhs1 (stmt1);
1006 if (TREE_CODE (t) != SSA_NAME
1007 || !has_single_use (t))
1008 return stmt;
1009 stmt1 = SSA_NAME_DEF_STMT (t);
1010 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
1011 return stmt;
1012 }
1013
1014 /* Verify that B is loop invariant but A is not. Verify that with
1015 all the stmt walking we are still in the same loop. */
1016 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
1017 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
1018 return stmt;
1019
1020 a = gimple_assign_rhs1 (stmt1);
1021 b = gimple_assign_rhs2 (stmt1);
1022
1023 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
1024 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
1025 {
1026 gimple_stmt_iterator rsi;
1027
1028 /* 1 << B */
1029 var = create_tmp_var (TREE_TYPE (a), "shifttmp");
1030 add_referenced_var (var);
1031 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
1032 build_int_cst (TREE_TYPE (a), 1), b);
1033 stmt1 = gimple_build_assign (var, t);
1034 name = make_ssa_name (var, stmt1);
1035 gimple_assign_set_lhs (stmt1, name);
1036
1037 /* A & (1 << B) */
1038 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
1039 stmt2 = gimple_build_assign (var, t);
1040 name = make_ssa_name (var, stmt2);
1041 gimple_assign_set_lhs (stmt2, name);
1042
1043 /* Replace the SSA_NAME we compare against zero. Adjust
1044 the type of zero accordingly. */
1045 SET_USE (use, name);
1046 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
1047
1048 /* Don't use gsi_replace here, none of the new assignments sets
1049 the variable originally set in stmt. Move bsi to stmt1, and
1050 then remove the original stmt, so that we get a chance to
1051 retain debug info for it. */
1052 rsi = *bsi;
1053 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
1054 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
1055 gsi_remove (&rsi, true);
1056
1057 return stmt1;
1058 }
1059
1060 return stmt;
1061 }
1062
1063
 1064 /* Determine the outermost loops in which statements in basic block BB are
 1065    invariant, and record them in the LIM_DATA associated with the statements.
1066 Callback for walk_dominator_tree. */
1067
1068 static void
1069 determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
1070 basic_block bb)
1071 {
1072 enum move_pos pos;
1073 gimple_stmt_iterator bsi;
1074 gimple stmt;
1075 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
1076 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
1077 struct lim_aux_data *lim_data;
1078
1079 if (!loop_outer (bb->loop_father))
1080 return;
1081
1082 if (dump_file && (dump_flags & TDF_DETAILS))
1083 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1084 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1085
 1086   /* Look at PHI nodes, but only if there are at most two.
1087 ??? We could relax this further by post-processing the inserted
1088 code and transforming adjacent cond-exprs with the same predicate
1089 to control flow again. */
1090 bsi = gsi_start_phis (bb);
1091 if (!gsi_end_p (bsi)
1092 && ((gsi_next (&bsi), gsi_end_p (bsi))
1093 || (gsi_next (&bsi), gsi_end_p (bsi))))
1094 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1095 {
1096 stmt = gsi_stmt (bsi);
1097
1098 pos = movement_possibility (stmt);
1099 if (pos == MOVE_IMPOSSIBLE)
1100 continue;
1101
1102 lim_data = init_lim_data (stmt);
1103 lim_data->always_executed_in = outermost;
1104
1105 if (!determine_max_movement (stmt, false))
1106 {
1107 lim_data->max_loop = NULL;
1108 continue;
1109 }
1110
1111 if (dump_file && (dump_flags & TDF_DETAILS))
1112 {
1113 print_gimple_stmt (dump_file, stmt, 2, 0);
1114 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1115 loop_depth (lim_data->max_loop),
1116 lim_data->cost);
1117 }
1118
1119 if (lim_data->cost >= LIM_EXPENSIVE)
1120 set_profitable_level (stmt);
1121 }
1122
1123 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1124 {
1125 stmt = gsi_stmt (bsi);
1126
1127 pos = movement_possibility (stmt);
1128 if (pos == MOVE_IMPOSSIBLE)
1129 {
1130 if (nonpure_call_p (stmt))
1131 {
1132 maybe_never = true;
1133 outermost = NULL;
1134 }
1135 /* Make sure to note always_executed_in for stores to make
1136 store-motion work. */
1137 else if (stmt_makes_single_store (stmt))
1138 {
1139 struct lim_aux_data *lim_data = init_lim_data (stmt);
1140 lim_data->always_executed_in = outermost;
1141 }
1142 continue;
1143 }
1144
1145 if (is_gimple_assign (stmt)
1146 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1147 == GIMPLE_BINARY_RHS))
1148 {
1149 tree op0 = gimple_assign_rhs1 (stmt);
1150 tree op1 = gimple_assign_rhs2 (stmt);
1151 struct loop *ol1 = outermost_invariant_loop (op1,
1152 loop_containing_stmt (stmt));
1153
 1154           /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
 1155              reciprocal to be hoisted out of the loop, saving an expensive divide.  */
1156 if (pos == MOVE_POSSIBLE
1157 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1158 && flag_unsafe_math_optimizations
1159 && !flag_trapping_math
1160 && ol1 != NULL
1161 && outermost_invariant_loop (op0, ol1) == NULL)
1162 stmt = rewrite_reciprocal (&bsi);
1163
1164 /* If the shift count is invariant, convert (A >> B) & 1 to
1165 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1166 saving an expensive shift. */
1167 if (pos == MOVE_POSSIBLE
1168 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1169 && integer_onep (op1)
1170 && TREE_CODE (op0) == SSA_NAME
1171 && has_single_use (op0))
1172 stmt = rewrite_bittest (&bsi);
1173 }
1174
1175 lim_data = init_lim_data (stmt);
1176 lim_data->always_executed_in = outermost;
1177
1178 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1179 continue;
1180
1181 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1182 {
1183 lim_data->max_loop = NULL;
1184 continue;
1185 }
1186
1187 if (dump_file && (dump_flags & TDF_DETAILS))
1188 {
1189 print_gimple_stmt (dump_file, stmt, 2, 0);
1190 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1191 loop_depth (lim_data->max_loop),
1192 lim_data->cost);
1193 }
1194
1195 if (lim_data->cost >= LIM_EXPENSIVE)
1196 set_profitable_level (stmt);
1197 }
1198 }
1199
 1200 /* For each statement determines the outermost loop in which it is invariant,
 1201    the statements on whose motion it depends, and the cost of the computation.
1202 This information is stored to the LIM_DATA structure associated with
1203 each statement. */
1204
1205 static void
1206 determine_invariantness (void)
1207 {
1208 struct dom_walk_data walk_data;
1209
1210 memset (&walk_data, 0, sizeof (struct dom_walk_data));
1211 walk_data.dom_direction = CDI_DOMINATORS;
1212 walk_data.before_dom_children = determine_invariantness_stmt;
1213
1214 init_walk_dominator_tree (&walk_data);
1215 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1216 fini_walk_dominator_tree (&walk_data);
1217 }
1218
1219 /* Hoist the statements in basic block BB out of the loops prescribed by
1220 data stored in LIM_DATA structures associated with each statement. Callback
1221 for walk_dominator_tree. */
1222
1223 static void
1224 move_computations_stmt (struct dom_walk_data *dw_data,
1225 basic_block bb)
1226 {
1227 struct loop *level;
1228 gimple_stmt_iterator bsi;
1229 gimple stmt;
1230 unsigned cost = 0;
1231 struct lim_aux_data *lim_data;
1232
1233 if (!loop_outer (bb->loop_father))
1234 return;
1235
1236 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1237 {
1238 gimple new_stmt;
1239 stmt = gsi_stmt (bsi);
1240
1241 lim_data = get_lim_data (stmt);
1242 if (lim_data == NULL)
1243 {
1244 gsi_next (&bsi);
1245 continue;
1246 }
1247
1248 cost = lim_data->cost;
1249 level = lim_data->tgt_loop;
1250 clear_lim_data (stmt);
1251
1252 if (!level)
1253 {
1254 gsi_next (&bsi);
1255 continue;
1256 }
1257
1258 if (dump_file && (dump_flags & TDF_DETAILS))
1259 {
1260 fprintf (dump_file, "Moving PHI node\n");
1261 print_gimple_stmt (dump_file, stmt, 0, 0);
1262 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1263 cost, level->num);
1264 }
1265
1266 if (gimple_phi_num_args (stmt) == 1)
1267 {
1268 tree arg = PHI_ARG_DEF (stmt, 0);
1269 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1270 gimple_phi_result (stmt),
1271 arg, NULL_TREE);
1272 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1273 }
1274 else
1275 {
1276 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1277 gimple cond = gsi_stmt (gsi_last_bb (dom));
1278 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1279 /* Get the PHI arguments corresponding to the true and false
1280 edges of COND. */
1281 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1282 gcc_assert (arg0 && arg1);
1283 t = build2 (gimple_cond_code (cond), boolean_type_node,
1284 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1285 new_stmt = gimple_build_assign_with_ops3 (COND_EXPR,
1286 gimple_phi_result (stmt),
1287 t, arg0, arg1);
1288 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1289 *((unsigned int *)(dw_data->global_data)) |= TODO_cleanup_cfg;
1290 }
1291 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1292 remove_phi_node (&bsi, false);
1293 }
1294
1295 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1296 {
1297 stmt = gsi_stmt (bsi);
1298
1299 lim_data = get_lim_data (stmt);
1300 if (lim_data == NULL)
1301 {
1302 gsi_next (&bsi);
1303 continue;
1304 }
1305
1306 cost = lim_data->cost;
1307 level = lim_data->tgt_loop;
1308 clear_lim_data (stmt);
1309
1310 if (!level)
1311 {
1312 gsi_next (&bsi);
1313 continue;
1314 }
1315
 1316       /* We do not really want to move the conditional out of the loop; we just
1317 placed it here to force its operands to be moved if necessary. */
1318 if (gimple_code (stmt) == GIMPLE_COND)
1319 continue;
1320
1321 if (dump_file && (dump_flags & TDF_DETAILS))
1322 {
1323 fprintf (dump_file, "Moving statement\n");
1324 print_gimple_stmt (dump_file, stmt, 0, 0);
1325 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1326 cost, level->num);
1327 }
1328
1329 mark_virtual_ops_for_renaming (stmt);
1330 gsi_remove (&bsi, false);
1331 gsi_insert_on_edge (loop_preheader_edge (level), stmt);
1332 }
1333 }
1334
1335 /* Hoist the statements out of the loops prescribed by data stored in
 1336    LIM_DATA structures associated with each statement.  */
1337
1338 static unsigned int
1339 move_computations (void)
1340 {
1341 struct dom_walk_data walk_data;
1342 unsigned int todo = 0;
1343
1344 memset (&walk_data, 0, sizeof (struct dom_walk_data));
1345 walk_data.global_data = &todo;
1346 walk_data.dom_direction = CDI_DOMINATORS;
1347 walk_data.before_dom_children = move_computations_stmt;
1348
1349 init_walk_dominator_tree (&walk_data);
1350 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
1351 fini_walk_dominator_tree (&walk_data);
1352
1353 gsi_commit_edge_inserts ();
1354 if (need_ssa_update_p (cfun))
1355 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1356
1357 return todo;
1358 }
1359
1360 /* Checks whether the statement defining variable *INDEX can be hoisted
1361 out of the loop passed in DATA. Callback for for_each_index. */
1362
1363 static bool
1364 may_move_till (tree ref, tree *index, void *data)
1365 {
1366 struct loop *loop = (struct loop *) data, *max_loop;
1367
1368 /* If REF is an array reference, check also that the step and the lower
 1369      bound are invariant in LOOP.  */
1370 if (TREE_CODE (ref) == ARRAY_REF)
1371 {
1372 tree step = TREE_OPERAND (ref, 3);
1373 tree lbound = TREE_OPERAND (ref, 2);
1374
1375 max_loop = outermost_invariant_loop (step, loop);
1376 if (!max_loop)
1377 return false;
1378
1379 max_loop = outermost_invariant_loop (lbound, loop);
1380 if (!max_loop)
1381 return false;
1382 }
1383
1384 max_loop = outermost_invariant_loop (*index, loop);
1385 if (!max_loop)
1386 return false;
1387
1388 return true;
1389 }
1390
 1391 /* If OP is an SSA_NAME, force the statement that defines it to be
 1392    moved out of the LOOP.  ORIG_LOOP is the loop in which OP is used.  */
1393
1394 static void
1395 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1396 {
1397 gimple stmt;
1398
1399 if (!op
1400 || is_gimple_min_invariant (op))
1401 return;
1402
1403 gcc_assert (TREE_CODE (op) == SSA_NAME);
1404
1405 stmt = SSA_NAME_DEF_STMT (op);
1406 if (gimple_nop_p (stmt))
1407 return;
1408
1409 set_level (stmt, orig_loop, loop);
1410 }
1411
 1412 /* Forces the statements defining invariants in REF (and *INDEX) to be moved out of
1413 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1414 for_each_index. */
1415
1416 struct fmt_data
1417 {
1418 struct loop *loop;
1419 struct loop *orig_loop;
1420 };
1421
1422 static bool
1423 force_move_till (tree ref, tree *index, void *data)
1424 {
1425 struct fmt_data *fmt_data = (struct fmt_data *) data;
1426
1427 if (TREE_CODE (ref) == ARRAY_REF)
1428 {
1429 tree step = TREE_OPERAND (ref, 3);
1430 tree lbound = TREE_OPERAND (ref, 2);
1431
1432 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1433 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1434 }
1435
1436 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1437
1438 return true;
1439 }
1440
1441 /* A hash function for struct mem_ref object OBJ. */
1442
1443 static hashval_t
1444 memref_hash (const void *obj)
1445 {
1446 const struct mem_ref *const mem = (const struct mem_ref *) obj;
1447
1448 return mem->hash;
1449 }
1450
1451 /* An equality function for struct mem_ref object OBJ1 with
1452 memory reference OBJ2. */
1453
1454 static int
1455 memref_eq (const void *obj1, const void *obj2)
1456 {
1457 const struct mem_ref *const mem1 = (const struct mem_ref *) obj1;
1458
1459 return operand_equal_p (mem1->mem, (const_tree) obj2, 0);
1460 }
1461
1462 /* Releases list of memory reference locations ACCS. */
1463
1464 static void
1465 free_mem_ref_locs (mem_ref_locs_p accs)
1466 {
1467 unsigned i;
1468 mem_ref_loc_p loc;
1469
1470 if (!accs)
1471 return;
1472
1473 FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
1474 free (loc);
1475 VEC_free (mem_ref_loc_p, heap, accs->locs);
1476 free (accs);
1477 }
1478
1479 /* A function to free the mem_ref object OBJ. */
1480
1481 static void
1482 memref_free (void *obj)
1483 {
1484 struct mem_ref *const mem = (struct mem_ref *) obj;
1485 unsigned i;
1486 mem_ref_locs_p accs;
1487
1488 BITMAP_FREE (mem->stored);
1489 BITMAP_FREE (mem->indep_loop);
1490 BITMAP_FREE (mem->dep_loop);
1491 BITMAP_FREE (mem->indep_ref);
1492 BITMAP_FREE (mem->dep_ref);
1493
1494 FOR_EACH_VEC_ELT (mem_ref_locs_p, mem->accesses_in_loop, i, accs)
1495 free_mem_ref_locs (accs);
1496 VEC_free (mem_ref_locs_p, heap, mem->accesses_in_loop);
1497
1498 free (mem);
1499 }
1500
1501 /* Allocates and returns a memory reference description for MEM whose hash
1502 value is HASH and id is ID. */
1503
1504 static mem_ref_p
1505 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1506 {
1507 mem_ref_p ref = XNEW (struct mem_ref);
1508 ref->mem = mem;
1509 ref->id = id;
1510 ref->hash = hash;
1511 ref->stored = BITMAP_ALLOC (NULL);
1512 ref->indep_loop = BITMAP_ALLOC (NULL);
1513 ref->dep_loop = BITMAP_ALLOC (NULL);
1514 ref->indep_ref = BITMAP_ALLOC (NULL);
1515 ref->dep_ref = BITMAP_ALLOC (NULL);
1516 ref->accesses_in_loop = NULL;
1517
1518 return ref;
1519 }
1520
1521 /* Allocates and returns the new list of locations. */
1522
1523 static mem_ref_locs_p
1524 mem_ref_locs_alloc (void)
1525 {
1526 mem_ref_locs_p accs = XNEW (struct mem_ref_locs);
1527 accs->locs = NULL;
1528 return accs;
1529 }
1530
1531 /* Records memory reference location *LOC in LOOP to the memory reference
1532 description REF. The reference occurs in statement STMT. */
1533
1534 static void
1535 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1536 {
1537 mem_ref_loc_p aref = XNEW (struct mem_ref_loc);
1538 mem_ref_locs_p accs;
1539 bitmap ril = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1540
1541 if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
1542 <= (unsigned) loop->num)
1543 VEC_safe_grow_cleared (mem_ref_locs_p, heap, ref->accesses_in_loop,
1544 loop->num + 1);
1545 accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
1546 if (!accs)
1547 {
1548 accs = mem_ref_locs_alloc ();
1549 VEC_replace (mem_ref_locs_p, ref->accesses_in_loop, loop->num, accs);
1550 }
1551
1552 aref->stmt = stmt;
1553 aref->ref = loc;
1554
1555 VEC_safe_push (mem_ref_loc_p, heap, accs->locs, aref);
1556 bitmap_set_bit (ril, ref->id);
1557 }
1558
 1559 /* Marks reference REF as stored in LOOP and in all loops that contain it.  */
1560
1561 static void
1562 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1563 {
1564 for (;
1565 loop != current_loops->tree_root
1566 && !bitmap_bit_p (ref->stored, loop->num);
1567 loop = loop_outer (loop))
1568 bitmap_set_bit (ref->stored, loop->num);
1569 }
1570
1571 /* Gathers memory references in statement STMT in LOOP, storing the
1572 information about them in the memory_accesses structure. Marks
1573 the vops accessed through unrecognized statements there as
1574 well. */
1575
1576 static void
1577 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1578 {
1579 tree *mem = NULL;
1580 hashval_t hash;
1581 PTR *slot;
1582 mem_ref_p ref;
1583 bool is_stored;
1584 unsigned id;
1585
1586 if (!gimple_vuse (stmt))
1587 return;
1588
1589 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1590 if (!mem)
1591 {
1592 id = VEC_length (mem_ref_p, memory_accesses.refs_list);
1593 ref = mem_ref_alloc (error_mark_node, 0, id);
1594 VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
1595 if (dump_file && (dump_flags & TDF_DETAILS))
1596 {
1597 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1598 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1599 }
1600 if (gimple_vdef (stmt))
1601 mark_ref_stored (ref, loop);
1602 record_mem_ref_loc (ref, loop, stmt, mem);
1603 return;
1604 }
1605
1606 hash = iterative_hash_expr (*mem, 0);
1607 slot = htab_find_slot_with_hash (memory_accesses.refs, *mem, hash, INSERT);
1608
1609 if (*slot)
1610 {
1611 ref = (mem_ref_p) *slot;
1612 id = ref->id;
1613 }
1614 else
1615 {
1616 id = VEC_length (mem_ref_p, memory_accesses.refs_list);
1617 ref = mem_ref_alloc (*mem, hash, id);
1618 VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
1619 *slot = ref;
1620
1621 if (dump_file && (dump_flags & TDF_DETAILS))
1622 {
1623 fprintf (dump_file, "Memory reference %u: ", id);
1624 print_generic_expr (dump_file, ref->mem, TDF_SLIM);
1625 fprintf (dump_file, "\n");
1626 }
1627 }
1628
1629 if (is_stored)
1630 mark_ref_stored (ref, loop);
1631
1632 record_mem_ref_loc (ref, loop, stmt, mem);
1633 return;
1634 }
1635
1636 /* Gathers memory references in loops. */
1637
1638 static void
1639 gather_mem_refs_in_loops (void)
1640 {
1641 gimple_stmt_iterator bsi;
1642 basic_block bb;
1643 struct loop *loop;
1644 loop_iterator li;
1645 bitmap lrefs, alrefs, alrefso;
1646
1647 FOR_EACH_BB (bb)
1648 {
1649 loop = bb->loop_father;
1650 if (loop == current_loops->tree_root)
1651 continue;
1652
1653 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1654 gather_mem_refs_stmt (loop, gsi_stmt (bsi));
1655 }
1656
1657 /* Propagate the information about accessed memory references up
1658 the loop hierarchy. */
1659 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1660 {
1661 lrefs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1662 alrefs = VEC_index (bitmap, memory_accesses.all_refs_in_loop, loop->num);
1663 bitmap_ior_into (alrefs, lrefs);
1664
1665 if (loop_outer (loop) == current_loops->tree_root)
1666 continue;
1667
1668 alrefso = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
1669 loop_outer (loop)->num);
1670 bitmap_ior_into (alrefso, alrefs);
1671 }
1672 }
1673
1674 /* Create a mapping from virtual operands to references that touch them
1675 in LOOP. */
1676
1677 static void
1678 create_vop_ref_mapping_loop (struct loop *loop)
1679 {
1680 bitmap refs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
1681 struct loop *sloop;
1682 bitmap_iterator bi;
1683 unsigned i;
1684 mem_ref_p ref;
1685
1686 EXECUTE_IF_SET_IN_BITMAP (refs, 0, i, bi)
1687 {
1688 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
1689 for (sloop = loop; sloop != current_loops->tree_root;
1690 sloop = loop_outer (sloop))
1691 if (bitmap_bit_p (ref->stored, loop->num))
1692 {
1693 bitmap refs_stored
1694 = VEC_index (bitmap, memory_accesses.all_refs_stored_in_loop,
1695 sloop->num);
1696 bitmap_set_bit (refs_stored, ref->id);
1697 }
1698 }
1699 }
1700
1701 /* For each non-clobbered virtual operand and each loop, record the memory
1702 references in this loop that touch the operand. */
1703
1704 static void
1705 create_vop_ref_mapping (void)
1706 {
1707 loop_iterator li;
1708 struct loop *loop;
1709
1710 FOR_EACH_LOOP (li, loop, 0)
1711 {
1712 create_vop_ref_mapping_loop (loop);
1713 }
1714 }
1715
1716 /* Gathers information about memory accesses in the loops. */
1717
1718 static void
1719 analyze_memory_references (void)
1720 {
1721 unsigned i;
1722 bitmap empty;
1723
1724 memory_accesses.refs
1725 = htab_create (100, memref_hash, memref_eq, memref_free);
1726 memory_accesses.refs_list = NULL;
1727 memory_accesses.refs_in_loop = VEC_alloc (bitmap, heap,
1728 number_of_loops ());
1729 memory_accesses.all_refs_in_loop = VEC_alloc (bitmap, heap,
1730 number_of_loops ());
1731 memory_accesses.all_refs_stored_in_loop = VEC_alloc (bitmap, heap,
1732 number_of_loops ());
1733
1734 for (i = 0; i < number_of_loops (); i++)
1735 {
1736 empty = BITMAP_ALLOC (NULL);
1737 VEC_quick_push (bitmap, memory_accesses.refs_in_loop, empty);
1738 empty = BITMAP_ALLOC (NULL);
1739 VEC_quick_push (bitmap, memory_accesses.all_refs_in_loop, empty);
1740 empty = BITMAP_ALLOC (NULL);
1741 VEC_quick_push (bitmap, memory_accesses.all_refs_stored_in_loop, empty);
1742 }
1743
1744 memory_accesses.ttae_cache = NULL;
1745
1746 gather_mem_refs_in_loops ();
1747 create_vop_ref_mapping ();
1748 }
1749
1750 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1751 tree_to_aff_combination_expand. */
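/* For instance (illustrative): a[i_1] and a[i_1 + 1] have the same base and
   affine offsets that differ by exactly one element size, so after the
   expansion below aff_comb_cannot_overlap_p can conclude that the two
   accesses cannot overlap and false is returned.  */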
1752
1753 static bool
1754 mem_refs_may_alias_p (tree mem1, tree mem2, struct pointer_map_t **ttae_cache)
1755 {
1756 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1757 object and their offset differ in such a way that the locations cannot
1758 overlap, then they cannot alias. */
1759 double_int size1, size2;
1760 aff_tree off1, off2;
1761
1762 /* Perform basic offset and type-based disambiguation. */
1763 if (!refs_may_alias_p (mem1, mem2))
1764 return false;
1765
1766 /* The expansion of addresses may be a bit expensive, thus we only do
1767 the check at -O2 and higher optimization levels. */
1768 if (optimize < 2)
1769 return true;
1770
1771 get_inner_reference_aff (mem1, &off1, &size1);
1772 get_inner_reference_aff (mem2, &off2, &size2);
1773 aff_combination_expand (&off1, ttae_cache);
1774 aff_combination_expand (&off2, ttae_cache);
1775 aff_combination_scale (&off1, double_int_minus_one);
1776 aff_combination_add (&off2, &off1);
1777
1778 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1779 return false;
1780
1781 return true;
1782 }
1783
1784 /* Rewrites location LOC by TMP_VAR. */
1785
1786 static void
1787 rewrite_mem_ref_loc (mem_ref_loc_p loc, tree tmp_var)
1788 {
1789 mark_virtual_ops_for_renaming (loc->stmt);
1790 *loc->ref = tmp_var;
1791 update_stmt (loc->stmt);
1792 }
1793
1794 /* Adds all locations of REF in LOOP and its subloops to LOCS. */
1795
1796 static void
1797 get_all_locs_in_loop (struct loop *loop, mem_ref_p ref,
1798 VEC (mem_ref_loc_p, heap) **locs)
1799 {
1800 mem_ref_locs_p accs;
1801 unsigned i;
1802 mem_ref_loc_p loc;
1803 bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
1804 loop->num);
1805 struct loop *subloop;
1806
1807 if (!bitmap_bit_p (refs, ref->id))
1808 return;
1809
1810 if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
1811 > (unsigned) loop->num)
1812 {
1813 accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
1814 if (accs)
1815 {
1816 FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
1817 VEC_safe_push (mem_ref_loc_p, heap, *locs, loc);
1818 }
1819 }
1820
1821 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1822 get_all_locs_in_loop (subloop, ref, locs);
1823 }
1824
1825 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1826
1827 static void
1828 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1829 {
1830 unsigned i;
1831 mem_ref_loc_p loc;
1832 VEC (mem_ref_loc_p, heap) *locs = NULL;
1833
1834 get_all_locs_in_loop (loop, ref, &locs);
1835 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
1836 rewrite_mem_ref_loc (loc, tmp_var);
1837 VEC_free (mem_ref_loc_p, heap, locs);
1838 }
1839
1840 /* The name and the length of the currently generated variable
1841 for lsm. */
1842 #define MAX_LSM_NAME_LENGTH 40
1843 static char lsm_tmp_name[MAX_LSM_NAME_LENGTH + 1];
1844 static int lsm_tmp_name_length;
1845
1846 /* Adds S to lsm_tmp_name. */
1847
1848 static void
1849 lsm_tmp_name_add (const char *s)
1850 {
1851 int l = strlen (s) + lsm_tmp_name_length;
1852 if (l > MAX_LSM_NAME_LENGTH)
1853 return;
1854
1855 strcpy (lsm_tmp_name + lsm_tmp_name_length, s);
1856 lsm_tmp_name_length = l;
1857 }
1858
 1859 /* Stores the name for the temporary variable that replaces REF into
 1860    lsm_tmp_name.  */
1861
1862 static void
1863 gen_lsm_tmp_name (tree ref)
1864 {
1865 const char *name;
1866
1867 switch (TREE_CODE (ref))
1868 {
1869 case MEM_REF:
1870 case TARGET_MEM_REF:
1871 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1872 lsm_tmp_name_add ("_");
1873 break;
1874
1875 case ADDR_EXPR:
1876 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1877 break;
1878
1879 case BIT_FIELD_REF:
1880 case VIEW_CONVERT_EXPR:
1881 case ARRAY_RANGE_REF:
1882 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1883 break;
1884
1885 case REALPART_EXPR:
1886 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1887 lsm_tmp_name_add ("_RE");
1888 break;
1889
1890 case IMAGPART_EXPR:
1891 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1892 lsm_tmp_name_add ("_IM");
1893 break;
1894
1895 case COMPONENT_REF:
1896 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1897 lsm_tmp_name_add ("_");
1898 name = get_name (TREE_OPERAND (ref, 1));
1899 if (!name)
1900 name = "F";
1901 lsm_tmp_name_add (name);
1902 break;
1903
1904 case ARRAY_REF:
1905 gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
1906 lsm_tmp_name_add ("_I");
1907 break;
1908
1909 case SSA_NAME:
1910 ref = SSA_NAME_VAR (ref);
1911 /* Fallthru. */
1912
1913 case VAR_DECL:
1914 case PARM_DECL:
1915 name = get_name (ref);
1916 if (!name)
1917 name = "D";
1918 lsm_tmp_name_add (name);
1919 break;
1920
1921 case STRING_CST:
1922 lsm_tmp_name_add ("S");
1923 break;
1924
1925 case RESULT_DECL:
1926 lsm_tmp_name_add ("R");
1927 break;
1928
1929 case INTEGER_CST:
1930 /* Nothing. */
1931 break;
1932
1933 default:
1934 gcc_unreachable ();
1935 }
1936 }
1937
 1938 /* Determines the name for the temporary variable that replaces REF.
1939 The name is accumulated into the lsm_tmp_name variable.
1940 N is added to the name of the temporary. */
1941
1942 char *
1943 get_lsm_tmp_name (tree ref, unsigned n)
1944 {
1945 char ns[2];
1946
1947 lsm_tmp_name_length = 0;
1948 gen_lsm_tmp_name (ref);
1949 lsm_tmp_name_add ("_lsm");
1950 if (n < 10)
1951 {
1952 ns[0] = '0' + n;
1953 ns[1] = 0;
1954 lsm_tmp_name_add (ns);
1955 }
1956 return lsm_tmp_name;
1957 }
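
/* For illustration, with N == 0 a few hypothetical references yield:

     a.b      ->  "a_b_lsm0"     (COMPONENT_REF appends the field name)
     a.b[i]   ->  "a_b_I_lsm0"   (ARRAY_REF appends "_I")
     *p       ->  "p__lsm0"      (MEM_REF appends "_"; an SSA_NAME falls
                                  through to its underlying VAR_DECL)

   The digit is appended only for N < 10 (the callers below pass ~0, so
   no digit is added), pieces whose addition would exceed
   MAX_LSM_NAME_LENGTH are silently dropped by lsm_tmp_name_add, and
   make_rename_temp later uniquifies the name.  */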
1958
1959 struct prev_flag_edges {
1960 /* Edge to insert new flag comparison code. */
1961 edge append_cond_position;
1962
1963 /* Edge for fall through from previous flag comparison. */
1964 edge last_cond_fallthru;
1965 };
1966
1967 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1968 MEM along edge EX.
1969
1970 The store is only done if MEM has changed. We do this so no
1971 changes to MEM occur on code paths that did not originally store
1972 into it.
1973
1974 The common case for execute_sm will transform:
1975
1976 for (...) {
1977 if (foo)
1978 stuff;
1979 else
1980 MEM = TMP_VAR;
1981 }
1982
1983 into:
1984
1985 lsm = MEM;
1986 for (...) {
1987 if (foo)
1988 stuff;
1989 else
1990 lsm = TMP_VAR;
1991 }
1992 MEM = lsm;
1993
1994 This function will generate:
1995
1996 lsm = MEM;
1997
1998 lsm_flag = false;
1999 ...
2000 for (...) {
2001 if (foo)
2002 stuff;
2003 else {
2004 lsm = TMP_VAR;
2005 lsm_flag = true;
2006 }
2007 }
2008 if (lsm_flag) <--
2009 MEM = lsm; <--
2010 */
2011
2012 static void
2013 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
2014 {
2015 basic_block new_bb, then_bb, old_dest;
2016 bool loop_has_only_one_exit;
2017 edge then_old_edge, orig_ex = ex;
2018 gimple_stmt_iterator gsi;
2019 gimple stmt;
2020 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
2021
2022 /* ?? Insert store after previous store if applicable. See note
2023 below. */
2024 if (prev_edges)
2025 ex = prev_edges->append_cond_position;
2026
2027 loop_has_only_one_exit = single_pred_p (ex->dest);
2028
2029 if (loop_has_only_one_exit)
2030 ex = split_block_after_labels (ex->dest);
2031
2032 old_dest = ex->dest;
2033 new_bb = split_edge (ex);
2034 then_bb = create_empty_bb (new_bb);
2035 if (current_loops && new_bb->loop_father)
2036 add_bb_to_loop (then_bb, new_bb->loop_father);
2037
2038 gsi = gsi_start_bb (new_bb);
2039 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
2040 NULL_TREE, NULL_TREE);
2041 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2042
2043 gsi = gsi_start_bb (then_bb);
2044 /* Insert actual store. */
2045 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
2046 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2047
2048 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
2049 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
2050 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
2051
2052 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
2053
2054 if (prev_edges)
2055 {
2056 basic_block prevbb = prev_edges->last_cond_fallthru->src;
2057 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
2058 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
2059 set_immediate_dominator (CDI_DOMINATORS, old_dest,
2060 recompute_dominator (CDI_DOMINATORS, old_dest));
2061 }
2062
2063 /* ?? Because stores may alias, they must happen in the exact
2064 sequence in which they originally happened. Save the position right after
2065 the (_lsm) store we just created so we can continue appending after
2066 it and maintain the original order. */
2067 {
2068 struct prev_flag_edges *p;
2069
2070 if (orig_ex->aux)
2071 orig_ex->aux = NULL;
2072 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
2073 p = (struct prev_flag_edges *) orig_ex->aux;
2074 p->append_cond_position = then_old_edge;
2075 p->last_cond_fallthru = find_edge (new_bb, old_dest);
2076 orig_ex->aux = (void *) p;
2077 }
2078
2079 if (!loop_has_only_one_exit)
2080 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
2081 {
2082 gimple phi = gsi_stmt (gsi);
2083 unsigned i;
2084
2085 for (i = 0; i < gimple_phi_num_args (phi); i++)
2086 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2087 {
2088 tree arg = gimple_phi_arg_def (phi, i);
2089 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2090 update_stmt (phi);
2091 }
2092 }
2093 /* Remove the original fall through edge. This was the
2094 single_succ_edge (new_bb). */
2095 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
2096 }
2097
2098 /* Helper function for execute_sm. At every location where REF is
2099 set, emit code setting an appropriate flag to indicate the store. */
2100
2101 static tree
2102 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
2103 {
2104 unsigned i;
2105 mem_ref_loc_p loc;
2106 tree flag;
2107 VEC (mem_ref_loc_p, heap) *locs = NULL;
2108 char *str = get_lsm_tmp_name (ref->mem, ~0);
2109
2110 lsm_tmp_name_add ("_flag");
2111 flag = make_rename_temp (boolean_type_node, str);
2112 get_all_locs_in_loop (loop, ref, &locs);
2113 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
2114 {
2115 gimple_stmt_iterator gsi;
2116 gimple stmt;
2117
2118 gsi = gsi_for_stmt (loc->stmt);
2119 stmt = gimple_build_assign (flag, boolean_true_node);
2120 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2121 }
2122 VEC_free (mem_ref_loc_p, heap, locs);
2123 return flag;
2124 }
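
/* For illustration: for a hypothetical store "G = x;" recorded as a
   location of REF in LOOP, the loop above inserts

     G = x;
     G_lsm_flag = true;

   right after it; the flag name is the get_lsm_tmp_name result with the
   "_flag" suffix (further uniquified by make_rename_temp), and
   rewrite_mem_refs later replaces the reference to G itself with the
   store motion temporary.  */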
2125
2126 /* Executes store motion of memory reference REF from LOOP.
2127 Exits from the LOOP are stored in EXITS. The initialization of the
2128 temporary variable is put in the preheader of the loop, and assignments
2129 to the reference from the temporary variable are emitted on the exits. */
2130
2131 static void
2132 execute_sm (struct loop *loop, VEC (edge, heap) *exits, mem_ref_p ref)
2133 {
2134 tree tmp_var, store_flag;
2135 unsigned i;
2136 gimple load;
2137 struct fmt_data fmt_data;
2138 edge ex, latch_edge;
2139 struct lim_aux_data *lim_data;
2140 bool multi_threaded_model_p = false;
2141
2142 if (dump_file && (dump_flags & TDF_DETAILS))
2143 {
2144 fprintf (dump_file, "Executing store motion of ");
2145 print_generic_expr (dump_file, ref->mem, 0);
2146 fprintf (dump_file, " from loop %d\n", loop->num);
2147 }
2148
2149 tmp_var = make_rename_temp (TREE_TYPE (ref->mem),
2150 get_lsm_tmp_name (ref->mem, ~0));
2151
2152 fmt_data.loop = loop;
2153 fmt_data.orig_loop = loop;
2154 for_each_index (&ref->mem, force_move_till, &fmt_data);
2155
2156 if ((flag_tm && block_in_transaction (loop_preheader_edge (loop)->src))
2157 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
2158 multi_threaded_model_p = true;
2159
2160 if (multi_threaded_model_p)
2161 store_flag = execute_sm_if_changed_flag_set (loop, ref);
2162
2163 rewrite_mem_refs (loop, ref, tmp_var);
2164
2165 /* Emit the load code into the latch, so that we are sure it will
2166 be processed after all dependencies. */
2167 latch_edge = loop_latch_edge (loop);
2168
2169 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2170 load altogether, since the store is predicated by a flag. We
2171 could do the load only if it was originally in the loop. */
2172 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem));
2173 lim_data = init_lim_data (load);
2174 lim_data->max_loop = loop;
2175 lim_data->tgt_loop = loop;
2176 gsi_insert_on_edge (latch_edge, load);
2177
2178 if (multi_threaded_model_p)
2179 {
2180 load = gimple_build_assign (store_flag, boolean_false_node);
2181 lim_data = init_lim_data (load);
2182 lim_data->max_loop = loop;
2183 lim_data->tgt_loop = loop;
2184 gsi_insert_on_edge (latch_edge, load);
2185 }
2186
2187 /* Sink the store to every exit from the loop. */
2188 FOR_EACH_VEC_ELT (edge, exits, i, ex)
2189 if (!multi_threaded_model_p)
2190 {
2191 gimple store;
2192 store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
2193 gsi_insert_on_edge (ex, store);
2194 }
2195 else
2196 execute_sm_if_changed (ex, ref->mem, tmp_var, store_flag);
2197 }
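
/* For illustration, with a hypothetical global G and the single-threaded
   model (multi_threaded_model_p == false), the net effect of execute_sm on

     for (...)
       G = G + x;

   is

     G_lsm = G;
     for (...)
       G_lsm = G_lsm + x;
     G = G_lsm;

   The load is queued on the latch edge with tgt_loop set to LOOP, so that
   move_computations later hoists it in front of the loop; the stores are
   queued on the exit edges and committed by the gsi_commit_edge_inserts
   call in store_motion.  Exact temporary names come from make_rename_temp.  */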
2198
2199 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2200 edges of the LOOP. */
2201
2202 static void
2203 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2204 VEC (edge, heap) *exits)
2205 {
2206 mem_ref_p ref;
2207 unsigned i;
2208 bitmap_iterator bi;
2209
2210 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2211 {
2212 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2213 execute_sm (loop, exits, ref);
2214 }
2215 }
2216
2217 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2218 make sure REF is always stored to in LOOP. */
2219
2220 static bool
2221 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2222 {
2223 VEC (mem_ref_loc_p, heap) *locs = NULL;
2224 unsigned i;
2225 mem_ref_loc_p loc;
2226 bool ret = false;
2227 struct loop *must_exec;
2228 tree base;
2229
2230 base = get_base_address (ref->mem);
2231 if (INDIRECT_REF_P (base)
2232 || TREE_CODE (base) == MEM_REF)
2233 base = TREE_OPERAND (base, 0);
2234
2235 get_all_locs_in_loop (loop, ref, &locs);
2236 FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
2237 {
2238 if (!get_lim_data (loc->stmt))
2239 continue;
2240
2241 /* If we require an always-executed store, make sure the statement
2242 stores to the reference. */
2243 if (stored_p)
2244 {
2245 tree lhs;
2246 if (!gimple_get_lhs (loc->stmt))
2247 continue;
2248 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2249 if (!lhs)
2250 continue;
2251 if (INDIRECT_REF_P (lhs)
2252 || TREE_CODE (lhs) == MEM_REF)
2253 lhs = TREE_OPERAND (lhs, 0);
2254 if (lhs != base)
2255 continue;
2256 }
2257
2258 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2259 if (!must_exec)
2260 continue;
2261
2262 if (must_exec == loop
2263 || flow_loop_nested_p (must_exec, loop))
2264 {
2265 ret = true;
2266 break;
2267 }
2268 }
2269 VEC_free (mem_ref_loc_p, heap, locs);
2270
2271 return ret;
2272 }
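
/* For illustration, with STORED_P true and a hypothetical global G:

     do
       {
         if (cond)
           G = x;
       }
     while (...);

   yields false -- the store does not happen on every iteration -- while

     do
       G = x;
     while (...);

   yields true, provided the store's block is ALWAYS_EXECUTED_IN LOOP (or
   an enclosing loop) as computed by fill_always_executed_in.  */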
2273
2274 /* Returns true if REF1 and REF2 are independent. */
2275
2276 static bool
2277 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2278 {
2279 if (ref1 == ref2
2280 || bitmap_bit_p (ref1->indep_ref, ref2->id))
2281 return true;
2282 if (bitmap_bit_p (ref1->dep_ref, ref2->id))
2283 return false;
2284 if (!MEM_ANALYZABLE (ref1)
2285 || !MEM_ANALYZABLE (ref2))
2286 return false;
2287
2288 if (dump_file && (dump_flags & TDF_DETAILS))
2289 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2290 ref1->id, ref2->id);
2291
2292 if (mem_refs_may_alias_p (ref1->mem, ref2->mem,
2293 &memory_accesses.ttae_cache))
2294 {
2295 bitmap_set_bit (ref1->dep_ref, ref2->id);
2296 bitmap_set_bit (ref2->dep_ref, ref1->id);
2297 if (dump_file && (dump_flags & TDF_DETAILS))
2298 fprintf (dump_file, "dependent.\n");
2299 return false;
2300 }
2301 else
2302 {
2303 bitmap_set_bit (ref1->indep_ref, ref2->id);
2304 bitmap_set_bit (ref2->indep_ref, ref1->id);
2305 if (dump_file && (dump_flags & TDF_DETAILS))
2306 fprintf (dump_file, "independent.\n");
2307 return true;
2308 }
2309 }
2310
2311 /* Records whether REF is independent of the other memory references in
2312 LOOP (according to INDEP). */
2313
2314 static void
2315 record_indep_loop (struct loop *loop, mem_ref_p ref, bool indep)
2316 {
2317 if (indep)
2318 bitmap_set_bit (ref->indep_loop, loop->num);
2319 else
2320 bitmap_set_bit (ref->dep_loop, loop->num);
2321 }
2322
2323 /* Returns true if REF is independent of all other memory references in
2324 LOOP. */
2325
2326 static bool
2327 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref)
2328 {
2329 bitmap refs_to_check;
2330 unsigned i;
2331 bitmap_iterator bi;
2332 bool ret = true, stored = bitmap_bit_p (ref->stored, loop->num);
2333 mem_ref_p aref;
2334
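  /* If REF is stored in LOOP it may conflict with any other reference in
     the loop; if it is only loaded, two loads never conflict, so checking
     it against the references stored in LOOP is sufficient.  */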
2335 if (stored)
2336 refs_to_check = VEC_index (bitmap,
2337 memory_accesses.all_refs_in_loop, loop->num);
2338 else
2339 refs_to_check = VEC_index (bitmap,
2340 memory_accesses.all_refs_stored_in_loop,
2341 loop->num);
2342
2343 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2344 {
2345 aref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2346 if (!MEM_ANALYZABLE (aref)
2347 || !refs_independent_p (ref, aref))
2348 {
2349 ret = false;
2350 record_indep_loop (loop, aref, false);
2351 break;
2352 }
2353 }
2354
2355 return ret;
2356 }
2357
2358 /* Returns true if REF is independent of all other memory references in
2359 LOOP. A wrapper around ref_indep_loop_p_1 that caches its results. */
2360
2361 static bool
2362 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2363 {
2364 bool ret;
2365
2366 if (bitmap_bit_p (ref->indep_loop, loop->num))
2367 return true;
2368 if (bitmap_bit_p (ref->dep_loop, loop->num))
2369 return false;
2370
2371 ret = ref_indep_loop_p_1 (loop, ref);
2372
2373 if (dump_file && (dump_flags & TDF_DETAILS))
2374 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2375 ref->id, loop->num, ret ? "independent" : "dependent");
2376
2377 record_indep_loop (loop, ref, ret);
2378
2379 return ret;
2380 }
2381
2382 /* Returns true if we can perform store motion of REF from LOOP. */
2383
2384 static bool
2385 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2386 {
2387 tree base;
2388
2389 /* Can't hoist unanalyzable refs. */
2390 if (!MEM_ANALYZABLE (ref))
2391 return false;
2392
2393 /* Unless the reference is stored in the loop, there is nothing to do. */
2394 if (!bitmap_bit_p (ref->stored, loop->num))
2395 return false;
2396
2397 /* It should be movable. */
2398 if (!is_gimple_reg_type (TREE_TYPE (ref->mem))
2399 || TREE_THIS_VOLATILE (ref->mem)
2400 || !for_each_index (&ref->mem, may_move_till, loop))
2401 return false;
2402
2403 /* If it can throw, fail; we do not properly update EH info. */
2404 if (tree_could_throw_p (ref->mem))
2405 return false;
2406
2407 /* If it can trap, it must always be executed in LOOP.
2408 Readonly memory locations may trap when storing to them, but
2409 tree_could_trap_p is a predicate for rvalues, so check that
2410 explicitly. */
2411 base = get_base_address (ref->mem);
2412 if ((tree_could_trap_p (ref->mem)
2413 || (DECL_P (base) && TREE_READONLY (base)))
2414 && !ref_always_accessed_p (loop, ref, true))
2415 return false;
2416
2417 /* And it must be independent of all other memory references
2418 in LOOP. */
2419 if (!ref_indep_loop_p (loop, ref))
2420 return false;
2421
2422 return true;
2423 }
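
/* For illustration: a hypothetical "volatile int G" stored in the loop is
   rejected by the TREE_THIS_VOLATILE check, and a store through a possibly
   trapping reference (e.g. *p for an arbitrary pointer p) is allowed only
   when ref_always_accessed_p shows the store is always executed once the
   loop is entered.  */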
2424
2425 /* Marks in REFS_TO_SM the references in LOOP for which store motion should
2426 be performed. SM_EXECUTED is the set of references for which store
2427 motion was already performed in one of the outer loops. */
2428
2429 static void
2430 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2431 {
2432 bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
2433 loop->num);
2434 unsigned i;
2435 bitmap_iterator bi;
2436 mem_ref_p ref;
2437
2438 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2439 {
2440 ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
2441 if (can_sm_ref_p (loop, ref))
2442 bitmap_set_bit (refs_to_sm, i);
2443 }
2444 }
2445
2446 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2447 for a store motion optimization (i.e. whether we can insert statements
2448 on its exits). */
2449
2450 static bool
2451 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2452 VEC (edge, heap) *exits)
2453 {
2454 unsigned i;
2455 edge ex;
2456
2457 FOR_EACH_VEC_ELT (edge, exits, i, ex)
2458 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2459 return false;
2460
2461 return true;
2462 }
2463
2464 /* Try to perform store motion for all memory references modified inside
2465 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2466 store motion was already executed in one of the outer loops. */
2467
2468 static void
2469 store_motion_loop (struct loop *loop, bitmap sm_executed)
2470 {
2471 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2472 struct loop *subloop;
2473 bitmap sm_in_loop = BITMAP_ALLOC (NULL);
2474
2475 if (loop_suitable_for_sm (loop, exits))
2476 {
2477 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2478 hoist_memory_references (loop, sm_in_loop, exits);
2479 }
2480 VEC_free (edge, heap, exits);
2481
2482 bitmap_ior_into (sm_executed, sm_in_loop);
2483 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2484 store_motion_loop (subloop, sm_executed);
2485 bitmap_and_compl_into (sm_executed, sm_in_loop);
2486 BITMAP_FREE (sm_in_loop);
2487 }
2488
2489 /* Try to perform store motion for all memory references modified inside
2490 loops. */
2491
2492 static void
2493 store_motion (void)
2494 {
2495 struct loop *loop;
2496 bitmap sm_executed = BITMAP_ALLOC (NULL);
2497
2498 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2499 store_motion_loop (loop, sm_executed);
2500
2501 BITMAP_FREE (sm_executed);
2502 gsi_commit_edge_inserts ();
2503 }
2504
2505 /* Fills in ALWAYS_EXECUTED_IN information for the basic blocks of LOOP, i.e.
2506 for each such basic block bb records the outermost loop for which execution
2507 of that loop's header implies execution of bb. CONTAINS_CALL is the bitmap of
2508 blocks that contain a nonpure call. */
2509
2510 static void
2511 fill_always_executed_in (struct loop *loop, sbitmap contains_call)
2512 {
2513 basic_block bb = NULL, *bbs, last = NULL;
2514 unsigned i;
2515 edge e;
2516 struct loop *inn_loop = loop;
2517
2518 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2519 {
2520 bbs = get_loop_body_in_dom_order (loop);
2521
2522 for (i = 0; i < loop->num_nodes; i++)
2523 {
2524 edge_iterator ei;
2525 bb = bbs[i];
2526
2527 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2528 last = bb;
2529
2530 if (TEST_BIT (contains_call, bb->index))
2531 break;
2532
2533 FOR_EACH_EDGE (e, ei, bb->succs)
2534 if (!flow_bb_inside_loop_p (loop, e->dest))
2535 break;
2536 if (e)
2537 break;
2538
2539 /* A loop might be infinite (TODO use simple loop analysis
2540 to disprove this if possible). */
2541 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2542 break;
2543
2544 if (!flow_bb_inside_loop_p (inn_loop, bb))
2545 break;
2546
2547 if (bb->loop_father->header == bb)
2548 {
2549 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2550 break;
2551
2552 /* In a loop that is always entered we may proceed anyway.
2553 But record that we entered it and stop once we leave it. */
2554 inn_loop = bb->loop_father;
2555 }
2556 }
2557
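      /* Mark LAST and every block dominating it, up to and including the
         loop header, as always executed in LOOP.  */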
2558 while (1)
2559 {
2560 SET_ALWAYS_EXECUTED_IN (last, loop);
2561 if (last == loop->header)
2562 break;
2563 last = get_immediate_dominator (CDI_DOMINATORS, last);
2564 }
2565
2566 free (bbs);
2567 }
2568
2569 for (loop = loop->inner; loop; loop = loop->next)
2570 fill_always_executed_in (loop, contains_call);
2571 }
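
/* For illustration, in a hypothetical loop

     do
       {
         if (c)
           A;
         B;
       }
     while (cond);

   the loop header and the block containing B (which dominates the latch)
   get ALWAYS_EXECUTED_IN set to the loop, whereas the block containing A
   does not; in a test-at-the-top "while (cond)" loop only the header is
   marked, because the exit edge stops the walk right there.  */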
2572
2573 /* Compute the global information needed by the loop invariant motion pass. */
2574
2575 static void
2576 tree_ssa_lim_initialize (void)
2577 {
2578 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2579 gimple_stmt_iterator bsi;
2580 struct loop *loop;
2581 basic_block bb;
2582
2583 sbitmap_zero (contains_call);
2584 FOR_EACH_BB (bb)
2585 {
2586 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2587 {
2588 if (nonpure_call_p (gsi_stmt (bsi)))
2589 break;
2590 }
2591
2592 if (!gsi_end_p (bsi))
2593 SET_BIT (contains_call, bb->index);
2594 }
2595
2596 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2597 fill_always_executed_in (loop, contains_call);
2598
2599 sbitmap_free (contains_call);
2600
2601 lim_aux_data_map = pointer_map_create ();
2602
2603 if (flag_tm)
2604 compute_transaction_bits ();
2605
2606 alloc_aux_for_edges (0);
2607 }
2608
2609 /* Cleans up after the invariant motion pass. */
2610
2611 static void
2612 tree_ssa_lim_finalize (void)
2613 {
2614 basic_block bb;
2615 unsigned i;
2616 bitmap b;
2617
2618 free_aux_for_edges ();
2619
2620 FOR_EACH_BB (bb)
2621 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2622
2623 pointer_map_destroy (lim_aux_data_map);
2624
2625 VEC_free (mem_ref_p, heap, memory_accesses.refs_list);
2626 htab_delete (memory_accesses.refs);
2627
2628 FOR_EACH_VEC_ELT (bitmap, memory_accesses.refs_in_loop, i, b)
2629 BITMAP_FREE (b);
2630 VEC_free (bitmap, heap, memory_accesses.refs_in_loop);
2631
2632 FOR_EACH_VEC_ELT (bitmap, memory_accesses.all_refs_in_loop, i, b)
2633 BITMAP_FREE (b);
2634 VEC_free (bitmap, heap, memory_accesses.all_refs_in_loop);
2635
2636 FOR_EACH_VEC_ELT (bitmap, memory_accesses.all_refs_stored_in_loop, i, b)
2637 BITMAP_FREE (b);
2638 VEC_free (bitmap, heap, memory_accesses.all_refs_stored_in_loop);
2639
2640 if (memory_accesses.ttae_cache)
2641 pointer_map_destroy (memory_accesses.ttae_cache);
2642 }
2643
2644 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2645 i.e. those that are likely to be a win regardless of the register pressure. */
2646
2647 unsigned int
2648 tree_ssa_lim (void)
2649 {
2650 unsigned int todo;
2651
2652 tree_ssa_lim_initialize ();
2653
2654 /* Gathers information about memory accesses in the loops. */
2655 analyze_memory_references ();
2656
2657 /* For each statement determine the outermost loop in which it is
2658 invariant and the cost of computing the invariant. */
2659 determine_invariantness ();
2660
2661 /* Execute store motion. Force the necessary invariants to be moved
2662 out of the loops as well. */
2663 store_motion ();
2664
2665 /* Move the expressions that are expensive enough. */
2666 todo = move_computations ();
2667
2668 tree_ssa_lim_finalize ();
2669
2670 return todo;
2671 }