gcc/tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "tree-eh.h"
33 #include "gimplify.h"
34 #include "gimple-iterator.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-manip.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "cfgloop.h"
40 #include "domwalk.h"
41 #include "tree-affine.h"
42 #include "tree-ssa-propagate.h"
43 #include "trans-mem.h"
44 #include "gimple-fold.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "alias.h"
48 #include "builtins.h"
49 #include "tree-dfa.h"
50
51 /* TODO: Support for predicated code motion. I.e.
52
53 while (1)
54 {
55 if (cond)
56 {
57 a = inv;
58 something;
59 }
60 }
61
62 Where COND and INV are invariants, but evaluating INV may trap or be
63 invalid for some other reason if !COND. This may be transformed to
64
65 if (cond)
66 a = inv;
67 while (1)
68 {
69 if (cond)
70 something;
71 } */
72
73 /* The auxiliary data kept for each statement. */
74
75 struct lim_aux_data
76 {
77 class loop *max_loop; /* The outermost loop in which the statement
78 is invariant. */
79
80 class loop *tgt_loop; /* The loop out of which we want to move the
81 invariant. */
82
83 class loop *always_executed_in;
84 /* The outermost loop for which we are sure
85 the statement is executed if the loop
86 is entered. */
87
88 unsigned cost; /* Cost of the computation performed by the
89 statement. */
90
91 unsigned ref; /* The simple_mem_ref in this stmt or 0. */
92
93 vec<gimple *> depends; /* Vector of statements that must also be
94 hoisted out of the loop when this statement
95 is hoisted; i.e. those that define the
96 operands of the statement and are inside of
97 the MAX_LOOP loop. */
98 };
99
100 /* Maps statements to their lim_aux_data. */
101
102 static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;
103
104 /* Description of a memory reference location. */
105
106 struct mem_ref_loc
107 {
108 tree *ref; /* The reference itself. */
109 gimple *stmt; /* The statement in which it occurs. */
110 };
111
112
113 /* Description of a memory reference. */
114
115 class im_mem_ref
116 {
117 public:
118 unsigned id : 30; /* ID assigned to the memory reference
119 (its index in memory_accesses.refs_list) */
120 unsigned ref_canonical : 1; /* Whether mem.ref was canonicalized. */
121 unsigned ref_decomposed : 1; /* Whether the ref was hashed from mem. */
122 hashval_t hash; /* Its hash value. */
123
124 /* The memory access itself and associated caching of alias-oracle
125 query meta-data. */
126 ao_ref mem;
127
128 bitmap stored; /* The set of loops in which this memory location
129 is stored to. */
130 vec<mem_ref_loc> accesses_in_loop;
131 /* The locations of the accesses, kept sorted
132 by their loop father's postorder number. */
133
134 /* The following sets are computed on demand. We keep both set and
135 its complement, so that we know whether the information was
136 already computed or not. */
137 bitmap_head indep_loop; /* The set of loops in which the memory
138 reference is independent, meaning:
139 If it is stored in the loop, this store
140 is independent of all other loads and
141 stores.
142 If it is only loaded, then it is independent
143 of all stores in the loop. */
144 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
145 };
146
147 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
148 to record (in)dependence against stores in the loop and its subloops, the
149 second to record (in)dependence against all references in the loop
150 and its subloops. */
151 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
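/* Illustrative note (added for exposition): for loop number 3 the macro
   yields bit 6 for LOOP_DEP_BIT (3, false) and bit 7 for
   LOOP_DEP_BIT (3, true), i.e. each loop owns a consecutive pair of
   bits in the indep_loop/dep_loop bitmaps. */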
152
153 /* Mem_ref hashtable helpers. */
154
155 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
156 {
157 typedef ao_ref *compare_type;
158 static inline hashval_t hash (const im_mem_ref *);
159 static inline bool equal (const im_mem_ref *, const ao_ref *);
160 };
161
162 /* A hash function for class im_mem_ref object OBJ. */
163
164 inline hashval_t
165 mem_ref_hasher::hash (const im_mem_ref *mem)
166 {
167 return mem->hash;
168 }
169
170 /* An equality function for class im_mem_ref object MEM1 with
171 memory reference OBJ2. */
172
173 inline bool
174 mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
175 {
176 if (obj2->max_size_known_p ())
177 return (mem1->ref_decomposed
178 && operand_equal_p (mem1->mem.base, obj2->base, 0)
179 && known_eq (mem1->mem.offset, obj2->offset)
180 && known_eq (mem1->mem.size, obj2->size)
181 && known_eq (mem1->mem.max_size, obj2->max_size)
182 && mem1->mem.volatile_p == obj2->volatile_p
183 && (mem1->mem.ref_alias_set == obj2->ref_alias_set
184 /* We are not canonicalizing alias-sets, but for the
185 special case where we didn't canonicalize yet and the
186 incoming ref is an alias-set zero MEM we already pick
187 the correct one. */
188 || (!mem1->ref_canonical
189 && (TREE_CODE (obj2->ref) == MEM_REF
190 || TREE_CODE (obj2->ref) == TARGET_MEM_REF)
191 && obj2->ref_alias_set == 0)
192 /* Likewise if there's a canonical ref with alias-set zero. */
193 || (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
194 && types_compatible_p (TREE_TYPE (mem1->mem.ref),
195 TREE_TYPE (obj2->ref)));
196 else
197 return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
198 }
199
200
201 /* Description of memory accesses in loops. */
202
203 static struct
204 {
205 /* The hash table of memory references accessed in loops. */
206 hash_table<mem_ref_hasher> *refs;
207
208 /* The list of memory references. */
209 vec<im_mem_ref *> refs_list;
210
211 /* The set of memory references accessed in each loop. */
212 vec<bitmap_head> refs_in_loop;
213
214 /* The set of memory references stored in each loop. */
215 vec<bitmap_head> refs_stored_in_loop;
216
217 /* The set of memory references stored in each loop, including subloops. */
218 vec<bitmap_head> all_refs_stored_in_loop;
219
220 /* Cache for expanding memory addresses. */
221 hash_map<tree, name_expansion *> *ttae_cache;
222 } memory_accesses;
223
224 /* Obstack for the bitmaps in the above data structures. */
225 static bitmap_obstack lim_bitmap_obstack;
226 static obstack mem_ref_obstack;
227
228 static bool ref_indep_loop_p (class loop *, im_mem_ref *);
229 static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
230
231 /* Minimum cost of an expensive expression. */
232 #define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
233
234 /* The outermost loop for which execution of the header guarantees that the
235 block will be executed. */
236 #define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
237 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
238
239 /* ID of the shared unanalyzable mem. */
240 #define UNANALYZABLE_MEM_ID 0
241
242 /* Whether the reference was analyzable. */
243 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
244
245 static struct lim_aux_data *
246 init_lim_data (gimple *stmt)
247 {
248 lim_aux_data *p = XCNEW (struct lim_aux_data);
249 lim_aux_data_map->put (stmt, p);
250
251 return p;
252 }
253
254 static struct lim_aux_data *
255 get_lim_data (gimple *stmt)
256 {
257 lim_aux_data **p = lim_aux_data_map->get (stmt);
258 if (!p)
259 return NULL;
260
261 return *p;
262 }
263
264 /* Releases the memory occupied by DATA. */
265
266 static void
267 free_lim_aux_data (struct lim_aux_data *data)
268 {
269 data->depends.release ();
270 free (data);
271 }
272
273 static void
274 clear_lim_data (gimple *stmt)
275 {
276 lim_aux_data **p = lim_aux_data_map->get (stmt);
277 if (!p)
278 return;
279
280 free_lim_aux_data (*p);
281 *p = NULL;
282 }
283
284
285 /* The possibilities of statement movement. */
286 enum move_pos
287 {
288 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
289 MOVE_PRESERVE_EXECUTION, /* Must not cause a non-executed statement
290 to become executed -- memory accesses, ... */
291 MOVE_POSSIBLE /* Unlimited movement. */
292 };
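/* Illustrative examples (added for exposition; see movement_possibility
   below): a store or a statement with volatile operands is
   MOVE_IMPOSSIBLE, a division that could trap on a zero divisor is
   MOVE_PRESERVE_EXECUTION, and a plain addition of SSA names is
   MOVE_POSSIBLE. */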
293
294
295 /* If it is possible to hoist the statement STMT unconditionally,
296 returns MOVE_POSSIBLE.
297 If it is possible to hoist the statement STMT, but we must avoid making
298 it executed if it would not be executed in the original program (e.g.
299 because it may trap), return MOVE_PRESERVE_EXECUTION.
300 Otherwise return MOVE_IMPOSSIBLE. */
301
302 enum move_pos
303 movement_possibility (gimple *stmt)
304 {
305 tree lhs;
306 enum move_pos ret = MOVE_POSSIBLE;
307
308 if (flag_unswitch_loops
309 && gimple_code (stmt) == GIMPLE_COND)
310 {
311 /* If we perform unswitching, force the operands of the invariant
312 condition to be moved out of the loop. */
313 return MOVE_POSSIBLE;
314 }
315
316 if (gimple_code (stmt) == GIMPLE_PHI
317 && gimple_phi_num_args (stmt) <= 2
318 && !virtual_operand_p (gimple_phi_result (stmt))
319 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
320 return MOVE_POSSIBLE;
321
322 if (gimple_get_lhs (stmt) == NULL_TREE)
323 return MOVE_IMPOSSIBLE;
324
325 if (gimple_vdef (stmt))
326 return MOVE_IMPOSSIBLE;
327
328 if (stmt_ends_bb_p (stmt)
329 || gimple_has_volatile_ops (stmt)
330 || gimple_has_side_effects (stmt)
331 || stmt_could_throw_p (cfun, stmt))
332 return MOVE_IMPOSSIBLE;
333
334 if (is_gimple_call (stmt))
335 {
336 /* While a pure or const call is guaranteed to have no side effects, we
337 cannot move it arbitrarily. Consider code like
338
339 char *s = something ();
340
341 while (1)
342 {
343 if (s)
344 t = strlen (s);
345 else
346 t = 0;
347 }
348
349 Here the strlen call cannot be moved out of the loop, even though
350 s is invariant. In addition to possibly creating a call with
351 invalid arguments, moving out a function call that is not executed
352 may cause performance regressions in case the call is costly and
353 not executed at all. */
354 ret = MOVE_PRESERVE_EXECUTION;
355 lhs = gimple_call_lhs (stmt);
356 }
357 else if (is_gimple_assign (stmt))
358 lhs = gimple_assign_lhs (stmt);
359 else
360 return MOVE_IMPOSSIBLE;
361
362 if (TREE_CODE (lhs) == SSA_NAME
363 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
364 return MOVE_IMPOSSIBLE;
365
366 if (TREE_CODE (lhs) != SSA_NAME
367 || gimple_could_trap_p (stmt))
368 return MOVE_PRESERVE_EXECUTION;
369
370 /* Non local loads in a transaction cannot be hoisted out. Well,
371 unless the load happens on every path out of the loop, but we
372 don't take this into account yet. */
373 if (flag_tm
374 && gimple_in_transaction (stmt)
375 && gimple_assign_single_p (stmt))
376 {
377 tree rhs = gimple_assign_rhs1 (stmt);
378 if (DECL_P (rhs) && is_global_var (rhs))
379 {
380 if (dump_file)
381 {
382 fprintf (dump_file, "Cannot hoist conditional load of ");
383 print_generic_expr (dump_file, rhs, TDF_SLIM);
384 fprintf (dump_file, " because it is in a transaction.\n");
385 }
386 return MOVE_IMPOSSIBLE;
387 }
388 }
389
390 return ret;
391 }
392
393 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
394 loop to which we could move the expression using DEF if it did not have
395 other operands, i.e. the outermost loop enclosing LOOP in which the value
396 of DEF is invariant. */
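/* For example (added illustration): given

     loop 1: for (i ...)
       d_5 = ...;                <- defined in loop 1, not hoistable
       loop 2: for (j ...)
         ... use of d_5 ...;

   the value of d_5 is invariant in loop 2 but not in loop 1, so
   outermost_invariant_loop (d_5, loop 2) returns loop 2; if d_5 were
   defined inside loop 2 itself, NULL would be returned. */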
397
398 static class loop *
399 outermost_invariant_loop (tree def, class loop *loop)
400 {
401 gimple *def_stmt;
402 basic_block def_bb;
403 class loop *max_loop;
404 struct lim_aux_data *lim_data;
405
406 if (!def)
407 return superloop_at_depth (loop, 1);
408
409 if (TREE_CODE (def) != SSA_NAME)
410 {
411 gcc_assert (is_gimple_min_invariant (def));
412 return superloop_at_depth (loop, 1);
413 }
414
415 def_stmt = SSA_NAME_DEF_STMT (def);
416 def_bb = gimple_bb (def_stmt);
417 if (!def_bb)
418 return superloop_at_depth (loop, 1);
419
420 max_loop = find_common_loop (loop, def_bb->loop_father);
421
422 lim_data = get_lim_data (def_stmt);
423 if (lim_data != NULL && lim_data->max_loop != NULL)
424 max_loop = find_common_loop (max_loop,
425 loop_outer (lim_data->max_loop));
426 if (max_loop == loop)
427 return NULL;
428 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
429
430 return max_loop;
431 }
432
433 /* DATA is a structure containing information associated with a statement
434 inside LOOP. DEF is one of the operands of this statement.
435
436 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
437 and record this in the DATA->max_loop field. If DEF itself is defined inside
438 this loop as well (i.e. we need to hoist it out of the loop if we want
439 to hoist the statement represented by DATA), record the statement in which
440 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
441 add the cost of the computation of DEF to the DATA->cost.
442
443 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
444
445 static bool
446 add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
447 bool add_cost)
448 {
449 gimple *def_stmt = SSA_NAME_DEF_STMT (def);
450 basic_block def_bb = gimple_bb (def_stmt);
451 class loop *max_loop;
452 struct lim_aux_data *def_data;
453
454 if (!def_bb)
455 return true;
456
457 max_loop = outermost_invariant_loop (def, loop);
458 if (!max_loop)
459 return false;
460
461 if (flow_loop_nested_p (data->max_loop, max_loop))
462 data->max_loop = max_loop;
463
464 def_data = get_lim_data (def_stmt);
465 if (!def_data)
466 return true;
467
468 if (add_cost
469 /* Only add the cost if the statement defining DEF is inside LOOP,
470 i.e. if it is likely that by moving the invariants dependent
471 on it, we will be able to avoid creating a new register for
472 it (since it will be only used in these dependent invariants). */
473 && def_bb->loop_father == loop)
474 data->cost += def_data->cost;
475
476 data->depends.safe_push (def_stmt);
477
478 return true;
479 }
480
481 /* Returns an estimate of the cost of statement STMT. The values here
482 are just ad-hoc constants, similar to costs for inlining. */
483
484 static unsigned
485 stmt_cost (gimple *stmt)
486 {
487 /* Always try to create possibilities for unswitching. */
488 if (gimple_code (stmt) == GIMPLE_COND
489 || gimple_code (stmt) == GIMPLE_PHI)
490 return LIM_EXPENSIVE;
491
492 /* We should be hoisting calls if possible. */
493 if (is_gimple_call (stmt))
494 {
495 tree fndecl;
496
497 /* Unless the call is a builtin_constant_p; this always folds to a
498 constant, so moving it is useless. */
499 fndecl = gimple_call_fndecl (stmt);
500 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
501 return 0;
502
503 return LIM_EXPENSIVE;
504 }
505
506 /* Hoisting memory references out should almost surely be a win. */
507 if (gimple_references_memory_p (stmt))
508 return LIM_EXPENSIVE;
509
510 if (gimple_code (stmt) != GIMPLE_ASSIGN)
511 return 1;
512
513 switch (gimple_assign_rhs_code (stmt))
514 {
515 case MULT_EXPR:
516 case WIDEN_MULT_EXPR:
517 case WIDEN_MULT_PLUS_EXPR:
518 case WIDEN_MULT_MINUS_EXPR:
519 case DOT_PROD_EXPR:
520 case TRUNC_DIV_EXPR:
521 case CEIL_DIV_EXPR:
522 case FLOOR_DIV_EXPR:
523 case ROUND_DIV_EXPR:
524 case EXACT_DIV_EXPR:
525 case CEIL_MOD_EXPR:
526 case FLOOR_MOD_EXPR:
527 case ROUND_MOD_EXPR:
528 case TRUNC_MOD_EXPR:
529 case RDIV_EXPR:
530 /* Division and multiplication are usually expensive. */
531 return LIM_EXPENSIVE;
532
533 case LSHIFT_EXPR:
534 case RSHIFT_EXPR:
535 case WIDEN_LSHIFT_EXPR:
536 case LROTATE_EXPR:
537 case RROTATE_EXPR:
538 /* Shifts and rotates are usually expensive. */
539 return LIM_EXPENSIVE;
540
541 case CONSTRUCTOR:
542 /* Make vector construction cost proportional to the number
543 of elements. */
544 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
545
546 case SSA_NAME:
547 case PAREN_EXPR:
548 /* Whether or not something is wrapped inside a PAREN_EXPR
549 should not change move cost. Nor should an intermediate
550 unpropagated SSA name copy. */
551 return 0;
552
553 default:
554 return 1;
555 }
556 }
557
558 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
559 REF is independent. If REF is not independent in LOOP, NULL is returned
560 instead. */
561
562 static class loop *
563 outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
564 {
565 class loop *aloop;
566
567 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
568 return NULL;
569
570 for (aloop = outer;
571 aloop != loop;
572 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
573 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
574 && ref_indep_loop_p (aloop, ref))
575 return aloop;
576
577 if (ref_indep_loop_p (loop, ref))
578 return loop;
579 else
580 return NULL;
581 }
582
583 /* If there is a simple load or store to a memory reference in STMT, returns
584 the location of the memory reference, and sets IS_STORE according to whether
585 it is a store or load. Otherwise, returns NULL. */
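/* For example (added illustration): 'x_1 = a[i_2];' returns the location
   of 'a[i_2]' with *IS_STORE set to false, 'a[i_2] = x_1;' returns it
   with *IS_STORE set to true, while an aggregate copy such as
   'a[i_2] = b[j_3];' is not a simple reference and NULL is returned. */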
586
587 static tree *
588 simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
589 {
590 tree *lhs, *rhs;
591
592 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
593 if (!gimple_assign_single_p (stmt))
594 return NULL;
595
596 lhs = gimple_assign_lhs_ptr (stmt);
597 rhs = gimple_assign_rhs1_ptr (stmt);
598
599 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
600 {
601 *is_store = false;
602 return rhs;
603 }
604 else if (gimple_vdef (stmt)
605 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
606 {
607 *is_store = true;
608 return lhs;
609 }
610 else
611 return NULL;
612 }
613
614 /* From a controlling predicate in DOM determine the arguments from
615 the PHI node PHI that are chosen if the predicate evaluates to
616 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
617 they are non-NULL. Returns true if the arguments can be determined,
618 else return false. */
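/* For example (added illustration): for the diamond

     DOM:  if (cond_1) goto BB_T; else goto BB_F;
     BB_T: ...
     BB_F: ...
     JOIN: x_3 = PHI <x_1(BB_T), x_2(BB_F)>

   *TRUE_ARG_P is set to x_1 and *FALSE_ARG_P to x_2. */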
619
620 static bool
621 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
622 tree *true_arg_p, tree *false_arg_p)
623 {
624 edge te, fe;
625 if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
626 &te, &fe))
627 return false;
628
629 if (true_arg_p)
630 *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
631 if (false_arg_p)
632 *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);
633
634 return true;
635 }
636
637 /* Determine the outermost loop to which it is possible to hoist a statement
638 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
639 the outermost loop in which the value computed by STMT is invariant.
640 If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
641 we preserve whether STMT is executed. It also fills in other related
642 information in LIM_DATA (STMT).
643
644 The function returns false if STMT cannot be hoisted outside of the loop it
645 is defined in, and true otherwise. */
646
647 static bool
648 determine_max_movement (gimple *stmt, bool must_preserve_exec)
649 {
650 basic_block bb = gimple_bb (stmt);
651 class loop *loop = bb->loop_father;
652 class loop *level;
653 struct lim_aux_data *lim_data = get_lim_data (stmt);
654 tree val;
655 ssa_op_iter iter;
656
657 if (must_preserve_exec)
658 level = ALWAYS_EXECUTED_IN (bb);
659 else
660 level = superloop_at_depth (loop, 1);
661 lim_data->max_loop = level;
662
663 if (gphi *phi = dyn_cast <gphi *> (stmt))
664 {
665 use_operand_p use_p;
666 unsigned min_cost = UINT_MAX;
667 unsigned total_cost = 0;
668 struct lim_aux_data *def_data;
669
670 /* We will end up promoting dependencies to be unconditionally
671 evaluated. For this reason the PHI cost (and thus the
672 cost we remove from the loop by doing the invariant motion)
673 is that of the cheapest PHI argument dependency chain. */
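/* For instance (added illustration): for x_3 = PHI <a_1, b_2> where the
   dependency chain of a_1 costs 2 and that of b_2 costs 10, min_cost
   becomes 2 and total_cost 12; only the 2 is credited as savings here,
   while total_cost is used further below to reject PHIs whose
   unconditional evaluation would be too expensive. */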
674 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
675 {
676 val = USE_FROM_PTR (use_p);
677
678 if (TREE_CODE (val) != SSA_NAME)
679 {
680 /* Assign cost 1 to constants. */
681 min_cost = MIN (min_cost, 1);
682 total_cost += 1;
683 continue;
684 }
685 if (!add_dependency (val, lim_data, loop, false))
686 return false;
687
688 gimple *def_stmt = SSA_NAME_DEF_STMT (val);
689 if (gimple_bb (def_stmt)
690 && gimple_bb (def_stmt)->loop_father == loop)
691 {
692 def_data = get_lim_data (def_stmt);
693 if (def_data)
694 {
695 min_cost = MIN (min_cost, def_data->cost);
696 total_cost += def_data->cost;
697 }
698 }
699 }
700
701 min_cost = MIN (min_cost, total_cost);
702 lim_data->cost += min_cost;
703
704 if (gimple_phi_num_args (phi) > 1)
705 {
706 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
707 gimple *cond;
708 if (gsi_end_p (gsi_last_bb (dom)))
709 return false;
710 cond = gsi_stmt (gsi_last_bb (dom));
711 if (gimple_code (cond) != GIMPLE_COND)
712 return false;
713 /* Verify that this is an extended form of a diamond and
714 the PHI arguments are completely controlled by the
715 predicate in DOM. */
716 if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
717 return false;
718
719 /* Fold in dependencies and cost of the condition. */
720 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
721 {
722 if (!add_dependency (val, lim_data, loop, false))
723 return false;
724 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
725 if (def_data)
726 lim_data->cost += def_data->cost;
727 }
728
729 /* We want to avoid unconditionally executing very expensive
730 operations. As costs for our dependencies cannot be
731 negative, just claim we are not invariant in this case.
732 We also are not sure whether the control-flow inside the
733 loop will vanish. */
734 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
735 && !(min_cost != 0
736 && total_cost / min_cost <= 2))
737 return false;
738
739 /* Assume that the control-flow in the loop will vanish.
740 ??? We should verify this and not artificially increase
741 the cost if that is not the case. */
742 lim_data->cost += stmt_cost (stmt);
743 }
744
745 return true;
746 }
747 else
748 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
749 if (!add_dependency (val, lim_data, loop, true))
750 return false;
751
752 if (gimple_vuse (stmt))
753 {
754 im_mem_ref *ref
755 = lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;
756 if (ref
757 && MEM_ANALYZABLE (ref))
758 {
759 lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
760 loop, ref);
761 if (!lim_data->max_loop)
762 return false;
763 }
764 else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
765 return false;
766 }
767
768 lim_data->cost += stmt_cost (stmt);
769
770 return true;
771 }
772
773 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
774 and that one of the operands of this statement is computed by STMT.
775 Ensure that STMT (together with all the statements that define its
776 operands) is hoisted at least out of the loop LEVEL. */
777
778 static void
779 set_level (gimple *stmt, class loop *orig_loop, class loop *level)
780 {
781 class loop *stmt_loop = gimple_bb (stmt)->loop_father;
782 struct lim_aux_data *lim_data;
783 gimple *dep_stmt;
784 unsigned i;
785
786 stmt_loop = find_common_loop (orig_loop, stmt_loop);
787 lim_data = get_lim_data (stmt);
788 if (lim_data != NULL && lim_data->tgt_loop != NULL)
789 stmt_loop = find_common_loop (stmt_loop,
790 loop_outer (lim_data->tgt_loop));
791 if (flow_loop_nested_p (stmt_loop, level))
792 return;
793
794 gcc_assert (level == lim_data->max_loop
795 || flow_loop_nested_p (lim_data->max_loop, level));
796
797 lim_data->tgt_loop = level;
798 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
799 set_level (dep_stmt, orig_loop, level);
800 }
801
802 /* Determines the outermost loop from which we want to hoist the statement STMT.
803 For now we choose the outermost possible loop. TODO -- use profiling
804 information to set it more sanely. */
805
806 static void
807 set_profitable_level (gimple *stmt)
808 {
809 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
810 }
811
812 /* Returns true if STMT is a call that has side effects. */
813
814 static bool
815 nonpure_call_p (gimple *stmt)
816 {
817 if (gimple_code (stmt) != GIMPLE_CALL)
818 return false;
819
820 return gimple_has_side_effects (stmt);
821 }
822
823 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
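/* For example (added illustration), with an invariant divisor b_2:

     x_3 = a_1 / b_2;

   becomes

     reciptmp_4 = 1.0 / b_2;
     x_3 = reciptmp_4 * a_1;

   and the reciprocal statement is returned so that it can be hoisted. */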
824
825 static gimple *
826 rewrite_reciprocal (gimple_stmt_iterator *bsi)
827 {
828 gassign *stmt, *stmt1, *stmt2;
829 tree name, lhs, type;
830 tree real_one;
831 gimple_stmt_iterator gsi;
832
833 stmt = as_a <gassign *> (gsi_stmt (*bsi));
834 lhs = gimple_assign_lhs (stmt);
835 type = TREE_TYPE (lhs);
836
837 real_one = build_one_cst (type);
838
839 name = make_temp_ssa_name (type, NULL, "reciptmp");
840 stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
841 gimple_assign_rhs2 (stmt));
842 stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
843 gimple_assign_rhs1 (stmt));
844
845 /* Replace division stmt with reciprocal and multiply stmts.
846 The multiply stmt is not invariant, so update iterator
847 and avoid rescanning. */
848 gsi = *bsi;
849 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
850 gsi_replace (&gsi, stmt2, true);
851
852 /* Continue processing with invariant reciprocal statement. */
853 return stmt1;
854 }
855
856 /* Check if the pattern at *BSI is a bittest of the form
857 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
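/* For example (added illustration), with loop-invariant b_2 and
   loop-varying a_1:

     t_3 = a_1 >> b_2;
     l_4 = t_3 & 1;
     if (l_4 != 0) ...

   is rewritten so that the condition instead tests

     shifttmp_5 = 1 << b_2;
     shifttmp_6 = a_1 & shifttmp_5;
     if (shifttmp_6 != 0) ...

   allowing the mask computation to be hoisted out of the loop. */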
858
859 static gimple *
860 rewrite_bittest (gimple_stmt_iterator *bsi)
861 {
862 gassign *stmt;
863 gimple *stmt1;
864 gassign *stmt2;
865 gimple *use_stmt;
866 gcond *cond_stmt;
867 tree lhs, name, t, a, b;
868 use_operand_p use;
869
870 stmt = as_a <gassign *> (gsi_stmt (*bsi));
871 lhs = gimple_assign_lhs (stmt);
872
873 /* Verify that the single use of lhs is a comparison against zero. */
874 if (TREE_CODE (lhs) != SSA_NAME
875 || !single_imm_use (lhs, &use, &use_stmt))
876 return stmt;
877 cond_stmt = dyn_cast <gcond *> (use_stmt);
878 if (!cond_stmt)
879 return stmt;
880 if (gimple_cond_lhs (cond_stmt) != lhs
881 || (gimple_cond_code (cond_stmt) != NE_EXPR
882 && gimple_cond_code (cond_stmt) != EQ_EXPR)
883 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
884 return stmt;
885
886 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
887 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
888 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
889 return stmt;
890
891 /* There is a conversion in between possibly inserted by fold. */
892 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
893 {
894 t = gimple_assign_rhs1 (stmt1);
895 if (TREE_CODE (t) != SSA_NAME
896 || !has_single_use (t))
897 return stmt;
898 stmt1 = SSA_NAME_DEF_STMT (t);
899 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
900 return stmt;
901 }
902
903 /* Verify that B is loop invariant but A is not. Verify that with
904 all the stmt walking we are still in the same loop. */
905 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
906 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
907 return stmt;
908
909 a = gimple_assign_rhs1 (stmt1);
910 b = gimple_assign_rhs2 (stmt1);
911
912 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
913 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
914 {
915 gimple_stmt_iterator rsi;
916
917 /* 1 << B */
918 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
919 build_int_cst (TREE_TYPE (a), 1), b);
920 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
921 stmt1 = gimple_build_assign (name, t);
922
923 /* A & (1 << B) */
924 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
925 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
926 stmt2 = gimple_build_assign (name, t);
927
928 /* Replace the SSA_NAME we compare against zero. Adjust
929 the type of zero accordingly. */
930 SET_USE (use, name);
931 gimple_cond_set_rhs (cond_stmt,
932 build_int_cst_type (TREE_TYPE (name),
933 0));
934
935 /* Don't use gsi_replace here, none of the new assignments sets
936 the variable originally set in stmt. Move bsi to stmt1, and
937 then remove the original stmt, so that we get a chance to
938 retain debug info for it. */
939 rsi = *bsi;
940 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
941 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
942 gimple *to_release = gsi_stmt (rsi);
943 gsi_remove (&rsi, true);
944 release_defs (to_release);
945
946 return stmt1;
947 }
948
949 return stmt;
950 }
951
952 /* For each statement determines the outermost loop in which it is invariant,
953 the statements on whose motion it depends, and the cost of the computation.
954 This information is stored in the LIM_DATA structure associated with
955 each statement. */
956 class invariantness_dom_walker : public dom_walker
957 {
958 public:
959 invariantness_dom_walker (cdi_direction direction)
960 : dom_walker (direction) {}
961
962 virtual edge before_dom_children (basic_block);
963 };
964
965 /* Determine the outermost loops in which statements in basic block BB are
966 invariant, and record them to the LIM_DATA associated with the statements.
967 Callback for dom_walker. */
968
969 edge
970 invariantness_dom_walker::before_dom_children (basic_block bb)
971 {
972 enum move_pos pos;
973 gimple_stmt_iterator bsi;
974 gimple *stmt;
975 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
976 class loop *outermost = ALWAYS_EXECUTED_IN (bb);
977 struct lim_aux_data *lim_data;
978
979 if (!loop_outer (bb->loop_father))
980 return NULL;
981
982 if (dump_file && (dump_flags & TDF_DETAILS))
983 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
984 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
985
986 /* Look at PHI nodes, but only if there are at most two.
987 ??? We could relax this further by post-processing the inserted
988 code and transforming adjacent cond-exprs with the same predicate
989 to control flow again. */
990 bsi = gsi_start_phis (bb);
991 if (!gsi_end_p (bsi)
992 && ((gsi_next (&bsi), gsi_end_p (bsi))
993 || (gsi_next (&bsi), gsi_end_p (bsi))))
994 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
995 {
996 stmt = gsi_stmt (bsi);
997
998 pos = movement_possibility (stmt);
999 if (pos == MOVE_IMPOSSIBLE)
1000 continue;
1001
1002 lim_data = get_lim_data (stmt);
1003 if (! lim_data)
1004 lim_data = init_lim_data (stmt);
1005 lim_data->always_executed_in = outermost;
1006
1007 if (!determine_max_movement (stmt, false))
1008 {
1009 lim_data->max_loop = NULL;
1010 continue;
1011 }
1012
1013 if (dump_file && (dump_flags & TDF_DETAILS))
1014 {
1015 print_gimple_stmt (dump_file, stmt, 2);
1016 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1017 loop_depth (lim_data->max_loop),
1018 lim_data->cost);
1019 }
1020
1021 if (lim_data->cost >= LIM_EXPENSIVE)
1022 set_profitable_level (stmt);
1023 }
1024
1025 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1026 {
1027 stmt = gsi_stmt (bsi);
1028
1029 pos = movement_possibility (stmt);
1030 if (pos == MOVE_IMPOSSIBLE)
1031 {
1032 if (nonpure_call_p (stmt))
1033 {
1034 maybe_never = true;
1035 outermost = NULL;
1036 }
1037 /* Make sure to note always_executed_in for stores to make
1038 store-motion work. */
1039 else if (stmt_makes_single_store (stmt))
1040 {
1041 struct lim_aux_data *lim_data = get_lim_data (stmt);
1042 if (! lim_data)
1043 lim_data = init_lim_data (stmt);
1044 lim_data->always_executed_in = outermost;
1045 }
1046 continue;
1047 }
1048
1049 if (is_gimple_assign (stmt)
1050 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1051 == GIMPLE_BINARY_RHS))
1052 {
1053 tree op0 = gimple_assign_rhs1 (stmt);
1054 tree op1 = gimple_assign_rhs2 (stmt);
1055 class loop *ol1 = outermost_invariant_loop (op1,
1056 loop_containing_stmt (stmt));
1057
1058 /* If the divisor is invariant, convert a/b to a*(1/b), allowing the reciprocal
1059 to be hoisted out of the loop, saving an expensive divide. */
1060 if (pos == MOVE_POSSIBLE
1061 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1062 && flag_unsafe_math_optimizations
1063 && !flag_trapping_math
1064 && ol1 != NULL
1065 && outermost_invariant_loop (op0, ol1) == NULL)
1066 stmt = rewrite_reciprocal (&bsi);
1067
1068 /* If the shift count is invariant, convert (A >> B) & 1 to
1069 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1070 saving an expensive shift. */
1071 if (pos == MOVE_POSSIBLE
1072 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1073 && integer_onep (op1)
1074 && TREE_CODE (op0) == SSA_NAME
1075 && has_single_use (op0))
1076 stmt = rewrite_bittest (&bsi);
1077 }
1078
1079 lim_data = get_lim_data (stmt);
1080 if (! lim_data)
1081 lim_data = init_lim_data (stmt);
1082 lim_data->always_executed_in = outermost;
1083
1084 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1085 continue;
1086
1087 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1088 {
1089 lim_data->max_loop = NULL;
1090 continue;
1091 }
1092
1093 if (dump_file && (dump_flags & TDF_DETAILS))
1094 {
1095 print_gimple_stmt (dump_file, stmt, 2);
1096 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1097 loop_depth (lim_data->max_loop),
1098 lim_data->cost);
1099 }
1100
1101 if (lim_data->cost >= LIM_EXPENSIVE)
1102 set_profitable_level (stmt);
1103 }
1104 return NULL;
1105 }
1106
1107 /* Hoist the statements in basic block BB out of the loops prescribed by
1108 data stored in LIM_DATA structures associated with each statement. Called
1109 from move_computations for each basic block. */
1110
1111 unsigned int
1112 move_computations_worker (basic_block bb)
1113 {
1114 class loop *level;
1115 unsigned cost = 0;
1116 struct lim_aux_data *lim_data;
1117 unsigned int todo = 0;
1118
1119 if (!loop_outer (bb->loop_father))
1120 return todo;
1121
1122 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1123 {
1124 gassign *new_stmt;
1125 gphi *stmt = bsi.phi ();
1126
1127 lim_data = get_lim_data (stmt);
1128 if (lim_data == NULL)
1129 {
1130 gsi_next (&bsi);
1131 continue;
1132 }
1133
1134 cost = lim_data->cost;
1135 level = lim_data->tgt_loop;
1136 clear_lim_data (stmt);
1137
1138 if (!level)
1139 {
1140 gsi_next (&bsi);
1141 continue;
1142 }
1143
1144 if (dump_file && (dump_flags & TDF_DETAILS))
1145 {
1146 fprintf (dump_file, "Moving PHI node\n");
1147 print_gimple_stmt (dump_file, stmt, 0);
1148 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1149 cost, level->num);
1150 }
1151
1152 if (gimple_phi_num_args (stmt) == 1)
1153 {
1154 tree arg = PHI_ARG_DEF (stmt, 0);
1155 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1156 TREE_CODE (arg), arg);
1157 }
1158 else
1159 {
1160 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1161 gimple *cond = gsi_stmt (gsi_last_bb (dom));
1162 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1163 /* Get the PHI arguments corresponding to the true and false
1164 edges of COND. */
1165 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1166 gcc_assert (arg0 && arg1);
1167 t = build2 (gimple_cond_code (cond), boolean_type_node,
1168 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1169 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1170 COND_EXPR, t, arg0, arg1);
1171 todo |= TODO_cleanup_cfg;
1172 }
1173 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1174 && (!ALWAYS_EXECUTED_IN (bb)
1175 || (ALWAYS_EXECUTED_IN (bb) != level
1176 && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1177 {
1178 tree lhs = gimple_assign_lhs (new_stmt);
1179 SSA_NAME_RANGE_INFO (lhs) = NULL;
1180 }
1181 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1182 remove_phi_node (&bsi, false);
1183 }
1184
1185 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1186 {
1187 edge e;
1188
1189 gimple *stmt = gsi_stmt (bsi);
1190
1191 lim_data = get_lim_data (stmt);
1192 if (lim_data == NULL)
1193 {
1194 gsi_next (&bsi);
1195 continue;
1196 }
1197
1198 cost = lim_data->cost;
1199 level = lim_data->tgt_loop;
1200 clear_lim_data (stmt);
1201
1202 if (!level)
1203 {
1204 gsi_next (&bsi);
1205 continue;
1206 }
1207
1208 /* We do not really want to move the conditional out of the loop; we just
1209 placed it here to force its operands to be moved if necessary. */
1210 if (gimple_code (stmt) == GIMPLE_COND)
1211 continue;
1212
1213 if (dump_file && (dump_flags & TDF_DETAILS))
1214 {
1215 fprintf (dump_file, "Moving statement\n");
1216 print_gimple_stmt (dump_file, stmt, 0);
1217 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1218 cost, level->num);
1219 }
1220
1221 e = loop_preheader_edge (level);
1222 gcc_assert (!gimple_vdef (stmt));
1223 if (gimple_vuse (stmt))
1224 {
1225 /* The new VUSE is the one from the virtual PHI in the loop
1226 header or the one already present. */
1227 gphi_iterator gsi2;
1228 for (gsi2 = gsi_start_phis (e->dest);
1229 !gsi_end_p (gsi2); gsi_next (&gsi2))
1230 {
1231 gphi *phi = gsi2.phi ();
1232 if (virtual_operand_p (gimple_phi_result (phi)))
1233 {
1234 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1235 break;
1236 }
1237 }
1238 }
1239 gsi_remove (&bsi, false);
1240 if (gimple_has_lhs (stmt)
1241 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1242 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1243 && (!ALWAYS_EXECUTED_IN (bb)
1244 || !(ALWAYS_EXECUTED_IN (bb) == level
1245 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1246 {
1247 tree lhs = gimple_get_lhs (stmt);
1248 SSA_NAME_RANGE_INFO (lhs) = NULL;
1249 }
1250 /* In case this is a stmt that is not unconditionally executed
1251 when the target loop header is executed and the stmt may
1252 invoke undefined integer or pointer overflow rewrite it to
1253 unsigned arithmetic. */
1254 if (is_gimple_assign (stmt)
1255 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1256 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1257 && arith_code_with_undefined_signed_overflow
1258 (gimple_assign_rhs_code (stmt))
1259 && (!ALWAYS_EXECUTED_IN (bb)
1260 || !(ALWAYS_EXECUTED_IN (bb) == level
1261 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1262 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1263 else
1264 gsi_insert_on_edge (e, stmt);
1265 }
1266
1267 return todo;
1268 }
1269
1270 /* Hoist the statements out of the loops prescribed by data stored in
1271 LIM_DATA structures associated with each statement. */
1272
1273 static unsigned int
1274 move_computations (void)
1275 {
1276 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
1277 int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
1278 unsigned todo = 0;
1279
1280 for (int i = 0; i < n; ++i)
1281 todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));
1282
1283 free (rpo);
1284
1285 gsi_commit_edge_inserts ();
1286 if (need_ssa_update_p (cfun))
1287 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1288
1289 return todo;
1290 }
1291
1292 /* Checks whether the statement defining variable *INDEX can be hoisted
1293 out of the loop passed in DATA. Callback for for_each_index. */
1294
1295 static bool
1296 may_move_till (tree ref, tree *index, void *data)
1297 {
1298 class loop *loop = (class loop *) data, *max_loop;
1299
1300 /* If REF is an array reference, check also that the step and the lower
1301 bound are invariant in LOOP. */
1302 if (TREE_CODE (ref) == ARRAY_REF)
1303 {
1304 tree step = TREE_OPERAND (ref, 3);
1305 tree lbound = TREE_OPERAND (ref, 2);
1306
1307 max_loop = outermost_invariant_loop (step, loop);
1308 if (!max_loop)
1309 return false;
1310
1311 max_loop = outermost_invariant_loop (lbound, loop);
1312 if (!max_loop)
1313 return false;
1314 }
1315
1316 max_loop = outermost_invariant_loop (*index, loop);
1317 if (!max_loop)
1318 return false;
1319
1320 return true;
1321 }
1322
1323 /* If OP is an SSA_NAME, force the statement that defines it to be
1324 moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */
1325
1326 static void
1327 force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
1328 {
1329 gimple *stmt;
1330
1331 if (!op
1332 || is_gimple_min_invariant (op))
1333 return;
1334
1335 gcc_assert (TREE_CODE (op) == SSA_NAME);
1336
1337 stmt = SSA_NAME_DEF_STMT (op);
1338 if (gimple_nop_p (stmt))
1339 return;
1340
1341 set_level (stmt, orig_loop, loop);
1342 }
1343
1344 /* Forces statements defining invariants in REF (and *INDEX) to be moved out of
1345 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1346 for_each_index. */
1347
1348 struct fmt_data
1349 {
1350 class loop *loop;
1351 class loop *orig_loop;
1352 };
1353
1354 static bool
1355 force_move_till (tree ref, tree *index, void *data)
1356 {
1357 struct fmt_data *fmt_data = (struct fmt_data *) data;
1358
1359 if (TREE_CODE (ref) == ARRAY_REF)
1360 {
1361 tree step = TREE_OPERAND (ref, 3);
1362 tree lbound = TREE_OPERAND (ref, 2);
1363
1364 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1365 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1366 }
1367
1368 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1369
1370 return true;
1371 }
1372
1373 /* A function to free the mem_ref object OBJ. */
1374
1375 static void
1376 memref_free (class im_mem_ref *mem)
1377 {
1378 mem->accesses_in_loop.release ();
1379 }
1380
1381 /* Allocates and returns a memory reference description for MEM whose hash
1382 value is HASH and id is ID. */
1383
1384 static im_mem_ref *
1385 mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
1386 {
1387 im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
1388 if (mem)
1389 ref->mem = *mem;
1390 else
1391 ao_ref_init (&ref->mem, error_mark_node);
1392 ref->id = id;
1393 ref->ref_canonical = false;
1394 ref->ref_decomposed = false;
1395 ref->hash = hash;
1396 ref->stored = NULL;
1397 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1398 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1399 ref->accesses_in_loop.create (1);
1400
1401 return ref;
1402 }
1403
1404 /* Records memory reference location *LOC to the memory reference
1405 description REF. The reference occurs in statement STMT. */
1406
1407 static void
1408 record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
1409 {
1410 mem_ref_loc aref;
1411 aref.stmt = stmt;
1412 aref.ref = loc;
1413 ref->accesses_in_loop.safe_push (aref);
1414 }
1415
1416 /* Set the LOOP bit in REF's stored bitmap, allocating it if
1417 necessary. Return whether a bit was changed. */
1418
1419 static bool
1420 set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
1421 {
1422 if (!ref->stored)
1423 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1424 return bitmap_set_bit (ref->stored, loop->num);
1425 }
1426
1427 /* Marks reference REF as stored in LOOP. */
1428
1429 static void
1430 mark_ref_stored (im_mem_ref *ref, class loop *loop)
1431 {
1432 while (loop != current_loops->tree_root
1433 && set_ref_stored_in_loop (ref, loop))
1434 loop = loop_outer (loop);
1435 }
1436
1437 /* Gathers memory references in statement STMT in LOOP, storing the
1438 information about them in the memory_accesses structure. Marks
1439 the vops accessed through unrecognized statements there as
1440 well. */
1441
1442 static void
1443 gather_mem_refs_stmt (class loop *loop, gimple *stmt)
1444 {
1445 tree *mem = NULL;
1446 hashval_t hash;
1447 im_mem_ref **slot;
1448 im_mem_ref *ref;
1449 bool is_stored;
1450 unsigned id;
1451
1452 if (!gimple_vuse (stmt))
1453 return;
1454
1455 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1456 if (!mem)
1457 {
1458 /* We use the shared mem_ref for all unanalyzable refs. */
1459 id = UNANALYZABLE_MEM_ID;
1460 ref = memory_accesses.refs_list[id];
1461 if (dump_file && (dump_flags & TDF_DETAILS))
1462 {
1463 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1464 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1465 }
1466 is_stored = gimple_vdef (stmt);
1467 }
1468 else
1469 {
1470 /* We are looking for equal refs that might differ in structure
1471 such as a.b vs. MEM[&a + 4]. So we key off the ao_ref but
1472 make sure we can canonicalize the ref in the hashtable if
1473 non-operand_equal_p refs are found. For the lookup we mark
1474 the case we want strict equality with aor.max_size == -1. */
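/* For example (added illustration): with 'struct { int x; int y; } a;'
   and a 4-byte int, 'a.y' and 'MEM[&a + 4]' decompose to the same
   base, offset and size, so they hash to the same slot even though
   their trees are not operand_equal_p; hitting the second form then
   triggers the canonicalization to a bare MEM_REF below. */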
1475 ao_ref aor;
1476 ao_ref_init (&aor, *mem);
1477 ao_ref_base (&aor);
1478 ao_ref_alias_set (&aor);
1479 HOST_WIDE_INT offset, size, max_size;
1480 poly_int64 saved_maxsize = aor.max_size, mem_off;
1481 tree mem_base;
1482 bool ref_decomposed;
1483 if (aor.max_size_known_p ()
1484 && aor.offset.is_constant (&offset)
1485 && aor.size.is_constant (&size)
1486 && aor.max_size.is_constant (&max_size)
1487 && size == max_size
1488 && (size % BITS_PER_UNIT) == 0
1489 /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
1490 size. Make sure this is consistent with the extraction. */
1491 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
1492 && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
1493 aor.size)
1494 && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
1495 {
1496 ref_decomposed = true;
1497 hash = iterative_hash_expr (ao_ref_base (&aor), 0);
1498 hash = iterative_hash_host_wide_int (offset, hash);
1499 hash = iterative_hash_host_wide_int (size, hash);
1500 }
1501 else
1502 {
1503 ref_decomposed = false;
1504 hash = iterative_hash_expr (aor.ref, 0);
1505 aor.max_size = -1;
1506 }
1507 slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
1508 aor.max_size = saved_maxsize;
1509 if (*slot)
1510 {
1511 if (!(*slot)->ref_canonical
1512 && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
1513 {
1514 /* If we didn't yet canonicalize the hashtable ref (which
1515 we'll end up using for code insertion) and hit a second
1516 equal ref that is not structurally equivalent create
1517 a canonical ref which is a bare MEM_REF. */
1518 if (TREE_CODE (*mem) == MEM_REF
1519 || TREE_CODE (*mem) == TARGET_MEM_REF)
1520 {
1521 (*slot)->mem.ref = *mem;
1522 (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
1523 }
1524 else
1525 {
1526 tree ref_alias_type = reference_alias_ptr_type (*mem);
1527 unsigned int ref_align = get_object_alignment (*mem);
1528 tree ref_type = TREE_TYPE (*mem);
1529 tree tmp = build_fold_addr_expr (unshare_expr (mem_base));
1530 if (TYPE_ALIGN (ref_type) != ref_align)
1531 ref_type = build_aligned_type (ref_type, ref_align);
1532 (*slot)->mem.ref
1533 = fold_build2 (MEM_REF, ref_type, tmp,
1534 build_int_cst (ref_alias_type, mem_off));
1535 if ((*slot)->mem.volatile_p)
1536 TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
1537 gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
1538 && is_gimple_mem_ref_addr
1539 (TREE_OPERAND ((*slot)->mem.ref,
1540 0)));
1541 (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
1542 }
1543 (*slot)->ref_canonical = true;
1544 }
1545 ref = *slot;
1546 id = ref->id;
1547 }
1548 else
1549 {
1550 id = memory_accesses.refs_list.length ();
1551 ref = mem_ref_alloc (&aor, hash, id);
1552 ref->ref_decomposed = ref_decomposed;
1553 memory_accesses.refs_list.safe_push (ref);
1554 *slot = ref;
1555
1556 if (dump_file && (dump_flags & TDF_DETAILS))
1557 {
1558 fprintf (dump_file, "Memory reference %u: ", id);
1559 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1560 fprintf (dump_file, "\n");
1561 }
1562 }
1563
1564 record_mem_ref_loc (ref, stmt, mem);
1565 }
1566 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1567 if (is_stored)
1568 {
1569 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1570 mark_ref_stored (ref, loop);
1571 }
1572 init_lim_data (stmt)->ref = ref->id;
1573 return;
1574 }
1575
1576 static unsigned *bb_loop_postorder;
1577
1578 /* qsort comparison function to sort blocks by their loop father's postorder number. */
1579
1580 static int
1581 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_,
1582 void *bb_loop_postorder_)
1583 {
1584 unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1585 basic_block bb1 = *(const basic_block *)bb1_;
1586 basic_block bb2 = *(const basic_block *)bb2_;
1587 class loop *loop1 = bb1->loop_father;
1588 class loop *loop2 = bb2->loop_father;
1589 if (loop1->num == loop2->num)
1590 return bb1->index - bb2->index;
1591 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1592 }
1593
1594 /* qsort comparison function to sort ref locs by their loop father's postorder number. */
1595
1596 static int
1597 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_,
1598 void *bb_loop_postorder_)
1599 {
1600 unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1601 const mem_ref_loc *loc1 = (const mem_ref_loc *)loc1_;
1602 const mem_ref_loc *loc2 = (const mem_ref_loc *)loc2_;
1603 class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1604 class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1605 if (loop1->num == loop2->num)
1606 return 0;
1607 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1608 }
1609
1610 /* Gathers memory references in loops. */
1611
1612 static void
1613 analyze_memory_references (void)
1614 {
1615 gimple_stmt_iterator bsi;
1616 basic_block bb, *bbs;
1617 class loop *loop, *outer;
1618 unsigned i, n;
1619
1620 /* Collect all basic-blocks in loops and sort them by their
1621 loop's postorder number. */
1622 i = 0;
1623 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1624 FOR_EACH_BB_FN (bb, cfun)
1625 if (bb->loop_father != current_loops->tree_root)
1626 bbs[i++] = bb;
1627 n = i;
1628 gcc_sort_r (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp,
1629 bb_loop_postorder);
1630
1631 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1632 That results in better locality for all the bitmaps. */
1633 for (i = 0; i < n; ++i)
1634 {
1635 basic_block bb = bbs[i];
1636 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1637 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1638 }
1639
1640 /* Sort the location list of gathered memory references by their
1641 loop postorder number. */
1642 im_mem_ref *ref;
1643 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1644 ref->accesses_in_loop.sort (sort_locs_in_loop_postorder_cmp,
1645 bb_loop_postorder);
1646
1647 free (bbs);
1648
1649 /* Propagate the information about accessed memory references up
1650 the loop hierarchy. */
1651 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1652 {
1653 /* Finalize the overall touched references (including subloops). */
1654 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1655 &memory_accesses.refs_stored_in_loop[loop->num]);
1656
1657 /* Propagate the information about accessed memory references up
1658 the loop hierarchy. */
1659 outer = loop_outer (loop);
1660 if (outer == current_loops->tree_root)
1661 continue;
1662
1663 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1664 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1665 }
1666 }
1667
1668 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1669 tree_to_aff_combination_expand. */
1670
1671 static bool
1672 mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
1673 hash_map<tree, name_expansion *> **ttae_cache)
1674 {
1675 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1676 object and their offsets differ in such a way that the locations cannot
1677 overlap, then they cannot alias. */
1678 poly_widest_int size1, size2;
1679 aff_tree off1, off2;
1680
1681 /* Perform basic offset and type-based disambiguation. */
1682 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1683 return false;
1684
1685 /* The expansion of addresses may be a bit expensive, thus we only do
1686 the check at -O2 and higher optimization levels. */
1687 if (optimize < 2)
1688 return true;
1689
1690 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1691 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1692 aff_combination_expand (&off1, ttae_cache);
1693 aff_combination_expand (&off2, ttae_cache);
1694 aff_combination_scale (&off1, -1);
1695 aff_combination_add (&off2, &off1);
1696
1697 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1698 return false;
1699
1700 return true;
1701 }
1702
1703 /* Compare function for bsearch searching for reference locations
1704 in a loop. */
1705
1706 static int
1707 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_,
1708 void *bb_loop_postorder_)
1709 {
1710 unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1711 class loop *loop = (class loop *)const_cast<void *>(loop_);
1712 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1713 class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1714 if (loop->num == loc_loop->num
1715 || flow_loop_nested_p (loop, loc_loop))
1716 return 0;
1717 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1718 ? -1 : 1);
1719 }
1720
1721 /* Iterates over all locations of REF in LOOP and its subloops calling
1722 fn.operator() with the location as argument. When that operator
1723 returns true the iteration is stopped and true is returned.
1724 Otherwise false is returned. */
1725
1726 template <typename FN>
1727 static bool
1728 for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
1729 {
1730 unsigned i;
1731 mem_ref_loc *loc;
1732
1733 /* Search for the cluster of locs in the accesses_in_loop vector
1734 which is sorted by the postorder index of the loop father. */
1735 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp,
1736 bb_loop_postorder);
1737 if (!loc)
1738 return false;
1739
1740 /* We have found one location inside loop or its sub-loops. Iterate
1741 both forward and backward to cover the whole cluster. */
1742 i = loc - ref->accesses_in_loop.address ();
1743 while (i > 0)
1744 {
1745 --i;
1746 mem_ref_loc *l = &ref->accesses_in_loop[i];
1747 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1748 break;
1749 if (fn (l))
1750 return true;
1751 }
1752 for (i = loc - ref->accesses_in_loop.address ();
1753 i < ref->accesses_in_loop.length (); ++i)
1754 {
1755 mem_ref_loc *l = &ref->accesses_in_loop[i];
1756 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1757 break;
1758 if (fn (l))
1759 return true;
1760 }
1761
1762 return false;
1763 }
1764
1765 /* Rewrites location LOC by TMP_VAR. */
1766
1767 class rewrite_mem_ref_loc
1768 {
1769 public:
1770 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1771 bool operator () (mem_ref_loc *loc);
1772 tree tmp_var;
1773 };
1774
1775 bool
1776 rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
1777 {
1778 *loc->ref = tmp_var;
1779 update_stmt (loc->stmt);
1780 return false;
1781 }
1782
1783 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1784
1785 static void
1786 rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
1787 {
1788 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1789 }
1790
1791 /* Stores the first reference location in LOCP. */
1792
1793 class first_mem_ref_loc_1
1794 {
1795 public:
1796 first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
1797 bool operator () (mem_ref_loc *loc);
1798 mem_ref_loc **locp;
1799 };
1800
1801 bool
1802 first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
1803 {
1804 *locp = loc;
1805 return true;
1806 }
1807
1808 /* Returns the first reference location to REF in LOOP. */
1809
1810 static mem_ref_loc *
1811 first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
1812 {
1813 mem_ref_loc *locp = NULL;
1814 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1815 return locp;
1816 }
1817
1818 struct prev_flag_edges {
1819 /* Edge to insert new flag comparison code. */
1820 edge append_cond_position;
1821
1822 /* Edge for fall through from previous flag comparison. */
1823 edge last_cond_fallthru;
1824 };
1825
1826 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1827 MEM along edge EX.
1828
1829 The store is only done if MEM has changed. We do this so no
1830 changes to MEM occur on code paths that did not originally store
1831 into it.
1832
1833 The common case for execute_sm will transform:
1834
1835 for (...) {
1836 if (foo)
1837 stuff;
1838 else
1839 MEM = TMP_VAR;
1840 }
1841
1842 into:
1843
1844 lsm = MEM;
1845 for (...) {
1846 if (foo)
1847 stuff;
1848 else
1849 lsm = TMP_VAR;
1850 }
1851 MEM = lsm;
1852
1853 This function will generate:
1854
1855 lsm = MEM;
1856
1857 lsm_flag = false;
1858 ...
1859 for (...) {
1860 if (foo)
1861 stuff;
1862 else {
1863 lsm = TMP_VAR;
1864 lsm_flag = true;
1865 }
1866 }
1867 if (lsm_flag) <--
1868 MEM = lsm; <--
1869 */
1870
1871 static void
1872 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
1873 edge preheader, hash_set <basic_block> *flag_bbs)
1874 {
1875 basic_block new_bb, then_bb, old_dest;
1876 bool loop_has_only_one_exit;
1877 edge then_old_edge, orig_ex = ex;
1878 gimple_stmt_iterator gsi;
1879 gimple *stmt;
1880 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1881 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1882
1883 profile_count count_sum = profile_count::zero ();
1884 int nbbs = 0, ncount = 0;
1885 profile_probability flag_probability = profile_probability::uninitialized ();
1886
1887 /* The flag is set in FLAG_BBS. Determine the probability that the flag
1888 will be true at the loop exit.
1889
1890 This code may look fancy, but it cannot update the profile very
1891 realistically because we do not know the probability that the flag will
1892 be true at a given loop exit.
1893
1894 We look for two interesting extremes:
1895 - when the exit is dominated by a block setting the flag, we know it
1896 will always be true. This is a common case.
1897 - when all blocks setting the flag have very low frequency, we know
1898 it will likely be false.
1899 In all other cases we default to 2/3 for the flag being true. */
1900
1901 for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
1902 it != flag_bbs->end (); ++it)
1903 {
1904 if ((*it)->count.initialized_p ())
1905 count_sum += (*it)->count, ncount ++;
1906 if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
1907 flag_probability = profile_probability::always ();
1908 nbbs++;
1909 }
1910
1911 profile_probability cap = profile_probability::always ().apply_scale (2, 3);
1912
1913 if (flag_probability.initialized_p ())
1914 ;
1915 else if (ncount == nbbs
1916 && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
1917 {
1918 flag_probability = count_sum.probability_in (preheader->count ());
1919 if (flag_probability > cap)
1920 flag_probability = cap;
1921 }
1922
1923 if (!flag_probability.initialized_p ())
1924 flag_probability = cap;
1925
1926 /* ?? Insert store after previous store if applicable. See note
1927 below. */
1928 if (prev_edges)
1929 ex = prev_edges->append_cond_position;
1930
1931 loop_has_only_one_exit = single_pred_p (ex->dest);
1932
1933 if (loop_has_only_one_exit)
1934 ex = split_block_after_labels (ex->dest);
1935 else
1936 {
1937 for (gphi_iterator gpi = gsi_start_phis (ex->dest);
1938 !gsi_end_p (gpi); gsi_next (&gpi))
1939 {
1940 gphi *phi = gpi.phi ();
1941 if (virtual_operand_p (gimple_phi_result (phi)))
1942 continue;
1943
1944 /* When the destination has a non-virtual PHI node with multiple
1945 predecessors make sure we preserve the PHI structure by
1946 forcing a forwarder block so that hoisting of that PHI will
1947 still work. */
1948 split_edge (ex);
1949 break;
1950 }
1951 }
1952
1953 old_dest = ex->dest;
1954 new_bb = split_edge (ex);
1955 then_bb = create_empty_bb (new_bb);
1956 then_bb->count = new_bb->count.apply_probability (flag_probability);
1957 if (irr)
1958 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1959 add_bb_to_loop (then_bb, new_bb->loop_father);
1960
1961 gsi = gsi_start_bb (new_bb);
1962 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1963 NULL_TREE, NULL_TREE);
1964 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1965
1966 gsi = gsi_start_bb (then_bb);
1967 /* Insert actual store. */
1968 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1969 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1970
1971 edge e1 = single_succ_edge (new_bb);
1972 edge e2 = make_edge (new_bb, then_bb,
1973 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1974 e2->probability = flag_probability;
1975
1976 e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
1977 e1->flags &= ~EDGE_FALLTHRU;
1978
1979 e1->probability = flag_probability.invert ();
1980
1981 then_old_edge = make_single_succ_edge (then_bb, old_dest,
1982 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1983
1984 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1985
1986 if (prev_edges)
1987 {
1988 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1989 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1990 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1991 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1992 recompute_dominator (CDI_DOMINATORS, old_dest));
1993 }
1994
1995 /* ?? Because stores may alias, they must happen in the exact
1996 sequence they originally happened. Save the position right after
1997 the (_lsm) store we just created so we can continue appending after
1998 it and maintain the original order. */
1999 {
2000 struct prev_flag_edges *p;
2001
2002 if (orig_ex->aux)
2003 orig_ex->aux = NULL;
2004 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
2005 p = (struct prev_flag_edges *) orig_ex->aux;
2006 p->append_cond_position = then_old_edge;
2007 p->last_cond_fallthru = find_edge (new_bb, old_dest);
2008 orig_ex->aux = (void *) p;
2009 }
2010
2011 if (!loop_has_only_one_exit)
2012 for (gphi_iterator gpi = gsi_start_phis (old_dest);
2013 !gsi_end_p (gpi); gsi_next (&gpi))
2014 {
2015 gphi *phi = gpi.phi ();
2016 unsigned i;
2017
2018 for (i = 0; i < gimple_phi_num_args (phi); i++)
2019 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2020 {
2021 tree arg = gimple_phi_arg_def (phi, i);
2022 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2023 update_stmt (phi);
2024 }
2025 }
2026 }
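
/* A sketch of what repeated calls for the same exit edge produce (variable
   names below are made up): sinking stores for MEM1 and then MEM2 on one
   exit yields, in the original store order,

     if (MEM1_flag)
       MEM1 = MEM1_lsm;
     if (MEM2_flag)
       MEM2 = MEM2_lsm;

   because the second call resumes at the append_cond_position saved by the
   first one and redirects its last_cond_fallthru edge, instead of emitting
   the MEM2 test in front of the MEM1 store.  */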
2027
2028 /* When REF is stored at the location, set a flag indicating the store. */
2029
2030 class sm_set_flag_if_changed
2031 {
2032 public:
2033 sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
2034 : flag (flag_), bbs (bbs_) {}
2035 bool operator () (mem_ref_loc *loc);
2036 tree flag;
2037 hash_set <basic_block> *bbs;
2038 };
2039
2040 bool
2041 sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
2042 {
2043 /* Only set the flag for writes. */
2044 if (is_gimple_assign (loc->stmt)
2045 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
2046 {
2047 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
2048 gimple *stmt = gimple_build_assign (flag, boolean_true_node);
2049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2050 bbs->add (gimple_bb (stmt));
2051 }
2052 return false;
2053 }
2054
2055 /* Helper function for execute_sm. On every location where REF is
2056 stored to, set an appropriate flag indicating the store. */
2057
2058 static tree
2059 execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
2060 hash_set <basic_block> *bbs)
2061 {
2062 tree flag;
2063 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
2064 flag = create_tmp_reg (boolean_type_node, str);
2065 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
2066 return flag;
2067 }
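
/* For illustration (a sketch with made-up names): if the loop body contains
   the store "g = x;" for REF, the functor above appends a flag update,

     g = x;
     g_flag = true;

   and the later rewrite_mem_refs call in execute_sm turns the store itself
   into "g_lsm = x;", leaving only register operations inside the loop.  */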
2068
2069 /* Executes store motion of memory reference REF from LOOP.
2070 Exits from the LOOP are stored in EXITS. The initialization of the
2071 temporary variable is put in the preheader of the loop, and assignments
2072 to the reference from the temporary variable are emitted on the exits. */
2073
2074 static void
2075 execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
2076 {
2077 tree tmp_var, store_flag = NULL_TREE;
2078 unsigned i;
2079 gassign *load;
2080 struct fmt_data fmt_data;
2081 edge ex;
2082 struct lim_aux_data *lim_data;
2083 bool multi_threaded_model_p = false;
2084 gimple_stmt_iterator gsi;
2085 hash_set<basic_block> flag_bbs;
2086
2087 if (dump_file && (dump_flags & TDF_DETAILS))
2088 {
2089 fprintf (dump_file, "Executing store motion of ");
2090 print_generic_expr (dump_file, ref->mem.ref);
2091 fprintf (dump_file, " from loop %d\n", loop->num);
2092 }
2093
2094 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
2095 get_lsm_tmp_name (ref->mem.ref, ~0));
2096
2097 fmt_data.loop = loop;
2098 fmt_data.orig_loop = loop;
2099 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
2100
2101 if (bb_in_transaction (loop_preheader_edge (loop)->src)
2102 || (! flag_store_data_races
2103 && ! ref_always_accessed_p (loop, ref, true)))
2104 multi_threaded_model_p = true;
2105
2106 if (multi_threaded_model_p)
2107 store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);
2108
2109 rewrite_mem_refs (loop, ref, tmp_var);
2110
2111 /* Emit the load code right before the first location of the reference
2112 in the loop, so that we are sure it will be processed by
2113 move_computations after all dependencies. */
2114 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2115
2116 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2117 load altogether, since the store is predicated by a flag. We
2118 could do the load only if it was originally in the loop. */
2119 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2120 lim_data = init_lim_data (load);
2121 lim_data->max_loop = loop;
2122 lim_data->tgt_loop = loop;
2123 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2124
2125 if (multi_threaded_model_p)
2126 {
2127 load = gimple_build_assign (store_flag, boolean_false_node);
2128 lim_data = init_lim_data (load);
2129 lim_data->max_loop = loop;
2130 lim_data->tgt_loop = loop;
2131 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2132 }
2133
2134 /* Sink the store to every exit from the loop. */
2135 FOR_EACH_VEC_ELT (exits, i, ex)
2136 if (!multi_threaded_model_p)
2137 {
2138 gassign *store;
2139 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2140 gsi_insert_on_edge (ex, store);
2141 }
2142 else
2143 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
2144 loop_preheader_edge (loop), &flag_bbs);
2145 }
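
/* A sketch of the single-threaded (!multi_threaded_model_p) result for a
   made-up reference g:

     g_lsm = g;            <- load, hoisted out of the loop later by
     for (...)                move_computations
       if (cond)
         g_lsm = x;        <- store rewritten to the temporary
     g = g_lsm;            <- store sunk onto every exit edge

   In the multi-threaded model the exit store is guarded by the flag
   instead, as shown in the comment before execute_sm_if_changed.  */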
2146
2147 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2148 edges of the LOOP. */
2149
2150 static void
2151 hoist_memory_references (class loop *loop, bitmap mem_refs,
2152 vec<edge> exits)
2153 {
2154 im_mem_ref *ref;
2155 unsigned i;
2156 bitmap_iterator bi;
2157
2158 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2159 {
2160 ref = memory_accesses.refs_list[i];
2161 execute_sm (loop, exits, ref);
2162 }
2163 }
2164
2165 class ref_always_accessed
2166 {
2167 public:
2168 ref_always_accessed (class loop *loop_, bool stored_p_)
2169 : loop (loop_), stored_p (stored_p_) {}
2170 bool operator () (mem_ref_loc *loc);
2171 class loop *loop;
2172 bool stored_p;
2173 };
2174
2175 bool
2176 ref_always_accessed::operator () (mem_ref_loc *loc)
2177 {
2178 class loop *must_exec;
2179
2180 if (!get_lim_data (loc->stmt))
2181 return false;
2182
2183 /* If we require an always executed store make sure the statement
2184 stores to the reference. */
2185 if (stored_p)
2186 {
2187 tree lhs = gimple_get_lhs (loc->stmt);
2188 if (!lhs
2189 || lhs != *loc->ref)
2190 return false;
2191 }
2192
2193 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2194 if (!must_exec)
2195 return false;
2196
2197 if (must_exec == loop
2198 || flow_loop_nested_p (must_exec, loop))
2199 return true;
2200
2201 return false;
2202 }
2203
2204 /* Returns true if REF is always accessed in LOOP. If STORED_P is true
2205 make sure REF is always stored to in LOOP. */
2206
2207 static bool
2208 ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
2209 {
2210 return for_all_locs_in_loop (loop, ref,
2211 ref_always_accessed (loop, stored_p));
2212 }
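
/* For illustration (made-up code): in

     for (...)
       g = x;

   the store is executed whenever the loop is entered, so
   ref_always_accessed_p (loop, ref-of-g, true) holds; whereas in

     for (...)
       if (cond)
         g = x;

   the store is only conditional and the predicate is false.  The check
   relies on the ALWAYS_EXECUTED_IN information computed by
   fill_always_executed_in below.  */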
2213
2214 /* Returns true if REF1 and REF2 are independent. */
2215
2216 static bool
2217 refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
2218 {
2219 if (ref1 == ref2)
2220 return true;
2221
2222 if (dump_file && (dump_flags & TDF_DETAILS))
2223 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2224 ref1->id, ref2->id);
2225
2226 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2227 {
2228 if (dump_file && (dump_flags & TDF_DETAILS))
2229 fprintf (dump_file, "dependent.\n");
2230 return false;
2231 }
2232 else
2233 {
2234 if (dump_file && (dump_flags & TDF_DETAILS))
2235 fprintf (dump_file, "independent.\n");
2236 return true;
2237 }
2238 }
2239
2240 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2241 and its super-loops. */
2242
2243 static void
2244 record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
2245 {
2246 /* We can propagate dependent-in-loop bits up the loop
2247 hierarchy to all outer loops. */
2248 while (loop != current_loops->tree_root
2249 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2250 loop = loop_outer (loop);
2251 }
2252
2253 /* Returns true if REF is independent of all other memory
2254 references in LOOP. */
2255
2256 static bool
2257 ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
2258 {
2259 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2260
2261 bool indep_p = true;
2262 bitmap refs_to_check;
2263
2264 if (stored_p)
2265 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2266 else
2267 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2268
2269 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2270 indep_p = false;
2271 else
2272 {
2273 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2274 return true;
2275 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2276 return false;
2277
2278 class loop *inner = loop->inner;
2279 while (inner)
2280 {
2281 if (!ref_indep_loop_p_1 (inner, ref, stored_p))
2282 {
2283 indep_p = false;
2284 break;
2285 }
2286 inner = inner->next;
2287 }
2288
2289 if (indep_p)
2290 {
2291 unsigned i;
2292 bitmap_iterator bi;
2293 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2294 {
2295 im_mem_ref *aref = memory_accesses.refs_list[i];
2296 if (!refs_independent_p (ref, aref))
2297 {
2298 indep_p = false;
2299 break;
2300 }
2301 }
2302 }
2303 }
2304
2305 if (dump_file && (dump_flags & TDF_DETAILS))
2306 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2307 ref->id, loop->num, indep_p ? "independent" : "dependent");
2308
2309 /* Record the computed result in the cache. */
2310 if (indep_p)
2311 {
2312 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2313 && stored_p)
2314 {
2315 /* If it's independent of all refs then it's independent
2316 of stores, too. */
2317 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2318 }
2319 }
2320 else
2321 {
2322 record_dep_loop (loop, ref, stored_p);
2323 if (!stored_p)
2324 {
2325 /* If it's dependent on stores it's dependent on
2326 all refs, too. */
2327 record_dep_loop (loop, ref, true);
2328 }
2329 }
2330
2331 return indep_p;
2332 }
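
/* A sketch of the caching above (loop number made up): once loop 5 is found
   independent with stored_p == true, i.e. against all refs in the loop, the
   bit for LOOP_DEP_BIT (5, false) is set as well, since independence of all
   refs implies independence of the stores alone.  Conversely, a dependence
   found with stored_p == false is also recorded for stored_p == true, and
   record_dep_loop propagates dependence bits to every enclosing loop.  */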
2333
2334 /* Returns true if REF is independent of all other memory references in
2335 LOOP. */
2336
2337 static bool
2338 ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
2339 {
2340 gcc_checking_assert (MEM_ANALYZABLE (ref));
2341
2342 return ref_indep_loop_p_1 (loop, ref, false);
2343 }
2344
2345 /* Returns true if we can perform store motion of REF from LOOP. */
2346
2347 static bool
2348 can_sm_ref_p (class loop *loop, im_mem_ref *ref)
2349 {
2350 tree base;
2351
2352 /* Can't hoist unanalyzable refs. */
2353 if (!MEM_ANALYZABLE (ref))
2354 return false;
2355
2356 /* It should be movable. */
2357 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2358 || TREE_THIS_VOLATILE (ref->mem.ref)
2359 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2360 return false;
2361
2362 /* If it can throw, fail; we do not properly update EH info. */
2363 if (tree_could_throw_p (ref->mem.ref))
2364 return false;
2365
2366 /* If it can trap, it must always be executed in LOOP.
2367 Readonly memory locations may trap when storing to them, but
2368 tree_could_trap_p is a predicate for rvalues, so check that
2369 explicitly. */
2370 base = get_base_address (ref->mem.ref);
2371 if ((tree_could_trap_p (ref->mem.ref)
2372 || (DECL_P (base) && TREE_READONLY (base)))
2373 && !ref_always_accessed_p (loop, ref, true))
2374 return false;
2375
2376 /* And it must be independent of all other memory references
2377 in LOOP. */
2378 if (!ref_indep_loop_p (loop, ref))
2379 return false;
2380
2381 return true;
2382 }
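
/* Some made-up examples of references the checks above reject:

     volatile int v;
     for (...) v = x;            <- TREE_THIS_VOLATILE

     for (...) if (c) *p = x;    <- may trap and is not always executed

     for (...) { *p = x; t = *q; }
                                 <- rejected unless *p and *q can be
                                    disambiguated by ref_indep_loop_p

   These are only sketches; see the individual checks for the precise
   conditions.  */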
2383
2384 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2385 should be performed. SM_EXECUTED is the set of references for which
2386 store motion was performed in one of the outer loops. */
2387
2388 static void
2389 find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2390 {
2391 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2392 unsigned i;
2393 bitmap_iterator bi;
2394 im_mem_ref *ref;
2395
2396 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2397 {
2398 ref = memory_accesses.refs_list[i];
2399 if (can_sm_ref_p (loop, ref))
2400 bitmap_set_bit (refs_to_sm, i);
2401 }
2402 }
2403
2404 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2405 for a store motion optimization (i.e. whether we can insert statements
2406 on its exits). */
2407
2408 static bool
2409 loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
2410 vec<edge> exits)
2411 {
2412 unsigned i;
2413 edge ex;
2414
2415 FOR_EACH_VEC_ELT (exits, i, ex)
2416 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2417 return false;
2418
2419 return true;
2420 }
2421
2422 /* Try to perform store motion for all memory references modified inside
2423 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2424 store motion was executed in one of the outer loops. */
2425
2426 static void
2427 store_motion_loop (class loop *loop, bitmap sm_executed)
2428 {
2429 vec<edge> exits = get_loop_exit_edges (loop);
2430 class loop *subloop;
2431 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2432
2433 if (loop_suitable_for_sm (loop, exits))
2434 {
2435 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2436 hoist_memory_references (loop, sm_in_loop, exits);
2437 }
2438 exits.release ();
2439
2440 bitmap_ior_into (sm_executed, sm_in_loop);
2441 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2442 store_motion_loop (subloop, sm_executed);
2443 bitmap_and_compl_into (sm_executed, sm_in_loop);
2444 BITMAP_FREE (sm_in_loop);
2445 }
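
/* A sketch of the recursion above (made-up layout): for

     for (...)        <- outer loop
       {
         g = ...;     <- ref R, sunk out of the outer loop
         for (...)    <- inner loop
           g = ...;
       }

   R's bit is or-ed into SM_EXECUTED before the inner loop is processed, so
   find_refs_for_sm does not try to sink R again there; afterwards the bit
   is removed again (bitmap_and_compl_into), so sibling loops of the outer
   one see SM_EXECUTED unchanged.  */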
2446
2447 /* Try to perform store motion for all memory references modified inside
2448 loops. */
2449
2450 static void
2451 store_motion (void)
2452 {
2453 class loop *loop;
2454 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2455
2456 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2457 store_motion_loop (loop, sm_executed);
2458
2459 BITMAP_FREE (sm_executed);
2460 gsi_commit_edge_inserts ();
2461 }
2462
2463 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2464 for each such basic block bb records the outermost loop for which execution
2465 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2466 blocks that contain a nonpure call. */
2467
2468 static void
2469 fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
2470 {
2471 basic_block bb = NULL, *bbs, last = NULL;
2472 unsigned i;
2473 edge e;
2474 class loop *inn_loop = loop;
2475
2476 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2477 {
2478 bbs = get_loop_body_in_dom_order (loop);
2479
2480 for (i = 0; i < loop->num_nodes; i++)
2481 {
2482 edge_iterator ei;
2483 bb = bbs[i];
2484
2485 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2486 last = bb;
2487
2488 if (bitmap_bit_p (contains_call, bb->index))
2489 break;
2490
2491 FOR_EACH_EDGE (e, ei, bb->succs)
2492 {
2493 /* If there is an exit from this BB. */
2494 if (!flow_bb_inside_loop_p (loop, e->dest))
2495 break;
2496 /* Or we enter a possibly non-finite loop. */
2497 if (flow_loop_nested_p (bb->loop_father,
2498 e->dest->loop_father)
2499 && ! finite_loop_p (e->dest->loop_father))
2500 break;
2501 }
2502 if (e)
2503 break;
2504
2505 /* A loop might be infinite (TODO use simple loop analysis
2506 to disprove this if possible). */
2507 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2508 break;
2509
2510 if (!flow_bb_inside_loop_p (inn_loop, bb))
2511 break;
2512
2513 if (bb->loop_father->header == bb)
2514 {
2515 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2516 break;
2517
2518 /* In a loop that is always entered we may proceed anyway.
2519 But record that we entered it and stop once we leave it. */
2520 inn_loop = bb->loop_father;
2521 }
2522 }
2523
2524 while (1)
2525 {
2526 SET_ALWAYS_EXECUTED_IN (last, loop);
2527 if (last == loop->header)
2528 break;
2529 last = get_immediate_dominator (CDI_DOMINATORS, last);
2530 }
2531
2532 free (bbs);
2533 }
2534
2535 for (loop = loop->inner; loop; loop = loop->next)
2536 fill_always_executed_in_1 (loop, contains_call);
2537 }
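
/* An illustrative example (made-up source):

     for (i = ...)          <- loop
       {
         a[i] = ...;        <- dominates the latch and is reached before
                               any call or exit: ALWAYS_EXECUTED_IN == loop
         if (c)
           b[i] = ...;      <- does not dominate the latch, left unmarked
       }

   Blocks that follow a nonpure call or a possible exit from the loop are
   likewise left unmarked, since the call may not return or the loop may be
   left before reaching them.  */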
2538
2539 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2540 for each such basic block bb records the outermost loop for which execution
2541 of its header implies execution of bb. */
2542
2543 static void
2544 fill_always_executed_in (void)
2545 {
2546 basic_block bb;
2547 class loop *loop;
2548
2549 auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
2550 bitmap_clear (contains_call);
2551 FOR_EACH_BB_FN (bb, cfun)
2552 {
2553 gimple_stmt_iterator gsi;
2554 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2555 {
2556 if (nonpure_call_p (gsi_stmt (gsi)))
2557 break;
2558 }
2559
2560 if (!gsi_end_p (gsi))
2561 bitmap_set_bit (contains_call, bb->index);
2562 }
2563
2564 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2565 fill_always_executed_in_1 (loop, contains_call);
2566 }
2567
2568
2569 /* Compute the global information needed by the loop invariant motion pass. */
2570
2571 static void
2572 tree_ssa_lim_initialize (void)
2573 {
2574 class loop *loop;
2575 unsigned i;
2576
2577 bitmap_obstack_initialize (&lim_bitmap_obstack);
2578 gcc_obstack_init (&mem_ref_obstack);
2579 lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;
2580
2581 if (flag_tm)
2582 compute_transaction_bits ();
2583
2584 alloc_aux_for_edges (0);
2585
2586 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2587 memory_accesses.refs_list.create (100);
2588 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2589 memory_accesses.refs_list.quick_push
2590 (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));
2591
2592 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2593 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2594 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2595 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2596 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2597 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2598
2599 for (i = 0; i < number_of_loops (cfun); i++)
2600 {
2601 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2602 &lim_bitmap_obstack);
2603 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2604 &lim_bitmap_obstack);
2605 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2606 &lim_bitmap_obstack);
2607 }
2608
2609 memory_accesses.ttae_cache = NULL;
2610
2611 /* Initialize bb_loop_postorder with a mapping from loop->num to
2612 its postorder index. */
2613 i = 0;
2614 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2615 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2616 bb_loop_postorder[loop->num] = i++;
2617 }
2618
2619 /* Cleans up after the invariant motion pass. */
2620
2621 static void
2622 tree_ssa_lim_finalize (void)
2623 {
2624 basic_block bb;
2625 unsigned i;
2626 im_mem_ref *ref;
2627
2628 free_aux_for_edges ();
2629
2630 FOR_EACH_BB_FN (bb, cfun)
2631 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2632
2633 bitmap_obstack_release (&lim_bitmap_obstack);
2634 delete lim_aux_data_map;
2635
2636 delete memory_accesses.refs;
2637 memory_accesses.refs = NULL;
2638
2639 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2640 memref_free (ref);
2641 memory_accesses.refs_list.release ();
2642 obstack_free (&mem_ref_obstack, NULL);
2643
2644 memory_accesses.refs_in_loop.release ();
2645 memory_accesses.refs_stored_in_loop.release ();
2646 memory_accesses.all_refs_stored_in_loop.release ();
2647
2648 if (memory_accesses.ttae_cache)
2649 free_affine_expand_cache (&memory_accesses.ttae_cache);
2650
2651 free (bb_loop_postorder);
2652 }
2653
2654 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2655 i.e. those that are likely to be a win regardless of the register pressure. */
2656
2657 static unsigned int
2658 tree_ssa_lim (void)
2659 {
2660 unsigned int todo;
2661
2662 tree_ssa_lim_initialize ();
2663
2664 /* Gathers information about memory accesses in the loops. */
2665 analyze_memory_references ();
2666
2667 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2668 fill_always_executed_in ();
2669
2670 /* For each statement determine the outermost loop in which it is
2671 invariant and the cost of computing the invariant. */
2672 invariantness_dom_walker (CDI_DOMINATORS)
2673 .walk (cfun->cfg->x_entry_block_ptr);
2674
2675 /* Execute store motion. Force the necessary invariants to be moved
2676 out of the loops as well. */
2677 store_motion ();
2678
2679 /* Move the expressions that are expensive enough. */
2680 todo = move_computations ();
2681
2682 tree_ssa_lim_finalize ();
2683
2684 return todo;
2685 }
2686
2687 /* Loop invariant motion pass. */
2688
2689 namespace {
2690
2691 const pass_data pass_data_lim =
2692 {
2693 GIMPLE_PASS, /* type */
2694 "lim", /* name */
2695 OPTGROUP_LOOP, /* optinfo_flags */
2696 TV_LIM, /* tv_id */
2697 PROP_cfg, /* properties_required */
2698 0, /* properties_provided */
2699 0, /* properties_destroyed */
2700 0, /* todo_flags_start */
2701 0, /* todo_flags_finish */
2702 };
2703
2704 class pass_lim : public gimple_opt_pass
2705 {
2706 public:
2707 pass_lim (gcc::context *ctxt)
2708 : gimple_opt_pass (pass_data_lim, ctxt)
2709 {}
2710
2711 /* opt_pass methods: */
2712 opt_pass * clone () { return new pass_lim (m_ctxt); }
2713 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2714 virtual unsigned int execute (function *);
2715
2716 }; // class pass_lim
2717
2718 unsigned int
2719 pass_lim::execute (function *fun)
2720 {
2721 bool in_loop_pipeline = scev_initialized_p ();
2722 if (!in_loop_pipeline)
2723 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2724
2725 if (number_of_loops (fun) <= 1)
2726 return 0;
2727 unsigned int todo = tree_ssa_lim ();
2728
2729 if (!in_loop_pipeline)
2730 loop_optimizer_finalize ();
2731 else
2732 scev_reset ();
2733 return todo;
2734 }
2735
2736 } // anon namespace
2737
2738 gimple_opt_pass *
2739 make_pass_lim (gcc::context *ctxt)
2740 {
2741 return new pass_lim (ctxt);
2742 }
2743
2744