/* gcc/cfgloop.h — natural loop representation and analysis interface.  */
1 /* Natural loop functions
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #ifndef GCC_CFGLOOP_H
21 #define GCC_CFGLOOP_H
22
23 #include "double-int.h"
24 #include "wide-int.h"
25 #include "bitmap.h"
26 #include "sbitmap.h"
27 #include "function.h"
28
/* Structure to hold decision about loop unrolling.  */
enum lpt_dec
{
  /* No transformation will be applied.  */
  LPT_NONE,
  /* Unroll by a compile-time constant factor.  */
  LPT_UNROLL_CONSTANT,
  /* Unroll with a runtime-computed iteration count.  */
  LPT_UNROLL_RUNTIME,
  /* Unroll without knowing the iteration count ("stupid" unrolling).  */
  LPT_UNROLL_STUPID
};

/* The decision taken for a loop, plus its unroll factor.  */
struct GTY (()) lpt_decision {
  /* Which transformation (if any) to apply.  */
  enum lpt_dec decision;
  /* The unroll factor.  */
  unsigned times;
};
42
/* The type of extend applied to an IV.  */
enum iv_extend_code
{
  IV_SIGN_EXTEND,
  IV_ZERO_EXTEND,
  /* No known extension is applied.  */
  IV_UNKNOWN_EXTEND
};
50
/* The structure describing a bound on number of iterations of a loop.  */

struct GTY ((chain_next ("%h.next"))) nb_iter_bound {
  /* The statement STMT is executed at most ...  */
  gimple stmt;

  /* ... BOUND + 1 times (BOUND must be an unsigned constant).
     The + 1 is added for the following reasons:

     a) 0 would otherwise be unused, while we would need to care more about
        overflows (as MAX + 1 is sometimes produced as the estimate on number
	of executions of STMT).
     b) it is consistent with the result of number_of_iterations_exit.  */
  widest_int bound;

  /* True if the statement will cause the loop to be left the (at most)
     BOUND + 1-st time it is executed, that is, all the statements after it
     are executed at most BOUND times.  */
  bool is_exit;

  /* The next bound in the singly-linked list.  */
  struct nb_iter_bound *next;
};
74
/* Description of the loop exit.  */

struct GTY ((for_user)) loop_exit {
  /* The exit edge.  */
  edge e;

  /* Previous and next exit in the list of the exits of the loop.  */
  struct loop_exit *prev;
  struct loop_exit *next;

  /* Next element in the list of loops from that E exits.  */
  struct loop_exit *next_e;
};

/* Hasher for the EXITS hash table in struct loops: loop_exit records are
   hashed and compared by their exit edge E.  */
struct loop_exit_hasher : ggc_hasher<loop_exit *>
{
  typedef edge compare_type;

  /* Hash a loop_exit record.  */
  static hashval_t hash (loop_exit *);
  /* Compare a loop_exit record against an edge.  */
  static bool equal (loop_exit *, edge);
  /* Called when a record is removed from the table.  */
  static void remove (loop_exit *);
};
97
/* Shorthand for a pointer to a loop, used by the vec<> containers.  */
typedef struct loop *loop_p;

/* An integer estimation of the number of iterations.  Estimate_state
   describes what is the state of the estimation.  */
enum loop_estimation
{
  /* Estimate was not computed yet.  */
  EST_NOT_COMPUTED,
  /* Estimate is ready.  */
  EST_AVAILABLE,
  /* Sentinel; not a valid state.  */
  EST_LAST
};
110
/* Structure to hold information for each natural loop.  */
struct GTY ((chain_next ("%h.next"))) loop {
  /* Index into loops array.  */
  int num;

  /* Number of loop insns.  */
  unsigned ninsns;

  /* Basic block of loop header.  */
  basic_block header;

  /* Basic block of loop latch.  */
  basic_block latch;

  /* For loop unrolling/peeling decision.  */
  struct lpt_decision lpt_decision;

  /* Average number of executed insns per iteration.  */
  unsigned av_ninsns;

  /* Number of blocks contained within the loop.  */
  unsigned num_nodes;

  /* Superloops of the loop, starting with the outermost loop.  */
  vec<loop_p, va_gc> *superloops;

  /* The first inner (child) loop or NULL if innermost loop.  */
  struct loop *inner;

  /* Link to the next (sibling) loop.  */
  struct loop *next;

  /* Auxiliary info specific to a pass.  */
  PTR GTY ((skip (""))) aux;

  /* The number of times the latch of the loop is executed.  This can be an
     INTEGER_CST, or a symbolic expression representing the number of
     iterations like "N - 1", or a COND_EXPR containing the runtime
     conditions under which the number of iterations is non zero.

     Don't access this field directly: number_of_latch_executions
     computes and caches the computed information in this field.  */
  tree nb_iterations;

  /* An integer guaranteed to be greater or equal to nb_iterations.  Only
     valid if any_upper_bound is true.  */
  widest_int nb_iterations_upper_bound;

  /* An integer giving an estimate on nb_iterations.  Unlike
     nb_iterations_upper_bound, there is no guarantee that it is at least
     nb_iterations.  Only valid if any_estimate is true.  */
  widest_int nb_iterations_estimate;

  /* True if nb_iterations_upper_bound holds a valid bound.  */
  bool any_upper_bound;
  /* True if nb_iterations_estimate holds a valid estimate.  */
  bool any_estimate;

  /* True if the loop can be parallel.  */
  bool can_be_parallel;

  /* True if -Waggressive-loop-optimizations warned about this loop
     already.  */
  bool warned_aggressive_loop_optimizations;

  /* An integer estimation of the number of iterations.  Estimate_state
     describes what is the state of the estimation.  */
  enum loop_estimation estimate_state;

  /* If > 0, an integer, where the user asserted that for any
     I in [ 0, nb_iterations ) and for any J in
     [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
     of the loop can be safely evaluated concurrently.  */
  int safelen;

  /* True if this loop should never be vectorized.  */
  bool dont_vectorize;

  /* True if we should try harder to vectorize this loop.  */
  bool force_vectorize;

  /* For SIMD loops, this is a unique identifier of the loop, referenced
     by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
     builtins.  */
  tree simduid;

  /* Upper bound on number of iterations of a loop.  */
  struct nb_iter_bound *bounds;

  /* Head of the cyclic list of the exits of the loop.  */
  struct loop_exit *exits;

  /* Number of iteration analysis data for RTL.  */
  struct niter_desc *simple_loop_desc;

  /* For sanity checking during loop fixup we record here the former
     loop header for loops marked for removal.  Note that this prevents
     the basic-block from being collected but its index can still be
     reused.  */
  basic_block former_header;
};
210
/* Flags for state of loop structure.  */
enum
{
  LOOPS_HAVE_PREHEADERS = 1,
  LOOPS_HAVE_SIMPLE_LATCHES = 2,
  LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
  LOOPS_HAVE_RECORDED_EXITS = 8,
  LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
  LOOP_CLOSED_SSA = 32,
  LOOPS_NEED_FIXUP = 64,
  LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
};

/* The default set of properties most loop optimizers request.  */
#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
		      | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
/* Properties to request when the CFG should not be modified.  */
#define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
227
/* Structure to hold CFG information about natural loops within a function.  */
struct GTY (()) loops {
  /* State of loops; a bitmask of the LOOPS_* flags above.  */
  int state;

  /* Array of the loops, indexed by loop number.  */
  vec<loop_p, va_gc> *larray;

  /* Maps edges to the list of their descriptions as loop exits.  Edges
     whose sources or destinations have loop_father == NULL (which may
     happen during the cfg manipulations) should not appear in EXITS.  */
  hash_table<loop_exit_hasher> *GTY(()) exits;

  /* Pointer to root of loop hierarchy tree.  */
  struct loop *tree_root;
};
244
245 /* Loop recognition. */
246 bool bb_loop_header_p (basic_block);
247 void init_loops_structure (struct function *, struct loops *, unsigned);
248 extern struct loops *flow_loops_find (struct loops *);
249 extern void disambiguate_loops_with_multiple_latches (void);
250 extern void flow_loops_free (struct loops *);
251 extern void flow_loops_dump (FILE *,
252 void (*)(const struct loop *, FILE *, int), int);
253 extern void flow_loop_dump (const struct loop *, FILE *,
254 void (*)(const struct loop *, FILE *, int), int);
255 struct loop *alloc_loop (void);
256 extern void flow_loop_free (struct loop *);
257 int flow_loop_nodes_find (basic_block, struct loop *);
258 unsigned fix_loop_structure (bitmap changed_bbs);
259 bool mark_irreducible_loops (void);
260 void release_recorded_exits (void);
261 void record_loop_exits (void);
262 void rescan_loop_exit (edge, bool, bool);
263
264 /* Loop data structure manipulation/querying. */
265 extern void flow_loop_tree_node_add (struct loop *, struct loop *);
266 extern void flow_loop_tree_node_remove (struct loop *);
267 extern void place_new_loop (struct function *, struct loop *);
268 extern void add_loop (struct loop *, struct loop *);
269 extern bool flow_loop_nested_p (const struct loop *, const struct loop *);
270 extern bool flow_bb_inside_loop_p (const struct loop *, const_basic_block);
271 extern struct loop * find_common_loop (struct loop *, struct loop *);
272 struct loop *superloop_at_depth (struct loop *, unsigned);
273 struct eni_weights_d;
274 extern int num_loop_insns (const struct loop *);
275 extern int average_num_loop_insns (const struct loop *);
276 extern unsigned get_loop_level (const struct loop *);
277 extern bool loop_exit_edge_p (const struct loop *, const_edge);
278 extern bool loop_exits_to_bb_p (struct loop *, basic_block);
279 extern bool loop_exits_from_bb_p (struct loop *, basic_block);
280 extern void mark_loop_exit_edges (void);
281 extern location_t get_loop_location (struct loop *loop);
282
283 /* Loops & cfg manipulation. */
284 extern basic_block *get_loop_body (const struct loop *);
285 extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
286 unsigned);
287 extern basic_block *get_loop_body_in_dom_order (const struct loop *);
288 extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
289 extern basic_block *get_loop_body_in_custom_order (const struct loop *,
290 int (*) (const void *, const void *));
291
292 extern vec<edge> get_loop_exit_edges (const struct loop *);
293 extern edge single_exit (const struct loop *);
294 extern edge single_likely_exit (struct loop *loop);
295 extern unsigned num_loop_branches (const struct loop *);
296
297 extern edge loop_preheader_edge (const struct loop *);
298 extern edge loop_latch_edge (const struct loop *);
299
300 extern void add_bb_to_loop (basic_block, struct loop *);
301 extern void remove_bb_from_loops (basic_block);
302
303 extern void cancel_loop_tree (struct loop *);
304 extern void delete_loop (struct loop *);
305
/* Flags for create_preheader / create_preheaders.  */
enum
{
  /* Force the preheader block to have only a single successor.  */
  CP_SIMPLE_PREHEADERS = 1,
  /* Also make the preheader fall through to the loop header.  */
  CP_FALLTHRU_PREHEADERS = 2
};
311
312 basic_block create_preheader (struct loop *, int);
313 extern void create_preheaders (int);
314 extern void force_single_succ_latches (void);
315
316 extern void verify_loop_structure (void);
317
318 /* Loop analysis. */
319 extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
320 gcov_type expected_loop_iterations_unbounded (const struct loop *);
321 extern unsigned expected_loop_iterations (const struct loop *);
322 extern rtx doloop_condition_get (rtx);
323
324
325 /* Loop manipulation. */
326 extern bool can_duplicate_loop_p (const struct loop *loop);
327
#define DLTHE_FLAG_UPDATE_FREQ	1	/* Update frequencies in
					   duplicate_loop_to_header_edge.  */
#define DLTHE_RECORD_COPY_NUMBER 2	/* Record copy number in the aux
					   field of newly created BB.  */
#define DLTHE_FLAG_COMPLETTE_PEEL 4	/* Update frequencies expecting
					   a complete peeling.  (Historical
					   misspelling kept for callers.)  */
334
335 extern edge create_empty_if_region_on_edge (edge, tree);
336 extern struct loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
337 tree *, tree *, struct loop *);
338 extern struct loop * duplicate_loop (struct loop *, struct loop *);
339 extern void copy_loop_info (struct loop *loop, struct loop *target);
340 extern void duplicate_subloops (struct loop *, struct loop *);
341 extern bool duplicate_loop_to_header_edge (struct loop *, edge,
342 unsigned, sbitmap, edge,
343 vec<edge> *, int);
344 extern struct loop *loopify (edge, edge,
345 basic_block, edge, edge, bool,
346 unsigned, unsigned);
347 struct loop * loop_version (struct loop *, void *,
348 basic_block *, unsigned, unsigned, unsigned, bool);
349 extern bool remove_path (edge);
350 extern void unloop (struct loop *, bool *, bitmap);
351 extern void scale_loop_frequencies (struct loop *, int, int);
352 void mark_loop_for_removal (loop_p);
353
354
355 /* Induction variable analysis. */
356
357 /* The description of induction variable. The things are a bit complicated
358 due to need to handle subregs and extends. The value of the object described
359 by it can be obtained as follows (all computations are done in extend_mode):
360
361 Value in i-th iteration is
362 delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
363
364 If first_special is true, the value in the first iteration is
365 delta + mult * base
366
367 If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
368 subreg_{mode} (base + i * step)
369
370 The get_iv_value function can be used to obtain these expressions.
371
372 ??? Add a third mode field that would specify the mode in that inner
373 computation is done, which would enable it to be different from the
374 outer one? */
375
/* The description of an induction variable in RTL; see the big comment
   above for how the value is reconstructed from these fields.  */
struct rtx_iv
{
  /* Its base and step (mode of base and step is supposed to be extend_mode,
     see the description above).  */
  rtx base, step;

  /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
     or IV_UNKNOWN_EXTEND).  */
  enum iv_extend_code extend;

  /* Operations applied in the extended mode.  */
  rtx delta, mult;

  /* The mode it is extended to.  */
  enum machine_mode extend_mode;

  /* The mode the variable iterates in.  */
  enum machine_mode mode;

  /* Whether the first iteration needs to be handled specially.  */
  unsigned first_special : 1;
};
398
/* The description of an exit from the loop and of the number of iterations
   till we take the exit.  */

struct GTY(()) niter_desc
{
  /* The edge out of the loop.  */
  edge out_edge;

  /* The other edge leading from the condition.  */
  edge in_edge;

  /* True if we are able to say anything about number of iterations of the
     loop.  */
  bool simple_p;

  /* True if the loop iterates a constant number of times.  */
  bool const_iter;

  /* Number of iterations if constant.  */
  uint64_t niter;

  /* Assumptions under which the rest of the information is valid.  */
  rtx assumptions;

  /* Assumptions under which the loop ends before reaching the latch,
     even if value of niter_expr says otherwise.  */
  rtx noloop_assumptions;

  /* Condition under which the loop is infinite.  */
  rtx infinite;

  /* Whether the comparison is signed.  */
  bool signed_p;

  /* The mode in which niter_expr should be computed.  */
  enum machine_mode mode;

  /* The number of iterations of the loop.  */
  rtx niter_expr;
};
439
440 extern void iv_analysis_loop_init (struct loop *);
441 extern bool iv_analyze (rtx_insn *, rtx, struct rtx_iv *);
442 extern bool iv_analyze_result (rtx_insn *, rtx, struct rtx_iv *);
443 extern bool iv_analyze_expr (rtx_insn *, rtx, enum machine_mode,
444 struct rtx_iv *);
445 extern rtx get_iv_value (struct rtx_iv *, rtx);
446 extern bool biv_p (rtx_insn *, rtx);
447 extern void find_simple_exit (struct loop *, struct niter_desc *);
448 extern void iv_analysis_done (void);
449
450 extern struct niter_desc *get_simple_loop_desc (struct loop *loop);
451 extern void free_simple_loop_desc (struct loop *loop);
452
/* Returns the cached niter_desc for LOOP, or NULL if not yet computed
   (use get_simple_loop_desc to compute it).  */
static inline struct niter_desc *
simple_loop_desc (struct loop *loop)
{
  return loop->simple_loop_desc;
}
458
459 /* Accessors for the loop structures. */
460
/* Returns the loop with index NUM from FNs loop tree.  NUM is not
   range-checked; it must be a valid index into the loops array.  */

static inline struct loop *
get_loop (struct function *fn, unsigned num)
{
  return (*loops_for_fn (fn)->larray)[num];
}
468
/* Returns the number of superloops of LOOP, i.e. its depth in the loop
   tree (the fake root has depth 0).  */

static inline unsigned
loop_depth (const struct loop *loop)
{
  return vec_safe_length (loop->superloops);
}
476
477 /* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
478 loop. */
479
480 static inline struct loop *
481 loop_outer (const struct loop *loop)
482 {
483 unsigned n = vec_safe_length (loop->superloops);
484
485 if (n == 0)
486 return NULL;
487
488 return (*loop->superloops)[n - 1];
489 }
490
/* Returns true if LOOP has at least one exit edge.  Requires recorded
   exits: EXITS is the head of a cyclic list, so a next->e of NULL means
   the list contains only the head sentinel.  */

static inline bool
loop_has_exit_edges (const struct loop *loop)
{
  return loop->exits->next->e != NULL;
}
498
499 /* Returns the list of loops in FN. */
500
501 inline vec<loop_p, va_gc> *
502 get_loops (struct function *fn)
503 {
504 struct loops *loops = loops_for_fn (fn);
505 if (!loops)
506 return NULL;
507
508 return loops->larray;
509 }
510
511 /* Returns the number of loops in FN (including the removed
512 ones and the fake loop that forms the root of the loop tree). */
513
514 static inline unsigned
515 number_of_loops (struct function *fn)
516 {
517 struct loops *loops = loops_for_fn (fn);
518 if (!loops)
519 return 0;
520
521 return vec_safe_length (loops->larray);
522 }
523
/* Returns true if state of the loops satisfies all properties
   described by FLAGS.  Note: dereferences current_loops unconditionally,
   so the loop optimizer must be initialized (unlike loops_state_clear,
   which tolerates a NULL current_loops).  */

static inline bool
loops_state_satisfies_p (unsigned flags)
{
  return (current_loops->state & flags) == flags;
}
532
/* Sets FLAGS in the loops state.  Requires current_loops to be non-NULL.  */

static inline void
loops_state_set (unsigned flags)
{
  current_loops->state |= flags;
}
540
/* Clears FLAGS from the loops state.  A no-op when there is no loop
   structure.  */

static inline void
loops_state_clear (unsigned flags)
{
  if (!current_loops)
    return;
  current_loops->state &= ~flags;
}
550
/* Loop iterators.  */

/* Flags for loop iteration.  */

enum li_flags
{
  LI_INCLUDE_ROOT = 1,		/* Include the fake root of the loop tree.  */
  LI_FROM_INNERMOST = 2,	/* Iterate over the loops in the reverse order,
				   starting from innermost ones.  */
  LI_ONLY_INNERMOST = 4		/* Iterate only over innermost loops.  */
};

/* The iterator for loops.  Records loop numbers (not pointers) up front,
   so loops deleted during iteration are simply skipped by next ().  */

struct loop_iterator
{
  loop_iterator (loop_p *loop, unsigned flags);
  ~loop_iterator ();

  inline loop_p next ();

  /* The list of loops to visit, as loop numbers.  */
  vec<int> to_visit;

  /* The index of the actual loop.  */
  unsigned idx;
};
578
579 inline loop_p
580 loop_iterator::next ()
581 {
582 int anum;
583
584 while (this->to_visit.iterate (this->idx, &anum))
585 {
586 this->idx++;
587 loop_p loop = get_loop (cfun, anum);
588 if (loop)
589 return loop;
590 }
591
592 return NULL;
593 }
594
/* Construct a loop iterator over current_loops according to FLAGS and
   store the first loop to visit (or NULL) in *LOOP.  */

inline
loop_iterator::loop_iterator (loop_p *loop, unsigned flags)
{
  struct loop *aloop;
  unsigned i;
  int mn;

  this->idx = 0;
  if (!current_loops)
    {
      /* No loop structure: produce an empty iteration.  */
      this->to_visit.create (0);
      *loop = NULL;
      return;
    }

  this->to_visit.create (number_of_loops (cfun));
  /* MN is the minimum loop number to record: 0 admits the fake root of
     the loop tree, 1 excludes it.  */
  mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;

  if (flags & LI_ONLY_INNERMOST)
    {
      /* Collect only the childless loops, in loop-index order.  */
      for (i = 0; vec_safe_iterate (current_loops->larray, i, &aloop); i++)
	if (aloop != NULL
	    && aloop->inner == NULL
	    && aloop->num >= mn)
	  this->to_visit.quick_push (aloop->num);
    }
  else if (flags & LI_FROM_INNERMOST)
    {
      /* Push the loops to LI->TO_VISIT in postorder.  */

      /* Descend to the innermost loop on the leftmost branch.  */
      for (aloop = current_loops->tree_root;
	   aloop->inner != NULL;
	   aloop = aloop->inner)
	continue;

      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  if (aloop->next)
	    {
	      /* Move to the next sibling and descend to its innermost
		 loop.  */
	      for (aloop = aloop->next;
		   aloop->inner != NULL;
		   aloop = aloop->inner)
		continue;
	    }
	  else if (!loop_outer (aloop))
	    break;
	  else
	    /* No sibling left: visit the parent next.  */
	    aloop = loop_outer (aloop);
	}
    }
  else
    {
      /* Push the loops to LI->TO_VISIT in preorder.  */
      aloop = current_loops->tree_root;
      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  if (aloop->inner != NULL)
	    aloop = aloop->inner;
	  else
	    {
	      /* Climb back up until a sibling exists or the root is
		 reached.  */
	      while (aloop != NULL && aloop->next == NULL)
		aloop = loop_outer (aloop);
	      if (aloop == NULL)
		break;
	      aloop = aloop->next;
	    }
	}
    }

  /* Prime *LOOP with the first loop to visit.  */
  *loop = this->next ();
}
671
/* Release the visit list when the iterator goes out of scope.  */
inline
loop_iterator::~loop_iterator ()
{
  this->to_visit.release ();
}
677
/* Iterate LOOP over the loops of cfun in the order selected by FLAGS
   (see enum li_flags); the iterator lives for the duration of the loop.  */
#define FOR_EACH_LOOP(LOOP, FLAGS) \
  for (loop_iterator li(&(LOOP), FLAGS); \
       (LOOP); \
       (LOOP) = li.next ())
682
/* The properties of the target, used by register-pressure heuristics.  */
struct target_cfgloop {
  /* Number of available registers.  */
  unsigned x_target_avail_regs;

  /* Number of available registers that are call-clobbered.  */
  unsigned x_target_clobbered_regs;

  /* Number of registers reserved for temporary expressions.  */
  unsigned x_target_res_regs;

  /* The cost for register when there still is some reserve, but we are
     approaching the number of available registers.  */
  unsigned x_target_reg_cost[2];

  /* The cost for register when we need to spill.  */
  unsigned x_target_spill_cost[2];
};
701
extern struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
extern struct target_cfgloop *this_target_cfgloop;
#else
#define this_target_cfgloop (&default_target_cfgloop)
#endif

/* Accessors for the current target's cfgloop parameters.  */
#define target_avail_regs \
  (this_target_cfgloop->x_target_avail_regs)
#define target_clobbered_regs \
  (this_target_cfgloop->x_target_clobbered_regs)
#define target_res_regs \
  (this_target_cfgloop->x_target_res_regs)
#define target_reg_cost \
  (this_target_cfgloop->x_target_reg_cost)
#define target_spill_cost \
  (this_target_cfgloop->x_target_spill_cost)
719
720 /* Register pressure estimation for induction variable optimizations & loop
721 invariant motion. */
722 extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
723 extern void init_set_costs (void);
724
725 /* Loop optimizer initialization. */
726 extern void loop_optimizer_init (unsigned);
727 extern void loop_optimizer_finalize (void);
728
/* Optimization passes.  Flags for unroll_loops.  */
enum
{
  UAP_UNROLL = 1,	/* Enables unrolling of loops if it seems profitable.  */
  UAP_UNROLL_ALL = 2	/* Enables unrolling of all loops.  */
};
735
736 extern void unroll_loops (int);
737 extern void doloop_optimize_loops (void);
738 extern void move_loop_invariants (void);
739 extern void scale_loop_profile (struct loop *loop, int scale, gcov_type iteration_bound);
740 extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
741
/* Returns the outermost loop of the loop nest that contains LOOP.  */
static inline struct loop *
loop_outermost (struct loop *loop)
{
  unsigned n = vec_safe_length (loop->superloops);

  /* With at most one superloop (the fake root), LOOP is itself the
     outermost loop of its nest.  */
  if (n <= 1)
    return loop;

  /* superloops[0] is the fake root, so the outermost real loop is at
     index 1.  */
  return (*loop->superloops)[1];
}
753
754 extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
755 extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
756 extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *);
757 extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
758 extern bool get_max_loop_iterations (struct loop *loop, widest_int *nit);
759 extern int bb_loop_depth (const_basic_block);
760
/* Converts VAL to widest_int, splitting it into two HOST_WIDE_INT
   halves.  */

static inline widest_int
gcov_type_to_wide_int (gcov_type val)
{
  HOST_WIDE_INT a[2];

  /* Low half.  */
  a[0] = (unsigned HOST_WIDE_INT) val;
  /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
     the size of type (undefined behavior); hence the two-step shift.  */
  val >>= HOST_BITS_PER_WIDE_INT - 1;
  val >>= 1;
  /* High half.  */
  a[1] = (unsigned HOST_WIDE_INT) val;

  return widest_int::from_array (a, 2);
}
777 #endif /* GCC_CFGLOOP_H */