1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This pass implements list scheduling within basic blocks. It is
23 run twice: (1) after flow analysis, but before register allocation,
24 and (2) after register allocation.
25
26 The first run performs interblock scheduling, moving insns between
27 different blocks in the same "region", and the second runs only
28 basic block scheduling.
29
30 Interblock motions performed are useful motions and speculative
31 motions, including speculative loads. Motions requiring code
32 duplication are not supported. The identification of motion type
33 and the check for validity of speculative motions requires
34 construction and analysis of the function's control flow graph.
35
36 The main entry point for this pass is schedule_insns(), called for
37 each function. The work of the scheduler is organized in three
38 levels: (1) function level: insns are subject to splitting,
39 control-flow-graph is constructed, regions are computed (after
40 reload, each region is of one block), (2) region level: control
41 flow graph attributes required for interblock scheduling are
42 computed (dominators, reachability, etc.), data dependences and
43 priorities are computed, and (3) block level: insns in the block
44 are actually scheduled. */
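/* For orientation, an illustrative sketch (not a literal call trace) of the
   three levels described above:

     schedule_insns ()                        (1) function level:
       find_rgns ()                               regions are computed
       for each region RGN:
         schedule_region (RGN)                (2) region level:
           dominators/probabilities (compute_dom_prob_ps),
           data dependences, priorities
           for each bb in RGN:
             schedule_block ()                (3) block level: list scheduling
                                                  (in haifa-sched.c)

   The exact set of helpers differs between the pre-reload and the
   post-reload pass.  */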
45 \f
46 #include "config.h"
47 #include "system.h"
48 #include "coretypes.h"
49 #include "tm.h"
50 #include "diagnostic-core.h"
51 #include "rtl.h"
52 #include "tm_p.h"
53 #include "hard-reg-set.h"
54 #include "regs.h"
55 #include "function.h"
56 #include "flags.h"
57 #include "insn-config.h"
58 #include "insn-attr.h"
59 #include "except.h"
60 #include "recog.h"
61 #include "params.h"
62 #include "sched-int.h"
63 #include "sel-sched.h"
64 #include "target.h"
65 #include "tree-pass.h"
66 #include "dbgcnt.h"
67
68 #ifdef INSN_SCHEDULING
69
70 /* Some accessor macros for h_i_d members only used within this file. */
71 #define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
72 #define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
73
74 /* nr_inter/spec counts interblock/speculative motion for the function. */
75 static int nr_inter, nr_spec;
76
77 static int is_cfg_nonregular (void);
78
79 /* Number of regions in the procedure. */
80 int nr_regions = 0;
81
82 /* Table of region descriptions. */
83 region *rgn_table = NULL;
84
85 /* Array of lists of regions' blocks. */
86 int *rgn_bb_table = NULL;
87
88 /* Topological order of blocks in the region (if b2 is reachable from
89 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
90 always referred to by either block or b, while its topological
91 order name (in the region) is referred to by bb. */
92 int *block_to_bb = NULL;
93
94 /* The number of the region containing a block. */
95 int *containing_rgn = NULL;
96
97 /* ebb_head[i] is the index in rgn_bb_table of the head basic block of the i'th ebb.
98 Currently we can get an ebb only through splitting of the currently
99 scheduled block; therefore, we don't need an ebb_head array for every region,
100 hence, it's sufficient to hold it for the current one only. */
101 int *ebb_head = NULL;
102
103 /* The minimum probability of reaching a source block so that it will be
104 considered for speculative scheduling. */
105 static int min_spec_prob;
106
107 static void find_single_block_region (bool);
108 static void find_rgns (void);
109 static bool too_large (int, int *, int *);
110
111 /* Blocks of the current region being scheduled. */
112 int current_nr_blocks;
113 int current_blocks;
114
115 /* A speculative motion requires checking live information on the path
116 from 'source' to 'target'. The split blocks are those to be checked.
117 After a speculative motion, live information should be modified in
118 the 'update' blocks.
119
120 Lists of split and update blocks for each candidate of the current
121 target are in array bblst_table. */
122 static basic_block *bblst_table;
123 static int bblst_size, bblst_last;
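/* A hypothetical illustration of split and update blocks: let the target
   block T branch to A and B, with the candidate source S reachable only
   through A (T -> A -> S).  Moving an insn from S up into T is speculative,
   because control may take T -> B and never reach S.  T -> B is then a split
   edge and B a split block: the moved insn must not clobber anything live at
   B's entry (see check_live).  A is an update block: after the motion the
   insn's result is live there, so its live-in set must be updated
   (see update_live).  */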
124
125 /* Arrays that hold the DFA state at the end of a basic block, to re-use
126 as the initial state at the start of successor blocks. The BB_STATE
127 array holds the actual DFA state, and BB_STATE_ARRAY[I] is a pointer
128 into BB_STATE for basic block I. FIXME: This should be a vec. */
129 static char *bb_state_array = NULL;
130 static state_t *bb_state = NULL;
131
132 /* Target info declarations.
133
134 The block currently being scheduled is referred to as the "target" block,
135 while other blocks in the region from which insns can be moved to the
136 target are called "source" blocks. The candidate structure holds info
137 about such sources: are they valid? Speculative? Etc. */
138 typedef struct
139 {
140 basic_block *first_member;
141 int nr_members;
142 }
143 bblst;
144
145 typedef struct
146 {
147 char is_valid;
148 char is_speculative;
149 int src_prob;
150 bblst split_bbs;
151 bblst update_bbs;
152 }
153 candidate;
154
155 static candidate *candidate_table;
156 #define IS_VALID(src) (candidate_table[src].is_valid)
157 #define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
158 #define IS_SPECULATIVE_INSN(INSN) \
159 (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
160 #define SRC_PROB(src) ( candidate_table[src].src_prob )
161
162 /* The bb being currently scheduled. */
163 int target_bb;
164
165 /* List of edges. */
166 typedef struct
167 {
168 edge *first_member;
169 int nr_members;
170 }
171 edgelst;
172
173 static edge *edgelst_table;
174 static int edgelst_last;
175
176 static void extract_edgelst (sbitmap, edgelst *);
177
178 /* Target info functions. */
179 static void split_edges (int, int, edgelst *);
180 static void compute_trg_info (int);
181 void debug_candidate (int);
182 void debug_candidates (int);
183
184 /* Dominators array: dom[i] contains the sbitmap of dominators of
185 bb i in the region. */
186 static sbitmap *dom;
187
188 /* bb 0 is the only region entry. */
189 #define IS_RGN_ENTRY(bb) (!bb)
190
191 /* Is bb_src dominated by bb_trg? */
192 #define IS_DOMINATED(bb_src, bb_trg) \
193 ( bitmap_bit_p (dom[bb_src], bb_trg) )
194
195 /* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
196 the probability of bb i relative to the region entry. */
197 static int *prob;
198
199 /* Bit-set of edges, where bit i stands for edge i. */
200 typedef sbitmap edgeset;
201
202 /* Number of edges in the region. */
203 static int rgn_nr_edges;
204
205 /* Array of size rgn_nr_edges. */
206 static edge *rgn_edges;
207
208 /* Mapping from each edge in the graph to its number in the rgn. */
209 #define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
210 #define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
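/* Illustrative note (summarizing the scheme above): when a region is set up
   for scheduling, each of its edges gets a dense index 0 .. rgn_nr_edges-1,
   recorded in edge->aux via SET_EDGE_TO_BIT, with rgn_edges[] as the reverse
   mapping.  Edge sets such as pot_split[] and ancestor_edges[] below are then
   plain sbitmaps indexed by that number.  */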
211
212 /* The split edges of a source bb are different for each target
213 bb. In order to compute this efficiently, the 'potential-split edges'
214 are computed for each bb prior to scheduling a region. This is actually
215 the split edges of each bb relative to the region entry.
216
217 pot_split[bb] is the set of potential split edges of bb. */
218 static edgeset *pot_split;
219
220 /* For every bb, a set of its ancestor edges. */
221 static edgeset *ancestor_edges;
222
223 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
224
225 /* Speculative scheduling functions. */
226 static int check_live_1 (int, rtx);
227 static void update_live_1 (int, rtx);
228 static int is_pfree (rtx, int, int);
229 static int find_conditional_protection (rtx, int);
230 static int is_conditionally_protected (rtx, int, int);
231 static int is_prisky (rtx, int, int);
232 static int is_exception_free (rtx, int, int);
233
234 static bool sets_likely_spilled (rtx);
235 static void sets_likely_spilled_1 (rtx, const_rtx, void *);
236 static void add_branch_dependences (rtx, rtx);
237 static void compute_block_dependences (int);
238
239 static void schedule_region (int);
240 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
241 static void propagate_deps (int, struct deps_desc *);
242 static void free_pending_lists (void);
243
244 /* Functions for construction of the control flow graph. */
245
246 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
247
248 We decide not to build the control flow graph if there is possibly more
249 than one entry to the function, if computed branches exist, if we
250 have nonlocal gotos, or if we have an unreachable loop. */
251
252 static int
253 is_cfg_nonregular (void)
254 {
255 basic_block b;
256 rtx insn;
257
258 /* If we have a label that could be the target of a nonlocal goto, then
259 the cfg is not well structured. */
260 if (nonlocal_goto_handler_labels)
261 return 1;
262
263 /* If we have any forced labels, then the cfg is not well structured. */
264 if (forced_labels)
265 return 1;
266
267 /* If we have exception handlers, then we consider the cfg not well
268 structured. ?!? We should be able to handle this now that we
269 compute an accurate cfg for EH. */
270 if (current_function_has_exception_handlers ())
271 return 1;
272
273 /* If we have insns which refer to labels as non-jumped-to operands,
274 then we consider the cfg not well structured. */
275 FOR_EACH_BB (b)
276 FOR_BB_INSNS (b, insn)
277 {
278 rtx note, next, set, dest;
279
280 /* If this function has a computed jump, then we consider the cfg
281 not well structured. */
282 if (JUMP_P (insn) && computed_jump_p (insn))
283 return 1;
284
285 if (!INSN_P (insn))
286 continue;
287
288 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
289 if (note == NULL_RTX)
290 continue;
291
292 /* For that label not to be seen as a referred-to label, this
293 must be a single-set which is feeding a jump *only*. This
294 could be a conditional jump with the label split off for
295 machine-specific reasons or a casesi/tablejump. */
296 next = next_nonnote_insn (insn);
297 if (next == NULL_RTX
298 || !JUMP_P (next)
299 || (JUMP_LABEL (next) != XEXP (note, 0)
300 && find_reg_note (next, REG_LABEL_TARGET,
301 XEXP (note, 0)) == NULL_RTX)
302 || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
303 return 1;
304
305 set = single_set (insn);
306 if (set == NULL_RTX)
307 return 1;
308
309 dest = SET_DEST (set);
310 if (!REG_P (dest) || !dead_or_set_p (next, dest))
311 return 1;
312 }
313
314 /* Unreachable loops with more than one basic block are detected
315 during the DFS traversal in find_rgns.
316
317 Unreachable loops with a single block are detected here. This
318 test is redundant with the one in find_rgns, but it's much
319 cheaper to go ahead and catch the trivial case here. */
320 FOR_EACH_BB (b)
321 {
322 if (EDGE_COUNT (b->preds) == 0
323 || (single_pred_p (b)
324 && single_pred (b) == b))
325 return 1;
326 }
327
328 /* All the tests passed. Consider the cfg well structured. */
329 return 0;
330 }
331
332 /* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */
333
334 static void
335 extract_edgelst (sbitmap set, edgelst *el)
336 {
337 unsigned int i = 0;
338 sbitmap_iterator sbi;
339
340 /* edgelst table space is reused in each call to extract_edgelst. */
341 edgelst_last = 0;
342
343 el->first_member = &edgelst_table[edgelst_last];
344 el->nr_members = 0;
345
346 /* Iterate over each set bit in the bitmap. */
347 EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
348 {
349 edgelst_table[edgelst_last++] = rgn_edges[i];
350 el->nr_members++;
351 }
352 }
353
354 /* Functions for the construction of regions. */
355
356 /* Print the regions, for debugging purposes. Callable from debugger. */
357
358 DEBUG_FUNCTION void
359 debug_regions (void)
360 {
361 int rgn, bb;
362
363 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
364 for (rgn = 0; rgn < nr_regions; rgn++)
365 {
366 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
367 rgn_table[rgn].rgn_nr_blocks);
368 fprintf (sched_dump, ";;\tbb/block: ");
369
370 /* We don't have ebb_head initialized yet, so we can't use
371 BB_TO_BLOCK (). */
372 current_blocks = RGN_BLOCKS (rgn);
373
374 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
375 fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
376
377 fprintf (sched_dump, "\n\n");
378 }
379 }
380
381 /* Print the region's basic blocks. */
382
383 DEBUG_FUNCTION void
384 debug_region (int rgn)
385 {
386 int bb;
387
388 fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
389 fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
390 rgn_table[rgn].rgn_nr_blocks);
391 fprintf (stderr, ";;\tbb/block: ");
392
393 /* We don't have ebb_head initialized yet, so we can't use
394 BB_TO_BLOCK (). */
395 current_blocks = RGN_BLOCKS (rgn);
396
397 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
398 fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
399
400 fprintf (stderr, "\n\n");
401
402 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
403 {
404 dump_bb (stderr,
405 BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
406 0, TDF_SLIM | TDF_BLOCKS);
407 fprintf (stderr, "\n");
408 }
409
410 fprintf (stderr, "\n");
411
412 }
413
414 /* True when a bb with index BB_INDEX is contained in region RGN. */
415 static bool
416 bb_in_region_p (int bb_index, int rgn)
417 {
418 int i;
419
420 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
421 if (rgn_bb_table[current_blocks + i] == bb_index)
422 return true;
423
424 return false;
425 }
426
427 /* Dump region RGN to file F using dot syntax. */
428 void
429 dump_region_dot (FILE *f, int rgn)
430 {
431 int i;
432
433 fprintf (f, "digraph Region_%d {\n", rgn);
434
435 /* We don't have ebb_head initialized yet, so we can't use
436 BB_TO_BLOCK (). */
437 current_blocks = RGN_BLOCKS (rgn);
438
439 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
440 {
441 edge e;
442 edge_iterator ei;
443 int src_bb_num = rgn_bb_table[current_blocks + i];
444 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);
445
446 FOR_EACH_EDGE (e, ei, bb->succs)
447 if (bb_in_region_p (e->dest->index, rgn))
448 fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
449 }
450 fprintf (f, "}\n");
451 }
452
453 /* The same, but first open a file specified by FNAME. */
454 void
455 dump_region_dot_file (const char *fname, int rgn)
456 {
457 FILE *f = fopen (fname, "wt");
458 dump_region_dot (f, rgn);
459 fclose (f);
460 }
461
462 /* Build a single block region for each basic block in the function.
463 This allows for using the same code for interblock and basic block
464 scheduling. */
465
466 static void
467 find_single_block_region (bool ebbs_p)
468 {
469 basic_block bb, ebb_start;
470 int i = 0;
471
472 nr_regions = 0;
473
474 if (ebbs_p) {
475 int probability_cutoff;
476 if (profile_info && flag_branch_probabilities)
477 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
478 else
479 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
480 probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
481
482 FOR_EACH_BB (ebb_start)
483 {
484 RGN_NR_BLOCKS (nr_regions) = 0;
485 RGN_BLOCKS (nr_regions) = i;
486 RGN_DONT_CALC_DEPS (nr_regions) = 0;
487 RGN_HAS_REAL_EBB (nr_regions) = 0;
488
489 for (bb = ebb_start; ; bb = bb->next_bb)
490 {
491 edge e;
492
493 rgn_bb_table[i] = bb->index;
494 RGN_NR_BLOCKS (nr_regions)++;
495 CONTAINING_RGN (bb->index) = nr_regions;
496 BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
497 i++;
498
499 if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
500 || LABEL_P (BB_HEAD (bb->next_bb)))
501 break;
502
503 e = find_fallthru_edge (bb->succs);
504 if (! e)
505 break;
506 if (e->probability <= probability_cutoff)
507 break;
508 }
509
510 ebb_start = bb;
511 nr_regions++;
512 }
513 }
514 else
515 FOR_EACH_BB (bb)
516 {
517 rgn_bb_table[nr_regions] = bb->index;
518 RGN_NR_BLOCKS (nr_regions) = 1;
519 RGN_BLOCKS (nr_regions) = nr_regions;
520 RGN_DONT_CALC_DEPS (nr_regions) = 0;
521 RGN_HAS_REAL_EBB (nr_regions) = 0;
522
523 CONTAINING_RGN (bb->index) = nr_regions;
524 BLOCK_TO_BB (bb->index) = 0;
525 nr_regions++;
526 }
527 }
528
529 /* Estimate the number of insns in BB. */
530 static int
531 rgn_estimate_number_of_insns (basic_block bb)
532 {
533 int count;
534
535 count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));
536
537 if (MAY_HAVE_DEBUG_INSNS)
538 {
539 rtx insn;
540
541 FOR_BB_INSNS (bb, insn)
542 if (DEBUG_INSN_P (insn))
543 count--;
544 }
545
546 return count;
547 }
548
549 /* Update number of blocks and the estimate for number of insns
550 in the region. Return true if the region is "too large" for interblock
551 scheduling (compile time considerations). */
552
553 static bool
554 too_large (int block, int *num_bbs, int *num_insns)
555 {
556 (*num_bbs)++;
557 (*num_insns) += (common_sched_info->estimate_number_of_insns
558 (BASIC_BLOCK_FOR_FN (cfun, block)));
559
560 return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
561 || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
562 }
563
564 /* UPDATE_LOOP_RELATIONS (blk, hdr): Check if the loop headed by max_hdr[blk]
565 is still an inner loop. Put in max_hdr[blk] the header of the innermost
566 loop containing blk. */
567 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
568 { \
569 if (max_hdr[blk] == -1) \
570 max_hdr[blk] = hdr; \
571 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
572 bitmap_clear_bit (inner, hdr); \
573 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
574 { \
575 bitmap_clear_bit (inner,max_hdr[blk]); \
576 max_hdr[blk] = hdr; \
577 } \
578 }
579
580 /* Find regions for interblock scheduling.
581
582 A region for scheduling can be:
583
584 * A loop-free procedure, or
585
586 * A reducible inner loop, or
587
588 * A basic block not contained in any other region.
589
590 ?!? In theory we could build other regions based on extended basic
591 blocks or reverse extended basic blocks. Is it worth the trouble?
592
593 Loop blocks that form a region are put into the region's block list
594 in topological order.
595
596 This procedure stores its results into the following global (ick) variables:
597
598 * nr_regions
599 * rgn_table
600 * rgn_bb_table
601 * block_to_bb
602 * containing_rgn
603
604 We use dominator relationships to avoid making regions out of non-reducible
605 loops.
606
607 This procedure needs to be converted to work on pred/succ lists instead
608 of edge tables. That would simplify it somewhat. */
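/* A worked example (assumed CFG, for illustration only):

       0 -> 1 -> 2 -> 3 -> exit, with a back edge 3 -> 1

   The DFS marks block 1 as a natural loop header and records max_hdr == 1
   for blocks 2 and 3.  The second traversal then checks that 2 and 3 are
   dominated by 1 and emits the region {1, 2, 3} with its blocks in
   topological order, provided the loop is reducible and does not exceed the
   max-sched-region-blocks / max-sched-region-insns limits.  Block 0 ends up
   in a single-block region of its own.  */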
609
610 static void
611 haifa_find_rgns (void)
612 {
613 int *max_hdr, *dfs_nr, *degree;
614 char no_loops = 1;
615 int node, child, loop_head, i, head, tail;
616 int count = 0, sp, idx = 0;
617 edge_iterator current_edge;
618 edge_iterator *stack;
619 int num_bbs, num_insns, unreachable;
620 int too_large_failure;
621 basic_block bb;
622
623 /* Note if a block is a natural loop header. */
624 sbitmap header;
625
626 /* Note if a block is a natural inner loop header. */
627 sbitmap inner;
628
629 /* Note if a block is in the block queue. */
630 sbitmap in_queue;
631
632 /* Note if a block is currently on the DFS stack. */
633 sbitmap in_stack;
634
635 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
636 and a mapping from block to its loop header (if the block is contained
637 in a loop, else -1).
638
639 Store results in HEADER, INNER, and MAX_HDR respectively, these will
640 be used as inputs to the second traversal.
641
642 STACK, SP and DFS_NR are only used during the first traversal. */
643
644 /* Allocate and initialize variables for the first traversal. */
645 max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
646 dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
647 stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
648
649 inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
650 bitmap_ones (inner);
651
652 header = sbitmap_alloc (last_basic_block_for_fn (cfun));
653 bitmap_clear (header);
654
655 in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
656 bitmap_clear (in_queue);
657
658 in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
659 bitmap_clear (in_stack);
660
661 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
662 max_hdr[i] = -1;
663
664 #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
665 #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
666
667 /* DFS traversal to find inner loops in the cfg. */
668
669 current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
670 sp = -1;
671
672 while (1)
673 {
674 if (EDGE_PASSED (current_edge))
675 {
676 /* We have reached a leaf node or a node that was already
677 processed. Pop edges off the stack until we find
678 an edge that has not yet been processed. */
679 while (sp >= 0 && EDGE_PASSED (current_edge))
680 {
681 /* Pop entry off the stack. */
682 current_edge = stack[sp--];
683 node = ei_edge (current_edge)->src->index;
684 gcc_assert (node != ENTRY_BLOCK);
685 child = ei_edge (current_edge)->dest->index;
686 gcc_assert (child != EXIT_BLOCK);
687 bitmap_clear_bit (in_stack, child);
688 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
689 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
690 ei_next (&current_edge);
691 }
692
693 /* See if we have finished the DFS tree traversal. */
694 if (sp < 0 && EDGE_PASSED (current_edge))
695 break;
696
697 /* Nope, continue the traversal with the popped node. */
698 continue;
699 }
700
701 /* Process a node. */
702 node = ei_edge (current_edge)->src->index;
703 gcc_assert (node != ENTRY_BLOCK);
704 bitmap_set_bit (in_stack, node);
705 dfs_nr[node] = ++count;
706
707 /* We don't traverse to the exit block. */
708 child = ei_edge (current_edge)->dest->index;
709 if (child == EXIT_BLOCK)
710 {
711 SET_EDGE_PASSED (current_edge);
712 ei_next (&current_edge);
713 continue;
714 }
715
716 /* If the successor is on the stack, then we've found a loop.
717 Mark the loop; if it is not a natural loop, it will
718 be rejected during the second traversal. */
719 if (bitmap_bit_p (in_stack, child))
720 {
721 no_loops = 0;
722 bitmap_set_bit (header, child);
723 UPDATE_LOOP_RELATIONS (node, child);
724 SET_EDGE_PASSED (current_edge);
725 ei_next (&current_edge);
726 continue;
727 }
728
729 /* If the child was already visited, then there is no need to visit
730 it again. Just update the loop relationships and restart
731 with a new edge. */
732 if (dfs_nr[child])
733 {
734 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
735 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
736 SET_EDGE_PASSED (current_edge);
737 ei_next (&current_edge);
738 continue;
739 }
740
741 /* Push an entry on the stack and continue DFS traversal. */
742 stack[++sp] = current_edge;
743 SET_EDGE_PASSED (current_edge);
744 current_edge = ei_start (ei_edge (current_edge)->dest->succs);
745 }
746
747 /* Reset ->aux field used by EDGE_PASSED. */
748 FOR_ALL_BB (bb)
749 {
750 edge_iterator ei;
751 edge e;
752 FOR_EACH_EDGE (e, ei, bb->succs)
753 e->aux = NULL;
754 }
755
756
757 /* Another check for unreachable blocks. The earlier test in
758 is_cfg_nonregular only finds unreachable blocks that do not
759 form a loop.
760
761 The DFS traversal will mark every block that is reachable from
762 the entry node by placing a nonzero value in dfs_nr. Thus if
763 dfs_nr is zero for any block, then it must be unreachable. */
764 unreachable = 0;
765 FOR_EACH_BB (bb)
766 if (dfs_nr[bb->index] == 0)
767 {
768 unreachable = 1;
769 break;
770 }
771
772 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
773 to hold degree counts. */
774 degree = dfs_nr;
775
776 FOR_EACH_BB (bb)
777 degree[bb->index] = EDGE_COUNT (bb->preds);
778
779 /* Do not perform region scheduling if there are any unreachable
780 blocks. */
781 if (!unreachable)
782 {
783 int *queue, *degree1 = NULL;
784 /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
785 there the basic blocks that are forced to be region heads.
786 This is done to try to assemble a few smaller regions
787 from a too_large region. */
788 sbitmap extended_rgn_header = NULL;
789 bool extend_regions_p;
790
791 if (no_loops)
792 bitmap_set_bit (header, 0);
793
794 /* Second traversal: find reducible inner loops and topologically sort
795 the blocks of each region. */
796
797 queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
798
799 extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
800 if (extend_regions_p)
801 {
802 degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
803 extended_rgn_header =
804 sbitmap_alloc (last_basic_block_for_fn (cfun));
805 bitmap_clear (extended_rgn_header);
806 }
807
808 /* Find blocks which are inner loop headers. We still have non-reducible
809 loops to consider at this point. */
810 FOR_EACH_BB (bb)
811 {
812 if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
813 {
814 edge e;
815 edge_iterator ei;
816 basic_block jbb;
817
818 /* Now check that the loop is reducible. We do this separately
819 from finding inner loops so that we do not find a reducible
820 loop which contains an inner non-reducible loop.
821
822 A simple way to find reducible/natural loops is to verify
823 that each block in the loop is dominated by the loop
824 header.
825
826 If there exists a block that is not dominated by the loop
827 header, then the block is reachable from outside the loop
828 and thus the loop is not a natural loop. */
829 FOR_EACH_BB (jbb)
830 {
831 /* First identify blocks in the loop, except for the loop
832 entry block. */
833 if (bb->index == max_hdr[jbb->index] && bb != jbb)
834 {
835 /* Now verify that the block is dominated by the loop
836 header. */
837 if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
838 break;
839 }
840 }
841
842 /* If we exited the loop early, then BB is the header of
843 a non-reducible loop and we should quit processing it
844 now. */
845 if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
846 continue;
847
848 /* BB is a header of an inner loop, or block 0 in a subroutine
849 with no loops at all. */
850 head = tail = -1;
851 too_large_failure = 0;
852 loop_head = max_hdr[bb->index];
853
854 if (extend_regions_p)
855 /* We save degree in case we meet a too_large region
856 and cancel it. We need a correct degree later when
857 calling extend_rgns. */
858 memcpy (degree1, degree,
859 last_basic_block_for_fn (cfun) * sizeof (int));
860
861 /* Decrease the degree of all BB's successors for topological
862 ordering. */
863 FOR_EACH_EDGE (e, ei, bb->succs)
864 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
865 --degree[e->dest->index];
866
867 /* Estimate # insns, and count # blocks in the region. */
868 num_bbs = 1;
869 num_insns = common_sched_info->estimate_number_of_insns (bb);
870
871 /* Find all loop latches (blocks with back edges to the loop
872 header) or, if the cfg has no loops, all the leaf blocks.
873
874 Place those blocks into the queue. */
875 if (no_loops)
876 {
877 FOR_EACH_BB (jbb)
878 /* Leaf nodes have only a single successor which must
879 be EXIT_BLOCK. */
880 if (single_succ_p (jbb)
881 && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
882 {
883 queue[++tail] = jbb->index;
884 bitmap_set_bit (in_queue, jbb->index);
885
886 if (too_large (jbb->index, &num_bbs, &num_insns))
887 {
888 too_large_failure = 1;
889 break;
890 }
891 }
892 }
893 else
894 {
895 edge e;
896
897 FOR_EACH_EDGE (e, ei, bb->preds)
898 {
899 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
900 continue;
901
902 node = e->src->index;
903
904 if (max_hdr[node] == loop_head && node != bb->index)
905 {
906 /* This is a loop latch. */
907 queue[++tail] = node;
908 bitmap_set_bit (in_queue, node);
909
910 if (too_large (node, &num_bbs, &num_insns))
911 {
912 too_large_failure = 1;
913 break;
914 }
915 }
916 }
917 }
918
919 /* Now add all the blocks in the loop to the queue.
920
921 We know the loop is a natural loop; however the algorithm
922 above will not always mark certain blocks as being in the
923 loop. Consider:
924 node children
925 a b,c
926 b c
927 c a,d
928 d b
929
930 The algorithm in the DFS traversal may not mark B & D as part
931 of the loop (i.e. they will not have max_hdr set to A).
932
933 We know they can not be loop latches (else they would have
934 had max_hdr set since they'd have a backedge to a dominator
935 block). So we don't need them on the initial queue.
936
937 We know they are part of the loop because they are dominated
938 by the loop header and can be reached by a backwards walk of
939 the edges starting with nodes on the initial queue.
940
941 It is safe and desirable to include those nodes in the
942 loop/scheduling region. To do so we would need to decrease
943 the degree of a node if it is the target of a backedge
944 within the loop itself as the node is placed in the queue.
945
946 We do not do this because I'm not sure that the actual
947 scheduling code will properly handle this case. ?!? */
948
949 while (head < tail && !too_large_failure)
950 {
951 edge e;
952 child = queue[++head];
953
954 FOR_EACH_EDGE (e, ei,
955 BASIC_BLOCK_FOR_FN (cfun, child)->preds)
956 {
957 node = e->src->index;
958
959 /* See discussion above about nodes not marked as in
960 this loop during the initial DFS traversal. */
961 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
962 || max_hdr[node] != loop_head)
963 {
964 tail = -1;
965 break;
966 }
967 else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
968 {
969 queue[++tail] = node;
970 bitmap_set_bit (in_queue, node);
971
972 if (too_large (node, &num_bbs, &num_insns))
973 {
974 too_large_failure = 1;
975 break;
976 }
977 }
978 }
979 }
980
981 if (tail >= 0 && !too_large_failure)
982 {
983 /* Place the loop header into list of region blocks. */
984 degree[bb->index] = -1;
985 rgn_bb_table[idx] = bb->index;
986 RGN_NR_BLOCKS (nr_regions) = num_bbs;
987 RGN_BLOCKS (nr_regions) = idx++;
988 RGN_DONT_CALC_DEPS (nr_regions) = 0;
989 RGN_HAS_REAL_EBB (nr_regions) = 0;
990 CONTAINING_RGN (bb->index) = nr_regions;
991 BLOCK_TO_BB (bb->index) = count = 0;
992
993 /* Remove blocks from queue[] when their in degree
994 becomes zero. Repeat until no blocks are left on the
995 list. This produces a topological list of blocks in
996 the region. */
997 while (tail >= 0)
998 {
999 if (head < 0)
1000 head = tail;
1001 child = queue[head];
1002 if (degree[child] == 0)
1003 {
1004 edge e;
1005
1006 degree[child] = -1;
1007 rgn_bb_table[idx++] = child;
1008 BLOCK_TO_BB (child) = ++count;
1009 CONTAINING_RGN (child) = nr_regions;
1010 queue[head] = queue[tail--];
1011
1012 FOR_EACH_EDGE (e, ei,
1013 BASIC_BLOCK_FOR_FN (cfun,
1014 child)->succs)
1015 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1016 --degree[e->dest->index];
1017 }
1018 else
1019 --head;
1020 }
1021 ++nr_regions;
1022 }
1023 else if (extend_regions_p)
1024 {
1025 /* Restore DEGREE. */
1026 int *t = degree;
1027
1028 degree = degree1;
1029 degree1 = t;
1030
1031 /* And force successors of BB to be region heads.
1032 This may provide several smaller regions instead
1033 of one too_large region. */
1034 FOR_EACH_EDGE (e, ei, bb->succs)
1035 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1036 bitmap_set_bit (extended_rgn_header, e->dest->index);
1037 }
1038 }
1039 }
1040 free (queue);
1041
1042 if (extend_regions_p)
1043 {
1044 free (degree1);
1045
1046 bitmap_ior (header, header, extended_rgn_header);
1047 sbitmap_free (extended_rgn_header);
1048
1049 extend_rgns (degree, &idx, header, max_hdr);
1050 }
1051 }
1052
1053 /* Any block that did not end up in a region is placed into a region
1054 by itself. */
1055 FOR_EACH_BB (bb)
1056 if (degree[bb->index] >= 0)
1057 {
1058 rgn_bb_table[idx] = bb->index;
1059 RGN_NR_BLOCKS (nr_regions) = 1;
1060 RGN_BLOCKS (nr_regions) = idx++;
1061 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1062 RGN_HAS_REAL_EBB (nr_regions) = 0;
1063 CONTAINING_RGN (bb->index) = nr_regions++;
1064 BLOCK_TO_BB (bb->index) = 0;
1065 }
1066
1067 free (max_hdr);
1068 free (degree);
1069 free (stack);
1070 sbitmap_free (header);
1071 sbitmap_free (inner);
1072 sbitmap_free (in_queue);
1073 sbitmap_free (in_stack);
1074 }
1075
1076
1077 /* Wrapper function.
1078 If FLAG_SEL_SCHED_PIPELINING is set, then use a custom function to form
1079 regions. Otherwise just call haifa_find_rgns. */
1080 static void
1081 find_rgns (void)
1082 {
1083 if (sel_sched_p () && flag_sel_sched_pipelining)
1084 sel_find_rgns ();
1085 else
1086 haifa_find_rgns ();
1087 }
1088
1089 static int gather_region_statistics (int **);
1090 static void print_region_statistics (int *, int, int *, int);
1091
1092 /* Calculate the histogram that shows the number of regions having the
1093 given number of basic blocks, and store it in the RSP array. Return
1094 the size of this array. */
1095 static int
1096 gather_region_statistics (int **rsp)
1097 {
1098 int i, *a = 0, a_sz = 0;
1099
1100 /* a[i] is the number of regions that have (i + 1) basic blocks. */
1101 for (i = 0; i < nr_regions; i++)
1102 {
1103 int nr_blocks = RGN_NR_BLOCKS (i);
1104
1105 gcc_assert (nr_blocks >= 1);
1106
1107 if (nr_blocks > a_sz)
1108 {
1109 a = XRESIZEVEC (int, a, nr_blocks);
1110 do
1111 a[a_sz++] = 0;
1112 while (a_sz != nr_blocks);
1113 }
1114
1115 a[nr_blocks - 1]++;
1116 }
1117
1118 *rsp = a;
1119 return a_sz;
1120 }
1121
1122 /* Print regions statistics. S1 and S2 denote the data before and after
1123 calling extend_rgns, respectively. */
1124 static void
1125 print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
1126 {
1127 int i;
1128
1129 /* We iterate until s2_sz because extend_rgns does not decrease
1130 the maximal region size. */
1131 for (i = 1; i < s2_sz; i++)
1132 {
1133 int n1, n2;
1134
1135 n2 = s2[i];
1136
1137 if (n2 == 0)
1138 continue;
1139
1140 if (i >= s1_sz)
1141 n1 = 0;
1142 else
1143 n1 = s1[i];
1144
1145 fprintf (sched_dump, ";; Region extension statistics: size %d: " \
1146 "was %d + %d more\n", i + 1, n1, n2 - n1);
1147 }
1148 }
1149
1150 /* Extend regions.
1151 DEGREE - Array of incoming edge counts, considering only
1152 the edges that don't have their sources in already formed regions.
1153 IDXP - pointer to the next available index in rgn_bb_table.
1154 HEADER - set of all region heads.
1155 LOOP_HDR - mapping from block to the containing loop
1156 (two blocks can reside within one region if they have
1157 the same loop header). */
1158 void
1159 extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
1160 {
1161 int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
1162 int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
1163
1164 max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
1165
1166 max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
1167
1168 order = XNEWVEC (int, last_basic_block_for_fn (cfun));
1169 post_order_compute (order, false, false);
1170
1171 for (i = nblocks - 1; i >= 0; i--)
1172 {
1173 int bbn = order[i];
1174 if (degree[bbn] >= 0)
1175 {
1176 max_hdr[bbn] = bbn;
1177 rescan = 1;
1178 }
1179 else
1180 /* This block already was processed in find_rgns. */
1181 max_hdr[bbn] = -1;
1182 }
1183
1184 /* The idea is to topologically walk through the CFG in top-down order.
1185 During the traversal, if all the predecessors of a node are
1186 marked to be in the same region (they all have the same max_hdr),
1187 then the current node is also marked to be a part of that region.
1188 Otherwise the node starts its own region.
1189 The CFG should be traversed until no further changes are made. On each
1190 iteration the set of the region heads is extended (the set of those
1191 blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
1192 set of all basic blocks, thus the algorithm is guaranteed to
1193 terminate. */
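/* A hypothetical illustration: for a diamond A -> B, A -> C, B -> D, C -> D
   where none of the four blocks was put into a region by haifa_find_rgns and
   none lies in a loop, the first iteration makes A a region head (its
   predecessor is outside the candidate set) and lets B, C and D inherit
   max_hdr == A, so the whole diamond is later assembled into one region; the
   second iteration only confirms that nothing changes.  */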
1194
1195 while (rescan && iter < max_iter)
1196 {
1197 rescan = 0;
1198
1199 for (i = nblocks - 1; i >= 0; i--)
1200 {
1201 edge e;
1202 edge_iterator ei;
1203 int bbn = order[i];
1204
1205 if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
1206 {
1207 int hdr = -1;
1208
1209 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
1210 {
1211 int predn = e->src->index;
1212
1213 if (predn != ENTRY_BLOCK
1214 /* If pred wasn't processed in find_rgns. */
1215 && max_hdr[predn] != -1
1216 /* And pred and bb reside in the same loop.
1217 (Or out of any loop). */
1218 && loop_hdr[bbn] == loop_hdr[predn])
1219 {
1220 if (hdr == -1)
1221 /* Then bb extends the containing region of pred. */
1222 hdr = max_hdr[predn];
1223 else if (hdr != max_hdr[predn])
1224 /* Too bad, there are at least two predecessors
1225 that reside in different regions. Thus, BB should
1226 begin its own region. */
1227 {
1228 hdr = bbn;
1229 break;
1230 }
1231 }
1232 else
1233 /* BB starts its own region. */
1234 {
1235 hdr = bbn;
1236 break;
1237 }
1238 }
1239
1240 if (hdr == bbn)
1241 {
1242 /* If BB starts its own region,
1243 update the set of headers with BB. */
1244 bitmap_set_bit (header, bbn);
1245 rescan = 1;
1246 }
1247 else
1248 gcc_assert (hdr != -1);
1249
1250 max_hdr[bbn] = hdr;
1251 }
1252 }
1253
1254 iter++;
1255 }
1256
1257 /* Statistics were gathered on the SPEC2000 package of tests with
1258 mainline weekly snapshot gcc-4.1-20051015 on ia64.
1259
1260 Statistics for SPECint:
1261 1 iteration : 1751 cases (38.7%)
1262 2 iterations: 2770 cases (61.3%)
1263 Blocks wrapped in regions by find_rgns without extension: 18295 blocks
1264 Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
1265 (We don't count single block regions here).
1266
1267 Statistics for SPECfp:
1268 1 iteration : 621 cases (35.9%)
1269 2 iterations: 1110 cases (64.1%)
1270 Blocks wrapped in regions by find_rgns without extension: 6476 blocks
1271 Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
1272 (We don't count single block regions here).
1273
1274 By default we do at most 2 iterations.
1275 This can be overridden with max-sched-extend-regions-iters parameter:
1276 0 - disable region extension,
1277 N > 0 - do at most N iterations. */
1278
1279 if (sched_verbose && iter != 0)
1280 fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
1281 rescan ? "... failed" : "");
1282
1283 if (!rescan && iter != 0)
1284 {
1285 int *s1 = NULL, s1_sz = 0;
1286
1287 /* Save the old statistics for later printout. */
1288 if (sched_verbose >= 6)
1289 s1_sz = gather_region_statistics (&s1);
1290
1291 /* We have succeeded. Now assemble the regions. */
1292 for (i = nblocks - 1; i >= 0; i--)
1293 {
1294 int bbn = order[i];
1295
1296 if (max_hdr[bbn] == bbn)
1297 /* BBN is a region head. */
1298 {
1299 edge e;
1300 edge_iterator ei;
1301 int num_bbs = 0, j, num_insns = 0, large;
1302
1303 large = too_large (bbn, &num_bbs, &num_insns);
1304
1305 degree[bbn] = -1;
1306 rgn_bb_table[idx] = bbn;
1307 RGN_BLOCKS (nr_regions) = idx++;
1308 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1309 RGN_HAS_REAL_EBB (nr_regions) = 0;
1310 CONTAINING_RGN (bbn) = nr_regions;
1311 BLOCK_TO_BB (bbn) = 0;
1312
1313 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
1314 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1315 degree[e->dest->index]--;
1316
1317 if (!large)
1318 /* Here we check whether the region is too_large. */
1319 for (j = i - 1; j >= 0; j--)
1320 {
1321 int succn = order[j];
1322 if (max_hdr[succn] == bbn)
1323 {
1324 if ((large = too_large (succn, &num_bbs, &num_insns)))
1325 break;
1326 }
1327 }
1328
1329 if (large)
1330 /* If the region is too_large, then wrap every block of
1331 the region into a single block region.
1332 Here we wrap only the region head. Other blocks are
1333 processed in the loop below. */
1334 {
1335 RGN_NR_BLOCKS (nr_regions) = 1;
1336 nr_regions++;
1337 }
1338
1339 num_bbs = 1;
1340
1341 for (j = i - 1; j >= 0; j--)
1342 {
1343 int succn = order[j];
1344
1345 if (max_hdr[succn] == bbn)
1346 /* This loop iterates over all basic blocks that
1347 are supposed to be in the region with head BBN,
1348 and wraps them into that region (or into a single
1349 block region). */
1350 {
1351 gcc_assert (degree[succn] == 0);
1352
1353 degree[succn] = -1;
1354 rgn_bb_table[idx] = succn;
1355 BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
1356 CONTAINING_RGN (succn) = nr_regions;
1357
1358 if (large)
1359 /* Wrap SUCCN into single block region. */
1360 {
1361 RGN_BLOCKS (nr_regions) = idx;
1362 RGN_NR_BLOCKS (nr_regions) = 1;
1363 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1364 RGN_HAS_REAL_EBB (nr_regions) = 0;
1365 nr_regions++;
1366 }
1367
1368 idx++;
1369
1370 FOR_EACH_EDGE (e, ei,
1371 BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
1372 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1373 degree[e->dest->index]--;
1374 }
1375 }
1376
1377 if (!large)
1378 {
1379 RGN_NR_BLOCKS (nr_regions) = num_bbs;
1380 nr_regions++;
1381 }
1382 }
1383 }
1384
1385 if (sched_verbose >= 6)
1386 {
1387 int *s2, s2_sz;
1388
1389 /* Get the new statistics and print the comparison with the
1390 one before calling this function. */
1391 s2_sz = gather_region_statistics (&s2);
1392 print_region_statistics (s1, s1_sz, s2, s2_sz);
1393 free (s1);
1394 free (s2);
1395 }
1396 }
1397
1398 free (order);
1399 free (max_hdr);
1400
1401 *idxp = idx;
1402 }
1403
1404 /* Functions for regions scheduling information. */
1405
1406 /* Compute dominators, probability, and potential-split-edges of bb.
1407 Assume that these values were already computed for bb's predecessors. */
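/* A small worked example of the probability computation below (numbers are
   assumed for illustration): with REG_BR_PROB_BASE == 10000, if bb has two
   predecessors p1 and p2 with prob[p1] == 10000 and prob[p2] == 6000, and
   the edges p1->bb and p2->bb have probabilities 4000 and 10000, then
   prob[bb] = 10000*4000/10000 + 6000*10000/10000 = 4000 + 6000 = 10000,
   i.e. bb is reached on every path from the region entry.  */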
1408
1409 static void
1410 compute_dom_prob_ps (int bb)
1411 {
1412 edge_iterator in_ei;
1413 edge in_edge;
1414
1415 /* We shouldn't have any real ebbs yet. */
1416 gcc_assert (ebb_head [bb] == bb + current_blocks);
1417
1418 if (IS_RGN_ENTRY (bb))
1419 {
1420 bitmap_set_bit (dom[bb], 0);
1421 prob[bb] = REG_BR_PROB_BASE;
1422 return;
1423 }
1424
1425 prob[bb] = 0;
1426
1427 /* Initialize dom[bb] to '111..1'. */
1428 bitmap_ones (dom[bb]);
1429
1430 FOR_EACH_EDGE (in_edge, in_ei,
1431 BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)
1432 {
1433 int pred_bb;
1434 edge out_edge;
1435 edge_iterator out_ei;
1436
1437 if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1438 continue;
1439
1440 pred_bb = BLOCK_TO_BB (in_edge->src->index);
1441 bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
1442 bitmap_ior (ancestor_edges[bb],
1443 ancestor_edges[bb], ancestor_edges[pred_bb]);
1444
1445 bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
1446
1447 bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);
1448
1449 FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
1450 bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));
1451
1452 prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
1453 // The rounding divide in combine_probabilities can result in an extra
1454 // probability increment propagating along 50-50 edges. Eventually when
1455 // the edges re-merge, the accumulated probability can go slightly above
1456 // REG_BR_PROB_BASE.
1457 if (prob[bb] > REG_BR_PROB_BASE)
1458 prob[bb] = REG_BR_PROB_BASE;
1459 }
1460
1461 bitmap_set_bit (dom[bb], bb);
1462 bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1463
1464 if (sched_verbose >= 2)
1465 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1466 (100 * prob[bb]) / REG_BR_PROB_BASE);
1467 }
1468
1469 /* Functions for target info. */
1470
1471 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1472 Note that bb_trg dominates bb_src. */
1473
1474 static void
1475 split_edges (int bb_src, int bb_trg, edgelst *bl)
1476 {
1477 sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
1478 bitmap_copy (src, pot_split[bb_src]);
1479
1480 bitmap_and_compl (src, src, pot_split[bb_trg]);
1481 extract_edgelst (src, bl);
1482 sbitmap_free (src);
1483 }
1484
1485 /* Find the valid candidate-source-blocks for the target block TRG, compute
1486 their probability, and check if they are speculative or not.
1487 For speculative sources, compute their update-blocks and split-blocks. */
1488
1489 static void
1490 compute_trg_info (int trg)
1491 {
1492 candidate *sp;
1493 edgelst el = { NULL, 0 };
1494 int i, j, k, update_idx;
1495 basic_block block;
1496 sbitmap visited;
1497 edge_iterator ei;
1498 edge e;
1499
1500 candidate_table = XNEWVEC (candidate, current_nr_blocks);
1501
1502 bblst_last = 0;
1503 /* bblst_table holds split blocks and update blocks for each block after
1504 the current one in the region. split blocks and update blocks are
1505 the TO blocks of region edges, so there can be at most rgn_nr_edges
1506 of them. */
1507 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
1508 bblst_table = XNEWVEC (basic_block, bblst_size);
1509
1510 edgelst_last = 0;
1511 edgelst_table = XNEWVEC (edge, rgn_nr_edges);
1512
1513 /* Define some of the fields for the target bb as well. */
1514 sp = candidate_table + trg;
1515 sp->is_valid = 1;
1516 sp->is_speculative = 0;
1517 sp->src_prob = REG_BR_PROB_BASE;
1518
1519 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1520
1521 for (i = trg + 1; i < current_nr_blocks; i++)
1522 {
1523 sp = candidate_table + i;
1524
1525 sp->is_valid = IS_DOMINATED (i, trg);
1526 if (sp->is_valid)
1527 {
1528 int tf = prob[trg], cf = prob[i];
1529
1530 /* In CFGs with low probability edges TF can possibly be zero. */
1531 sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
1532 sp->is_valid = (sp->src_prob >= min_spec_prob);
1533 }
1534
1535 if (sp->is_valid)
1536 {
1537 split_edges (i, trg, &el);
1538 sp->is_speculative = (el.nr_members) ? 1 : 0;
1539 if (sp->is_speculative && !flag_schedule_speculative)
1540 sp->is_valid = 0;
1541 }
1542
1543 if (sp->is_valid)
1544 {
1545 /* Compute split blocks and store them in bblst_table.
1546 The TO block of every split edge is a split block. */
1547 sp->split_bbs.first_member = &bblst_table[bblst_last];
1548 sp->split_bbs.nr_members = el.nr_members;
1549 for (j = 0; j < el.nr_members; bblst_last++, j++)
1550 bblst_table[bblst_last] = el.first_member[j]->dest;
1551 sp->update_bbs.first_member = &bblst_table[bblst_last];
1552
1553 /* Compute update blocks and store them in bblst_table.
1554 For every split edge, look at the FROM block, and check
1555 all out edges. For each out edge that is not a split edge,
1556 add the TO block to the update block list. This list can end
1557 up with a lot of duplicates. We need to weed them out to avoid
1558 overrunning the end of the bblst_table. */
1559
1560 update_idx = 0;
1561 bitmap_clear (visited);
1562 for (j = 0; j < el.nr_members; j++)
1563 {
1564 block = el.first_member[j]->src;
1565 FOR_EACH_EDGE (e, ei, block->succs)
1566 {
1567 if (!bitmap_bit_p (visited, e->dest->index))
1568 {
1569 for (k = 0; k < el.nr_members; k++)
1570 if (e == el.first_member[k])
1571 break;
1572
1573 if (k >= el.nr_members)
1574 {
1575 bblst_table[bblst_last++] = e->dest;
1576 bitmap_set_bit (visited, e->dest->index);
1577 update_idx++;
1578 }
1579 }
1580 }
1581 }
1582 sp->update_bbs.nr_members = update_idx;
1583
1584 /* Make sure we didn't overrun the end of bblst_table. */
1585 gcc_assert (bblst_last <= bblst_size);
1586 }
1587 else
1588 {
1589 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1590
1591 sp->is_speculative = 0;
1592 sp->src_prob = 0;
1593 }
1594 }
1595
1596 sbitmap_free (visited);
1597 }
1598
1599 /* Free the computed target info. */
1600 static void
1601 free_trg_info (void)
1602 {
1603 free (candidate_table);
1604 free (bblst_table);
1605 free (edgelst_table);
1606 }
1607
1608 /* Print candidates info, for debugging purposes. Callable from debugger. */
1609
1610 DEBUG_FUNCTION void
1611 debug_candidate (int i)
1612 {
1613 if (!candidate_table[i].is_valid)
1614 return;
1615
1616 if (candidate_table[i].is_speculative)
1617 {
1618 int j;
1619 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1620
1621 fprintf (sched_dump, "split path: ");
1622 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1623 {
1624 int b = candidate_table[i].split_bbs.first_member[j]->index;
1625
1626 fprintf (sched_dump, " %d ", b);
1627 }
1628 fprintf (sched_dump, "\n");
1629
1630 fprintf (sched_dump, "update path: ");
1631 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1632 {
1633 int b = candidate_table[i].update_bbs.first_member[j]->index;
1634
1635 fprintf (sched_dump, " %d ", b);
1636 }
1637 fprintf (sched_dump, "\n");
1638 }
1639 else
1640 {
1641 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1642 }
1643 }
1644
1645 /* Print candidates info, for debugging purposes. Callable from debugger. */
1646
1647 DEBUG_FUNCTION void
1648 debug_candidates (int trg)
1649 {
1650 int i;
1651
1652 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1653 BB_TO_BLOCK (trg), trg);
1654 for (i = trg + 1; i < current_nr_blocks; i++)
1655 debug_candidate (i);
1656 }
1657
1658 /* Functions for speculative scheduling. */
1659
1660 static bitmap_head not_in_df;
1661
1662 /* Return 0 if x is a set of a register live at the beginning of one
1663 of the split-blocks of src, otherwise return 1. */
1664
1665 static int
1666 check_live_1 (int src, rtx x)
1667 {
1668 int i;
1669 int regno;
1670 rtx reg = SET_DEST (x);
1671
1672 if (reg == 0)
1673 return 1;
1674
1675 while (GET_CODE (reg) == SUBREG
1676 || GET_CODE (reg) == ZERO_EXTRACT
1677 || GET_CODE (reg) == STRICT_LOW_PART)
1678 reg = XEXP (reg, 0);
1679
1680 if (GET_CODE (reg) == PARALLEL)
1681 {
1682 int i;
1683
1684 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1685 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1686 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1687 return 1;
1688
1689 return 0;
1690 }
1691
1692 if (!REG_P (reg))
1693 return 1;
1694
1695 regno = REGNO (reg);
1696
1697 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1698 {
1699 /* Global registers are assumed live. */
1700 return 0;
1701 }
1702 else
1703 {
1704 if (regno < FIRST_PSEUDO_REGISTER)
1705 {
1706 /* Check for hard registers. */
1707 int j = hard_regno_nregs[regno][GET_MODE (reg)];
1708 while (--j >= 0)
1709 {
1710 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1711 {
1712 basic_block b = candidate_table[src].split_bbs.first_member[i];
1713 int t = bitmap_bit_p (&not_in_df, b->index);
1714
1715 /* We can have split blocks that were recently generated.
1716 Such blocks are always outside the current region. */
1717 gcc_assert (!t || (CONTAINING_RGN (b->index)
1718 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1719
1720 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
1721 return 0;
1722 }
1723 }
1724 }
1725 else
1726 {
1727 /* Check for pseudo registers. */
1728 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1729 {
1730 basic_block b = candidate_table[src].split_bbs.first_member[i];
1731 int t = bitmap_bit_p (&not_in_df, b->index);
1732
1733 gcc_assert (!t || (CONTAINING_RGN (b->index)
1734 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1735
1736 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
1737 return 0;
1738 }
1739 }
1740 }
1741
1742 return 1;
1743 }
1744
1745 /* If x is a set of a register R, mark that R is alive in the beginning
1746 of every update-block of src. */
1747
1748 static void
1749 update_live_1 (int src, rtx x)
1750 {
1751 int i;
1752 int regno;
1753 rtx reg = SET_DEST (x);
1754
1755 if (reg == 0)
1756 return;
1757
1758 while (GET_CODE (reg) == SUBREG
1759 || GET_CODE (reg) == ZERO_EXTRACT
1760 || GET_CODE (reg) == STRICT_LOW_PART)
1761 reg = XEXP (reg, 0);
1762
1763 if (GET_CODE (reg) == PARALLEL)
1764 {
1765 int i;
1766
1767 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1768 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1769 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1770
1771 return;
1772 }
1773
1774 if (!REG_P (reg))
1775 return;
1776
1777 /* Global registers are always live, so the code below does not apply
1778 to them. */
1779
1780 regno = REGNO (reg);
1781
1782 if (! HARD_REGISTER_NUM_P (regno)
1783 || !global_regs[regno])
1784 {
1785 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1786 {
1787 basic_block b = candidate_table[src].update_bbs.first_member[i];
1788
1789 if (HARD_REGISTER_NUM_P (regno))
1790 bitmap_set_range (df_get_live_in (b), regno,
1791 hard_regno_nregs[regno][GET_MODE (reg)]);
1792 else
1793 bitmap_set_bit (df_get_live_in (b), regno);
1794 }
1795 }
1796 }
1797
1798 /* Return 1 if insn can be speculatively moved from block src to trg,
1799 otherwise return 0. Called before the first insertion of insn into the
1800 ready list or before scheduling. */
1801
1802 static int
1803 check_live (rtx insn, int src)
1804 {
1805 /* Find the registers set by instruction. */
1806 if (GET_CODE (PATTERN (insn)) == SET
1807 || GET_CODE (PATTERN (insn)) == CLOBBER)
1808 return check_live_1 (src, PATTERN (insn));
1809 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1810 {
1811 int j;
1812 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1813 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1814 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1815 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1816 return 0;
1817
1818 return 1;
1819 }
1820
1821 return 1;
1822 }
1823
1824 /* Update the live registers info after insn was moved speculatively from
1825 block src to trg. */
1826
1827 static void
1828 update_live (rtx insn, int src)
1829 {
1830 /* Find the registers set by instruction. */
1831 if (GET_CODE (PATTERN (insn)) == SET
1832 || GET_CODE (PATTERN (insn)) == CLOBBER)
1833 update_live_1 (src, PATTERN (insn));
1834 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1835 {
1836 int j;
1837 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1838 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1839 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1840 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1841 }
1842 }
1843
1844 /* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
1845 #define IS_REACHABLE(bb_from, bb_to) \
1846 (bb_from == bb_to \
1847 || IS_RGN_ENTRY (bb_from) \
1848 || (bitmap_bit_p (ancestor_edges[bb_to], \
1849 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \
1850 BB_TO_BLOCK (bb_from)))))))
1851
1852 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1853
1854 static void
1855 set_spec_fed (rtx load_insn)
1856 {
1857 sd_iterator_def sd_it;
1858 dep_t dep;
1859
1860 FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
1861 if (DEP_TYPE (dep) == REG_DEP_TRUE)
1862 FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
1863 }
1864
1865 /* On the path from the insn to load_insn_bb, find a conditional
1866 branch depending on insn, that guards the speculative load. */
1867
1868 static int
1869 find_conditional_protection (rtx insn, int load_insn_bb)
1870 {
1871 sd_iterator_def sd_it;
1872 dep_t dep;
1873
1874 /* Iterate through DEF-USE forward dependences. */
1875 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1876 {
1877 rtx next = DEP_CON (dep);
1878
1879 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1880 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1881 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1882 && load_insn_bb != INSN_BB (next)
1883 && DEP_TYPE (dep) == REG_DEP_TRUE
1884 && (JUMP_P (next)
1885 || find_conditional_protection (next, load_insn_bb)))
1886 return 1;
1887 }
1888 return 0;
1889 } /* find_conditional_protection */
1890
1891 /* Returns 1 if the same insn1 that participates in the computation
1892 of load_insn's address is feeding a conditional branch that is
1893 guarding on load_insn. This is true if we find two DEF-USE
1894 chains:
1895 insn1 -> ... -> conditional-branch
1896 insn1 -> ... -> load_insn,
1897 and if a flow path exists:
1898 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1899 and if insn1 is on the path
1900 region-entry -> ... -> bb_trg -> ... load_insn.
1901
1902 Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
1903 Locate the branch by following INSN_FORW_DEPS from insn1. */
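/* A hypothetical instance of the pattern above (register names assumed):

     insn1:   r1 = r2 + 4            ; feeds both chains
     branch:  if (r1 == 0) goto L    ; conditional branch depending on insn1
     load:    r3 = [r1]              ; load_insn, address also depends on insn1

   Here the load is "conditionally protected" by a branch on its own address,
   which is why is_prisky treats such a load as risky to move speculatively.  */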
1904
1905 static int
1906 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1907 {
1908 sd_iterator_def sd_it;
1909 dep_t dep;
1910
1911 FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
1912 {
1913 rtx insn1 = DEP_PRO (dep);
1914
1915 /* Must be a DEF-USE dependence upon non-branch. */
1916 if (DEP_TYPE (dep) != REG_DEP_TRUE
1917 || JUMP_P (insn1))
1918 continue;
1919
1920 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1921 if (INSN_BB (insn1) == bb_src
1922 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1923 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1924 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1925 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1926 continue;
1927
1928 /* Now search for the conditional-branch. */
1929 if (find_conditional_protection (insn1, bb_src))
1930 return 1;
1931
1932 /* Recursive step: search another insn1, "above" current insn1. */
1933 return is_conditionally_protected (insn1, bb_src, bb_trg);
1934 }
1935
1936 /* The chain does not exist. */
1937 return 0;
1938 } /* is_conditionally_protected */
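/* Schematically, the pattern detected above looks like:

     insn1:   r1 = <address computation>
     branch:  if (r1 == 0) jump <around the load>   ; fed by insn1
       ...
     load:    r2 = [r1]                             ; address fed by insn1

   The branch tests the very value that forms the load's address, i.e.
   the load is guarded by a compare on its own address.  Hoisting such
   a load above the guarding branch could trap, so it is classified as
   PRISKY (see is_prisky below) rather than freely speculated.  */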
1939
1940 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1941 load_insn can move speculatively from bb_src to bb_trg. All the
1942 following must hold:
1943
1944 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1945 (2) load_insn and load2 have a def-use dependence upon
1946 the same insn 'insn1'.
1947 (3) either load2 is in bb_trg, or:
1948 - there's only one split-block, and
1949 - load2 is on the escape path.
1950
1951 From all these we can conclude that the two loads access memory
1952 addresses that differ at most by a constant, and hence if moving
1953 load_insn would cause an exception, it would have been caused by
1954 load2 anyhow. */
1955
1956 static int
1957 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1958 {
1959 sd_iterator_def back_sd_it;
1960 dep_t back_dep;
1961 candidate *candp = candidate_table + bb_src;
1962
1963 if (candp->split_bbs.nr_members != 1)
1964 /* Must have exactly one escape block. */
1965 return 0;
1966
1967 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
1968 {
1969 rtx insn1 = DEP_PRO (back_dep);
1970
1971 if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
1972 /* Found a DEF-USE dependence (insn1, load_insn). */
1973 {
1974 sd_iterator_def fore_sd_it;
1975 dep_t fore_dep;
1976
1977 FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
1978 {
1979 rtx insn2 = DEP_CON (fore_dep);
1980
1981 if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
1982 {
1983 /* Found a DEF-USE dependence (insn1, insn2). */
1984 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1985 /* insn2 not guaranteed to be a 1 base reg load. */
1986 continue;
1987
1988 if (INSN_BB (insn2) == bb_trg)
1989 /* insn2 is the similar load, in the target block. */
1990 return 1;
1991
1992 if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
1993 /* insn2 is a similar load, in a split-block. */
1994 return 1;
1995 }
1996 }
1997 }
1998 }
1999
2000 /* Couldn't find a similar load. */
2001 return 0;
2002 } /* is_pfree */
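/* Schematic example of the "similar load" argument: if insn1 sets r1 and

     load2:      r2 = [r1]          ; in bb_trg, or in the single split block
     load_insn:  r3 = [r1+8]        ; candidate for speculative motion

   are both true-dependent on insn1, their addresses differ at most by a
   constant, so any exception that moving load_insn could raise would
   already have been raised by load2; the motion is therefore treated as
   exception-free.  */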
2003
2004 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
2005 a load moved speculatively, or if load_insn is protected by
2006 a compare on load_insn's address). */
2007
2008 static int
2009 is_prisky (rtx load_insn, int bb_src, int bb_trg)
2010 {
2011 if (FED_BY_SPEC_LOAD (load_insn))
2012 return 1;
2013
2014 if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
2015 /* Dependence may 'hide' out of the region. */
2016 return 1;
2017
2018 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2019 return 1;
2020
2021 return 0;
2022 }
2023
2024 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2025 Return 1 if insn is exception-free (and the motion is valid)
2026 and 0 otherwise. */
2027
2028 static int
2029 is_exception_free (rtx insn, int bb_src, int bb_trg)
2030 {
2031 int insn_class = haifa_classify_insn (insn);
2032
2033 /* Handle non-load insns. */
2034 switch (insn_class)
2035 {
2036 case TRAP_FREE:
2037 return 1;
2038 case TRAP_RISKY:
2039 return 0;
2040 default:;
2041 }
2042
2043 /* Handle loads. */
2044 if (!flag_schedule_speculative_load)
2045 return 0;
2046 IS_LOAD_INSN (insn) = 1;
2047 switch (insn_class)
2048 {
2049 case IFREE:
2050 return 1;
2051 case IRISKY:
2052 return 0;
2053 case PFREE_CANDIDATE:
2054 if (is_pfree (insn, bb_src, bb_trg))
2055 return 1;
2056 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
2057 case PRISKY_CANDIDATE:
2058 if (!flag_schedule_speculative_load_dangerous
2059 || is_prisky (insn, bb_src, bb_trg))
2060 return 0;
2061 break;
2062 default:;
2063 }
2064
2065 return flag_schedule_speculative_load_dangerous;
2066 }
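/* Summary of the classification used above:

     TRAP_FREE         - always movable.
     TRAP_RISKY        - never movable.
     IFREE             - load, movable (given flag_schedule_speculative_load).
     IRISKY            - load, never movable.
     PFREE_CANDIDATE   - movable if a similar load is found (is_pfree),
                         otherwise treated like a PRISKY candidate.
     PRISKY_CANDIDATE  - movable only when
                         flag_schedule_speculative_load_dangerous is set
                         and is_prisky does not object.

   No load is moved speculatively at all unless
   flag_schedule_speculative_load is set.  */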
2067 \f
2068 /* The number of insns from the current block scheduled so far. */
2069 static int sched_target_n_insns;
2070 /* The number of insns from the current block to be scheduled in total. */
2071 static int target_n_insns;
2072 /* The number of insns from the entire region scheduled so far. */
2073 static int sched_n_insns;
2074
2075 /* Implementations of the sched_info functions for region scheduling. */
2076 static void init_ready_list (void);
2077 static int can_schedule_ready_p (rtx);
2078 static void begin_schedule_ready (rtx);
2079 static ds_t new_ready (rtx, ds_t);
2080 static int schedule_more_p (void);
2081 static const char *rgn_print_insn (const_rtx, int);
2082 static int rgn_rank (rtx, rtx);
2083 static void compute_jump_reg_dependencies (rtx, regset);
2084
2085 /* Functions for speculative scheduling. */
2086 static void rgn_add_remove_insn (rtx, int);
2087 static void rgn_add_block (basic_block, basic_block);
2088 static void rgn_fix_recovery_cfg (int, int, int);
2089 static basic_block advance_target_bb (basic_block, rtx);
2090
2091 /* Return nonzero if there are more insns that should be scheduled. */
2092
2093 static int
2094 schedule_more_p (void)
2095 {
2096 return sched_target_n_insns < target_n_insns;
2097 }
2098
2099 /* Add all insns that are initially ready to the ready list READY. Called
2100 once before scheduling a set of insns. */
2101
2102 static void
2103 init_ready_list (void)
2104 {
2105 rtx prev_head = current_sched_info->prev_head;
2106 rtx next_tail = current_sched_info->next_tail;
2107 int bb_src;
2108 rtx insn;
2109
2110 target_n_insns = 0;
2111 sched_target_n_insns = 0;
2112 sched_n_insns = 0;
2113
2114 /* Print debugging information. */
2115 if (sched_verbose >= 5)
2116 debug_rgn_dependencies (target_bb);
2117
2118 /* Prepare current target block info. */
2119 if (current_nr_blocks > 1)
2120 compute_trg_info (target_bb);
2121
2122 /* Initialize ready list with all 'ready' insns in target block.
2123 Count number of insns in the target block being scheduled. */
2124 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2125 {
2126 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2127 TODO_SPEC (insn) = HARD_DEP;
2128 try_ready (insn);
2129 target_n_insns++;
2130
2131 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2132 }
2133
2134 /* Add to ready list all 'ready' insns in valid source blocks.
2135 For speculative insns, check-live, exception-free, and
2136 issue-delay. */
2137 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2138 if (IS_VALID (bb_src))
2139 {
2140 rtx src_head;
2141 rtx src_next_tail;
2142 rtx tail, head;
2143
2144 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2145 &head, &tail);
2146 src_next_tail = NEXT_INSN (tail);
2147 src_head = head;
2148
2149 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2150 if (INSN_P (insn))
2151 {
2152 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2153 TODO_SPEC (insn) = HARD_DEP;
2154 try_ready (insn);
2155 }
2156 }
2157 }
2158
2159 /* Called after taking INSN from the ready list. Returns nonzero if this
2160 insn can be scheduled, zero if we should silently discard it. */
2161
2162 static int
2163 can_schedule_ready_p (rtx insn)
2164 {
2165 /* An interblock motion? */
2166 if (INSN_BB (insn) != target_bb
2167 && IS_SPECULATIVE_INSN (insn)
2168 && !check_live (insn, INSN_BB (insn)))
2169 return 0;
2170 else
2171 return 1;
2172 }
2173
2174 /* Update counters and other information. Split from can_schedule_ready_p ()
2175 because when we schedule an insn speculatively, the insn passed to
2176 can_schedule_ready_p () differs from the one passed to
2177 begin_schedule_ready (). */
2178 static void
2179 begin_schedule_ready (rtx insn)
2180 {
2181 /* An interblock motion? */
2182 if (INSN_BB (insn) != target_bb)
2183 {
2184 if (IS_SPECULATIVE_INSN (insn))
2185 {
2186 gcc_assert (check_live (insn, INSN_BB (insn)));
2187
2188 update_live (insn, INSN_BB (insn));
2189
2190 /* For speculative load, mark insns fed by it. */
2191 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2192 set_spec_fed (insn);
2193
2194 nr_spec++;
2195 }
2196 nr_inter++;
2197 }
2198 else
2199 {
2200 /* In block motion. */
2201 sched_target_n_insns++;
2202 }
2203 sched_n_insns++;
2204 }
2205
2206 /* Called after INSN has all its hard dependencies resolved and the speculation
2207 of type TS is enough to overcome them all.
2208 Return nonzero if it should be moved to the ready list or the queue, or zero
2209 if we should silently discard it. */
2210 static ds_t
2211 new_ready (rtx next, ds_t ts)
2212 {
2213 if (INSN_BB (next) != target_bb)
2214 {
2215 int not_ex_free = 0;
2216
2217 /* For speculative insns, before inserting to ready/queue,
2218 check live, exception-free, and issue-delay. */
2219 if (!IS_VALID (INSN_BB (next))
2220 || CANT_MOVE (next)
2221 || (IS_SPECULATIVE_INSN (next)
2222 && ((recog_memoized (next) >= 0
2223 && min_insn_conflict_delay (curr_state, next, next)
2224 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2225 || IS_SPECULATION_CHECK_P (next)
2226 || !check_live (next, INSN_BB (next))
2227 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2228 target_bb)))))
2229 {
2230 if (not_ex_free
2231 /* We are here because is_exception_free () == false.
2232 But we possibly can handle that with control speculation. */
2233 && sched_deps_info->generate_spec_deps
2234 && spec_info->mask & BEGIN_CONTROL)
2235 {
2236 ds_t new_ds;
2237
2238 /* Add control speculation to NEXT's dependency type. */
2239 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2240
2241 /* Check if NEXT can be speculated with new dependency type. */
2242 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2243 /* Here we got new control-speculative instruction. */
2244 ts = new_ds;
2245 else
2246 /* NEXT isn't ready yet. */
2247 ts = DEP_POSTPONED;
2248 }
2249 else
2250 /* NEXT isn't ready yet. */
2251 ts = DEP_POSTPONED;
2252 }
2253 }
2254
2255 return ts;
2256 }
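/* In short: an insn from another block is postponed (DEP_POSTPONED)
   when its source block is not a valid candidate, when it is marked
   CANT_MOVE, or when it is speculative and has too large a conflict
   delay, is itself a speculation check, fails the liveness check, or
   is not exception-free.  The one exception is a failed
   exception-freedom test on a target that supports control speculation
   (BEGIN_CONTROL): then the insn may still be issued as a
   control-speculative one with the weakened dependency status.  */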
2257
2258 /* Return a string that contains the insn uid and optionally anything else
2259 necessary to identify this insn in an output. It's valid to use a
2260 static buffer for this. The ALIGNED parameter should cause the string
2261 to be formatted so that multiple output lines will line up nicely. */
2262
2263 static const char *
2264 rgn_print_insn (const_rtx insn, int aligned)
2265 {
2266 static char tmp[80];
2267
2268 if (aligned)
2269 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2270 else
2271 {
2272 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2273 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2274 else
2275 sprintf (tmp, "%d", INSN_UID (insn));
2276 }
2277 return tmp;
2278 }
2279
2280 /* Compare priority of two insns. Return a positive number if the second
2281 insn is to be preferred for scheduling, and a negative one if the first
2282 is to be preferred. Zero if they are equally good. */
2283
2284 static int
2285 rgn_rank (rtx insn1, rtx insn2)
2286 {
2287 /* Some comparisons only make sense in interblock scheduling. */
2288 if (INSN_BB (insn1) != INSN_BB (insn2))
2289 {
2290 int spec_val, prob_val;
2291
2292 /* Prefer an in-block motion over an interblock motion. */
2293 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2294 return 1;
2295 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2296 return -1;
2297
2298 /* Prefer a useful motion over a speculative one. */
2299 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2300 if (spec_val)
2301 return spec_val;
2302
2303 /* Prefer a more probable (speculative) insn. */
2304 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2305 if (prob_val)
2306 return prob_val;
2307 }
2308 return 0;
2309 }
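/* So for insns from different blocks the preference order is: in-block
   motion over interblock motion, non-speculative motion over
   speculative motion, and among speculative insns the one coming from
   the more probable source block.  Insns from the same block compare
   equal here; their relative order is left to the generic haifa
   scheduler heuristics.  */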
2310
2311 /* NEXT is an instruction that depends on INSN (a backward dependence);
2312 return nonzero if we should include this dependence in priority
2313 calculations. */
2314
2315 int
2316 contributes_to_priority (rtx next, rtx insn)
2317 {
2318 /* NEXT and INSN reside in one ebb. */
2319 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2320 }
2321
2322 /* INSN is a JUMP_INSN. Store the set of registers that must be
2323 considered as used by this jump in USED. */
2324
2325 static void
2326 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2327 regset used ATTRIBUTE_UNUSED)
2328 {
2329 /* Nothing to do here, since we postprocess jumps in
2330 add_branch_dependences. */
2331 }
2332
2333 /* This variable holds common_sched_info hooks and data relevant to
2334 the interblock scheduler. */
2335 static struct common_sched_info_def rgn_common_sched_info;
2336
2337
2338 /* This holds data for the dependence analysis relevant to
2339 the interblock scheduler. */
2340 static struct sched_deps_info_def rgn_sched_deps_info;
2341
2342 /* This holds constant data used for initializing the above structure
2343 for the Haifa scheduler. */
2344 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2345 {
2346 compute_jump_reg_dependencies,
2347 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2348 0, 0, 0
2349 };
2350
2351 /* Same as above, but for the selective scheduler. */
2352 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2353 {
2354 compute_jump_reg_dependencies,
2355 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2356 0, 0, 0
2357 };
2358
2359 /* Return true if scheduling INSN will finish the scheduling of the
2360 current block. */
2361 static bool
2362 rgn_insn_finishes_block_p (rtx insn)
2363 {
2364 if (INSN_BB (insn) == target_bb
2365 && sched_target_n_insns + 1 == target_n_insns)
2366 /* INSN is the last not-scheduled instruction in the current block. */
2367 return true;
2368
2369 return false;
2370 }
2371
2372 /* Used in schedule_insns to initialize current_sched_info for scheduling
2373 regions (or single basic blocks). */
2374
2375 static const struct haifa_sched_info rgn_const_sched_info =
2376 {
2377 init_ready_list,
2378 can_schedule_ready_p,
2379 schedule_more_p,
2380 new_ready,
2381 rgn_rank,
2382 rgn_print_insn,
2383 contributes_to_priority,
2384 rgn_insn_finishes_block_p,
2385
2386 NULL, NULL,
2387 NULL, NULL,
2388 0, 0,
2389
2390 rgn_add_remove_insn,
2391 begin_schedule_ready,
2392 NULL,
2393 advance_target_bb,
2394 NULL, NULL,
2395 SCHED_RGN
2396 };
2397
2398 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2399 for the interblock scheduler frontend. */
2400 static struct haifa_sched_info rgn_sched_info;
2401
2402 /* Return the maximum priority that was assigned to any insn. */
2403
2404 int
2405 get_rgn_sched_max_insns_priority (void)
2406 {
2407 return rgn_sched_info.sched_max_insns_priority;
2408 }
2409
2410 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2411
2412 static bool
2413 sets_likely_spilled (rtx pat)
2414 {
2415 bool ret = false;
2416 note_stores (pat, sets_likely_spilled_1, &ret);
2417 return ret;
2418 }
2419
2420 static void
2421 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2422 {
2423 bool *ret = (bool *) data;
2424
2425 if (GET_CODE (pat) == SET
2426 && REG_P (x)
2427 && HARD_REGISTER_P (x)
2428 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2429 *ret = true;
2430 }
2431
2432 /* A bitmap to note insns that participate in any dependency. Used in
2433 add_branch_dependences. */
2434 static sbitmap insn_referenced;
2435
2436 /* Add dependences so that branches are scheduled to run last in their
2437 block. */
2438 static void
2439 add_branch_dependences (rtx head, rtx tail)
2440 {
2441 rtx insn, last;
2442
2443 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2444 that can throw exceptions, force them to remain in order at the end of
2445 the block by adding dependencies and giving the last a high priority.
2446 There may be notes present, and prev_head may also be a note.
2447
2448 Branches must obviously remain at the end. Calls should remain at the
2449 end since moving them results in worse register allocation. Uses remain
2450 at the end to ensure proper register allocation.
2451
2452 cc0 setters remain at the end because they can't be moved away from
2453 their cc0 user.
2454
2455 Predecessors of SCHED_GROUP_P instructions at the end remain at the end.
2456
2457 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2458
2459 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2460 values) are not moved before reload because we can wind up with register
2461 allocation failures. */
2462
2463 while (tail != head && DEBUG_INSN_P (tail))
2464 tail = PREV_INSN (tail);
2465
2466 insn = tail;
2467 last = 0;
2468 while (CALL_P (insn)
2469 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
2470 || (NONJUMP_INSN_P (insn)
2471 && (GET_CODE (PATTERN (insn)) == USE
2472 || GET_CODE (PATTERN (insn)) == CLOBBER
2473 || can_throw_internal (insn)
2474 #ifdef HAVE_cc0
2475 || sets_cc0_p (PATTERN (insn))
2476 #endif
2477 || (!reload_completed
2478 && sets_likely_spilled (PATTERN (insn)))))
2479 || NOTE_P (insn)
2480 || (last != 0 && SCHED_GROUP_P (last)))
2481 {
2482 if (!NOTE_P (insn))
2483 {
2484 if (last != 0
2485 && sd_find_dep_between (insn, last, false) == NULL)
2486 {
2487 if (! sched_insns_conditions_mutex_p (last, insn))
2488 add_dependence (last, insn, REG_DEP_ANTI);
2489 bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2490 }
2491
2492 CANT_MOVE (insn) = 1;
2493
2494 last = insn;
2495 }
2496
2497 /* Don't overrun the bounds of the basic block. */
2498 if (insn == head)
2499 break;
2500
2501 do
2502 insn = PREV_INSN (insn);
2503 while (insn != head && DEBUG_INSN_P (insn));
2504 }
2505
2506 /* Make sure these insns are scheduled last in their block. */
2507 insn = last;
2508 if (insn != 0)
2509 while (insn != head)
2510 {
2511 insn = prev_nonnote_insn (insn);
2512
2513 if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2514 || DEBUG_INSN_P (insn))
2515 continue;
2516
2517 if (! sched_insns_conditions_mutex_p (last, insn))
2518 add_dependence (last, insn, REG_DEP_ANTI);
2519 }
2520
2521 if (!targetm.have_conditional_execution ())
2522 return;
2523
2524 /* Finally, if the block ends in a jump, and we are doing intra-block
2525 scheduling, make sure that the branch depends on any COND_EXEC insns
2526 inside the block to avoid moving the COND_EXECs past the branch insn.
2527
2528 We only have to do this after reload, because (1) before reload there
2529 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2530 scheduler after reload.
2531
2532 FIXME: We could in some cases move COND_EXEC insns past the branch if
2533 this scheduler would be a little smarter. Consider this code:
2534
2535 T = [addr]
2536 C ? addr += 4
2537 !C ? X += 12
2538 C ? T += 1
2539 C ? jump foo
2540
2541 On a target with a one cycle stall on a memory access the optimal
2542 sequence would be:
2543
2544 T = [addr]
2545 C ? addr += 4
2546 C ? T += 1
2547 C ? jump foo
2548 !C ? X += 12
2549
2550 We don't want to put the 'X += 12' before the branch because it just
2551 wastes a cycle of execution time when the branch is taken.
2552
2553 Note that in the example "!C" will always be true. That is another
2554 possible improvement for handling COND_EXECs in this scheduler: it
2555 could remove always-true predicates. */
2556
2557 if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
2558 return;
2559
2560 insn = tail;
2561 while (insn != head)
2562 {
2563 insn = PREV_INSN (insn);
2564
2565 /* Note that we want to add this dependency even when
2566 sched_insns_conditions_mutex_p returns true. The whole point
2567 is that we _want_ this dependency, even if these insns really
2568 are independent. */
2569 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2570 add_dependence (tail, insn, REG_DEP_ANTI);
2571 }
2572 }
2573
2574 /* Data structures for the computation of data dependences in a region. We
2575 keep one `deps' structure for every basic block. Before analyzing the
2576 data dependences for a bb, its variables are initialized as a function of
2577 the variables of its predecessors. When the analysis for a bb completes,
2578 we save the contents to the corresponding bb_deps[bb] variable. */
2579
2580 static struct deps_desc *bb_deps;
2581
2582 static void
2583 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2584 rtx *old_mems_p)
2585 {
2586 rtx new_insns = *old_insns_p;
2587 rtx new_mems = *old_mems_p;
2588
2589 while (copy_insns)
2590 {
2591 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2592 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2593 copy_insns = XEXP (copy_insns, 1);
2594 copy_mems = XEXP (copy_mems, 1);
2595 }
2596
2597 *old_insns_p = new_insns;
2598 *old_mems_p = new_mems;
2599 }
2600
2601 /* Join PRED_DEPS into SUCC_DEPS. */
2602 void
2603 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2604 {
2605 unsigned reg;
2606 reg_set_iterator rsi;
2607
2608 /* The reg_last lists are inherited by successor. */
2609 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2610 {
2611 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2612 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2613
2614 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2615 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2616 succ_rl->implicit_sets
2617 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2618 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2619 succ_rl->clobbers);
2620 succ_rl->uses_length += pred_rl->uses_length;
2621 succ_rl->clobbers_length += pred_rl->clobbers_length;
2622 }
2623 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2624
2625 /* Mem read/write lists are inherited by successor. */
2626 concat_insn_mem_list (pred_deps->pending_read_insns,
2627 pred_deps->pending_read_mems,
2628 &succ_deps->pending_read_insns,
2629 &succ_deps->pending_read_mems);
2630 concat_insn_mem_list (pred_deps->pending_write_insns,
2631 pred_deps->pending_write_mems,
2632 &succ_deps->pending_write_insns,
2633 &succ_deps->pending_write_mems);
2634
2635 succ_deps->pending_jump_insns
2636 = concat_INSN_LIST (pred_deps->pending_jump_insns,
2637 succ_deps->pending_jump_insns);
2638 succ_deps->last_pending_memory_flush
2639 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2640 succ_deps->last_pending_memory_flush);
2641
2642 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2643 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2644 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2645
2646 /* last_function_call is inherited by successor. */
2647 succ_deps->last_function_call
2648 = concat_INSN_LIST (pred_deps->last_function_call,
2649 succ_deps->last_function_call);
2650
2651 /* last_function_call_may_noreturn is inherited by successor. */
2652 succ_deps->last_function_call_may_noreturn
2653 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2654 succ_deps->last_function_call_may_noreturn);
2655
2656 /* sched_before_next_call is inherited by successor. */
2657 succ_deps->sched_before_next_call
2658 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2659 succ_deps->sched_before_next_call);
2660 }
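/* For example, if a predecessor block left a setter of register REGNO
   pending in pred_deps->reg_last[REGNO].sets and the successor has not
   touched that register yet, then after deps_join any use of REGNO
   analyzed in the successor will find that setter in
   succ_deps->reg_last[REGNO].sets and record a true dependence across
   the block boundary.  The pending memory lists are concatenated the
   same way, which is what produces interblock mem-mem dependences.  */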
2661
2662 /* After computing the dependencies for block BB, propagate the dependencies
2663 found in PRED_DEPS to the successors of the block. */
2664 static void
2665 propagate_deps (int bb, struct deps_desc *pred_deps)
2666 {
2667 basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
2668 edge_iterator ei;
2669 edge e;
2670
2671 /* bb's structures are inherited by its successors. */
2672 FOR_EACH_EDGE (e, ei, block->succs)
2673 {
2674 /* Only bbs "below" bb, in the same region, are interesting. */
2675 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2676 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2677 || BLOCK_TO_BB (e->dest->index) <= bb)
2678 continue;
2679
2680 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2681 }
2682
2683 /* These lists should point to the right place, for correct
2684 freeing later. */
2685 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2686 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2687 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2688 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2689 bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2690
2691 /* Can't allow these to be freed twice. */
2692 pred_deps->pending_read_insns = 0;
2693 pred_deps->pending_read_mems = 0;
2694 pred_deps->pending_write_insns = 0;
2695 pred_deps->pending_write_mems = 0;
2696 pred_deps->pending_jump_insns = 0;
2697 }
2698
2699 /* Compute dependences inside bb. In a multiple blocks region:
2700 (1) a bb is analyzed after its predecessors, and (2) the lists in
2701 effect at the end of bb (after analyzing for bb) are inherited by
2702 bb's successors.
2703
2704 Specifically for reg-reg data dependences, the block insns are
2705 scanned by sched_analyze () top-to-bottom. Three lists are
2706 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2707 reg_last[].implicit_sets for implicit hard register DEFs, and
2708 reg_last[].uses for register USEs.
2709
2710 When analysis is completed for bb, we update for its successors:
2711 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2712 ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2713 ; - USES[succ] = Union (USES [succ], DEFS [bb])
2714
2715 The mechanism for computing mem-mem data dependence is very
2716 similar, and the result is interblock dependences in the region. */
2717
2718 static void
2719 compute_block_dependences (int bb)
2720 {
2721 rtx head, tail;
2722 struct deps_desc tmp_deps;
2723
2724 tmp_deps = bb_deps[bb];
2725
2726 /* Do the analysis for this block. */
2727 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2728 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2729
2730 sched_analyze (&tmp_deps, head, tail);
2731
2732 /* Selective scheduling handles control dependencies by itself. */
2733 if (!sel_sched_p ())
2734 add_branch_dependences (head, tail);
2735
2736 if (current_nr_blocks > 1)
2737 propagate_deps (bb, &tmp_deps);
2738
2739 /* Free up the INSN_LISTs. */
2740 free_deps (&tmp_deps);
2741
2742 if (targetm.sched.dependencies_evaluation_hook)
2743 targetm.sched.dependencies_evaluation_hook (head, tail);
2744 }
2745
2746 /* Free dependencies of instructions inside BB. */
2747 static void
2748 free_block_dependencies (int bb)
2749 {
2750 rtx head;
2751 rtx tail;
2752
2753 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2754
2755 if (no_real_insns_p (head, tail))
2756 return;
2757
2758 sched_free_deps (head, tail, true);
2759 }
2760
2761 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2762 them to the unused_*_list variables, so that they can be reused. */
2763
2764 static void
2765 free_pending_lists (void)
2766 {
2767 int bb;
2768
2769 for (bb = 0; bb < current_nr_blocks; bb++)
2770 {
2771 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2772 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2773 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2774 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2775 free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2776 }
2777 }
2778 \f
2779 /* Print dependences for debugging starting from FROM_BB.
2780 Callable from debugger. */
2783 DEBUG_FUNCTION void
2784 debug_rgn_dependencies (int from_bb)
2785 {
2786 int bb;
2787
2788 fprintf (sched_dump,
2789 ";; --------------- forward dependences: ------------ \n");
2790
2791 for (bb = from_bb; bb < current_nr_blocks; bb++)
2792 {
2793 rtx head, tail;
2794
2795 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2796 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2797 BB_TO_BLOCK (bb), bb);
2798
2799 debug_dependencies (head, tail);
2800 }
2801 }
2802
2803 /* Print dependence information for the instructions between HEAD and TAIL.
2804 ??? This function would probably fit best in haifa-sched.c. */
2805 void debug_dependencies (rtx head, rtx tail)
2806 {
2807 rtx insn;
2808 rtx next_tail = NEXT_INSN (tail);
2809
2810 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2811 "insn", "code", "bb", "dep", "prio", "cost",
2812 "reservation");
2813 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2814 "----", "----", "--", "---", "----", "----",
2815 "-----------");
2816
2817 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2818 {
2819 if (! INSN_P (insn))
2820 {
2821 int n;
2822 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2823 if (NOTE_P (insn))
2824 {
2825 n = NOTE_KIND (insn);
2826 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2827 }
2828 else
2829 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2830 continue;
2831 }
2832
2833 fprintf (sched_dump,
2834 ";; %s%5d%6d%6d%6d%6d%6d ",
2835 (SCHED_GROUP_P (insn) ? "+" : " "),
2836 INSN_UID (insn),
2837 INSN_CODE (insn),
2838 BLOCK_NUM (insn),
2839 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2840 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2841 : INSN_PRIORITY (insn))
2842 : INSN_PRIORITY (insn)),
2843 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2844 : insn_cost (insn))
2845 : insn_cost (insn)));
2846
2847 if (recog_memoized (insn) < 0)
2848 fprintf (sched_dump, "nothing");
2849 else
2850 print_reservation (sched_dump, insn);
2851
2852 fprintf (sched_dump, "\t: ");
2853 {
2854 sd_iterator_def sd_it;
2855 dep_t dep;
2856
2857 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2858 fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2859 DEP_NONREG (dep) ? "n" : "",
2860 DEP_MULTIPLE (dep) ? "m" : "");
2861 }
2862 fprintf (sched_dump, "\n");
2863 }
2864
2865 fprintf (sched_dump, "\n");
2866 }
2867 \f
2868 /* Returns true if all the basic blocks of the current region are flagged
2869 BB_DISABLE_SCHEDULE, which means that region must not be scheduled. */
2870 bool
2871 sched_is_disabled_for_current_region_p (void)
2872 {
2873 int bb;
2874
2875 for (bb = 0; bb < current_nr_blocks; bb++)
2876 if (!(BASIC_BLOCK_FOR_FN (cfun,
2877 BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2878 return false;
2879
2880 return true;
2881 }
2882
2883 /* Free all region dependencies saved in INSN_BACK_DEPS and
2884 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2885 when scheduling, so this function is supposed to be called from
2886 the selective scheduling only. */
2887 void
2888 free_rgn_deps (void)
2889 {
2890 int bb;
2891
2892 for (bb = 0; bb < current_nr_blocks; bb++)
2893 {
2894 rtx head, tail;
2895
2896 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2897 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2898
2899 sched_free_deps (head, tail, false);
2900 }
2901 }
2902
2903 static int rgn_n_insns;
2904
2905 /* Compute insn priorities for the current region. */
2906 void
2907 compute_priorities (void)
2908 {
2909 int bb;
2910
2911 current_sched_info->sched_max_insns_priority = 0;
2912 for (bb = 0; bb < current_nr_blocks; bb++)
2913 {
2914 rtx head, tail;
2915
2916 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2917 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2918
2919 if (no_real_insns_p (head, tail))
2920 continue;
2921
2922 rgn_n_insns += set_priorities (head, tail);
2923 }
2924 current_sched_info->sched_max_insns_priority++;
2925 }
2926
2927 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
2928
2929 SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be
2930 zero for the first call to this function, to allocate the arrays for the
2931 first time.
2932
2933 This function is called once during initialization of the scheduler, and
2934 called again to resize the arrays if new basic blocks have been created,
2935 for example for speculation recovery code. */
2936
2937 static void
2938 realloc_bb_state_array (int saved_last_basic_block)
2939 {
2940 char *old_bb_state_array = bb_state_array;
2941 size_t lbb = (size_t) last_basic_block_for_fn (cfun);
2942 size_t slbb = (size_t) saved_last_basic_block;
2943
2944 /* Nothing to do if nothing changed since the last time this was called. */
2945 if (saved_last_basic_block == last_basic_block_for_fn (cfun))
2946 return;
2947
2948 /* The selective scheduler doesn't use the state arrays. */
2949 if (sel_sched_p ())
2950 {
2951 gcc_assert (bb_state_array == NULL && bb_state == NULL);
2952 return;
2953 }
2954
2955 gcc_checking_assert (saved_last_basic_block == 0
2956 || (bb_state_array != NULL && bb_state != NULL));
2957
2958 bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
2959 bb_state = XRESIZEVEC (state_t, bb_state, lbb);
2960
2961 /* If BB_STATE_ARRAY has moved, fixup all the state pointers array.
2962 Otherwise only fixup the newly allocated ones. For the state
2963 array itself, only initialize the new entries. */
2964 bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
2965 for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
2966 bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
2967 for (size_t i = slbb; i < lbb; i++)
2968 state_reset (bb_state[i]);
2969 }
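/* Each bb_state[i] is a pointer into bb_state_array, so if XRESIZEVEC
   moved the underlying buffer all of the pointers must be recomputed,
   not only those of the newly added blocks; that is what the
   bb_state_array_moved test above selects.  Only the new entries are
   reset, because the old ones may already hold a DFA state saved
   across a fall-through edge in schedule_region.  */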
2970
2971 /* Free the arrays of DFA states at the end of each basic block. */
2972
2973 static void
2974 free_bb_state_array (void)
2975 {
2976 free (bb_state_array);
2977 free (bb_state);
2978 bb_state_array = NULL;
2979 bb_state = NULL;
2980 }
2981
2982 /* Schedule a region. A region is either an inner loop, a loop-free
2983 subroutine, or a single basic block. Each bb in the region is
2984 scheduled after its flow predecessors. */
2985
2986 static void
2987 schedule_region (int rgn)
2988 {
2989 int bb;
2990 int sched_rgn_n_insns = 0;
2991
2992 rgn_n_insns = 0;
2993
2994 rgn_setup_region (rgn);
2995
2996 /* Don't schedule a region whose blocks are all marked
2997 BB_DISABLE_SCHEDULE. */
2998 if (sched_is_disabled_for_current_region_p ())
2999 return;
3000
3001 sched_rgn_compute_dependencies (rgn);
3002
3003 sched_rgn_local_init (rgn);
3004
3005 /* Set priorities. */
3006 compute_priorities ();
3007
3008 sched_extend_ready_list (rgn_n_insns);
3009
3010 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3011 {
3012 sched_init_region_reg_pressure_info ();
3013 for (bb = 0; bb < current_nr_blocks; bb++)
3014 {
3015 basic_block first_bb, last_bb;
3016 rtx head, tail;
3017
3018 first_bb = EBB_FIRST_BB (bb);
3019 last_bb = EBB_LAST_BB (bb);
3020
3021 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3022
3023 if (no_real_insns_p (head, tail))
3024 {
3025 gcc_assert (first_bb == last_bb);
3026 continue;
3027 }
3028 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3029 }
3030 }
3031
3032 /* Now we can schedule all blocks. */
3033 for (bb = 0; bb < current_nr_blocks; bb++)
3034 {
3035 basic_block first_bb, last_bb, curr_bb;
3036 rtx head, tail;
3037
3038 first_bb = EBB_FIRST_BB (bb);
3039 last_bb = EBB_LAST_BB (bb);
3040
3041 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3042
3043 if (no_real_insns_p (head, tail))
3044 {
3045 gcc_assert (first_bb == last_bb);
3046 continue;
3047 }
3048
3049 current_sched_info->prev_head = PREV_INSN (head);
3050 current_sched_info->next_tail = NEXT_INSN (tail);
3051
3052 remove_notes (head, tail);
3053
3054 unlink_bb_notes (first_bb, last_bb);
3055
3056 target_bb = bb;
3057
3058 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3059 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3060
3061 curr_bb = first_bb;
3062 if (dbg_cnt (sched_block))
3063 {
3064 edge f;
3065 int saved_last_basic_block = last_basic_block_for_fn (cfun);
3066
3067 schedule_block (&curr_bb, bb_state[first_bb->index]);
3068 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3069 sched_rgn_n_insns += sched_n_insns;
3070 realloc_bb_state_array (saved_last_basic_block);
3071 f = find_fallthru_edge (last_bb->succs);
3072 if (f && f->probability * 100 / REG_BR_PROB_BASE >=
3073 PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
3074 {
3075 memcpy (bb_state[f->dest->index], curr_state,
3076 dfa_state_size);
3077 if (sched_verbose >= 5)
3078 fprintf (sched_dump, "saving state for edge %d->%d\n",
3079 f->src->index, f->dest->index);
3080 }
3081 }
3082 else
3083 {
3084 sched_rgn_n_insns += rgn_n_insns;
3085 }
3086
3087 /* Clean up. */
3088 if (current_nr_blocks > 1)
3089 free_trg_info ();
3090 }
3091
3092 /* Sanity check: verify that all region insns were scheduled. */
3093 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3094
3095 sched_finish_ready_list ();
3096
3097 /* Done with this region. */
3098 sched_rgn_local_finish ();
3099
3100 /* Free dependencies. */
3101 for (bb = 0; bb < current_nr_blocks; ++bb)
3102 free_block_dependencies (bb);
3103
3104 gcc_assert (haifa_recovery_bb_ever_added_p
3105 || deps_pools_are_empty_p ());
3106 }
3107
3108 /* Initialize data structures for region scheduling. */
3109
3110 void
3111 sched_rgn_init (bool single_blocks_p)
3112 {
3113 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3114 / 100);
3115
3116 nr_inter = 0;
3117 nr_spec = 0;
3118
3119 extend_regions ();
3120
3121 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3122 CONTAINING_RGN (EXIT_BLOCK) = -1;
3123
3124 realloc_bb_state_array (0);
3125
3126 /* Compute regions for scheduling. */
3127 if (single_blocks_p
3128 || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
3129 || !flag_schedule_interblock
3130 || is_cfg_nonregular ())
3131 {
3132 find_single_block_region (sel_sched_p ());
3133 }
3134 else
3135 {
3136 /* Compute the dominators and post dominators. */
3137 if (!sel_sched_p ())
3138 calculate_dominance_info (CDI_DOMINATORS);
3139
3140 /* Find regions. */
3141 find_rgns ();
3142
3143 if (sched_verbose >= 3)
3144 debug_regions ();
3145
3146 /* For now. This will move as more and more of haifa is converted
3147 to using the cfg code. */
3148 if (!sel_sched_p ())
3149 free_dominance_info (CDI_DOMINATORS);
3150 }
3151
3152 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
3153
3154 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3155 RGN_NR_BLOCKS (nr_regions - 1));
3156 }
3157
3158 /* Free data structures for region scheduling. */
3159 void
3160 sched_rgn_finish (void)
3161 {
3162 free_bb_state_array ();
3163
3164 /* Reposition the prologue and epilogue notes in case we moved the
3165 prologue/epilogue insns. */
3166 if (reload_completed)
3167 reposition_prologue_and_epilogue_notes ();
3168
3169 if (sched_verbose)
3170 {
3171 if (reload_completed == 0
3172 && flag_schedule_interblock)
3173 {
3174 fprintf (sched_dump,
3175 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3176 nr_inter, nr_spec);
3177 }
3178 else
3179 gcc_assert (nr_inter <= 0);
3180 fprintf (sched_dump, "\n\n");
3181 }
3182
3183 nr_regions = 0;
3184
3185 free (rgn_table);
3186 rgn_table = NULL;
3187
3188 free (rgn_bb_table);
3189 rgn_bb_table = NULL;
3190
3191 free (block_to_bb);
3192 block_to_bb = NULL;
3193
3194 free (containing_rgn);
3195 containing_rgn = NULL;
3196
3197 free (ebb_head);
3198 ebb_head = NULL;
3199 }
3200
3201 /* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3202 point to the region RGN. */
3203 void
3204 rgn_setup_region (int rgn)
3205 {
3206 int bb;
3207
3208 /* Set variables for the current region. */
3209 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3210 current_blocks = RGN_BLOCKS (rgn);
3211
3212 /* EBB_HEAD is a region-scope structure. But we realloc it for
3213 each region to save time/memory/something else.
3214 See the comments in rgn_add_block for why we allocate +1 element. */
3215 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3216 for (bb = 0; bb <= current_nr_blocks; bb++)
3217 ebb_head[bb] = current_blocks + bb;
3218 }
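/* After this setup ebb_head[bb] is the index into rgn_bb_table of the
   first block of ebb BB and ebb_head[bb + 1] - 1 that of its last
   block, which is how EBB_FIRST_BB and EBB_LAST_BB locate the ebb
   boundaries; the extra element keeps ebb_head[current_nr_blocks]
   valid as a sentinel for the last ebb.  */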
3219
3220 /* Compute instruction dependencies in region RGN. */
3221 void
3222 sched_rgn_compute_dependencies (int rgn)
3223 {
3224 if (!RGN_DONT_CALC_DEPS (rgn))
3225 {
3226 int bb;
3227
3228 if (sel_sched_p ())
3229 sched_emulate_haifa_p = 1;
3230
3231 init_deps_global ();
3232
3233 /* Initializations for region data dependence analysis. */
3234 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3235 for (bb = 0; bb < current_nr_blocks; bb++)
3236 init_deps (bb_deps + bb, false);
3237
3238 /* Initialize bitmap used in add_branch_dependences. */
3239 insn_referenced = sbitmap_alloc (sched_max_luid);
3240 bitmap_clear (insn_referenced);
3241
3242 /* Compute backward dependencies. */
3243 for (bb = 0; bb < current_nr_blocks; bb++)
3244 compute_block_dependences (bb);
3245
3246 sbitmap_free (insn_referenced);
3247 free_pending_lists ();
3248 finish_deps_global ();
3249 free (bb_deps);
3250
3251 /* We don't want to recalculate this twice. */
3252 RGN_DONT_CALC_DEPS (rgn) = 1;
3253
3254 if (sel_sched_p ())
3255 sched_emulate_haifa_p = 0;
3256 }
3257 else
3258 /* (This is a recovery block. It is always a single block region.)
3259 OR (We use selective scheduling.) */
3260 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3261 }
3262
3263 /* Initialize the interblock data structures (probabilities, dominators,
3264 split and ancestor edges) needed to schedule region RGN. */
3265 void
3266 sched_rgn_local_init (int rgn)
3267 {
3268 int bb;
3269
3270 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3271 if (current_nr_blocks > 1)
3272 {
3273 basic_block block;
3274 edge e;
3275 edge_iterator ei;
3276
3277 prob = XNEWVEC (int, current_nr_blocks);
3278
3279 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3280 bitmap_vector_clear (dom, current_nr_blocks);
3281
3282 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3283 rgn_nr_edges = 0;
3284 FOR_EACH_BB (block)
3285 {
3286 if (CONTAINING_RGN (block->index) != rgn)
3287 continue;
3288 FOR_EACH_EDGE (e, ei, block->succs)
3289 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3290 }
3291
3292 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3293 rgn_nr_edges = 0;
3294 FOR_EACH_BB (block)
3295 {
3296 if (CONTAINING_RGN (block->index) != rgn)
3297 continue;
3298 FOR_EACH_EDGE (e, ei, block->succs)
3299 rgn_edges[rgn_nr_edges++] = e;
3300 }
3301
3302 /* Split edges. */
3303 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3304 bitmap_vector_clear (pot_split, current_nr_blocks);
3305 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3306 bitmap_vector_clear (ancestor_edges, current_nr_blocks);
3307
3308 /* Compute probabilities, dominators, split_edges. */
3309 for (bb = 0; bb < current_nr_blocks; bb++)
3310 compute_dom_prob_ps (bb);
3311
3312 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3313 /* We don't need them anymore. But we want to avoid duplication of
3314 aux fields in the newly created edges. */
3315 FOR_EACH_BB (block)
3316 {
3317 if (CONTAINING_RGN (block->index) != rgn)
3318 continue;
3319 FOR_EACH_EDGE (e, ei, block->succs)
3320 e->aux = NULL;
3321 }
3322 }
3323 }
3324
3325 /* Free data computed for the finished region. */
3326 void
3327 sched_rgn_local_free (void)
3328 {
3329 free (prob);
3330 sbitmap_vector_free (dom);
3331 sbitmap_vector_free (pot_split);
3332 sbitmap_vector_free (ancestor_edges);
3333 free (rgn_edges);
3334 }
3335
3336 /* Free the data computed for the finished region, if any was allocated. */
3337 void
3338 sched_rgn_local_finish (void)
3339 {
3340 if (current_nr_blocks > 1 && !sel_sched_p ())
3341 {
3342 sched_rgn_local_free ();
3343 }
3344 }
3345
3346 /* Setup scheduler infos. */
3347 void
3348 rgn_setup_common_sched_info (void)
3349 {
3350 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3351 sizeof (rgn_common_sched_info));
3352
3353 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3354 rgn_common_sched_info.add_block = rgn_add_block;
3355 rgn_common_sched_info.estimate_number_of_insns
3356 = rgn_estimate_number_of_insns;
3357 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3358
3359 common_sched_info = &rgn_common_sched_info;
3360 }
3361
3362 /* Setup all *_sched_info structures (for the Haifa frontend
3363 and for the dependence analysis) in the interblock scheduler. */
3364 void
3365 rgn_setup_sched_infos (void)
3366 {
3367 if (!sel_sched_p ())
3368 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3369 sizeof (rgn_sched_deps_info));
3370 else
3371 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3372 sizeof (rgn_sched_deps_info));
3373
3374 sched_deps_info = &rgn_sched_deps_info;
3375
3376 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3377 current_sched_info = &rgn_sched_info;
3378 }
3379
3380 /* The one entry point in this file. */
3381 void
3382 schedule_insns (void)
3383 {
3384 int rgn;
3385
3386 /* Taking care of this degenerate case makes the rest of
3387 this code simpler. */
3388 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
3389 return;
3390
3391 rgn_setup_common_sched_info ();
3392 rgn_setup_sched_infos ();
3393
3394 haifa_sched_init ();
3395 sched_rgn_init (reload_completed);
3396
3397 bitmap_initialize (&not_in_df, 0);
3398 bitmap_clear (&not_in_df);
3399
3400 /* Schedule every region in the subroutine. */
3401 for (rgn = 0; rgn < nr_regions; rgn++)
3402 if (dbg_cnt (sched_region))
3403 schedule_region (rgn);
3404
3405 /* Clean up. */
3406 sched_rgn_finish ();
3407 bitmap_clear (&not_in_df);
3408
3409 haifa_sched_finish ();
3410 }
3411
3412 /* INSN has been added to/removed from current region. */
3413 static void
3414 rgn_add_remove_insn (rtx insn, int remove_p)
3415 {
3416 if (!remove_p)
3417 rgn_n_insns++;
3418 else
3419 rgn_n_insns--;
3420
3421 if (INSN_BB (insn) == target_bb)
3422 {
3423 if (!remove_p)
3424 target_n_insns++;
3425 else
3426 target_n_insns--;
3427 }
3428 }
3429
3430 /* Extend internal data structures. */
3431 void
3432 extend_regions (void)
3433 {
3434 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
3435 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
3436 n_basic_blocks_for_fn (cfun));
3437 block_to_bb = XRESIZEVEC (int, block_to_bb,
3438 last_basic_block_for_fn (cfun));
3439 containing_rgn = XRESIZEVEC (int, containing_rgn,
3440 last_basic_block_for_fn (cfun));
3441 }
3442
3443 void
3444 rgn_make_new_region_out_of_new_block (basic_block bb)
3445 {
3446 int i;
3447
3448 i = RGN_BLOCKS (nr_regions);
3449 /* I - first free position in rgn_bb_table. */
3450
3451 rgn_bb_table[i] = bb->index;
3452 RGN_NR_BLOCKS (nr_regions) = 1;
3453 RGN_HAS_REAL_EBB (nr_regions) = 0;
3454 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3455 CONTAINING_RGN (bb->index) = nr_regions;
3456 BLOCK_TO_BB (bb->index) = 0;
3457
3458 nr_regions++;
3459
3460 RGN_BLOCKS (nr_regions) = i + 1;
3461 }
3462
3463 /* BB was added to ebb after AFTER. */
3464 static void
3465 rgn_add_block (basic_block bb, basic_block after)
3466 {
3467 extend_regions ();
3468 bitmap_set_bit (&not_in_df, bb->index);
3469
3470 if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
3471 {
3472 rgn_make_new_region_out_of_new_block (bb);
3473 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
3474 == EXIT_BLOCK_PTR_FOR_FN (cfun));
3475 }
3476 else
3477 {
3478 int i, pos;
3479
3480 /* We need to fix rgn_table, block_to_bb, containing_rgn
3481 and ebb_head. */
3482
3483 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3484
3485 /* We extend ebb_head to one more position to
3486 easily find the last position of the last ebb in
3487 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3488 is _always_ valid for access. */
3489
3490 i = BLOCK_TO_BB (after->index) + 1;
3491 pos = ebb_head[i] - 1;
3492 /* Now POS is the index of the last block in the region. */
3493
3494 /* Find index of basic block AFTER. */
3495 for (; rgn_bb_table[pos] != after->index; pos--)
3496 ;
3497
3498 pos++;
3499 gcc_assert (pos > ebb_head[i - 1]);
3500
3501 /* i - ebb right after "AFTER". */
3502 /* ebb_head[i] - VALID. */
3503
3504 /* Source position: ebb_head[i]
3505 Destination position: ebb_head[i] + 1
3506 Last position:
3507 RGN_BLOCKS (nr_regions) - 1
3508 Number of elements to copy: (last_position) - (source_position) + 1
3509 */
3510
3511 memmove (rgn_bb_table + pos + 1,
3512 rgn_bb_table + pos,
3513 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3514 * sizeof (*rgn_bb_table));
3515
3516 rgn_bb_table[pos] = bb->index;
3517
3518 for (; i <= current_nr_blocks; i++)
3519 ebb_head [i]++;
3520
3521 i = CONTAINING_RGN (after->index);
3522 CONTAINING_RGN (bb->index) = i;
3523
3524 RGN_HAS_REAL_EBB (i) = 1;
3525
3526 for (++i; i <= nr_regions; i++)
3527 RGN_BLOCKS (i)++;
3528 }
3529 }
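/* In the non-trivial branch above the new block is spliced into
   rgn_bb_table right after AFTER: every entry from that slot up to the
   last used position (RGN_BLOCKS (nr_regions) - 1) is shifted one slot
   up, the new block's index fills the hole, the ebb_head entries of
   the following ebbs in this region are bumped by one, and RGN_BLOCKS
   of every later region grows by one so that the per-region block
   ranges stay contiguous.  */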
3530
3531 /* Fix internal data after interblock movement of jump instruction.
3532 For parameter meaning please refer to
3533 sched-int.h: struct sched_info: fix_recovery_cfg. */
3534 static void
3535 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3536 {
3537 int old_pos, new_pos, i;
3538
3539 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3540
3541 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3542 rgn_bb_table[old_pos] != check_bb_nexti;
3543 old_pos--)
3544 ;
3545 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3546
3547 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3548 rgn_bb_table[new_pos] != bbi;
3549 new_pos--)
3550 ;
3551 new_pos++;
3552 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3553
3554 gcc_assert (new_pos < old_pos);
3555
3556 memmove (rgn_bb_table + new_pos + 1,
3557 rgn_bb_table + new_pos,
3558 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3559
3560 rgn_bb_table[new_pos] = check_bb_nexti;
3561
3562 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3563 ebb_head[i]++;
3564 }
3565
3566 /* Return next block in ebb chain. For parameter meaning please refer to
3567 sched-int.h: struct sched_info: advance_target_bb. */
3568 static basic_block
3569 advance_target_bb (basic_block bb, rtx insn)
3570 {
3571 if (insn)
3572 return 0;
3573
3574 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3575 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3576 return bb->next_bb;
3577 }
3578
3579 #endif
3580 \f
3581 static bool
3582 gate_handle_live_range_shrinkage (void)
3583 {
3584 #ifdef INSN_SCHEDULING
3585 return flag_live_range_shrinkage;
3586 #else
3587 return 0;
3588 #endif
3589 }
3590
3591 /* Run instruction scheduler. */
3592 static unsigned int
3593 rest_of_handle_live_range_shrinkage (void)
3594 {
3595 #ifdef INSN_SCHEDULING
3596 int saved;
3597
3598 initialize_live_range_shrinkage ();
3599 saved = flag_schedule_interblock;
3600 flag_schedule_interblock = false;
3601 schedule_insns ();
3602 flag_schedule_interblock = saved;
3603 finish_live_range_shrinkage ();
3604 #endif
3605 return 0;
3606 }
3607
3608 static bool
3609 gate_handle_sched (void)
3610 {
3611 #ifdef INSN_SCHEDULING
3612 return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
3613 #else
3614 return 0;
3615 #endif
3616 }
3617
3618 /* Run instruction scheduler. */
3619 static unsigned int
3620 rest_of_handle_sched (void)
3621 {
3622 #ifdef INSN_SCHEDULING
3623 if (flag_selective_scheduling
3624 && ! maybe_skip_selective_scheduling ())
3625 run_selective_scheduling ();
3626 else
3627 schedule_insns ();
3628 #endif
3629 return 0;
3630 }
3631
3632 static bool
3633 gate_handle_sched2 (void)
3634 {
3635 #ifdef INSN_SCHEDULING
3636 return optimize > 0 && flag_schedule_insns_after_reload
3637 && !targetm.delay_sched2 && dbg_cnt (sched2_func);
3638 #else
3639 return 0;
3640 #endif
3641 }
3642
3643 /* Run second scheduling pass after reload. */
3644 static unsigned int
3645 rest_of_handle_sched2 (void)
3646 {
3647 #ifdef INSN_SCHEDULING
3648 if (flag_selective_scheduling2
3649 && ! maybe_skip_selective_scheduling ())
3650 run_selective_scheduling ();
3651 else
3652 {
3653 /* Do control and data sched analysis again,
3654 and write some more of the results to dump file. */
3655 if (flag_sched2_use_superblocks)
3656 schedule_ebbs ();
3657 else
3658 schedule_insns ();
3659 }
3660 #endif
3661 return 0;
3662 }
3663
3664 namespace {
3665
3666 const pass_data pass_data_live_range_shrinkage =
3667 {
3668 RTL_PASS, /* type */
3669 "lr_shrinkage", /* name */
3670 OPTGROUP_NONE, /* optinfo_flags */
3671 true, /* has_gate */
3672 true, /* has_execute */
3673 TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
3674 0, /* properties_required */
3675 0, /* properties_provided */
3676 0, /* properties_destroyed */
3677 0, /* todo_flags_start */
3678 ( TODO_df_finish | TODO_verify_rtl_sharing
3679 | TODO_verify_flow ), /* todo_flags_finish */
3680 };
3681
3682 class pass_live_range_shrinkage : public rtl_opt_pass
3683 {
3684 public:
3685 pass_live_range_shrinkage(gcc::context *ctxt)
3686 : rtl_opt_pass(pass_data_live_range_shrinkage, ctxt)
3687 {}
3688
3689 /* opt_pass methods: */
3690 bool gate () { return gate_handle_live_range_shrinkage (); }
3691 unsigned int execute () { return rest_of_handle_live_range_shrinkage (); }
3692
3693 }; // class pass_live_range_shrinkage
3694
3695 } // anon namespace
3696
3697 rtl_opt_pass *
3698 make_pass_live_range_shrinkage (gcc::context *ctxt)
3699 {
3700 return new pass_live_range_shrinkage (ctxt);
3701 }
3702
3703 namespace {
3704
3705 const pass_data pass_data_sched =
3706 {
3707 RTL_PASS, /* type */
3708 "sched1", /* name */
3709 OPTGROUP_NONE, /* optinfo_flags */
3710 true, /* has_gate */
3711 true, /* has_execute */
3712 TV_SCHED, /* tv_id */
3713 0, /* properties_required */
3714 0, /* properties_provided */
3715 0, /* properties_destroyed */
3716 0, /* todo_flags_start */
3717 ( TODO_df_finish | TODO_verify_rtl_sharing
3718 | TODO_verify_flow ), /* todo_flags_finish */
3719 };
3720
3721 class pass_sched : public rtl_opt_pass
3722 {
3723 public:
3724 pass_sched (gcc::context *ctxt)
3725 : rtl_opt_pass (pass_data_sched, ctxt)
3726 {}
3727
3728 /* opt_pass methods: */
3729 bool gate () { return gate_handle_sched (); }
3730 unsigned int execute () { return rest_of_handle_sched (); }
3731
3732 }; // class pass_sched
3733
3734 } // anon namespace
3735
3736 rtl_opt_pass *
3737 make_pass_sched (gcc::context *ctxt)
3738 {
3739 return new pass_sched (ctxt);
3740 }
3741
3742 namespace {
3743
3744 const pass_data pass_data_sched2 =
3745 {
3746 RTL_PASS, /* type */
3747 "sched2", /* name */
3748 OPTGROUP_NONE, /* optinfo_flags */
3749 true, /* has_gate */
3750 true, /* has_execute */
3751 TV_SCHED2, /* tv_id */
3752 0, /* properties_required */
3753 0, /* properties_provided */
3754 0, /* properties_destroyed */
3755 0, /* todo_flags_start */
3756 ( TODO_df_finish | TODO_verify_rtl_sharing
3757 | TODO_verify_flow ), /* todo_flags_finish */
3758 };
3759
3760 class pass_sched2 : public rtl_opt_pass
3761 {
3762 public:
3763 pass_sched2 (gcc::context *ctxt)
3764 : rtl_opt_pass (pass_data_sched2, ctxt)
3765 {}
3766
3767 /* opt_pass methods: */
3768 bool gate () { return gate_handle_sched2 (); }
3769 unsigned int execute () { return rest_of_handle_sched2 (); }
3770
3771 }; // class pass_sched2
3772
3773 } // anon namespace
3774
3775 rtl_opt_pass *
3776 make_pass_sched2 (gcc::context *ctxt)
3777 {
3778 return new pass_sched2 (ctxt);
3779 }