Commit | Line | Data |
---|---|---|
7a31a7bd | 1 | /* Instruction scheduling pass. |
711789cc | 2 | Copyright (C) 1992-2013 Free Software Foundation, Inc. |
7a31a7bd | 3 | Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by, |
4 | and currently maintained by, Jim Wilson (wilson@cygnus.com) | |
5 | ||
f12b58b3 | 6 | This file is part of GCC. |
7a31a7bd | 7 | |
f12b58b3 | 8 | GCC is free software; you can redistribute it and/or modify it under |
9 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 10 | Software Foundation; either version 3, or (at your option) any later |
f12b58b3 | 11 | version. |
7a31a7bd | 12 | |
f12b58b3 | 13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
7a31a7bd | 15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
16 | for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 19 | along with GCC; see the file COPYING3. If not see |
20 | <http://www.gnu.org/licenses/>. */ | |
7a31a7bd | 21 | |
22 | /* This pass implements list scheduling within basic blocks. It is | |
23 | run twice: (1) after flow analysis, but before register allocation, | |
24 | and (2) after register allocation. | |
25 | ||
26 | The first run performs interblock scheduling, moving insns between | |
27 | different blocks in the same "region", and the second runs only | |
28 | basic block scheduling. | |
29 | ||
30 | Interblock motions performed are useful motions and speculative | |
31 | motions, including speculative loads. Motions requiring code | |
32 | duplication are not supported. The identification of motion type | |
33 | and the check for validity of speculative motions requires | |
34 | construction and analysis of the function's control flow graph. | |
35 | ||
36 | The main entry point for this pass is schedule_insns(), called for | |
37 | each function. The work of the scheduler is organized in three | |
38 | levels: (1) function level: insns are subject to splitting, | |
39 | control-flow-graph is constructed, regions are computed (after | |
40 | reload, each region is of one block), (2) region level: control | |
41 | flow graph attributes required for interblock scheduling are | |
42 | computed (dominators, reachability, etc.), data dependences and | |
43 | priorities are computed, and (3) block level: insns in the block | |
44 | are actually scheduled. */ | |
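/* As a rough sketch only (not literal call code), the three levels above
   map onto the functions declared later in this file like so:

     schedule_insns ()                                   [function level]
       regions built by find_single_block_region () or find_rgns ()
       for each region:
         schedule_region ()                              [region level]
           compute_dom_prob_ps (), compute_block_dependences ()
           for each bb in the region:                    [block level]
             the insns of the block are scheduled.  */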
45 | \f | |
46 | #include "config.h" | |
47 | #include "system.h" | |
805e22b2 | 48 | #include "coretypes.h" |
49 | #include "tm.h" | |
0b205f4c | 50 | #include "diagnostic-core.h" |
7a31a7bd | 51 | #include "rtl.h" |
52 | #include "tm_p.h" | |
53 | #include "hard-reg-set.h" | |
7a31a7bd | 54 | #include "regs.h" |
55 | #include "function.h" | |
56 | #include "flags.h" | |
57 | #include "insn-config.h" | |
58 | #include "insn-attr.h" | |
59 | #include "except.h" | |
7a31a7bd | 60 | #include "recog.h" |
4c50e1f4 | 61 | #include "params.h" |
7a31a7bd | 62 | #include "sched-int.h" |
e1ab7874 | 63 | #include "sel-sched.h" |
bea4bad2 | 64 | #include "target.h" |
77fce4cd | 65 | #include "tree-pass.h" |
3072d30e | 66 | #include "dbgcnt.h" |
7fb47f9f | 67 | |
cda0a5f5 | 68 | #ifdef INSN_SCHEDULING |
e1ab7874 | 69 | |
7a31a7bd | 70 | /* Some accessor macros for h_i_d members only used within this file. */ |
e1ab7874 | 71 | #define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load) |
72 | #define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn) |
7a31a7bd | 73 | |
7a31a7bd | 74 | /* nr_inter/spec counts interblock/speculative motion for the function. */ |
75 | static int nr_inter, nr_spec; | |
76 | ||
60b8c5b3 | 77 | static int is_cfg_nonregular (void); |
7a31a7bd | 78 | |
79 | /* Number of regions in the procedure. */ | |
e1ab7874 | 80 | int nr_regions = 0; |
7a31a7bd | 81 | |
82 | /* Table of region descriptions. */ | |
e1ab7874 | 83 | region *rgn_table = NULL; |
7a31a7bd | 84 | |
85 | /* Array of lists of regions' blocks. */ | |
e1ab7874 | 86 | int *rgn_bb_table = NULL; |
7a31a7bd | 87 | |
88 | /* Topological order of blocks in the region (if b2 is reachable from | |
89 | b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is | |
90 | always referred to by either block or b, while its topological | |
917bbcab | 91 | order name (in the region) is referred to by bb. */ |
e1ab7874 | 92 | int *block_to_bb = NULL; |
7a31a7bd | 93 | |
94 | /* The number of the region containing a block. */ | |
e1ab7874 | 95 | int *containing_rgn = NULL; |
96 | ||
97 | /* ebb_head [i] is the index in rgn_bb_table of the head basic block of the i'th ebb. |
98 | Currently we can get an ebb only through splitting of the currently |
99 | scheduled block, therefore we don't need an ebb_head array for every region; |
100 | it is sufficient to hold it for the current one only. */ |
101 | int *ebb_head = NULL; | |
7a31a7bd | 102 | |
6c0b81cb | 103 | /* The minimum probability of reaching a source block so that it will be |
104 | considered for speculative scheduling. */ | |
105 | static int min_spec_prob; | |
106 | ||
e1ab7874 | 107 | static void find_single_block_region (bool); |
aae97b21 | 108 | static void find_rgns (void); |
4c50e1f4 | 109 | static bool too_large (int, int *, int *); |
7a31a7bd | 110 | |
7a31a7bd | 111 | /* Blocks of the current region being scheduled. */ |
e1ab7874 | 112 | int current_nr_blocks; |
113 | int current_blocks; | |
7a31a7bd | 114 | |
e1ab7874 | 115 | /* A speculative motion requires checking live information on the path |
116 | from 'source' to 'target'. The split blocks are those to be checked. | |
117 | After a speculative motion, live information should be modified in | |
118 | the 'update' blocks. | |
6a1cdb4d | 119 | |
e1ab7874 | 120 | Lists of split and update blocks for each candidate of the current |
121 | target are in array bblst_table. */ | |
122 | static basic_block *bblst_table; | |
123 | static int bblst_size, bblst_last; | |
7a31a7bd | 124 | |
bbd0cfb1 | 125 | /* Arrays that hold the DFA state at the end of a basic block, to re-use |
126 | as the initial state at the start of successor blocks.  BB_STATE_ARRAY |
127 | holds the actual DFA state, and BB_STATE[I] is a pointer into |
128 | BB_STATE_ARRAY for basic block I.  FIXME: This should be a vec. */ |
129 | static char *bb_state_array = NULL; | |
130 | static state_t *bb_state = NULL; | |
0a15667c | 131 | |
7a31a7bd | 132 | /* Target info declarations. |
133 | ||
134 | The block currently being scheduled is referred to as the "target" block, | |
135 | while other blocks in the region from which insns can be moved to the | |
136 | target are called "source" blocks. The candidate structure holds info | |
137 | about such sources: are they valid? Speculative? Etc. */ | |
aae97b21 | 138 | typedef struct |
139 | { | |
140 | basic_block *first_member; | |
141 | int nr_members; | |
142 | } | |
143 | bblst; | |
144 | ||
7a31a7bd | 145 | typedef struct |
146 | { | |
147 | char is_valid; | |
148 | char is_speculative; | |
149 | int src_prob; | |
150 | bblst split_bbs; | |
151 | bblst update_bbs; | |
152 | } | |
153 | candidate; | |
154 | ||
155 | static candidate *candidate_table; | |
e1ab7874 | 156 | #define IS_VALID(src) (candidate_table[src].is_valid) |
157 | #define IS_SPECULATIVE(src) (candidate_table[src].is_speculative) | |
158 | #define IS_SPECULATIVE_INSN(INSN) \ | |
159 | (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN)))) | |
7a31a7bd | 160 | #define SRC_PROB(src) ( candidate_table[src].src_prob ) |
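/* Note that candidate_table is indexed by a block's topological number
   within the current region (BLOCK_TO_BB), not by its global basic block
   index; IS_SPECULATIVE_INSN above relies on that mapping.  */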
161 | ||
162 | /* The bb being currently scheduled. */ | |
e1ab7874 | 163 | int target_bb; |
7a31a7bd | 164 | |
165 | /* List of edges. */ | |
aae97b21 | 166 | typedef struct |
167 | { | |
168 | edge *first_member; | |
169 | int nr_members; | |
170 | } | |
171 | edgelst; | |
172 | ||
173 | static edge *edgelst_table; | |
174 | static int edgelst_last; | |
175 | ||
176 | static void extract_edgelst (sbitmap, edgelst *); | |
177 | ||
7a31a7bd | 178 | /* Target info functions. */ |
60b8c5b3 | 179 | static void split_edges (int, int, edgelst *); |
180 | static void compute_trg_info (int); | |
181 | void debug_candidate (int); | |
182 | void debug_candidates (int); | |
7a31a7bd | 183 | |
79cafa9e | 184 | /* Dominators array: dom[i] contains the sbitmap of dominators of |
7a31a7bd | 185 | bb i in the region. */ |
79cafa9e | 186 | static sbitmap *dom; |
7a31a7bd | 187 | |
188 | /* bb 0 is the only region entry. */ | |
189 | #define IS_RGN_ENTRY(bb) (!bb) | |
190 | ||
191 | /* Is bb_src dominated by bb_trg?  */ |
192 | #define IS_DOMINATED(bb_src, bb_trg) \ | |
08b7917c | 193 | ( bitmap_bit_p (dom[bb_src], bb_trg) ) |
7a31a7bd | 194 | |
6c0b81cb | 195 | /* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is |
196 | the probability of bb i relative to the region entry. */ | |
197 | static int *prob; | |
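/* For example, prob[i] == REG_BR_PROB_BASE / 2 means that execution
   starting at the region entry is expected to reach bb i about half of
   the time (see compute_dom_prob_ps below).  */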
7a31a7bd | 198 | |
199 | /* Bit-set of edges, where bit i stands for edge i. */ | |
79cafa9e | 200 | typedef sbitmap edgeset; |
7a31a7bd | 201 | |
202 | /* Number of edges in the region. */ | |
203 | static int rgn_nr_edges; | |
204 | ||
205 | /* Array of size rgn_nr_edges. */ | |
aae97b21 | 206 | static edge *rgn_edges; |
7a31a7bd | 207 | |
208 | /* Mapping from each edge in the graph to its number in the rgn. */ | |
aae97b21 | 209 | #define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux) |
210 | #define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr)) | |
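/* The edge number is stashed in the edge's aux field while a region is
   being scheduled, so an edgeset such as pot_split[bb] below is simply
   an sbitmap indexed by EDGE_TO_BIT (e).  */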
7a31a7bd | 211 | |
212 | /* The split edges of a source bb is different for each target | |
213 | bb. In order to compute this efficiently, the 'potential-split edges' | |
214 | are computed for each bb prior to scheduling a region. This is actually | |
215 | the split edges of each bb relative to the region entry. | |
216 | ||
217 | pot_split[bb] is the set of potential split edges of bb. */ | |
218 | static edgeset *pot_split; | |
219 | ||
220 | /* For every bb, a set of its ancestor edges. */ | |
221 | static edgeset *ancestor_edges; | |
222 | ||
7a31a7bd | 223 | #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN)))) |
7a31a7bd | 224 | |
7a31a7bd | 225 | /* Speculative scheduling functions. */ |
60b8c5b3 | 226 | static int check_live_1 (int, rtx); |
227 | static void update_live_1 (int, rtx); | |
60b8c5b3 | 228 | static int is_pfree (rtx, int, int); |
229 | static int find_conditional_protection (rtx, int); | |
230 | static int is_conditionally_protected (rtx, int, int); | |
231 | static int is_prisky (rtx, int, int); | |
232 | static int is_exception_free (rtx, int, int); | |
233 | ||
234 | static bool sets_likely_spilled (rtx); | |
81a410b1 | 235 | static void sets_likely_spilled_1 (rtx, const_rtx, void *); |
60b8c5b3 | 236 | static void add_branch_dependences (rtx, rtx); |
93f6b030 | 237 | static void compute_block_dependences (int); |
60b8c5b3 | 238 | |
60b8c5b3 | 239 | static void schedule_region (int); |
60b8c5b3 | 240 | static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *); |
68e419a1 | 241 | static void propagate_deps (int, struct deps_desc *); |
60b8c5b3 | 242 | static void free_pending_lists (void); |
7a31a7bd | 243 | |
244 | /* Functions for construction of the control flow graph. */ | |
245 | ||
246 | /* Return 1 if control flow graph should not be constructed, 0 otherwise. | |
247 | ||
248 | We decide not to build the control flow graph if there is possibly more | |
aae97b21 | 249 | than one entry to the function, if computed branches exist, if we |
250 | have nonlocal gotos, or if we have an unreachable loop. */ | |
7a31a7bd | 251 | |
252 | static int | |
60b8c5b3 | 253 | is_cfg_nonregular (void) |
7a31a7bd | 254 | { |
4c26117a | 255 | basic_block b; |
7a31a7bd | 256 | rtx insn; |
7a31a7bd | 257 | |
258 | /* If we have a label that could be the target of a nonlocal goto, then | |
259 | the cfg is not well structured. */ | |
260 | if (nonlocal_goto_handler_labels) | |
261 | return 1; | |
262 | ||
263 | /* If we have any forced labels, then the cfg is not well structured. */ | |
264 | if (forced_labels) | |
265 | return 1; | |
266 | ||
7a31a7bd | 267 | /* If we have exception handlers, then we consider the cfg not well |
3072d30e | 268 | structured. ?!? We should be able to handle this now that we |
269 | compute an accurate cfg for EH. */ | |
8f8dcce4 | 270 | if (current_function_has_exception_handlers ()) |
7a31a7bd | 271 | return 1; |
272 | ||
19d2fe05 | 273 | /* If we have insns which refer to labels as non-jumped-to operands, |
274 | then we consider the cfg not well structured. */ | |
fc00614f | 275 | FOR_EACH_BB_FN (b, cfun) |
cd6dccd3 | 276 | FOR_BB_INSNS (b, insn) |
7a31a7bd | 277 | { |
a8d1dae0 | 278 | rtx note, next, set, dest; |
19d2fe05 | 279 | |
cd6dccd3 | 280 | /* If this function has a computed jump, then we consider the cfg |
281 | not well structured. */ | |
19d2fe05 | 282 | if (JUMP_P (insn) && computed_jump_p (insn)) |
cd6dccd3 | 283 | return 1; |
a8d1dae0 | 284 | |
285 | if (!INSN_P (insn)) | |
286 | continue; | |
287 | ||
288 | note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX); | |
289 | if (note == NULL_RTX) | |
290 | continue; | |
291 | ||
292 | /* For that label not to be seen as a referred-to label, this | |
293 | must be a single-set which is feeding a jump *only*. This | |
294 | could be a conditional jump with the label split off for | |
295 | machine-specific reasons or a casesi/tablejump. */ | |
296 | next = next_nonnote_insn (insn); | |
297 | if (next == NULL_RTX | |
298 | || !JUMP_P (next) | |
299 | || (JUMP_LABEL (next) != XEXP (note, 0) | |
300 | && find_reg_note (next, REG_LABEL_TARGET, | |
301 | XEXP (note, 0)) == NULL_RTX) | |
302 | || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next)) | |
303 | return 1; | |
304 | ||
305 | set = single_set (insn); | |
306 | if (set == NULL_RTX) | |
307 | return 1; | |
308 | ||
309 | dest = SET_DEST (set); | |
310 | if (!REG_P (dest) || !dead_or_set_p (next, dest)) | |
311 | return 1; | |
7a31a7bd | 312 | } |
313 | ||
7a31a7bd | 314 | /* Unreachable loops with more than one basic block are detected |
315 | during the DFS traversal in find_rgns. | |
316 | ||
317 | Unreachable loops with a single block are detected here. This | |
318 | test is redundant with the one in find_rgns, but it's much | |
aae97b21 | 319 | cheaper to go ahead and catch the trivial case here. */ |
fc00614f | 320 | FOR_EACH_BB_FN (b, cfun) |
7a31a7bd | 321 | { |
cd665a06 | 322 | if (EDGE_COUNT (b->preds) == 0 |
ea091dfd | 323 | || (single_pred_p (b) |
324 | && single_pred (b) == b)) | |
aae97b21 | 325 | return 1; |
7a31a7bd | 326 | } |
327 | ||
aae97b21 | 328 | /* All the tests passed. Consider the cfg well structured. */ |
329 | return 0; | |
7a31a7bd | 330 | } |
331 | ||
aae97b21 | 332 | /* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */ |
7a31a7bd | 333 | |
334 | static void | |
aae97b21 | 335 | extract_edgelst (sbitmap set, edgelst *el) |
7a31a7bd | 336 | { |
86c1585a | 337 | unsigned int i = 0; |
3e790786 | 338 | sbitmap_iterator sbi; |
7a31a7bd | 339 | |
aae97b21 | 340 | /* edgelst table space is reused in each call to extract_edgelst. */ |
341 | edgelst_last = 0; | |
7a31a7bd | 342 | |
aae97b21 | 343 | el->first_member = &edgelst_table[edgelst_last]; |
344 | el->nr_members = 0; | |
7a31a7bd | 345 | |
346 | /* Iterate over each set bit in the bitmap. */ |
0d211963 | 347 | EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi) |
3e790786 | 348 | { |
349 | edgelst_table[edgelst_last++] = rgn_edges[i]; | |
350 | el->nr_members++; | |
351 | } | |
7a31a7bd | 352 | } |
353 | ||
354 | /* Functions for the construction of regions. */ | |
355 | ||
356 | /* Print the regions, for debugging purposes. Callable from debugger. */ | |
357 | ||
4b987fac | 358 | DEBUG_FUNCTION void |
60b8c5b3 | 359 | debug_regions (void) |
7a31a7bd | 360 | { |
361 | int rgn, bb; | |
362 | ||
363 | fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n"); | |
364 | for (rgn = 0; rgn < nr_regions; rgn++) | |
365 | { | |
366 | fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn, | |
367 | rgn_table[rgn].rgn_nr_blocks); | |
368 | fprintf (sched_dump, ";;\tbb/block: "); | |
369 | ||
6a1cdb4d | 370 | /* We don't have ebb_head initialized yet, so we can't use |
371 | BB_TO_BLOCK (). */ | |
372 | current_blocks = RGN_BLOCKS (rgn); | |
7a31a7bd | 373 | |
6a1cdb4d | 374 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) |
375 | fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]); | |
7a31a7bd | 376 | |
377 | fprintf (sched_dump, "\n\n"); | |
378 | } | |
379 | } | |
380 | ||
e1ab7874 | 381 | /* Print the region's basic blocks. */ |
382 | ||
4b987fac | 383 | DEBUG_FUNCTION void |
e1ab7874 | 384 | debug_region (int rgn) |
385 | { | |
386 | int bb; | |
387 | ||
388 | fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn); | |
389 | fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn, | |
390 | rgn_table[rgn].rgn_nr_blocks); | |
391 | fprintf (stderr, ";;\tbb/block: "); | |
392 | ||
393 | /* We don't have ebb_head initialized yet, so we can't use | |
394 | BB_TO_BLOCK (). */ | |
395 | current_blocks = RGN_BLOCKS (rgn); | |
396 | ||
397 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) | |
398 | fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]); | |
399 | ||
400 | fprintf (stderr, "\n\n"); | |
401 | ||
402 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) | |
403 | { | |
f5a6b05f | 404 | dump_bb (stderr, |
405 | BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]), | |
bec2cf98 | 406 | 0, TDF_SLIM | TDF_BLOCKS); |
e1ab7874 | 407 | fprintf (stderr, "\n"); |
408 | } | |
409 | ||
410 | fprintf (stderr, "\n"); | |
411 | ||
412 | } | |
413 | ||
414 | /* True when the bb with index BB_INDEX is contained in region RGN. */ |
415 | static bool | |
416 | bb_in_region_p (int bb_index, int rgn) | |
417 | { | |
418 | int i; | |
419 | ||
420 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) | |
421 | if (rgn_bb_table[current_blocks + i] == bb_index) | |
422 | return true; | |
423 | ||
424 | return false; | |
425 | } | |
426 | ||
427 | /* Dump region RGN to file F using dot syntax. */ | |
428 | void | |
429 | dump_region_dot (FILE *f, int rgn) | |
430 | { | |
431 | int i; | |
432 | ||
433 | fprintf (f, "digraph Region_%d {\n", rgn); | |
434 | ||
435 | /* We don't have ebb_head initialized yet, so we can't use | |
436 | BB_TO_BLOCK (). */ | |
437 | current_blocks = RGN_BLOCKS (rgn); | |
438 | ||
439 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) | |
440 | { | |
441 | edge e; | |
442 | edge_iterator ei; | |
443 | int src_bb_num = rgn_bb_table[current_blocks + i]; | |
f5a6b05f | 444 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num); |
e1ab7874 | 445 | |
446 | FOR_EACH_EDGE (e, ei, bb->succs) | |
447 | if (bb_in_region_p (e->dest->index, rgn)) | |
448 | fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index); | |
449 | } | |
450 | fprintf (f, "}\n"); | |
451 | } | |
452 | ||
453 | /* The same, but first open a file specified by FNAME. */ | |
48e1416a | 454 | void |
e1ab7874 | 455 | dump_region_dot_file (const char *fname, int rgn) |
456 | { | |
457 | FILE *f = fopen (fname, "wt"); | |
458 | dump_region_dot (f, rgn); | |
459 | fclose (f); | |
460 | } | |
461 | ||
7a31a7bd | 462 | /* Build a single block region for each basic block in the function. |
463 | This allows for using the same code for interblock and basic block | |
464 | scheduling. */ | |
465 | ||
466 | static void | |
e1ab7874 | 467 | find_single_block_region (bool ebbs_p) |
7a31a7bd | 468 | { |
e1ab7874 | 469 | basic_block bb, ebb_start; |
470 | int i = 0; | |
4c5da238 | 471 | |
4c26117a | 472 | nr_regions = 0; |
473 | ||
e1ab7874 | 474 | if (ebbs_p) { |
475 | int probability_cutoff; | |
476 | if (profile_info && flag_branch_probabilities) | |
477 | probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); | |
478 | else | |
479 | probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY); | |
480 | probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; | |
481 | ||
fc00614f | 482 | FOR_EACH_BB_FN (ebb_start, cfun) |
e1ab7874 | 483 | { |
484 | RGN_NR_BLOCKS (nr_regions) = 0; | |
485 | RGN_BLOCKS (nr_regions) = i; | |
486 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
487 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
488 | ||
489 | for (bb = ebb_start; ; bb = bb->next_bb) | |
490 | { | |
491 | edge e; | |
e1ab7874 | 492 | |
493 | rgn_bb_table[i] = bb->index; | |
494 | RGN_NR_BLOCKS (nr_regions)++; | |
495 | CONTAINING_RGN (bb->index) = nr_regions; | |
496 | BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions); | |
497 | i++; | |
498 | ||
34154e27 | 499 | if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 500 | || LABEL_P (BB_HEAD (bb->next_bb))) |
501 | break; | |
48e1416a | 502 | |
7f58c05e | 503 | e = find_fallthru_edge (bb->succs); |
e1ab7874 | 504 | if (! e) |
505 | break; | |
506 | if (e->probability <= probability_cutoff) | |
507 | break; | |
508 | } | |
509 | ||
510 | ebb_start = bb; | |
511 | nr_regions++; | |
512 | } | |
513 | } | |
514 | else | |
fc00614f | 515 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 516 | { |
517 | rgn_bb_table[nr_regions] = bb->index; | |
518 | RGN_NR_BLOCKS (nr_regions) = 1; | |
519 | RGN_BLOCKS (nr_regions) = nr_regions; | |
520 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
521 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
522 | ||
523 | CONTAINING_RGN (bb->index) = nr_regions; | |
524 | BLOCK_TO_BB (bb->index) = 0; | |
525 | nr_regions++; | |
526 | } | |
527 | } | |
528 | ||
529 | /* Estimate the number of insns in BB. */ |
530 | static int | |
531 | rgn_estimate_number_of_insns (basic_block bb) | |
532 | { | |
9845d120 | 533 | int count; |
534 | ||
535 | count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb)); | |
536 | ||
537 | if (MAY_HAVE_DEBUG_INSNS) | |
538 | { | |
539 | rtx insn; | |
540 | ||
541 | FOR_BB_INSNS (bb, insn) | |
542 | if (DEBUG_INSN_P (insn)) | |
543 | count--; | |
544 | } | |
545 | ||
546 | return count; | |
7a31a7bd | 547 | } |
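/* Debug insns are subtracted from the estimate above, presumably so that
   the presence or absence of debug insns does not change which regions
   are considered too large for interblock scheduling.  */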
548 | ||
549 | /* Update number of blocks and the estimate for number of insns | |
4c50e1f4 | 550 | in the region. Return true if the region is "too large" for interblock |
551 | scheduling (compile time considerations). */ | |
7a31a7bd | 552 | |
4c50e1f4 | 553 | static bool |
60b8c5b3 | 554 | too_large (int block, int *num_bbs, int *num_insns) |
7a31a7bd | 555 | { |
556 | (*num_bbs)++; | |
e1ab7874 | 557 | (*num_insns) += (common_sched_info->estimate_number_of_insns |
f5a6b05f | 558 | (BASIC_BLOCK_FOR_FN (cfun, block))); |
4c50e1f4 | 559 | |
560 | return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS)) | |
561 | || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS))); | |
7a31a7bd | 562 | } |
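/* Callers grow a candidate region one block at a time; once too_large
   returns true they stop growing it: haifa_find_rgns abandons the
   candidate region, while extend_rgns wraps its blocks into single
   block regions (see below).  */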
563 | ||
564 | /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk] | |
565 | is still an inner loop.  Put in max_hdr[blk] the header of the innermost |
566 | loop containing blk. */ |
40734805 | 567 | #define UPDATE_LOOP_RELATIONS(blk, hdr) \ |
568 | { \ | |
569 | if (max_hdr[blk] == -1) \ | |
570 | max_hdr[blk] = hdr; \ | |
571 | else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \ | |
08b7917c | 572 | bitmap_clear_bit (inner, hdr); \ |
40734805 | 573 | else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \ |
574 | { \ | |
08b7917c | 575 | bitmap_clear_bit (inner,max_hdr[blk]); \ |
40734805 | 576 | max_hdr[blk] = hdr; \ |
577 | } \ | |
7a31a7bd | 578 | } |
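/* For example, if BLK is found to have back edges to two headers H1 and
   H2 with dfs_nr[H1] > dfs_nr[H2] (H1 was reached later in the DFS, so
   it is the more deeply nested header), then max_hdr[BLK] ends up as H1
   and H2 loses its "inner" bit, since the loop headed by H2 must enclose
   another loop.  */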
579 | ||
580 | /* Find regions for interblock scheduling. | |
581 | ||
582 | A region for scheduling can be: | |
583 | ||
584 | * A loop-free procedure, or | |
585 | ||
586 | * A reducible inner loop, or | |
587 | ||
588 | * A basic block not contained in any other region. | |
589 | ||
590 | ?!? In theory we could build other regions based on extended basic | |
591 | blocks or reverse extended basic blocks. Is it worth the trouble? | |
592 | ||
593 | Loop blocks that form a region are put into the region's block list | |
594 | in topological order. | |
595 | ||
596 | This procedure stores its results into the following global (ick) variables | |
597 | ||
598 | * rgn_nr | |
599 | * rgn_table | |
600 | * rgn_bb_table | |
601 | * block_to_bb | |
602 | * containing region | |
603 | ||
604 | We use dominator relationships to avoid making regions out of non-reducible | |
605 | loops. | |
606 | ||
607 | This procedure needs to be converted to work on pred/succ lists instead | |
608 | of edge tables. That would simplify it somewhat. */ | |
609 | ||
610 | static void | |
e1ab7874 | 611 | haifa_find_rgns (void) |
7a31a7bd | 612 | { |
aae97b21 | 613 | int *max_hdr, *dfs_nr, *degree; |
7a31a7bd | 614 | char no_loops = 1; |
615 | int node, child, loop_head, i, head, tail; | |
8deb7557 | 616 | int count = 0, sp, idx = 0; |
aae97b21 | 617 | edge_iterator current_edge; |
618 | edge_iterator *stack; | |
7a31a7bd | 619 | int num_bbs, num_insns, unreachable; |
620 | int too_large_failure; | |
4c26117a | 621 | basic_block bb; |
7a31a7bd | 622 | |
7a31a7bd | 623 | /* Note if a block is a natural loop header. */ |
624 | sbitmap header; | |
625 | ||
edc2a478 | 626 | /* Note if a block is a natural inner loop header. */ |
7a31a7bd | 627 | sbitmap inner; |
628 | ||
629 | /* Note if a block is in the block queue. */ | |
630 | sbitmap in_queue; | |
631 | ||
632 | /* Note if a block is on the DFS stack. */ |
633 | sbitmap in_stack; | |
634 | ||
7a31a7bd | 635 | /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops |
636 | and a mapping from block to its loop header (if the block is contained | |
637 | in a loop, else -1). | |
638 | ||
639 | Store results in HEADER, INNER, and MAX_HDR respectively, these will | |
640 | be used as inputs to the second traversal. | |
641 | ||
642 | STACK, SP and DFS_NR are only used during the first traversal. */ | |
643 | ||
644 | /* Allocate and initialize variables for the first traversal. */ | |
fe672ac0 | 645 | max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
646 | dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun)); | |
f1955b22 | 647 | stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun)); |
7a31a7bd | 648 | |
fe672ac0 | 649 | inner = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 650 | bitmap_ones (inner); |
7a31a7bd | 651 | |
fe672ac0 | 652 | header = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 653 | bitmap_clear (header); |
7a31a7bd | 654 | |
fe672ac0 | 655 | in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 656 | bitmap_clear (in_queue); |
7a31a7bd | 657 | |
fe672ac0 | 658 | in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 659 | bitmap_clear (in_stack); |
7a31a7bd | 660 | |
fe672ac0 | 661 | for (i = 0; i < last_basic_block_for_fn (cfun); i++) |
7a31a7bd | 662 | max_hdr[i] = -1; |
663 | ||
aae97b21 | 664 | #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux) |
665 | #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E))) | |
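/* An edge iterator counts as "passed" when it has run off the end of its
   edge vector or when its current edge has already been traversed; an
   edge is marked as traversed by pointing its aux field at itself.  The
   aux fields are cleared again once the DFS below has finished.  */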
666 | ||
7a31a7bd | 667 | /* DFS traversal to find inner loops in the cfg. */ |
668 | ||
34154e27 | 669 | current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs); |
7a31a7bd | 670 | sp = -1; |
aae97b21 | 671 | |
7a31a7bd | 672 | while (1) |
673 | { | |
aae97b21 | 674 | if (EDGE_PASSED (current_edge)) |
7a31a7bd | 675 | { |
676 | /* We have reached a leaf node or a node that was already | |
677 | processed. Pop edges off the stack until we find | |
678 | an edge that has not yet been processed. */ | |
aae97b21 | 679 | while (sp >= 0 && EDGE_PASSED (current_edge)) |
7a31a7bd | 680 | { |
681 | /* Pop entry off the stack. */ | |
682 | current_edge = stack[sp--]; | |
aae97b21 | 683 | node = ei_edge (current_edge)->src->index; |
684 | gcc_assert (node != ENTRY_BLOCK); | |
685 | child = ei_edge (current_edge)->dest->index; | |
686 | gcc_assert (child != EXIT_BLOCK); | |
08b7917c | 687 | bitmap_clear_bit (in_stack, child); |
688 | if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child])) | |
7a31a7bd | 689 | UPDATE_LOOP_RELATIONS (node, max_hdr[child]); |
aae97b21 | 690 | ei_next (¤t_edge); |
7a31a7bd | 691 | } |
692 | ||
693 | /* See if have finished the DFS tree traversal. */ | |
aae97b21 | 694 | if (sp < 0 && EDGE_PASSED (current_edge)) |
7a31a7bd | 695 | break; |
696 | ||
697 | /* Nope, continue the traversal with the popped node. */ | |
698 | continue; | |
699 | } | |
700 | ||
701 | /* Process a node. */ | |
aae97b21 | 702 | node = ei_edge (current_edge)->src->index; |
703 | gcc_assert (node != ENTRY_BLOCK); | |
08b7917c | 704 | bitmap_set_bit (in_stack, node); |
7a31a7bd | 705 | dfs_nr[node] = ++count; |
706 | ||
aae97b21 | 707 | /* We don't traverse to the exit block. */ |
708 | child = ei_edge (current_edge)->dest->index; | |
709 | if (child == EXIT_BLOCK) | |
710 | { | |
711 | SET_EDGE_PASSED (current_edge); | |
712 | ei_next (¤t_edge); | |
713 | continue; | |
714 | } | |
715 | ||
7a31a7bd | 716 | /* If the successor is in the stack, then we've found a loop. |
717 | Mark the loop, if it is not a natural loop, then it will | |
718 | be rejected during the second traversal. */ | |
08b7917c | 719 | if (bitmap_bit_p (in_stack, child)) |
7a31a7bd | 720 | { |
721 | no_loops = 0; | |
08b7917c | 722 | bitmap_set_bit (header, child); |
7a31a7bd | 723 | UPDATE_LOOP_RELATIONS (node, child); |
aae97b21 | 724 | SET_EDGE_PASSED (current_edge); |
725 | ei_next (¤t_edge); | |
7a31a7bd | 726 | continue; |
727 | } | |
728 | ||
729 | /* If the child was already visited, then there is no need to visit | |
730 | it again. Just update the loop relationships and restart | |
731 | with a new edge. */ | |
732 | if (dfs_nr[child]) | |
733 | { | |
08b7917c | 734 | if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child])) |
7a31a7bd | 735 | UPDATE_LOOP_RELATIONS (node, max_hdr[child]); |
aae97b21 | 736 | SET_EDGE_PASSED (current_edge); |
737 | ei_next (¤t_edge); | |
7a31a7bd | 738 | continue; |
739 | } | |
740 | ||
741 | /* Push an entry on the stack and continue DFS traversal. */ | |
742 | stack[++sp] = current_edge; | |
aae97b21 | 743 | SET_EDGE_PASSED (current_edge); |
744 | current_edge = ei_start (ei_edge (current_edge)->dest->succs); | |
745 | } | |
746 | ||
747 | /* Reset ->aux field used by EDGE_PASSED. */ | |
748 | FOR_ALL_BB (bb) | |
749 | { | |
750 | edge_iterator ei; | |
751 | edge e; | |
752 | FOR_EACH_EDGE (e, ei, bb->succs) | |
753 | e->aux = NULL; | |
7a31a7bd | 754 | } |
755 | ||
aae97b21 | 756 | |
7a31a7bd | 757 | /* Another check for unreachable blocks. The earlier test in |
758 | is_cfg_nonregular only finds unreachable blocks that do not | |
759 | form a loop. | |
760 | ||
761 | The DFS traversal will mark every block that is reachable from | |
762 | the entry node by placing a nonzero value in dfs_nr. Thus if | |
763 | dfs_nr is zero for any block, then it must be unreachable. */ | |
764 | unreachable = 0; | |
fc00614f | 765 | FOR_EACH_BB_FN (bb, cfun) |
4c26117a | 766 | if (dfs_nr[bb->index] == 0) |
7a31a7bd | 767 | { |
768 | unreachable = 1; | |
769 | break; | |
770 | } | |
771 | ||
772 | /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array | |
773 | to hold degree counts. */ | |
774 | degree = dfs_nr; | |
775 | ||
fc00614f | 776 | FOR_EACH_BB_FN (bb, cfun) |
aae97b21 | 777 | degree[bb->index] = EDGE_COUNT (bb->preds); |
7a31a7bd | 778 | |
779 | /* Do not perform region scheduling if there are any unreachable | |
780 | blocks. */ | |
781 | if (!unreachable) | |
782 | { | |
4bfe0e7b | 783 | int *queue, *degree1 = NULL; |
784 | /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put | |
785 | there basic blocks, which are forced to be region heads. | |
48e1416a | 786 | This is done to try to assemble a few smaller regions |
4bfe0e7b | 787 | from a too_large region. */ |
788 | sbitmap extended_rgn_header = NULL; | |
789 | bool extend_regions_p; | |
7a31a7bd | 790 | |
791 | if (no_loops) | |
08b7917c | 792 | bitmap_set_bit (header, 0); |
7a31a7bd | 793 | |
de132707 | 794 | /* Second traversal: find reducible inner loops and topologically sort |
7a31a7bd | 795 | the blocks of each region. */ |
796 | ||
a28770e1 | 797 | queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
48e1416a | 798 | |
4bfe0e7b | 799 | extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0; |
800 | if (extend_regions_p) | |
801 | { | |
fe672ac0 | 802 | degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
803 | extended_rgn_header = | |
804 | sbitmap_alloc (last_basic_block_for_fn (cfun)); | |
53c5d9d4 | 805 | bitmap_clear (extended_rgn_header); |
4bfe0e7b | 806 | } |
7a31a7bd | 807 | |
808 | /* Find blocks which are inner loop headers. We still have non-reducible | |
809 | loops to consider at this point. */ | |
fc00614f | 810 | FOR_EACH_BB_FN (bb, cfun) |
7a31a7bd | 811 | { |
08b7917c | 812 | if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index)) |
7a31a7bd | 813 | { |
814 | edge e; | |
cd665a06 | 815 | edge_iterator ei; |
4c26117a | 816 | basic_block jbb; |
7a31a7bd | 817 | |
818 | /* Now check that the loop is reducible. We do this separate | |
819 | from finding inner loops so that we do not find a reducible | |
820 | loop which contains an inner non-reducible loop. | |
821 | ||
822 | A simple way to find reducible/natural loops is to verify | |
823 | that each block in the loop is dominated by the loop | |
824 | header. | |
825 | ||
826 | If there exists a block that is not dominated by the loop | |
827 | header, then the block is reachable from outside the loop | |
828 | and thus the loop is not a natural loop. */ | |
fc00614f | 829 | FOR_EACH_BB_FN (jbb, cfun) |
7a31a7bd | 830 | { |
831 | /* First identify blocks in the loop, except for the loop | |
832 | entry block. */ | |
4c26117a | 833 | if (bb->index == max_hdr[jbb->index] && bb != jbb) |
7a31a7bd | 834 | { |
835 | /* Now verify that the block is dominated by the loop | |
836 | header. */ | |
0051c76a | 837 | if (!dominated_by_p (CDI_DOMINATORS, jbb, bb)) |
7a31a7bd | 838 | break; |
839 | } | |
840 | } | |
841 | ||
842 | /* If we exited the loop early, then BB is the header of |
843 | a non-reducible loop and we should quit processing it | |
844 | now. */ | |
34154e27 | 845 | if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
7a31a7bd | 846 | continue; |
847 | ||
848 | /* BB is a header of an inner loop, or block 0 in a subroutine |
849 | with no loops at all. */ | |
850 | head = tail = -1; | |
851 | too_large_failure = 0; | |
4c26117a | 852 | loop_head = max_hdr[bb->index]; |
7a31a7bd | 853 | |
4bfe0e7b | 854 | if (extend_regions_p) |
48e1416a | 855 | /* We save degree in case when we meet a too_large region |
856 | and cancel it. We need a correct degree later when | |
4bfe0e7b | 857 | calling extend_rgns. */ |
fe672ac0 | 858 | memcpy (degree1, degree, |
859 | last_basic_block_for_fn (cfun) * sizeof (int)); | |
48e1416a | 860 | |
7a31a7bd | 861 | /* Decrease degree of all I's successors for topological |
862 | ordering. */ | |
cd665a06 | 863 | FOR_EACH_EDGE (e, ei, bb->succs) |
34154e27 | 864 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
b3d6de89 | 865 | --degree[e->dest->index]; |
7a31a7bd | 866 | |
867 | /* Estimate # insns, and count # blocks in the region. */ | |
868 | num_bbs = 1; | |
e1ab7874 | 869 | num_insns = common_sched_info->estimate_number_of_insns (bb); |
7a31a7bd | 870 | |
871 | /* Find all loop latches (blocks with back edges to the loop | |
872 | header) or all the leaf blocks in the cfg has no loops. | |
873 | ||
874 | Place those blocks into the queue. */ | |
875 | if (no_loops) | |
876 | { | |
fc00614f | 877 | FOR_EACH_BB_FN (jbb, cfun) |
7a31a7bd | 878 | /* Leaf nodes have only a single successor which must |
879 | be EXIT_BLOCK. */ | |
ea091dfd | 880 | if (single_succ_p (jbb) |
34154e27 | 881 | && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
7a31a7bd | 882 | { |
4c26117a | 883 | queue[++tail] = jbb->index; |
08b7917c | 884 | bitmap_set_bit (in_queue, jbb->index); |
7a31a7bd | 885 | |
4c26117a | 886 | if (too_large (jbb->index, &num_bbs, &num_insns)) |
7a31a7bd | 887 | { |
888 | too_large_failure = 1; | |
889 | break; | |
890 | } | |
891 | } | |
892 | } | |
893 | else | |
894 | { | |
895 | edge e; | |
896 | ||
cd665a06 | 897 | FOR_EACH_EDGE (e, ei, bb->preds) |
7a31a7bd | 898 | { |
34154e27 | 899 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
7a31a7bd | 900 | continue; |
901 | ||
b3d6de89 | 902 | node = e->src->index; |
7a31a7bd | 903 | |
4c26117a | 904 | if (max_hdr[node] == loop_head && node != bb->index) |
7a31a7bd | 905 | { |
906 | /* This is a loop latch. */ | |
907 | queue[++tail] = node; | |
08b7917c | 908 | bitmap_set_bit (in_queue, node); |
7a31a7bd | 909 | |
910 | if (too_large (node, &num_bbs, &num_insns)) | |
911 | { | |
912 | too_large_failure = 1; | |
913 | break; | |
914 | } | |
915 | } | |
916 | } | |
917 | } | |
918 | ||
919 | /* Now add all the blocks in the loop to the queue. | |
920 | ||
921 | We know the loop is a natural loop; however the algorithm | |
922 | above will not always mark certain blocks as being in the | |
923 | loop. Consider: | |
924 | node children | |
925 | a b,c | |
926 | b c | |
927 | c a,d | |
928 | d b | |
929 | ||
930 | The algorithm in the DFS traversal may not mark B & D as part | |
0c6d8c36 | 931 | of the loop (i.e. they will not have max_hdr set to A). |
7a31a7bd | 932 | |
933 | We know they cannot be loop latches (else they would have |
934 | had max_hdr set since they'd have a backedge to a dominator | |
935 | block). So we don't need them on the initial queue. | |
936 | ||
937 | We know they are part of the loop because they are dominated | |
938 | by the loop header and can be reached by a backwards walk of | |
939 | the edges starting with nodes on the initial queue. | |
940 | ||
941 | It is safe and desirable to include those nodes in the | |
942 | loop/scheduling region. To do so we would need to decrease | |
943 | the degree of a node if it is the target of a backedge | |
944 | within the loop itself as the node is placed in the queue. | |
945 | ||
946 | We do not do this because I'm not sure that the actual | |
947 | scheduling code will properly handle this case. ?!? */ | |
948 | ||
949 | while (head < tail && !too_large_failure) | |
950 | { | |
951 | edge e; | |
952 | child = queue[++head]; | |
953 | ||
f5a6b05f | 954 | FOR_EACH_EDGE (e, ei, |
955 | BASIC_BLOCK_FOR_FN (cfun, child)->preds) | |
7a31a7bd | 956 | { |
b3d6de89 | 957 | node = e->src->index; |
7a31a7bd | 958 | |
959 | /* See discussion above about nodes not marked as in | |
960 | this loop during the initial DFS traversal. */ | |
34154e27 | 961 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
7a31a7bd | 962 | || max_hdr[node] != loop_head) |
963 | { | |
964 | tail = -1; | |
965 | break; | |
966 | } | |
08b7917c | 967 | else if (!bitmap_bit_p (in_queue, node) && node != bb->index) |
7a31a7bd | 968 | { |
969 | queue[++tail] = node; | |
08b7917c | 970 | bitmap_set_bit (in_queue, node); |
7a31a7bd | 971 | |
972 | if (too_large (node, &num_bbs, &num_insns)) | |
973 | { | |
974 | too_large_failure = 1; | |
975 | break; | |
976 | } | |
977 | } | |
978 | } | |
979 | } | |
980 | ||
981 | if (tail >= 0 && !too_large_failure) | |
982 | { | |
983 | /* Place the loop header into list of region blocks. */ | |
4c26117a | 984 | degree[bb->index] = -1; |
985 | rgn_bb_table[idx] = bb->index; | |
7a31a7bd | 986 | RGN_NR_BLOCKS (nr_regions) = num_bbs; |
987 | RGN_BLOCKS (nr_regions) = idx++; | |
6a1cdb4d | 988 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
989 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
4c26117a | 990 | CONTAINING_RGN (bb->index) = nr_regions; |
991 | BLOCK_TO_BB (bb->index) = count = 0; | |
7a31a7bd | 992 | |
993 | /* Remove blocks from queue[] when their in degree | |
994 | becomes zero. Repeat until no blocks are left on the | |
995 | list. This produces a topological list of blocks in | |
996 | the region. */ | |
997 | while (tail >= 0) | |
998 | { | |
999 | if (head < 0) | |
1000 | head = tail; | |
1001 | child = queue[head]; | |
1002 | if (degree[child] == 0) | |
1003 | { | |
1004 | edge e; | |
1005 | ||
1006 | degree[child] = -1; | |
1007 | rgn_bb_table[idx++] = child; | |
1008 | BLOCK_TO_BB (child) = ++count; | |
1009 | CONTAINING_RGN (child) = nr_regions; | |
1010 | queue[head] = queue[tail--]; | |
1011 | ||
f5a6b05f | 1012 | FOR_EACH_EDGE (e, ei, |
1013 | BASIC_BLOCK_FOR_FN (cfun, | |
1014 | child)->succs) | |
34154e27 | 1015 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
b3d6de89 | 1016 | --degree[e->dest->index]; |
7a31a7bd | 1017 | } |
1018 | else | |
1019 | --head; | |
1020 | } | |
1021 | ++nr_regions; | |
1022 | } | |
4bfe0e7b | 1023 | else if (extend_regions_p) |
1024 | { | |
1025 | /* Restore DEGREE. */ | |
1026 | int *t = degree; | |
1027 | ||
1028 | degree = degree1; | |
1029 | degree1 = t; | |
48e1416a | 1030 | |
4bfe0e7b | 1031 | /* And force successors of BB to be region heads. |
1032 | This may provide several smaller regions instead | |
1033 | of one too_large region. */ | |
1034 | FOR_EACH_EDGE (e, ei, bb->succs) | |
34154e27 | 1035 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
08b7917c | 1036 | bitmap_set_bit (extended_rgn_header, e->dest->index); |
4bfe0e7b | 1037 | } |
7a31a7bd | 1038 | } |
1039 | } | |
1040 | free (queue); | |
4bfe0e7b | 1041 | |
1042 | if (extend_regions_p) | |
1043 | { | |
1044 | free (degree1); | |
48e1416a | 1045 | |
53c5d9d4 | 1046 | bitmap_ior (header, header, extended_rgn_header); |
4bfe0e7b | 1047 | sbitmap_free (extended_rgn_header); |
48e1416a | 1048 | |
4bfe0e7b | 1049 | extend_rgns (degree, &idx, header, max_hdr); |
1050 | } | |
7a31a7bd | 1051 | } |
1052 | ||
1053 | /* Any block that did not end up in a region is placed into a region | |
1054 | by itself. */ | |
fc00614f | 1055 | FOR_EACH_BB_FN (bb, cfun) |
4c26117a | 1056 | if (degree[bb->index] >= 0) |
7a31a7bd | 1057 | { |
4c26117a | 1058 | rgn_bb_table[idx] = bb->index; |
7a31a7bd | 1059 | RGN_NR_BLOCKS (nr_regions) = 1; |
1060 | RGN_BLOCKS (nr_regions) = idx++; | |
6a1cdb4d | 1061 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1062 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
4c26117a | 1063 | CONTAINING_RGN (bb->index) = nr_regions++; |
1064 | BLOCK_TO_BB (bb->index) = 0; | |
7a31a7bd | 1065 | } |
1066 | ||
1067 | free (max_hdr); | |
4bfe0e7b | 1068 | free (degree); |
7a31a7bd | 1069 | free (stack); |
265a9759 | 1070 | sbitmap_free (header); |
1071 | sbitmap_free (inner); | |
1072 | sbitmap_free (in_queue); | |
1073 | sbitmap_free (in_stack); | |
7a31a7bd | 1074 | } |
1075 | ||
e1ab7874 | 1076 | |
1077 | /* Wrapper function. | |
1078 | If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form | |
1079 | regions. Otherwise just call haifa_find_rgns. */ |
1080 | static void | |
1081 | find_rgns (void) | |
1082 | { | |
1083 | if (sel_sched_p () && flag_sel_sched_pipelining) | |
1084 | sel_find_rgns (); | |
1085 | else | |
1086 | haifa_find_rgns (); | |
1087 | } | |
1088 | ||
4bfe0e7b | 1089 | static int gather_region_statistics (int **); |
1090 | static void print_region_statistics (int *, int, int *, int); | |
1091 | ||
48e1416a | 1092 | /* Calculate the histogram that shows the number of regions having the |
1093 | given number of basic blocks, and store it in the RSP array. Return | |
4bfe0e7b | 1094 | the size of this array. */ |
1095 | static int | |
1096 | gather_region_statistics (int **rsp) | |
1097 | { | |
1098 | int i, *a = 0, a_sz = 0; | |
1099 | ||
1100 | /* a[i] is the number of regions that have (i + 1) basic blocks. */ | |
1101 | for (i = 0; i < nr_regions; i++) | |
1102 | { | |
1103 | int nr_blocks = RGN_NR_BLOCKS (i); | |
1104 | ||
1105 | gcc_assert (nr_blocks >= 1); | |
1106 | ||
1107 | if (nr_blocks > a_sz) | |
48e1416a | 1108 | { |
f7f3687c | 1109 | a = XRESIZEVEC (int, a, nr_blocks); |
4bfe0e7b | 1110 | do |
1111 | a[a_sz++] = 0; | |
1112 | while (a_sz != nr_blocks); | |
1113 | } | |
1114 | ||
1115 | a[nr_blocks - 1]++; | |
1116 | } | |
1117 | ||
1118 | *rsp = a; | |
1119 | return a_sz; | |
1120 | } | |
1121 | ||
48e1416a | 1122 | /* Print regions statistics. S1 and S2 denote the data before and after |
4bfe0e7b | 1123 | calling extend_rgns, respectively. */ |
1124 | static void | |
1125 | print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz) | |
1126 | { | |
1127 | int i; | |
48e1416a | 1128 | |
1129 | /* We iterate until s2_sz because extend_rgns does not decrease | |
4bfe0e7b | 1130 | the maximal region size. */ |
1131 | for (i = 1; i < s2_sz; i++) | |
1132 | { | |
1133 | int n1, n2; | |
1134 | ||
1135 | n2 = s2[i]; | |
1136 | ||
1137 | if (n2 == 0) | |
1138 | continue; | |
1139 | ||
1140 | if (i >= s1_sz) | |
1141 | n1 = 0; | |
1142 | else | |
1143 | n1 = s1[i]; | |
1144 | ||
1145 | fprintf (sched_dump, ";; Region extension statistics: size %d: " \ | |
1146 | "was %d + %d more\n", i + 1, n1, n2 - n1); | |
1147 | } | |
1148 | } | |
1149 | ||
1150 | /* Extend regions. | |
1151 | DEGREE - Array of incoming edge count, considering only | |
1152 | the edges that don't have their sources in formed regions yet. |
1153 | IDXP - pointer to the next available index in rgn_bb_table. | |
1154 | HEADER - set of all region heads. | |
1155 | LOOP_HDR - mapping from block to the containing loop | |
1156 | (two blocks can reside within one region if they have | |
1157 | the same loop header). */ | |
e1ab7874 | 1158 | void |
4bfe0e7b | 1159 | extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr) |
1160 | { | |
1161 | int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr; | |
a28770e1 | 1162 | int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; |
4bfe0e7b | 1163 | |
1164 | max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS); | |
1165 | ||
fe672ac0 | 1166 | max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
4bfe0e7b | 1167 | |
fe672ac0 | 1168 | order = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
3072d30e | 1169 | post_order_compute (order, false, false); |
4bfe0e7b | 1170 | |
1171 | for (i = nblocks - 1; i >= 0; i--) | |
1172 | { | |
1173 | int bbn = order[i]; | |
1174 | if (degree[bbn] >= 0) | |
1175 | { | |
1176 | max_hdr[bbn] = bbn; | |
1177 | rescan = 1; | |
1178 | } | |
1179 | else | |
1180 | /* This block already was processed in find_rgns. */ | |
1181 | max_hdr[bbn] = -1; | |
1182 | } | |
48e1416a | 1183 | |
4bfe0e7b | 1184 | /* The idea is to walk the CFG topologically in top-down order. |
1185 | During the traversal, if all the predecessors of a node are | |
1186 | marked to be in the same region (they all have the same max_hdr), | |
48e1416a | 1187 | then the current node is also marked to be a part of that region. |
4bfe0e7b | 1188 | Otherwise the node starts its own region. |
48e1416a | 1189 | CFG should be traversed until no further changes are made. On each |
1190 | iteration the set of the region heads is extended (the set of those | |
1191 | blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the | |
e1ab7874 | 1192 | set of all basic blocks, thus the algorithm is guaranteed to |
1193 | terminate. */ | |
4bfe0e7b | 1194 | |
1195 | while (rescan && iter < max_iter) | |
1196 | { | |
1197 | rescan = 0; | |
48e1416a | 1198 | |
4bfe0e7b | 1199 | for (i = nblocks - 1; i >= 0; i--) |
1200 | { | |
1201 | edge e; | |
1202 | edge_iterator ei; | |
1203 | int bbn = order[i]; | |
48e1416a | 1204 | |
08b7917c | 1205 | if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn)) |
4bfe0e7b | 1206 | { |
1207 | int hdr = -1; | |
1208 | ||
f5a6b05f | 1209 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds) |
4bfe0e7b | 1210 | { |
1211 | int predn = e->src->index; | |
1212 | ||
1213 | if (predn != ENTRY_BLOCK | |
1214 | /* If pred wasn't processed in find_rgns. */ | |
1215 | && max_hdr[predn] != -1 | |
1216 | /* And pred and bb reside in the same loop. | |
1217 | (Or out of any loop). */ | |
1218 | && loop_hdr[bbn] == loop_hdr[predn]) | |
1219 | { | |
1220 | if (hdr == -1) | |
1221 | /* Then bb extends the containing region of pred. */ | |
1222 | hdr = max_hdr[predn]; | |
1223 | else if (hdr != max_hdr[predn]) | |
1224 | /* Too bad, there are at least two predecessors | |
1225 | that reside in different regions. Thus, BB should | |
1226 | begin its own region. */ | |
1227 | { | |
1228 | hdr = bbn; | |
1229 | break; | |
48e1416a | 1230 | } |
4bfe0e7b | 1231 | } |
1232 | else | |
1233 | /* BB starts its own region. */ | |
1234 | { | |
1235 | hdr = bbn; | |
1236 | break; | |
48e1416a | 1237 | } |
4bfe0e7b | 1238 | } |
48e1416a | 1239 | |
4bfe0e7b | 1240 | if (hdr == bbn) |
1241 | { | |
1242 | /* If BB starts its own region, |
1243 | update set of headers with BB. */ | |
08b7917c | 1244 | bitmap_set_bit (header, bbn); |
4bfe0e7b | 1245 | rescan = 1; |
1246 | } | |
1247 | else | |
48e1416a | 1248 | gcc_assert (hdr != -1); |
4bfe0e7b | 1249 | |
1250 | max_hdr[bbn] = hdr; | |
1251 | } | |
1252 | } | |
1253 | ||
1254 | iter++; | |
1255 | } | |
48e1416a | 1256 | |
4bfe0e7b | 1257 | /* Statistics were gathered on the SPEC2000 package of tests with |
1258 | mainline weekly snapshot gcc-4.1-20051015 on ia64. | |
48e1416a | 1259 | |
4bfe0e7b | 1260 | Statistics for SPECint: |
1261 | 1 iteration : 1751 cases (38.7%) | |
1262 | 2 iterations: 2770 cases (61.3%) | |
1263 | Blocks wrapped in regions by find_rgns without extension: 18295 blocks | |
1264 | Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks | |
1265 | (We don't count single block regions here). | |
48e1416a | 1266 | |
4bfe0e7b | 1267 | Statistics for SPECfp: |
1268 | 1 iteration : 621 cases (35.9%) | |
1269 | 2 iterations: 1110 cases (64.1%) | |
1270 | Blocks wrapped in regions by find_rgns without extension: 6476 blocks | |
1271 | Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks | |
1272 | (We don't count single block regions here). | |
1273 | ||
1274 | By default we do at most 2 iterations. | |
9ca2c29a | 1275 | This can be overridden with max-sched-extend-regions-iters parameter: |
4bfe0e7b | 1276 | 0 - disable region extension, |
1277 | N > 0 - do at most N iterations. */ | |
48e1416a | 1278 | |
4bfe0e7b | 1279 | if (sched_verbose && iter != 0) |
1280 | fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter, | |
1281 | rescan ? "... failed" : ""); | |
48e1416a | 1282 | |
4bfe0e7b | 1283 | if (!rescan && iter != 0) |
1284 | { | |
1285 | int *s1 = NULL, s1_sz = 0; | |
1286 | ||
1287 | /* Save the old statistics for later printout. */ | |
1288 | if (sched_verbose >= 6) | |
1289 | s1_sz = gather_region_statistics (&s1); | |
1290 | ||
1291 | /* We have succeeded. Now assemble the regions. */ | |
1292 | for (i = nblocks - 1; i >= 0; i--) | |
1293 | { | |
1294 | int bbn = order[i]; | |
1295 | ||
1296 | if (max_hdr[bbn] == bbn) | |
1297 | /* BBN is a region head. */ | |
1298 | { | |
1299 | edge e; | |
1300 | edge_iterator ei; | |
1301 | int num_bbs = 0, j, num_insns = 0, large; | |
48e1416a | 1302 | |
4bfe0e7b | 1303 | large = too_large (bbn, &num_bbs, &num_insns); |
1304 | ||
1305 | degree[bbn] = -1; | |
1306 | rgn_bb_table[idx] = bbn; | |
1307 | RGN_BLOCKS (nr_regions) = idx++; | |
6a1cdb4d | 1308 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1309 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
4bfe0e7b | 1310 | CONTAINING_RGN (bbn) = nr_regions; |
1311 | BLOCK_TO_BB (bbn) = 0; | |
1312 | ||
f5a6b05f | 1313 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs) |
34154e27 | 1314 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
4bfe0e7b | 1315 | degree[e->dest->index]--; |
1316 | ||
1317 | if (!large) | |
1318 | /* Here we check whether the region is too_large. */ | |
1319 | for (j = i - 1; j >= 0; j--) | |
1320 | { | |
1321 | int succn = order[j]; | |
1322 | if (max_hdr[succn] == bbn) | |
1323 | { | |
1324 | if ((large = too_large (succn, &num_bbs, &num_insns))) | |
1325 | break; | |
1326 | } | |
1327 | } | |
1328 | ||
1329 | if (large) | |
1330 | /* If the region is too_large, then wrap every block of | |
1331 | the region into a single block region. |
1332 | Here we wrap region head only. Other blocks are | |
1333 | processed in the below cycle. */ | |
1334 | { | |
1335 | RGN_NR_BLOCKS (nr_regions) = 1; | |
1336 | nr_regions++; | |
48e1416a | 1337 | } |
4bfe0e7b | 1338 | |
1339 | num_bbs = 1; | |
1340 | ||
1341 | for (j = i - 1; j >= 0; j--) | |
1342 | { | |
1343 | int succn = order[j]; | |
1344 | ||
1345 | if (max_hdr[succn] == bbn) | |
48e1416a | 1346 | /* This cycle iterates over all basic blocks, that |
4bfe0e7b | 1347 | are supposed to be in the region with head BBN, |
1348 | and wraps them into that region (or in single | |
1349 | block region). */ | |
1350 | { | |
1351 | gcc_assert (degree[succn] == 0); | |
1352 | ||
1353 | degree[succn] = -1; | |
48e1416a | 1354 | rgn_bb_table[idx] = succn; |
4bfe0e7b | 1355 | BLOCK_TO_BB (succn) = large ? 0 : num_bbs++; |
1356 | CONTAINING_RGN (succn) = nr_regions; | |
1357 | ||
1358 | if (large) | |
1359 | /* Wrap SUCCN into single block region. */ | |
1360 | { | |
1361 | RGN_BLOCKS (nr_regions) = idx; | |
1362 | RGN_NR_BLOCKS (nr_regions) = 1; | |
6a1cdb4d | 1363 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1364 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
4bfe0e7b | 1365 | nr_regions++; |
1366 | } | |
1367 | ||
1368 | idx++; | |
48e1416a | 1369 | |
f5a6b05f | 1370 | FOR_EACH_EDGE (e, ei, |
1371 | BASIC_BLOCK_FOR_FN (cfun, succn)->succs) | |
34154e27 | 1372 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
4bfe0e7b | 1373 | degree[e->dest->index]--; |
1374 | } | |
1375 | } | |
1376 | ||
1377 | if (!large) | |
1378 | { | |
1379 | RGN_NR_BLOCKS (nr_regions) = num_bbs; | |
1380 | nr_regions++; | |
1381 | } | |
1382 | } | |
1383 | } | |
1384 | ||
1385 | if (sched_verbose >= 6) | |
1386 | { | |
1387 | int *s2, s2_sz; | |
1388 | ||
48e1416a | 1389 | /* Get the new statistics and print the comparison with the |
4bfe0e7b | 1390 | one before calling this function. */ |
1391 | s2_sz = gather_region_statistics (&s2); | |
1392 | print_region_statistics (s1, s1_sz, s2, s2_sz); | |
1393 | free (s1); | |
1394 | free (s2); | |
1395 | } | |
1396 | } | |
48e1416a | 1397 | |
4bfe0e7b | 1398 | free (order); |
1399 | free (max_hdr); | |
1400 | ||
48e1416a | 1401 | *idxp = idx; |
4bfe0e7b | 1402 | } |
1403 | ||
7a31a7bd | 1404 | /* Functions for regions scheduling information. */ |
1405 | ||
1406 | /* Compute dominators, probability, and potential-split-edges of bb. | |
1407 | Assume that these values were already computed for bb's predecessors. */ | |
1408 | ||
1409 | static void | |
60b8c5b3 | 1410 | compute_dom_prob_ps (int bb) |
7a31a7bd | 1411 | { |
6c0b81cb | 1412 | edge_iterator in_ei; |
1413 | edge in_edge; | |
7a31a7bd | 1414 | |
6a1cdb4d | 1415 | /* We shouldn't have any real ebbs yet. */ |
1416 | gcc_assert (ebb_head [bb] == bb + current_blocks); | |
48e1416a | 1417 | |
7a31a7bd | 1418 | if (IS_RGN_ENTRY (bb)) |
1419 | { | |
08b7917c | 1420 | bitmap_set_bit (dom[bb], 0); |
6c0b81cb | 1421 | prob[bb] = REG_BR_PROB_BASE; |
7a31a7bd | 1422 | return; |
1423 | } | |
1424 | ||
6c0b81cb | 1425 | prob[bb] = 0; |
1426 | ||
4a82352a | 1427 | /* Initialize dom[bb] to '111..1'. */ |
53c5d9d4 | 1428 | bitmap_ones (dom[bb]); |
7a31a7bd | 1429 | |
f5a6b05f | 1430 | FOR_EACH_EDGE (in_edge, in_ei, |
1431 | BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds) | |
7a31a7bd | 1432 | { |
6c0b81cb | 1433 | int pred_bb; |
1434 | edge out_edge; | |
1435 | edge_iterator out_ei; | |
1436 | ||
34154e27 | 1437 | if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
aae97b21 | 1438 | continue; |
7a31a7bd | 1439 | |
aae97b21 | 1440 | pred_bb = BLOCK_TO_BB (in_edge->src->index); |
53c5d9d4 | 1441 | bitmap_and (dom[bb], dom[bb], dom[pred_bb]); |
1442 | bitmap_ior (ancestor_edges[bb], | |
aae97b21 | 1443 | ancestor_edges[bb], ancestor_edges[pred_bb]); |
7a31a7bd | 1444 | |
08b7917c | 1445 | bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge)); |
79cafa9e | 1446 | |
53c5d9d4 | 1447 | bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]); |
7a31a7bd | 1448 | |
aae97b21 | 1449 | FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs) |
08b7917c | 1450 | bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge)); |
aae97b21 | 1451 | |
70074000 | 1452 | prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability); |
df21e330 | 1453 | /* The rounding divide in combine_probabilities can result in an extra |
1454 | probability increment propagating along 50-50 edges.  Eventually when |
1455 | the edges re-merge, the accumulated probability can go slightly above |
1456 | REG_BR_PROB_BASE.  */ |
1457 | if (prob[bb] > REG_BR_PROB_BASE) | |
1458 | prob[bb] = REG_BR_PROB_BASE; | |
7a31a7bd | 1459 | } |
7a31a7bd | 1460 | |
08b7917c | 1461 | bitmap_set_bit (dom[bb], bb); |
53c5d9d4 | 1462 | bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]); |
7a31a7bd | 1463 | |
1464 | if (sched_verbose >= 2) | |
1465 | fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), | |
6c0b81cb | 1466 | (100 * prob[bb]) / REG_BR_PROB_BASE); |
7a31a7bd | 1467 | } |
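/* A worked example of the accumulation above (illustrative only, not part
   of the original sources; it assumes REG_BR_PROB_BASE is 10000 and that
   combine_probabilities rounds prob * edge-probability / REG_BR_PROB_BASE
   to the nearest integer): an ancestor block with prob == 9999 that
   branches 50-50 into two blocks gives each of them
   RDIV (9999 * 5000, 10000) == 5000; when the two paths re-merge, the
   join block accumulates 5000 + 5000 == 10000, one more than its
   ancestor's 9999.  Enough such re-merges can push the running value
   past REG_BR_PROB_BASE, which is why prob[bb] is clamped above.  */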
1468 | ||
1469 | /* Functions for target info. */ | |
1470 | ||
1471 | /* Compute in BL the list of split-edges of bb_src relative to bb_trg. | |
1472 | Note that bb_trg dominates bb_src. */ | |
1473 | ||
1474 | static void | |
60b8c5b3 | 1475 | split_edges (int bb_src, int bb_trg, edgelst *bl) |
7a31a7bd | 1476 | { |
156093aa | 1477 | sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src])); |
53c5d9d4 | 1478 | bitmap_copy (src, pot_split[bb_src]); |
79cafa9e | 1479 | |
53c5d9d4 | 1480 | bitmap_and_compl (src, src, pot_split[bb_trg]); |
aae97b21 | 1481 | extract_edgelst (src, bl); |
79cafa9e | 1482 | sbitmap_free (src); |
7a31a7bd | 1483 | } |
1484 | ||
1485 | /* Find the valid candidate-source-blocks for the target block TRG, compute | |
1486 | their probability, and check if they are speculative or not. | |
1487 | For speculative sources, compute their update-blocks and split-blocks. */ | |
1488 | ||
1489 | static void | |
60b8c5b3 | 1490 | compute_trg_info (int trg) |
7a31a7bd | 1491 | { |
19cb6b50 | 1492 | candidate *sp; |
91020a82 | 1493 | edgelst el = { NULL, 0 }; |
aae97b21 | 1494 | int i, j, k, update_idx; |
1495 | basic_block block; | |
d3129ae7 | 1496 | sbitmap visited; |
aae97b21 | 1497 | edge_iterator ei; |
1498 | edge e; | |
7a31a7bd | 1499 | |
e1ab7874 | 1500 | candidate_table = XNEWVEC (candidate, current_nr_blocks); |
1501 | ||
1502 | bblst_last = 0; | |
1503 | /* bblst_table holds split blocks and update blocks for each block after | |
1504 | the current one in the region. Split blocks and update blocks are | |
1505 | the TO blocks of region edges, so there can be at most rgn_nr_edges | |
1506 | of them. */ | |
1507 | bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; | |
1508 | bblst_table = XNEWVEC (basic_block, bblst_size); | |
1509 | ||
1510 | edgelst_last = 0; | |
1511 | edgelst_table = XNEWVEC (edge, rgn_nr_edges); | |
1512 | ||
7a31a7bd | 1513 | /* Define some of the fields for the target bb as well. */ |
1514 | sp = candidate_table + trg; | |
1515 | sp->is_valid = 1; | |
1516 | sp->is_speculative = 0; | |
6c0b81cb | 1517 | sp->src_prob = REG_BR_PROB_BASE; |
7a31a7bd | 1518 | |
fe672ac0 | 1519 | visited = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
d3129ae7 | 1520 | |
7a31a7bd | 1521 | for (i = trg + 1; i < current_nr_blocks; i++) |
1522 | { | |
1523 | sp = candidate_table + i; | |
1524 | ||
1525 | sp->is_valid = IS_DOMINATED (i, trg); | |
1526 | if (sp->is_valid) | |
1527 | { | |
6c0b81cb | 1528 | int tf = prob[trg], cf = prob[i]; |
1529 | ||
1530 | /* In CFGs with low-probability edges, TF can be zero. */ | |
70074000 | 1531 | sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0); |
6c0b81cb | 1532 | sp->is_valid = (sp->src_prob >= min_spec_prob); |
7a31a7bd | 1533 | } |
1534 | ||
1535 | if (sp->is_valid) | |
1536 | { | |
1537 | split_edges (i, trg, &el); | |
1538 | sp->is_speculative = (el.nr_members) ? 1 : 0; | |
1539 | if (sp->is_speculative && !flag_schedule_speculative) | |
1540 | sp->is_valid = 0; | |
1541 | } | |
1542 | ||
1543 | if (sp->is_valid) | |
1544 | { | |
7a31a7bd | 1545 | /* Compute split blocks and store them in bblst_table. |
1546 | The TO block of every split edge is a split block. */ | |
1547 | sp->split_bbs.first_member = &bblst_table[bblst_last]; | |
1548 | sp->split_bbs.nr_members = el.nr_members; | |
1549 | for (j = 0; j < el.nr_members; bblst_last++, j++) | |
aae97b21 | 1550 | bblst_table[bblst_last] = el.first_member[j]->dest; |
7a31a7bd | 1551 | sp->update_bbs.first_member = &bblst_table[bblst_last]; |
1552 | ||
1553 | /* Compute update blocks and store them in bblst_table. | |
1554 | For every split edge, look at the FROM block, and check | |
1555 | all out edges. For each out edge that is not a split edge, | |
1556 | add the TO block to the update block list. This list can end | |
1557 | up with a lot of duplicates. We need to weed them out to avoid | |
1558 | overrunning the end of the bblst_table. */ | |
7a31a7bd | 1559 | |
1560 | update_idx = 0; | |
53c5d9d4 | 1561 | bitmap_clear (visited); |
7a31a7bd | 1562 | for (j = 0; j < el.nr_members; j++) |
1563 | { | |
aae97b21 | 1564 | block = el.first_member[j]->src; |
1565 | FOR_EACH_EDGE (e, ei, block->succs) | |
7a31a7bd | 1566 | { |
08b7917c | 1567 | if (!bitmap_bit_p (visited, e->dest->index)) |
7a31a7bd | 1568 | { |
1569 | for (k = 0; k < el.nr_members; k++) | |
aae97b21 | 1570 | if (e == el.first_member[k]) |
7a31a7bd | 1571 | break; |
1572 | ||
1573 | if (k >= el.nr_members) | |
1574 | { | |
aae97b21 | 1575 | bblst_table[bblst_last++] = e->dest; |
08b7917c | 1576 | bitmap_set_bit (visited, e->dest->index); |
7a31a7bd | 1577 | update_idx++; |
1578 | } | |
1579 | } | |
7a31a7bd | 1580 | } |
7a31a7bd | 1581 | } |
1582 | sp->update_bbs.nr_members = update_idx; | |
1583 | ||
1584 | /* Make sure we didn't overrun the end of bblst_table. */ | |
04e579b6 | 1585 | gcc_assert (bblst_last <= bblst_size); |
7a31a7bd | 1586 | } |
1587 | else | |
1588 | { | |
1589 | sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0; | |
1590 | ||
1591 | sp->is_speculative = 0; | |
1592 | sp->src_prob = 0; | |
1593 | } | |
1594 | } | |
d3129ae7 | 1595 | |
1596 | sbitmap_free (visited); | |
7a31a7bd | 1597 | } |
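/* A small worked example for sp->src_prob above (illustrative only,
   assuming REG_BR_PROB_BASE == 10000 and that GCOV_COMPUTE_SCALE (cf, tf)
   evaluates to roughly cf * REG_BR_PROB_BASE / tf): with prob[trg] == 8000
   and prob[i] == 2000, src_prob == 2000 * 10000 / 8000 == 2500, i.e. the
   candidate block is reached on about a quarter of the paths through the
   target.  If min_spec_prob were, say, 4000 (a 40% cutoff), this
   candidate would be marked invalid.  */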
1598 | ||
e1ab7874 | 1599 | /* Free the computed target info. */ |
1600 | static void | |
1601 | free_trg_info (void) | |
1602 | { | |
1603 | free (candidate_table); | |
1604 | free (bblst_table); | |
1605 | free (edgelst_table); | |
1606 | } | |
1607 | ||
7a31a7bd | 1608 | /* Print info about candidate I, for debugging purposes. Callable from debugger. */ |
1609 | ||
4b987fac | 1610 | DEBUG_FUNCTION void |
60b8c5b3 | 1611 | debug_candidate (int i) |
7a31a7bd | 1612 | { |
1613 | if (!candidate_table[i].is_valid) | |
1614 | return; | |
1615 | ||
1616 | if (candidate_table[i].is_speculative) | |
1617 | { | |
1618 | int j; | |
1619 | fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i); | |
1620 | ||
1621 | fprintf (sched_dump, "split path: "); | |
1622 | for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++) | |
1623 | { | |
aae97b21 | 1624 | int b = candidate_table[i].split_bbs.first_member[j]->index; |
7a31a7bd | 1625 | |
1626 | fprintf (sched_dump, " %d ", b); | |
1627 | } | |
1628 | fprintf (sched_dump, "\n"); | |
1629 | ||
1630 | fprintf (sched_dump, "update path: "); | |
1631 | for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++) | |
1632 | { | |
aae97b21 | 1633 | int b = candidate_table[i].update_bbs.first_member[j]->index; |
7a31a7bd | 1634 | |
1635 | fprintf (sched_dump, " %d ", b); | |
1636 | } | |
1637 | fprintf (sched_dump, "\n"); | |
1638 | } | |
1639 | else | |
1640 | { | |
1641 | fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i)); | |
1642 | } | |
1643 | } | |
1644 | ||
1645 | /* Print candidates info, for debugging purposes. Callable from debugger. */ | |
1646 | ||
4b987fac | 1647 | DEBUG_FUNCTION void |
60b8c5b3 | 1648 | debug_candidates (int trg) |
7a31a7bd | 1649 | { |
1650 | int i; | |
1651 | ||
1652 | fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n", | |
1653 | BB_TO_BLOCK (trg), trg); | |
1654 | for (i = trg + 1; i < current_nr_blocks; i++) | |
1655 | debug_candidate (i); | |
1656 | } | |
1657 | ||
de132707 | 1658 | /* Functions for speculative scheduling. */ |
7a31a7bd | 1659 | |
3072d30e | 1660 | static bitmap_head not_in_df; |
1661 | ||
7a31a7bd | 1662 | /* Return 0 if x is a set of a register alive at the beginning of one |
1663 | of the split-blocks of src, otherwise return 1. */ | |
1664 | ||
1665 | static int | |
60b8c5b3 | 1666 | check_live_1 (int src, rtx x) |
7a31a7bd | 1667 | { |
19cb6b50 | 1668 | int i; |
1669 | int regno; | |
1670 | rtx reg = SET_DEST (x); | |
7a31a7bd | 1671 | |
1672 | if (reg == 0) | |
1673 | return 1; | |
1674 | ||
476d094d | 1675 | while (GET_CODE (reg) == SUBREG |
1676 | || GET_CODE (reg) == ZERO_EXTRACT | |
7a31a7bd | 1677 | || GET_CODE (reg) == STRICT_LOW_PART) |
1678 | reg = XEXP (reg, 0); | |
1679 | ||
4b303227 | 1680 | if (GET_CODE (reg) == PARALLEL) |
7a31a7bd | 1681 | { |
19cb6b50 | 1682 | int i; |
216b2683 | 1683 | |
7a31a7bd | 1684 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
4b303227 | 1685 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1686 | if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0))) | |
216b2683 | 1687 | return 1; |
216b2683 | 1688 | |
7a31a7bd | 1689 | return 0; |
1690 | } | |
1691 | ||
8ad4c111 | 1692 | if (!REG_P (reg)) |
7a31a7bd | 1693 | return 1; |
1694 | ||
1695 | regno = REGNO (reg); | |
1696 | ||
1697 | if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) | |
1698 | { | |
1699 | /* Global registers are assumed live. */ | |
1700 | return 0; | |
1701 | } | |
1702 | else | |
1703 | { | |
1704 | if (regno < FIRST_PSEUDO_REGISTER) | |
1705 | { | |
1706 | /* Check for hard registers. */ | |
67d6c12b | 1707 | int j = hard_regno_nregs[regno][GET_MODE (reg)]; |
7a31a7bd | 1708 | while (--j >= 0) |
1709 | { | |
1710 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) | |
1711 | { | |
aae97b21 | 1712 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
3072d30e | 1713 | int t = bitmap_bit_p (¬_in_df, b->index); |
7a31a7bd | 1714 | |
6a1cdb4d | 1715 | /* We can have split blocks that were recently generated. |
f0b5f617 | 1716 | Such blocks are always outside the current region. */ |
3072d30e | 1717 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1718 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); | |
1719 | ||
deb2741b | 1720 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j)) |
3072d30e | 1721 | return 0; |
7a31a7bd | 1722 | } |
1723 | } | |
1724 | } | |
1725 | else | |
1726 | { | |
f024691d | 1727 | /* Check for pseudo registers. */ |
7a31a7bd | 1728 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) |
1729 | { | |
aae97b21 | 1730 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
3072d30e | 1731 | int t = bitmap_bit_p (¬_in_df, b->index); |
7a31a7bd | 1732 | |
3072d30e | 1733 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1734 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); | |
1735 | ||
deb2741b | 1736 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno)) |
3072d30e | 1737 | return 0; |
7a31a7bd | 1738 | } |
1739 | } | |
1740 | } | |
1741 | ||
1742 | return 1; | |
1743 | } | |
1744 | ||
1745 | /* If x is a set of a register R, mark that R is alive at the beginning | |
1746 | of every update-block of src. */ | |
1747 | ||
1748 | static void | |
60b8c5b3 | 1749 | update_live_1 (int src, rtx x) |
7a31a7bd | 1750 | { |
19cb6b50 | 1751 | int i; |
1752 | int regno; | |
1753 | rtx reg = SET_DEST (x); | |
7a31a7bd | 1754 | |
1755 | if (reg == 0) | |
1756 | return; | |
1757 | ||
476d094d | 1758 | while (GET_CODE (reg) == SUBREG |
1759 | || GET_CODE (reg) == ZERO_EXTRACT | |
7a31a7bd | 1760 | || GET_CODE (reg) == STRICT_LOW_PART) |
1761 | reg = XEXP (reg, 0); | |
1762 | ||
4b303227 | 1763 | if (GET_CODE (reg) == PARALLEL) |
7a31a7bd | 1764 | { |
19cb6b50 | 1765 | int i; |
216b2683 | 1766 | |
7a31a7bd | 1767 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
4b303227 | 1768 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1769 | update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)); | |
216b2683 | 1770 | |
7a31a7bd | 1771 | return; |
1772 | } | |
1773 | ||
8ad4c111 | 1774 | if (!REG_P (reg)) |
7a31a7bd | 1775 | return; |
1776 | ||
1777 | /* Global registers are always live, so the code below does not apply | |
1778 | to them. */ | |
1779 | ||
1780 | regno = REGNO (reg); | |
1781 | ||
771d4616 | 1782 | if (! HARD_REGISTER_NUM_P (regno) |
1783 | || !global_regs[regno]) | |
7a31a7bd | 1784 | { |
771d4616 | 1785 | for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++) |
7a31a7bd | 1786 | { |
771d4616 | 1787 | basic_block b = candidate_table[src].update_bbs.first_member[i]; |
7a31a7bd | 1788 | |
771d4616 | 1789 | if (HARD_REGISTER_NUM_P (regno)) |
1790 | bitmap_set_range (df_get_live_in (b), regno, | |
1791 | hard_regno_nregs[regno][GET_MODE (reg)]); | |
1792 | else | |
1793 | bitmap_set_bit (df_get_live_in (b), regno); | |
7a31a7bd | 1794 | } |
1795 | } | |
1796 | } | |
1797 | ||
1798 | /* Return 1 if insn can be speculatively moved from block src to trg, | |
1799 | otherwise return 0. Called before the first insertion of insn into the | |
1800 | ready list, or before scheduling. */ | |
1801 | ||
1802 | static int | |
60b8c5b3 | 1803 | check_live (rtx insn, int src) |
7a31a7bd | 1804 | { |
1805 | /* Find the registers set by instruction. */ | |
1806 | if (GET_CODE (PATTERN (insn)) == SET | |
1807 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
1808 | return check_live_1 (src, PATTERN (insn)); | |
1809 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
1810 | { | |
1811 | int j; | |
1812 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) | |
1813 | if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET | |
1814 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) | |
1815 | && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j))) | |
1816 | return 0; | |
1817 | ||
1818 | return 1; | |
1819 | } | |
1820 | ||
1821 | return 1; | |
1822 | } | |
1823 | ||
1824 | /* Update the live registers info after insn was moved speculatively from | |
1825 | block src to trg. */ | |
1826 | ||
1827 | static void | |
60b8c5b3 | 1828 | update_live (rtx insn, int src) |
7a31a7bd | 1829 | { |
1830 | /* Find the registers set by instruction. */ | |
1831 | if (GET_CODE (PATTERN (insn)) == SET | |
1832 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
1833 | update_live_1 (src, PATTERN (insn)); | |
1834 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
1835 | { | |
1836 | int j; | |
1837 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) | |
1838 | if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET | |
1839 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) | |
1840 | update_live_1 (src, XVECEXP (PATTERN (insn), 0, j)); | |
1841 | } | |
1842 | } | |
1843 | ||
a8b24921 | 1844 | /* Nonzero if block bb_to is equal to, or reachable from, block bb_from. */ |
7a31a7bd | 1845 | #define IS_REACHABLE(bb_from, bb_to) \ |
40734805 | 1846 | (bb_from == bb_to \ |
7a31a7bd | 1847 | || IS_RGN_ENTRY (bb_from) \ |
08b7917c | 1848 | || (bitmap_bit_p (ancestor_edges[bb_to], \ |
f5a6b05f | 1849 | EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \ |
1850 | BB_TO_BLOCK (bb_from))))))) | |
7a31a7bd | 1851 | |
7a31a7bd | 1852 | /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */ |
1853 | ||
1854 | static void | |
60b8c5b3 | 1855 | set_spec_fed (rtx load_insn) |
7a31a7bd | 1856 | { |
93f6b030 | 1857 | sd_iterator_def sd_it; |
1858 | dep_t dep; | |
7a31a7bd | 1859 | |
93f6b030 | 1860 | FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep) |
1861 | if (DEP_TYPE (dep) == REG_DEP_TRUE) | |
1862 | FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1; | |
9997bd27 | 1863 | } |
7a31a7bd | 1864 | |
1865 | /* On the path from the insn to load_insn_bb, find a conditional | |
1866 | branch depending on insn that guards the speculative load. */ | |
1867 | ||
1868 | static int | |
60b8c5b3 | 1869 | find_conditional_protection (rtx insn, int load_insn_bb) |
7a31a7bd | 1870 | { |
93f6b030 | 1871 | sd_iterator_def sd_it; |
1872 | dep_t dep; | |
7a31a7bd | 1873 | |
1874 | /* Iterate through DEF-USE forward dependences. */ | |
93f6b030 | 1875 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
7a31a7bd | 1876 | { |
93f6b030 | 1877 | rtx next = DEP_CON (dep); |
9997bd27 | 1878 | |
7a31a7bd | 1879 | if ((CONTAINING_RGN (BLOCK_NUM (next)) == |
1880 | CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb))) | |
1881 | && IS_REACHABLE (INSN_BB (next), load_insn_bb) | |
1882 | && load_insn_bb != INSN_BB (next) | |
93f6b030 | 1883 | && DEP_TYPE (dep) == REG_DEP_TRUE |
6d7dc5b9 | 1884 | && (JUMP_P (next) |
7a31a7bd | 1885 | || find_conditional_protection (next, load_insn_bb))) |
1886 | return 1; | |
1887 | } | |
1888 | return 0; | |
1889 | } /* find_conditional_protection */ | |
1890 | ||
1891 | /* Returns 1 if an insn1 that participates in the computation | |
1892 | of load_insn's address also feeds a conditional branch that | |
f0b5f617 | 1893 | guards load_insn. This is true if we find two DEF-USE |
7a31a7bd | 1894 | chains: |
1895 | insn1 -> ... -> conditional-branch | |
1896 | insn1 -> ... -> load_insn, | |
f0b5f617 | 1897 | and if a flow path exists: |
7a31a7bd | 1898 | insn1 -> ... -> conditional-branch -> ... -> load_insn, |
1899 | and if insn1 is on the path | |
1900 | region-entry -> ... -> bb_trg -> ... load_insn. | |
1901 | ||
9997bd27 | 1902 | Locate insn1 by climbing on INSN_BACK_DEPS from load_insn. |
1903 | Locate the branch by following INSN_FORW_DEPS from insn1. */ | |
7a31a7bd | 1904 | |
1905 | static int | |
60b8c5b3 | 1906 | is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 1907 | { |
93f6b030 | 1908 | sd_iterator_def sd_it; |
1909 | dep_t dep; | |
7a31a7bd | 1910 | |
93f6b030 | 1911 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep) |
7a31a7bd | 1912 | { |
93f6b030 | 1913 | rtx insn1 = DEP_PRO (dep); |
7a31a7bd | 1914 | |
1915 | /* Must be a DEF-USE dependence upon non-branch. */ | |
93f6b030 | 1916 | if (DEP_TYPE (dep) != REG_DEP_TRUE |
6d7dc5b9 | 1917 | || JUMP_P (insn1)) |
7a31a7bd | 1918 | continue; |
1919 | ||
1920 | /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */ | |
1921 | if (INSN_BB (insn1) == bb_src | |
1922 | || (CONTAINING_RGN (BLOCK_NUM (insn1)) | |
1923 | != CONTAINING_RGN (BB_TO_BLOCK (bb_src))) | |
1924 | || (!IS_REACHABLE (bb_trg, INSN_BB (insn1)) | |
1925 | && !IS_REACHABLE (INSN_BB (insn1), bb_trg))) | |
1926 | continue; | |
1927 | ||
1928 | /* Now search for the conditional-branch. */ | |
1929 | if (find_conditional_protection (insn1, bb_src)) | |
1930 | return 1; | |
1931 | ||
1932 | /* Recursive step: search another insn1, "above" current insn1. */ | |
1933 | return is_conditionally_protected (insn1, bb_src, bb_trg); | |
1934 | } | |
1935 | ||
1936 | /* The chain does not exist. */ | |
1937 | return 0; | |
1938 | } /* is_conditionally_protected */ | |
1939 | ||
1940 | /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence | |
1941 | load_insn can move speculatively from bb_src to bb_trg. All the | |
1942 | following must hold: | |
1943 | ||
1944 | (1) both loads have 1 base register (PFREE_CANDIDATEs). | |
1945 | (2) load_insn and insn2 have a def-use dependence upon | |
1946 | the same insn 'insn1'. | |
1947 | (3) either insn2 is in bb_trg, or: | |
1948 | - there's only one split-block, and | |
1949 | - insn2 is in that single split (escape) block. | |
1950 | ||
1951 | From all these we can conclude that the two loads access memory | |
1952 | addresses that differ at most by a constant, and hence if moving | |
1953 | load_insn would cause an exception, it would have been caused by | |
1954 | load2 anyhow. */ | |
1955 | ||
1956 | static int | |
60b8c5b3 | 1957 | is_pfree (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 1958 | { |
93f6b030 | 1959 | sd_iterator_def back_sd_it; |
1960 | dep_t back_dep; | |
19cb6b50 | 1961 | candidate *candp = candidate_table + bb_src; |
7a31a7bd | 1962 | |
1963 | if (candp->split_bbs.nr_members != 1) | |
1964 | /* Must have exactly one escape block. */ | |
1965 | return 0; | |
1966 | ||
93f6b030 | 1967 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) |
7a31a7bd | 1968 | { |
93f6b030 | 1969 | rtx insn1 = DEP_PRO (back_dep); |
7a31a7bd | 1970 | |
93f6b030 | 1971 | if (DEP_TYPE (back_dep) == REG_DEP_TRUE) |
1972 | /* Found a DEF-USE dependence (insn1, load_insn). */ | |
7a31a7bd | 1973 | { |
93f6b030 | 1974 | sd_iterator_def fore_sd_it; |
1975 | dep_t fore_dep; | |
7a31a7bd | 1976 | |
93f6b030 | 1977 | FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep) |
7a31a7bd | 1978 | { |
93f6b030 | 1979 | rtx insn2 = DEP_CON (fore_dep); |
9997bd27 | 1980 | |
93f6b030 | 1981 | if (DEP_TYPE (fore_dep) == REG_DEP_TRUE) |
7a31a7bd | 1982 | { |
1983 | /* Found a DEF-USE dependence (insn1, insn2). */ | |
1984 | if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) | |
1985 | /* insn2 not guaranteed to be a 1 base reg load. */ | |
1986 | continue; | |
1987 | ||
1988 | if (INSN_BB (insn2) == bb_trg) | |
1989 | /* insn2 is the similar load, in the target block. */ | |
1990 | return 1; | |
1991 | ||
aae97b21 | 1992 | if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2)) |
7a31a7bd | 1993 | /* insn2 is a similar load, in a split-block. */ |
1994 | return 1; | |
1995 | } | |
1996 | } | |
1997 | } | |
1998 | } | |
1999 | ||
2000 | /* Couldn't find a similar load. */ | |
2001 | return 0; | |
2002 | } /* is_pfree */ | |
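/* An illustrative scenario for is_pfree (hypothetical insns, for
   exposition only):
     insn1:      r10 = r4 + 8
     load_insn:  r11 = [r10]        (in bb_src)
     insn2:      r12 = [r10 + 4]    (in bb_trg, or in the single split block)
   Both loads are PFREE_CANDIDATEs whose single base register r10 is
   defined by insn1, so their addresses differ only by a constant; if
   hoisting load_insn into bb_trg could fault, insn2 would already fault
   on (nearly) the same address.  */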
2003 | ||
7a31a7bd | 2004 | /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by |
2005 | a load moved speculatively, or if load_insn is protected by | |
2006 | a compare on load_insn's address). */ | |
2007 | ||
2008 | static int | |
60b8c5b3 | 2009 | is_prisky (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 2010 | { |
2011 | if (FED_BY_SPEC_LOAD (load_insn)) | |
2012 | return 1; | |
2013 | ||
93f6b030 | 2014 | if (sd_lists_empty_p (load_insn, SD_LIST_BACK)) |
7a31a7bd | 2015 | /* Dependence may 'hide' out of the region. */ |
2016 | return 1; | |
2017 | ||
2018 | if (is_conditionally_protected (load_insn, bb_src, bb_trg)) | |
2019 | return 1; | |
2020 | ||
2021 | return 0; | |
2022 | } | |
2023 | ||
2024 | /* Insn is a candidate to be moved speculatively from bb_src to bb_trg. | |
2025 | Return 1 if insn is exception-free (and the motion is valid) | |
2026 | and 0 otherwise. */ | |
2027 | ||
2028 | static int | |
60b8c5b3 | 2029 | is_exception_free (rtx insn, int bb_src, int bb_trg) |
7a31a7bd | 2030 | { |
2031 | int insn_class = haifa_classify_insn (insn); | |
2032 | ||
2033 | /* Handle non-load insns. */ | |
2034 | switch (insn_class) | |
2035 | { | |
2036 | case TRAP_FREE: | |
2037 | return 1; | |
2038 | case TRAP_RISKY: | |
2039 | return 0; | |
2040 | default:; | |
2041 | } | |
2042 | ||
2043 | /* Handle loads. */ | |
2044 | if (!flag_schedule_speculative_load) | |
2045 | return 0; | |
2046 | IS_LOAD_INSN (insn) = 1; | |
2047 | switch (insn_class) | |
2048 | { | |
2049 | case IFREE: | |
2050 | return 1; | |
2051 | case IRISKY: | |
2052 | return 0; | |
2053 | case PFREE_CANDIDATE: | |
2054 | if (is_pfree (insn, bb_src, bb_trg)) | |
2055 | return 1; | |
2056 | /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */ | |
2057 | case PRISKY_CANDIDATE: | |
2058 | if (!flag_schedule_speculative_load_dangerous | |
2059 | || is_prisky (insn, bb_src, bb_trg)) | |
2060 | return 0; | |
2061 | break; | |
2062 | default:; | |
2063 | } | |
2064 | ||
2065 | return flag_schedule_speculative_load_dangerous; | |
2066 | } | |
2067 | \f | |
2068 | /* The number of insns from the current block scheduled so far. */ | |
2069 | static int sched_target_n_insns; | |
2070 | /* The number of insns from the current block to be scheduled in total. */ | |
2071 | static int target_n_insns; | |
2072 | /* The number of insns from the entire region scheduled so far. */ | |
2073 | static int sched_n_insns; | |
2074 | ||
2075 | /* Implementations of the sched_info functions for region scheduling. */ | |
e4897000 | 2076 | static void init_ready_list (void); |
60b8c5b3 | 2077 | static int can_schedule_ready_p (rtx); |
d2412f57 | 2078 | static void begin_schedule_ready (rtx); |
6a1cdb4d | 2079 | static ds_t new_ready (rtx, ds_t); |
60b8c5b3 | 2080 | static int schedule_more_p (void); |
e1ab7874 | 2081 | static const char *rgn_print_insn (const_rtx, int); |
60b8c5b3 | 2082 | static int rgn_rank (rtx, rtx); |
6aed13f1 | 2083 | static void compute_jump_reg_dependencies (rtx, regset); |
7a31a7bd | 2084 | |
6a1cdb4d | 2085 | /* Functions for speculative scheduling. */ |
e1ab7874 | 2086 | static void rgn_add_remove_insn (rtx, int); |
2087 | static void rgn_add_block (basic_block, basic_block); | |
2088 | static void rgn_fix_recovery_cfg (int, int, int); | |
6a1cdb4d | 2089 | static basic_block advance_target_bb (basic_block, rtx); |
6a1cdb4d | 2090 | |
7a31a7bd | 2091 | /* Return nonzero if there are more insns that should be scheduled. */ |
2092 | ||
2093 | static int | |
60b8c5b3 | 2094 | schedule_more_p (void) |
7a31a7bd | 2095 | { |
6a1cdb4d | 2096 | return sched_target_n_insns < target_n_insns; |
7a31a7bd | 2097 | } |
2098 | ||
2099 | /* Add all insns that are initially ready to the ready list. Called | |
2100 | once before scheduling a set of insns. */ | |
2101 | ||
2102 | static void | |
e4897000 | 2103 | init_ready_list (void) |
7a31a7bd | 2104 | { |
2105 | rtx prev_head = current_sched_info->prev_head; | |
2106 | rtx next_tail = current_sched_info->next_tail; | |
2107 | int bb_src; | |
2108 | rtx insn; | |
2109 | ||
2110 | target_n_insns = 0; | |
2111 | sched_target_n_insns = 0; | |
2112 | sched_n_insns = 0; | |
2113 | ||
2114 | /* Print debugging information. */ | |
2115 | if (sched_verbose >= 5) | |
a2819fc2 | 2116 | debug_rgn_dependencies (target_bb); |
7a31a7bd | 2117 | |
2118 | /* Prepare current target block info. */ | |
2119 | if (current_nr_blocks > 1) | |
e1ab7874 | 2120 | compute_trg_info (target_bb); |
7a31a7bd | 2121 | |
2122 | /* Initialize ready list with all 'ready' insns in target block. | |
2123 | Count number of insns in the target block being scheduled. */ | |
2124 | for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) | |
48e1416a | 2125 | { |
d452a169 | 2126 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); |
2127 | TODO_SPEC (insn) = HARD_DEP; | |
e4897000 | 2128 | try_ready (insn); |
e26579fc | 2129 | target_n_insns++; |
6a1cdb4d | 2130 | |
2131 | gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL)); | |
7a31a7bd | 2132 | } |
2133 | ||
2134 | /* Add to ready list all 'ready' insns in valid source blocks. | |
2135 | For speculative insns, check-live, exception-free, and | |
2136 | issue-delay. */ | |
2137 | for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++) | |
2138 | if (IS_VALID (bb_src)) | |
2139 | { | |
2140 | rtx src_head; | |
2141 | rtx src_next_tail; | |
2142 | rtx tail, head; | |
2143 | ||
6a1cdb4d | 2144 | get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src), |
2145 | &head, &tail); | |
7a31a7bd | 2146 | src_next_tail = NEXT_INSN (tail); |
2147 | src_head = head; | |
2148 | ||
2149 | for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn)) | |
9221ec44 | 2150 | if (INSN_P (insn)) |
d452a169 | 2151 | { |
2152 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); | |
2153 | TODO_SPEC (insn) = HARD_DEP; | |
2154 | try_ready (insn); | |
2155 | } | |
7a31a7bd | 2156 | } |
2157 | } | |
2158 | ||
2159 | /* Called after taking INSN from the ready list. Returns nonzero if this | |
2160 | insn can be scheduled, zero if we should silently discard it. */ | |
2161 | ||
2162 | static int | |
60b8c5b3 | 2163 | can_schedule_ready_p (rtx insn) |
7a31a7bd | 2164 | { |
6a1cdb4d | 2165 | /* An interblock motion? */ |
2166 | if (INSN_BB (insn) != target_bb | |
2167 | && IS_SPECULATIVE_INSN (insn) | |
2168 | && !check_live (insn, INSN_BB (insn))) | |
48e1416a | 2169 | return 0; |
6a1cdb4d | 2170 | else |
2171 | return 1; | |
2172 | } | |
2295df67 | 2173 | |
9ca2c29a | 2174 | /* Updates counters and other information. Split from can_schedule_ready_p () |
6a1cdb4d | 2175 | because when we schedule insn speculatively then insn passed to |
2176 | can_schedule_ready_p () differs from the one passed to | |
2177 | begin_schedule_ready (). */ | |
2178 | static void | |
d2412f57 | 2179 | begin_schedule_ready (rtx insn) |
6a1cdb4d | 2180 | { |
7a31a7bd | 2181 | /* An interblock motion? */ |
2182 | if (INSN_BB (insn) != target_bb) | |
2183 | { | |
7a31a7bd | 2184 | if (IS_SPECULATIVE_INSN (insn)) |
2185 | { | |
6a1cdb4d | 2186 | gcc_assert (check_live (insn, INSN_BB (insn))); |
2187 | ||
7a31a7bd | 2188 | update_live (insn, INSN_BB (insn)); |
2189 | ||
2190 | /* For speculative load, mark insns fed by it. */ | |
2191 | if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn)) | |
2192 | set_spec_fed (insn); | |
2193 | ||
2194 | nr_spec++; | |
2195 | } | |
2196 | nr_inter++; | |
7a31a7bd | 2197 | } |
2198 | else | |
2199 | { | |
2200 | /* In block motion. */ | |
2201 | sched_target_n_insns++; | |
2202 | } | |
2203 | sched_n_insns++; | |
7a31a7bd | 2204 | } |
2205 | ||
6a1cdb4d | 2206 | /* Called after NEXT has all its hard dependencies resolved and the speculation |
2207 | of type TS is enough to overcome them all. | |
2208 | Return nonzero if it should be moved to the ready list or the queue, or zero | |
2209 | if we should silently discard it. */ | |
2210 | static ds_t | |
2211 | new_ready (rtx next, ds_t ts) | |
7a31a7bd | 2212 | { |
6a1cdb4d | 2213 | if (INSN_BB (next) != target_bb) |
2214 | { | |
2215 | int not_ex_free = 0; | |
2216 | ||
2217 | /* For speculative insns, before inserting to ready/queue, | |
48e1416a | 2218 | check live, exception-free, and issue-delay. */ |
6a1cdb4d | 2219 | if (!IS_VALID (INSN_BB (next)) |
7a31a7bd | 2220 | || CANT_MOVE (next) |
2221 | || (IS_SPECULATIVE_INSN (next) | |
67900a4f | 2222 | && ((recog_memoized (next) >= 0 |
48e1416a | 2223 | && min_insn_conflict_delay (curr_state, next, next) |
6a1cdb4d | 2224 | > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY)) |
fd27912f | 2225 | || IS_SPECULATION_CHECK_P (next) |
7a31a7bd | 2226 | || !check_live (next, INSN_BB (next)) |
6a1cdb4d | 2227 | || (not_ex_free = !is_exception_free (next, INSN_BB (next), |
2228 | target_bb))))) | |
2229 | { | |
2230 | if (not_ex_free | |
2231 | /* We are here because is_exception_free () == false. | |
2232 | But we possibly can handle that with control speculation. */ | |
e1ab7874 | 2233 | && sched_deps_info->generate_spec_deps |
2234 | && spec_info->mask & BEGIN_CONTROL) | |
42de5f34 | 2235 | { |
2236 | ds_t new_ds; | |
2237 | ||
2238 | /* Add control speculation to NEXT's dependency type. */ | |
2239 | new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK); | |
2240 | ||
2241 | /* Check if NEXT can be speculated with new dependency type. */ | |
2242 | if (sched_insn_is_legitimate_for_speculation_p (next, new_ds)) | |
2243 | /* Here we got new control-speculative instruction. */ | |
2244 | ts = new_ds; | |
2245 | else | |
2246 | /* NEXT isn't ready yet. */ | |
d452a169 | 2247 | ts = DEP_POSTPONED; |
42de5f34 | 2248 | } |
6a1cdb4d | 2249 | else |
42de5f34 | 2250 | /* NEXT isn't ready yet. */ |
d452a169 | 2251 | ts = DEP_POSTPONED; |
6a1cdb4d | 2252 | } |
2253 | } | |
48e1416a | 2254 | |
6a1cdb4d | 2255 | return ts; |
7a31a7bd | 2256 | } |
2257 | ||
2258 | /* Return a string that contains the insn uid and optionally anything else | |
2259 | necessary to identify this insn in an output. It's valid to use a | |
2260 | static buffer for this. The ALIGNED parameter should cause the string | |
2261 | to be formatted so that multiple output lines will line up nicely. */ | |
2262 | ||
2263 | static const char * | |
e1ab7874 | 2264 | rgn_print_insn (const_rtx insn, int aligned) |
7a31a7bd | 2265 | { |
2266 | static char tmp[80]; | |
2267 | ||
2268 | if (aligned) | |
2269 | sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn)); | |
2270 | else | |
2271 | { | |
7a31a7bd | 2272 | if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb) |
cda0a5f5 | 2273 | sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn)); |
2274 | else | |
2275 | sprintf (tmp, "%d", INSN_UID (insn)); | |
7a31a7bd | 2276 | } |
2277 | return tmp; | |
2278 | } | |
2279 | ||
2280 | /* Compare priority of two insns. Return a positive number if the second | |
2281 | insn is to be preferred for scheduling, and a negative one if the first | |
2282 | is to be preferred. Zero if they are equally good. */ | |
2283 | ||
2284 | static int | |
60b8c5b3 | 2285 | rgn_rank (rtx insn1, rtx insn2) |
7a31a7bd | 2286 | { |
2287 | /* Some comparisons make sense in interblock scheduling only. */ | |
2288 | if (INSN_BB (insn1) != INSN_BB (insn2)) | |
2289 | { | |
2290 | int spec_val, prob_val; | |
2291 | ||
2292 | /* Prefer an inblock motion over an interblock motion. */ | |
2293 | if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb)) | |
2294 | return 1; | |
2295 | if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb)) | |
2296 | return -1; | |
2297 | ||
2298 | /* Prefer a useful motion over a speculative one. */ | |
2299 | spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2); | |
2300 | if (spec_val) | |
2301 | return spec_val; | |
2302 | ||
2303 | /* Prefer a more probable (speculative) insn. */ | |
2304 | prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1); | |
2305 | if (prob_val) | |
2306 | return prob_val; | |
2307 | } | |
2308 | return 0; | |
2309 | } | |
2310 | ||
d6141c0c | 2311 | /* NEXT is an instruction that depends on INSN (a backward dependence); |
2312 | return nonzero if we should include this dependence in priority | |
2313 | calculations. */ | |
2314 | ||
e1ab7874 | 2315 | int |
60b8c5b3 | 2316 | contributes_to_priority (rtx next, rtx insn) |
d6141c0c | 2317 | { |
6a1cdb4d | 2318 | /* NEXT and INSN reside in one ebb. */ |
2319 | return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn)); | |
d6141c0c | 2320 | } |
2321 | ||
6aed13f1 | 2322 | /* INSN is a JUMP_INSN. Store the set of registers that must be |
2323 | considered as used by this jump in USED. */ | |
d6141c0c | 2324 | |
2325 | static void | |
60b8c5b3 | 2326 | compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED, |
6aed13f1 | 2327 | regset used ATTRIBUTE_UNUSED) |
d6141c0c | 2328 | { |
2329 | /* Nothing to do here, since we postprocess jumps in | |
2330 | add_branch_dependences. */ | |
2331 | } | |
2332 | ||
48e1416a | 2333 | /* This variable holds common_sched_info hooks and data relevant to |
e1ab7874 | 2334 | the interblock scheduler. */ |
2335 | static struct common_sched_info_def rgn_common_sched_info; | |
2336 | ||
2337 | ||
2338 | /* This holds data for the dependence analysis relevant to | |
2339 | the interblock scheduler. */ | |
2340 | static struct sched_deps_info_def rgn_sched_deps_info; | |
2341 | ||
2342 | /* This holds constant data used for initializing the above structure | |
2343 | for the Haifa scheduler. */ | |
2344 | static const struct sched_deps_info_def rgn_const_sched_deps_info = | |
2345 | { | |
2346 | compute_jump_reg_dependencies, | |
2347 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2348 | 0, 0, 0 | |
2349 | }; | |
2350 | ||
2351 | /* Same as above, but for the selective scheduler. */ | |
2352 | static const struct sched_deps_info_def rgn_const_sel_sched_deps_info = | |
2353 | { | |
2354 | compute_jump_reg_dependencies, | |
2355 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2356 | 0, 0, 0 | |
2357 | }; | |
2358 | ||
4db82bc9 | 2359 | /* Return true if scheduling INSN will finish the scheduling of the |
2360 | current block. */ | |
2361 | static bool | |
2362 | rgn_insn_finishes_block_p (rtx insn) | |
2363 | { | |
2364 | if (INSN_BB (insn) == target_bb | |
2365 | && sched_target_n_insns + 1 == target_n_insns) | |
2366 | /* INSN is the last not-scheduled instruction in the current block. */ | |
2367 | return true; | |
2368 | ||
2369 | return false; | |
2370 | } | |
2371 | ||
7a31a7bd | 2372 | /* Used in schedule_insns to initialize current_sched_info for scheduling |
2373 | regions (or single basic blocks). */ | |
2374 | ||
e1ab7874 | 2375 | static const struct haifa_sched_info rgn_const_sched_info = |
7a31a7bd | 2376 | { |
2377 | init_ready_list, | |
2378 | can_schedule_ready_p, | |
2379 | schedule_more_p, | |
2380 | new_ready, | |
2381 | rgn_rank, | |
2382 | rgn_print_insn, | |
d6141c0c | 2383 | contributes_to_priority, |
4db82bc9 | 2384 | rgn_insn_finishes_block_p, |
7a31a7bd | 2385 | |
2386 | NULL, NULL, | |
2387 | NULL, NULL, | |
e1ab7874 | 2388 | 0, 0, |
4d64d9a4 | 2389 | |
e1ab7874 | 2390 | rgn_add_remove_insn, |
6a1cdb4d | 2391 | begin_schedule_ready, |
d2412f57 | 2392 | NULL, |
6a1cdb4d | 2393 | advance_target_bb, |
e2f4a6ff | 2394 | NULL, NULL, |
3072d30e | 2395 | SCHED_RGN |
7a31a7bd | 2396 | }; |
2397 | ||
e1ab7874 | 2398 | /* This variable holds the data and hooks needed by the Haifa scheduler backend |
2399 | for the interblock scheduler frontend. */ | |
2400 | static struct haifa_sched_info rgn_sched_info; | |
2401 | ||
2402 | /* Returns maximum priority that an insn was assigned to. */ | |
2403 | ||
2404 | int | |
2405 | get_rgn_sched_max_insns_priority (void) | |
2406 | { | |
2407 | return rgn_sched_info.sched_max_insns_priority; | |
2408 | } | |
2409 | ||
24dd0668 | 2410 | /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */ |
cbf780cc | 2411 | |
2412 | static bool | |
60b8c5b3 | 2413 | sets_likely_spilled (rtx pat) |
cbf780cc | 2414 | { |
2415 | bool ret = false; | |
2416 | note_stores (pat, sets_likely_spilled_1, &ret); | |
2417 | return ret; | |
2418 | } | |
2419 | ||
2420 | static void | |
81a410b1 | 2421 | sets_likely_spilled_1 (rtx x, const_rtx pat, void *data) |
cbf780cc | 2422 | { |
2423 | bool *ret = (bool *) data; | |
2424 | ||
2425 | if (GET_CODE (pat) == SET | |
2426 | && REG_P (x) | |
24dd0668 | 2427 | && HARD_REGISTER_P (x) |
2428 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x)))) | |
cbf780cc | 2429 | *ret = true; |
2430 | } | |
2431 | ||
566e7db2 | 2432 | /* A bitmap to note insns that participate in any dependency. Used in |
2433 | add_branch_dependences. */ | |
2434 | static sbitmap insn_referenced; | |
e1ab7874 | 2435 | |
7a31a7bd | 2436 | /* Add dependences so that branches are scheduled to run last in their |
2437 | block. */ | |
7a31a7bd | 2438 | static void |
60b8c5b3 | 2439 | add_branch_dependences (rtx head, rtx tail) |
7a31a7bd | 2440 | { |
2441 | rtx insn, last; | |
2442 | ||
cbaab9a3 | 2443 | /* For all branches, calls, uses, clobbers, cc0 setters, and instructions |
2444 | that can throw exceptions, force them to remain in order at the end of | |
2445 | the block by adding dependencies and giving the last a high priority. | |
2446 | There may be notes present, and prev_head may also be a note. | |
7a31a7bd | 2447 | |
2448 | Branches must obviously remain at the end. Calls should remain at the | |
2449 | end since moving them results in worse register allocation. Uses remain | |
cbf780cc | 2450 | at the end to ensure proper register allocation. |
2451 | ||
40e55fbb | 2452 | cc0 setters remain at the end because they can't be moved away from |
cbf780cc | 2453 | their cc0 user. |
2454 | ||
681b9609 | 2455 | Predecessors of SCHED_GROUP_P instructions at the end remain at the end. |
2456 | ||
e6a25dc9 | 2457 | COND_EXEC insns cannot be moved past a branch (see e.g. PR17808). |
2458 | ||
24dd0668 | 2459 | Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return |
2460 | values) are not moved before reload because we can wind up with register | |
cbf780cc | 2461 | allocation failures. */ |
2462 | ||
9845d120 | 2463 | while (tail != head && DEBUG_INSN_P (tail)) |
2464 | tail = PREV_INSN (tail); | |
2465 | ||
7a31a7bd | 2466 | insn = tail; |
2467 | last = 0; | |
6d7dc5b9 | 2468 | while (CALL_P (insn) |
91f71fa3 | 2469 | || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) |
6d7dc5b9 | 2470 | || (NONJUMP_INSN_P (insn) |
7a31a7bd | 2471 | && (GET_CODE (PATTERN (insn)) == USE |
2472 | || GET_CODE (PATTERN (insn)) == CLOBBER | |
cbaab9a3 | 2473 | || can_throw_internal (insn) |
7a31a7bd | 2474 | #ifdef HAVE_cc0 |
2475 | || sets_cc0_p (PATTERN (insn)) | |
2476 | #endif | |
cbf780cc | 2477 | || (!reload_completed |
2478 | && sets_likely_spilled (PATTERN (insn))))) | |
681b9609 | 2479 | || NOTE_P (insn) |
2480 | || (last != 0 && SCHED_GROUP_P (last))) | |
7a31a7bd | 2481 | { |
6d7dc5b9 | 2482 | if (!NOTE_P (insn)) |
7a31a7bd | 2483 | { |
9997bd27 | 2484 | if (last != 0 |
93f6b030 | 2485 | && sd_find_dep_between (insn, last, false) == NULL) |
7a31a7bd | 2486 | { |
e6a25dc9 | 2487 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2488 | add_dependence (last, insn, REG_DEP_ANTI); | |
08b7917c | 2489 | bitmap_set_bit (insn_referenced, INSN_LUID (insn)); |
7a31a7bd | 2490 | } |
2491 | ||
2492 | CANT_MOVE (insn) = 1; | |
2493 | ||
2494 | last = insn; | |
7a31a7bd | 2495 | } |
2496 | ||
2497 | /* Don't overrun the bounds of the basic block. */ | |
2498 | if (insn == head) | |
2499 | break; | |
2500 | ||
9845d120 | 2501 | do |
2502 | insn = PREV_INSN (insn); | |
2503 | while (insn != head && DEBUG_INSN_P (insn)); | |
7a31a7bd | 2504 | } |
2505 | ||
2506 | /* Make sure these insns are scheduled last in their block. */ | |
2507 | insn = last; | |
2508 | if (insn != 0) | |
2509 | while (insn != head) | |
2510 | { | |
2511 | insn = prev_nonnote_insn (insn); | |
2512 | ||
08b7917c | 2513 | if (bitmap_bit_p (insn_referenced, INSN_LUID (insn)) |
9845d120 | 2514 | || DEBUG_INSN_P (insn)) |
7a31a7bd | 2515 | continue; |
2516 | ||
e6a25dc9 | 2517 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2518 | add_dependence (last, insn, REG_DEP_ANTI); | |
7a31a7bd | 2519 | } |
e6a25dc9 | 2520 | |
751d3ba7 | 2521 | if (!targetm.have_conditional_execution ()) |
2522 | return; | |
2523 | ||
e6a25dc9 | 2524 | /* Finally, if the block ends in a jump, and we are doing intra-block |
2525 | scheduling, make sure that the branch depends on any COND_EXEC insns | |
2526 | inside the block to avoid moving the COND_EXECs past the branch insn. | |
2527 | ||
2528 | We only have to do this after reload, because (1) before reload there | |
2529 | are no COND_EXEC insns, and (2) the region scheduler is an intra-block | |
2530 | scheduler after reload. | |
2531 | ||
2532 | FIXME: We could in some cases move COND_EXEC insns past the branch if | |
2533 | this scheduler would be a little smarter. Consider this code: | |
2534 | ||
2535 | T = [addr] | |
2536 | C ? addr += 4 | |
f19b9016 | 2537 | !C ? X += 12 |
e6a25dc9 | 2538 | C ? T += 1 |
f19b9016 | 2539 | C ? jump foo |
e6a25dc9 | 2540 | |
2541 | On a target with a one cycle stall on a memory access the optimal | |
2542 | sequence would be: | |
2543 | ||
2544 | T = [addr] | |
2545 | C ? addr += 4 | |
2546 | C ? T += 1 | |
2547 | C ? jump foo | |
2548 | !C ? X += 12 | |
2549 | ||
2550 | We don't want to put the 'X += 12' before the branch because it just | |
2551 | wastes a cycle of execution time when the branch is taken. | |
2552 | ||
2553 | Note that in the example "!C" will always be true. That is another | |
2554 | possible improvement for handling COND_EXECs in this scheduler: it | |
2555 | could remove always-true predicates. */ | |
2556 | ||
91f71fa3 | 2557 | if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail))) |
e6a25dc9 | 2558 | return; |
2559 | ||
f19b9016 | 2560 | insn = tail; |
e6a25dc9 | 2561 | while (insn != head) |
2562 | { | |
f19b9016 | 2563 | insn = PREV_INSN (insn); |
2564 | ||
e6a25dc9 | 2565 | /* Note that we want to add this dependency even when |
2566 | sched_insns_conditions_mutex_p returns true. The whole point | |
2567 | is that we _want_ this dependency, even if these insns really | |
2568 | are independent. */ | |
2569 | if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC) | |
2570 | add_dependence (tail, insn, REG_DEP_ANTI); | |
e6a25dc9 | 2571 | } |
7a31a7bd | 2572 | } |
2573 | ||
2574 | /* Data structures for the computation of data dependences in a region. We | |
2575 | keep one `deps' structure for every basic block. Before analyzing the | |
2576 | data dependences for a bb, its variables are initialized as a function of | |
2577 | the variables of its predecessors. When the analysis for a bb completes, | |
2578 | we save the contents to the corresponding bb_deps[bb] variable. */ | |
2579 | ||
68e419a1 | 2580 | static struct deps_desc *bb_deps; |
7a31a7bd | 2581 | |
5deaeb50 | 2582 | static void |
60b8c5b3 | 2583 | concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p, |
2584 | rtx *old_mems_p) | |
5deaeb50 | 2585 | { |
2586 | rtx new_insns = *old_insns_p; | |
2587 | rtx new_mems = *old_mems_p; | |
2588 | ||
2589 | while (copy_insns) | |
2590 | { | |
2591 | new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns); | |
2592 | new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems); | |
2593 | copy_insns = XEXP (copy_insns, 1); | |
2594 | copy_mems = XEXP (copy_mems, 1); | |
2595 | } | |
2596 | ||
2597 | *old_insns_p = new_insns; | |
2598 | *old_mems_p = new_mems; | |
2599 | } | |
2600 | ||
e1ab7874 | 2601 | /* Join PRED_DEPS into SUCC_DEPS. */ |
2602 | void | |
68e419a1 | 2603 | deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps) |
e1ab7874 | 2604 | { |
2605 | unsigned reg; | |
2606 | reg_set_iterator rsi; | |
2607 | ||
2608 | /* The reg_last lists are inherited by successor. */ | |
2609 | EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi) | |
2610 | { | |
2611 | struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; | |
2612 | struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; | |
2613 | ||
2614 | succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); | |
2615 | succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); | |
a7dcf969 | 2616 | succ_rl->implicit_sets |
2617 | = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets); | |
e1ab7874 | 2618 | succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, |
2619 | succ_rl->clobbers); | |
2620 | succ_rl->uses_length += pred_rl->uses_length; | |
2621 | succ_rl->clobbers_length += pred_rl->clobbers_length; | |
2622 | } | |
2623 | IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); | |
2624 | ||
2625 | /* Mem read/write lists are inherited by successor. */ | |
2626 | concat_insn_mem_list (pred_deps->pending_read_insns, | |
2627 | pred_deps->pending_read_mems, | |
2628 | &succ_deps->pending_read_insns, | |
2629 | &succ_deps->pending_read_mems); | |
2630 | concat_insn_mem_list (pred_deps->pending_write_insns, | |
2631 | pred_deps->pending_write_mems, | |
2632 | &succ_deps->pending_write_insns, | |
2633 | &succ_deps->pending_write_mems); | |
2634 | ||
effd1640 | 2635 | succ_deps->pending_jump_insns |
2636 | = concat_INSN_LIST (pred_deps->pending_jump_insns, | |
2637 | succ_deps->pending_jump_insns); | |
e1ab7874 | 2638 | succ_deps->last_pending_memory_flush |
2639 | = concat_INSN_LIST (pred_deps->last_pending_memory_flush, | |
2640 | succ_deps->last_pending_memory_flush); | |
2641 | ||
2642 | succ_deps->pending_read_list_length += pred_deps->pending_read_list_length; | |
2643 | succ_deps->pending_write_list_length += pred_deps->pending_write_list_length; | |
2644 | succ_deps->pending_flush_length += pred_deps->pending_flush_length; | |
2645 | ||
2646 | /* last_function_call is inherited by successor. */ | |
2647 | succ_deps->last_function_call | |
2648 | = concat_INSN_LIST (pred_deps->last_function_call, | |
2649 | succ_deps->last_function_call); | |
2650 | ||
326d0c19 | 2651 | /* last_function_call_may_noreturn is inherited by successor. */ |
2652 | succ_deps->last_function_call_may_noreturn | |
2653 | = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn, | |
2654 | succ_deps->last_function_call_may_noreturn); | |
2655 | ||
e1ab7874 | 2656 | /* sched_before_next_call is inherited by successor. */ |
2657 | succ_deps->sched_before_next_call | |
2658 | = concat_INSN_LIST (pred_deps->sched_before_next_call, | |
2659 | succ_deps->sched_before_next_call); | |
2660 | } | |
2661 | ||
7a31a7bd | 2662 | /* After computing the dependencies for block BB, propagate the dependencies |
749c6f58 | 2663 | found in PRED_DEPS to the successors of the block. */ |
7a31a7bd | 2664 | static void |
68e419a1 | 2665 | propagate_deps (int bb, struct deps_desc *pred_deps) |
7a31a7bd | 2666 | { |
f5a6b05f | 2667 | basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb)); |
aae97b21 | 2668 | edge_iterator ei; |
2669 | edge e; | |
7a31a7bd | 2670 | |
2671 | /* bb's structures are inherited by its successors. */ | |
aae97b21 | 2672 | FOR_EACH_EDGE (e, ei, block->succs) |
2673 | { | |
aae97b21 | 2674 | /* Only bbs "below" bb, in the same region, are interesting. */ |
34154e27 | 2675 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun) |
aae97b21 | 2676 | || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index) |
2677 | || BLOCK_TO_BB (e->dest->index) <= bb) | |
2678 | continue; | |
5deaeb50 | 2679 | |
e1ab7874 | 2680 | deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps); |
aae97b21 | 2681 | } |
7a31a7bd | 2682 | |
5deaeb50 | 2683 | /* These lists should point to the right place, for correct |
2684 | freeing later. */ | |
2685 | bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns; | |
2686 | bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems; | |
2687 | bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns; | |
2688 | bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems; | |
effd1640 | 2689 | bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns; |
5deaeb50 | 2690 | |
2691 | /* Can't allow these to be freed twice. */ | |
2692 | pred_deps->pending_read_insns = 0; | |
2693 | pred_deps->pending_read_mems = 0; | |
2694 | pred_deps->pending_write_insns = 0; | |
2695 | pred_deps->pending_write_mems = 0; | |
effd1640 | 2696 | pred_deps->pending_jump_insns = 0; |
7a31a7bd | 2697 | } |
2698 | ||
93f6b030 | 2699 | /* Compute dependences inside bb. In a multi-block region: |
7a31a7bd | 2700 | (1) a bb is analyzed after its predecessors, and (2) the lists in |
2701 | effect at the end of bb (after analyzing for bb) are inherited by | |
de132707 | 2702 | bb's successors. |
7a31a7bd | 2703 | |
2704 | Specifically for reg-reg data dependences, the block insns are | |
a7dcf969 | 2705 | scanned by sched_analyze () top-to-bottom. Three lists are |
749c6f58 | 2706 | maintained by sched_analyze (): reg_last[].sets for register DEFs, |
a7dcf969 | 2707 | reg_last[].implicit_sets for implicit hard register DEFs, and |
2708 | reg_last[].uses for register USEs. | |
7a31a7bd | 2709 | |
2710 | When analysis is completed for bb, we update for its successors: | |
2711 | ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) | |
a7dcf969 | 2712 | ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb]) |
7a31a7bd | 2713 | ; - USES[succ] = Union (USES [succ], DEFS [bb]) |
2714 | ||
2715 | The mechanism for computing mem-mem data dependence is very | |
2716 | similar, and the result is interblock dependences in the region. */ | |
2717 | ||
2718 | static void | |
93f6b030 | 2719 | compute_block_dependences (int bb) |
7a31a7bd | 2720 | { |
2721 | rtx head, tail; | |
68e419a1 | 2722 | struct deps_desc tmp_deps; |
7a31a7bd | 2723 | |
2724 | tmp_deps = bb_deps[bb]; | |
2725 | ||
2726 | /* Do the analysis for this block. */ | |
6a1cdb4d | 2727 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2728 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
93f6b030 | 2729 | |
7a31a7bd | 2730 | sched_analyze (&tmp_deps, head, tail); |
e1ab7874 | 2731 | |
2732 | /* Selective scheduling handles control dependencies by itself. */ | |
2733 | if (!sel_sched_p ()) | |
2734 | add_branch_dependences (head, tail); | |
7a31a7bd | 2735 | |
2736 | if (current_nr_blocks > 1) | |
749c6f58 | 2737 | propagate_deps (bb, &tmp_deps); |
7a31a7bd | 2738 | |
2739 | /* Free up the INSN_LISTs. */ | |
2740 | free_deps (&tmp_deps); | |
93f6b030 | 2741 | |
2742 | if (targetm.sched.dependencies_evaluation_hook) | |
2743 | targetm.sched.dependencies_evaluation_hook (head, tail); | |
2744 | } | |
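/* An illustrative example of the propagation above (hypothetical insns,
   for exposition only): suppose bb0 of a region contains "r5 = ..." and a
   later block bb2 of the same region contains "... = r5".  After bb0 is
   analyzed, propagate_deps / deps_join concatenate bb0's reg_last[5].sets
   list into bb2's deps_desc, so when sched_analyze later scans bb2 it
   records a true (flow) dependence from the set in bb0 to the use in bb2,
   i.e. an interblock dependence of the region.  */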
2745 | ||
2746 | /* Free dependencies of instructions inside BB. */ | |
2747 | static void | |
2748 | free_block_dependencies (int bb) | |
2749 | { | |
2750 | rtx head; | |
2751 | rtx tail; | |
2752 | ||
2753 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
2754 | ||
9845d120 | 2755 | if (no_real_insns_p (head, tail)) |
2756 | return; | |
2757 | ||
93f6b030 | 2758 | sched_free_deps (head, tail, true); |
7a31a7bd | 2759 | } |
749c6f58 | 2760 | |
7a31a7bd | 2761 | /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add |
2762 | them to the unused_*_list variables, so that they can be reused. */ | |
2763 | ||
2764 | static void | |
60b8c5b3 | 2765 | free_pending_lists (void) |
7a31a7bd | 2766 | { |
2767 | int bb; | |
2768 | ||
2769 | for (bb = 0; bb < current_nr_blocks; bb++) | |
2770 | { | |
2771 | free_INSN_LIST_list (&bb_deps[bb].pending_read_insns); | |
2772 | free_INSN_LIST_list (&bb_deps[bb].pending_write_insns); | |
2773 | free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems); | |
2774 | free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems); | |
effd1640 | 2775 | free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns); |
7a31a7bd | 2776 | } |
2777 | } | |
2778 | \f | |
93f6b030 | 2779 | /* Print dependences for debugging starting from FROM_BB. |
2780 | Callable from debugger. */ | |
4b987fac | 2783 | DEBUG_FUNCTION void |
a2819fc2 | 2784 | debug_rgn_dependencies (int from_bb) |
7a31a7bd | 2785 | { |
2786 | int bb; | |
2787 | ||
a2819fc2 | 2788 | fprintf (sched_dump, |
2789 | ";; --------------- forward dependences: ------------ \n"); | |
2790 | ||
2791 | for (bb = from_bb; bb < current_nr_blocks; bb++) | |
7a31a7bd | 2792 | { |
67900a4f | 2793 | rtx head, tail; |
67900a4f | 2794 | |
6a1cdb4d | 2795 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
67900a4f | 2796 | fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n", |
2797 | BB_TO_BLOCK (bb), bb); | |
2798 | ||
a2819fc2 | 2799 | debug_dependencies (head, tail); |
2800 | } | |
2801 | } | |
67900a4f | 2802 | |
a2819fc2 | 2803 | /* Print dependency information for instructions between HEAD and TAIL. |
2804 | ??? This function would probably fit best in haifa-sched.c. */ | |
2805 | void debug_dependencies (rtx head, rtx tail) | |
2806 | { | |
2807 | rtx insn; | |
2808 | rtx next_tail = NEXT_INSN (tail); | |
2809 | ||
2810 | fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", | |
2811 | "insn", "code", "bb", "dep", "prio", "cost", | |
2812 | "reservation"); | |
2813 | fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", | |
2814 | "----", "----", "--", "---", "----", "----", | |
2815 | "-----------"); | |
7a31a7bd | 2816 | |
a2819fc2 | 2817 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
2818 | { | |
a2819fc2 | 2819 | if (! INSN_P (insn)) |
2820 | { | |
2821 | int n; | |
2822 | fprintf (sched_dump, ";; %6d ", INSN_UID (insn)); | |
2823 | if (NOTE_P (insn)) | |
7a31a7bd | 2824 | { |
ad4583d9 | 2825 | n = NOTE_KIND (insn); |
2826 | fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n)); | |
7a31a7bd | 2827 | } |
67900a4f | 2828 | else |
a2819fc2 | 2829 | fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn))); |
2830 | continue; | |
7a31a7bd | 2831 | } |
a2819fc2 | 2832 | |
2833 | fprintf (sched_dump, | |
2834 | ";; %s%5d%6d%6d%6d%6d%6d ", | |
2835 | (SCHED_GROUP_P (insn) ? "+" : " "), | |
2836 | INSN_UID (insn), | |
2837 | INSN_CODE (insn), | |
2838 | BLOCK_NUM (insn), | |
e1ab7874 | 2839 | sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK), |
2840 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 | |
2841 | : INSN_PRIORITY (insn)) | |
2842 | : INSN_PRIORITY (insn)), | |
2843 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 | |
2844 | : insn_cost (insn)) | |
2845 | : insn_cost (insn))); | |
a2819fc2 | 2846 | |
2847 | if (recog_memoized (insn) < 0) | |
2848 | fprintf (sched_dump, "nothing"); | |
2849 | else | |
2850 | print_reservation (sched_dump, insn); | |
2851 | ||
2852 | fprintf (sched_dump, "\t: "); | |
93f6b030 | 2853 | { |
2854 | sd_iterator_def sd_it; | |
2855 | dep_t dep; | |
2856 | ||
2857 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) | |
d452a169 | 2858 | fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)), |
2859 | DEP_NONREG (dep) ? "n" : "", | |
2860 | DEP_MULTIPLE (dep) ? "m" : ""); | |
93f6b030 | 2861 | } |
a2819fc2 | 2862 | fprintf (sched_dump, "\n"); |
7a31a7bd | 2863 | } |
a2819fc2 | 2864 | |
7a31a7bd | 2865 | fprintf (sched_dump, "\n"); |
2866 | } | |
2867 | \f | |
f045d41d | 2868 | /* Returns true if all the basic blocks of the current region have |
2869 | the BB_DISABLE_SCHEDULE flag set, which means not to schedule that region. */ | |
e1ab7874 | 2870 | bool |
f045d41d | 2871 | sched_is_disabled_for_current_region_p (void) |
2872 | { | |
f045d41d | 2873 | int bb; |
2874 | ||
2875 | for (bb = 0; bb < current_nr_blocks; bb++) | |
f5a6b05f | 2876 | if (!(BASIC_BLOCK_FOR_FN (cfun, |
2877 | BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE)) | |
7562ed74 | 2878 | return false; |
f045d41d | 2879 | |
2880 | return true; | |
2881 | } | |
2882 | ||
48e1416a | 2883 | /* Free all region dependencies saved in INSN_BACK_DEPS and |
e1ab7874 | 2884 | INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly |
48e1416a | 2885 | when scheduling, so this function is supposed to be called from |
e1ab7874 | 2886 | the selective scheduler only. */
2887 | void | |
2888 | free_rgn_deps (void) | |
7a31a7bd | 2889 | { |
2890 | int bb; | |
f045d41d | 2891 | |
e1ab7874 | 2892 | for (bb = 0; bb < current_nr_blocks; bb++) |
6a1cdb4d | 2893 | { |
e1ab7874 | 2894 | rtx head, tail; |
48e1416a | 2895 | |
e1ab7874 | 2896 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2897 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
7a31a7bd | 2898 | |
e1ab7874 | 2899 | sched_free_deps (head, tail, false); |
2900 | } | |
2901 | } | |
7a31a7bd | 2902 | |
e1ab7874 | 2903 | static int rgn_n_insns; |
6a1cdb4d | 2904 | |
e1ab7874 | 2905 | /* Compute insn priority for the current region. */
2906 | void | |
48e1416a | 2907 | compute_priorities (void) |
e1ab7874 | 2908 | { |
2909 | int bb; | |
6a1cdb4d | 2910 | |
e4897000 | 2911 | current_sched_info->sched_max_insns_priority = 0; |
7a31a7bd | 2912 | for (bb = 0; bb < current_nr_blocks; bb++) |
2295df67 | 2913 | { |
2914 | rtx head, tail; | |
48e1416a | 2915 | |
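      /* Priorities are computed before any scheduling has run, so no
	 recovery blocks exist yet and each ebb should still consist of a
	 single basic block, as the assert below checks.  */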
6a1cdb4d | 2916 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2917 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
2295df67 | 2918 | |
9845d120 | 2919 | if (no_real_insns_p (head, tail)) |
2920 | continue; | |
2921 | ||
2295df67 | 2922 | rgn_n_insns += set_priorities (head, tail); |
2923 | } | |
e4897000 | 2924 | current_sched_info->sched_max_insns_priority++; |
e1ab7874 | 2925 | } |
7a31a7bd | 2926 | |
bbd0cfb1 | 2927 | /* (Re-)initialize the arrays of DFA states at the end of each basic block. |
2928 | ||
2929 | SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be | |
2930 | zero for the first call to this function, to allocate the arrays for the | |
2931 | first time. | |
2932 | ||
2933 | This function is called once during initialization of the scheduler, and | |
2934 | called again to resize the arrays if new basic blocks have been created, | |
2935 | for example for speculation recovery code. */ | |
2936 | ||
2937 | static void | |
2938 | realloc_bb_state_array (int saved_last_basic_block) | |
2939 | { | |
2940 | char *old_bb_state_array = bb_state_array; | |
fe672ac0 | 2941 | size_t lbb = (size_t) last_basic_block_for_fn (cfun); |
bbd0cfb1 | 2942 | size_t slbb = (size_t) saved_last_basic_block; |
2943 | ||
2944 | /* Nothing to do if nothing changed since the last time this was called. */ | |
fe672ac0 | 2945 | if (saved_last_basic_block == last_basic_block_for_fn (cfun)) |
bbd0cfb1 | 2946 | return; |
2947 | ||
2948 | /* The selective scheduler doesn't use the state arrays. */ | |
2949 | if (sel_sched_p ()) | |
2950 | { | |
2951 | gcc_assert (bb_state_array == NULL && bb_state == NULL); | |
2952 | return; | |
2953 | } | |
2954 | ||
2955 | gcc_checking_assert (saved_last_basic_block == 0 | |
2956 | || (bb_state_array != NULL && bb_state != NULL)); | |
2957 | ||
2958 | bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size); | |
2959 | bb_state = XRESIZEVEC (state_t, bb_state, lbb); | |
2960 | ||
2961 | /* If BB_STATE_ARRAY has moved, fixup all the state pointers array. | |
2962 | Otherwise only fixup the newly allocated ones. For the state | |
2963 | array itself, only initialize the new entries. */ | |
2964 | bool bb_state_array_moved = (bb_state_array != old_bb_state_array); | |
2965 | for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++) | |
2966 | bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size); | |
2967 | for (size_t i = slbb; i < lbb; i++) | |
2968 | state_reset (bb_state[i]); | |
2969 | } | |
2970 | ||
2971 | /* Free the arrays of DFA states at the end of each basic block. */ | |
2972 | ||
2973 | static void | |
2974 | free_bb_state_array (void) | |
2975 | { | |
2976 | free (bb_state_array); | |
2977 | free (bb_state); | |
2978 | bb_state_array = NULL; | |
2979 | bb_state = NULL; | |
2980 | } | |
2981 | ||
e1ab7874 | 2982 | /* Schedule a region. A region is either an inner loop, a loop-free |
2983 | subroutine, or a single basic block. Each bb in the region is | |
2984 | scheduled after its flow predecessors. */ | |
7a31a7bd | 2985 | |
e1ab7874 | 2986 | static void |
2987 | schedule_region (int rgn) | |
2988 | { | |
2989 | int bb; | |
2990 | int sched_rgn_n_insns = 0; | |
aae97b21 | 2991 | |
e1ab7874 | 2992 | rgn_n_insns = 0; |
7a31a7bd | 2993 | |
e1ab7874 | 2994 | rgn_setup_region (rgn); |
7a31a7bd | 2995 | |
e1ab7874 | 2996 | /* Don't schedule a region whose blocks all have the
2997 | BB_DISABLE_SCHEDULE flag set. */
2998 | if (sched_is_disabled_for_current_region_p ()) | |
2999 | return; | |
7a31a7bd | 3000 | |
e1ab7874 | 3001 | sched_rgn_compute_dependencies (rgn); |
6a1cdb4d | 3002 | |
e1ab7874 | 3003 | sched_rgn_local_init (rgn); |
3004 | ||
3005 | /* Set priorities. */ | |
3006 | compute_priorities (); | |
3007 | ||
3008 | sched_extend_ready_list (rgn_n_insns); | |
7a31a7bd | 3009 | |
11189c7a | 3010 | if (sched_pressure == SCHED_PRESSURE_WEIGHTED) |
a7dcf969 | 3011 | { |
3012 | sched_init_region_reg_pressure_info (); | |
3013 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3014 | { | |
3015 | basic_block first_bb, last_bb; | |
3016 | rtx head, tail; | |
48e1416a | 3017 | |
a7dcf969 | 3018 | first_bb = EBB_FIRST_BB (bb); |
3019 | last_bb = EBB_LAST_BB (bb); | |
48e1416a | 3020 | |
a7dcf969 | 3021 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); |
48e1416a | 3022 | |
a7dcf969 | 3023 | if (no_real_insns_p (head, tail)) |
3024 | { | |
3025 | gcc_assert (first_bb == last_bb); | |
3026 | continue; | |
3027 | } | |
3028 | sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head)); | |
3029 | } | |
3030 | } | |
3031 | ||
7a31a7bd | 3032 | /* Now we can schedule all blocks. */ |
3033 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3034 | { | |
6a1cdb4d | 3035 | basic_block first_bb, last_bb, curr_bb; |
7a31a7bd | 3036 | rtx head, tail; |
7a31a7bd | 3037 | |
6a1cdb4d | 3038 | first_bb = EBB_FIRST_BB (bb); |
3039 | last_bb = EBB_LAST_BB (bb); | |
3040 | ||
3041 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); | |
7a31a7bd | 3042 | |
3043 | if (no_real_insns_p (head, tail)) | |
6a1cdb4d | 3044 | { |
3045 | gcc_assert (first_bb == last_bb); | |
3046 | continue; | |
3047 | } | |
7a31a7bd | 3048 | |
3049 | current_sched_info->prev_head = PREV_INSN (head); | |
3050 | current_sched_info->next_tail = NEXT_INSN (tail); | |
3051 | ||
e1ab7874 | 3052 | remove_notes (head, tail); |
7a31a7bd | 3053 | |
6a1cdb4d | 3054 | unlink_bb_notes (first_bb, last_bb); |
3055 | ||
7a31a7bd | 3056 | target_bb = bb; |
3057 | ||
e4897000 | 3058 | gcc_assert (flag_schedule_interblock || current_nr_blocks == 1); |
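      /* For a single-block region the ready queue must drain completely;
	 with interblock scheduling, insns belonging to other blocks of the
	 region may legitimately be left unscheduled here.  */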
3059 | current_sched_info->queue_must_finish_empty = current_nr_blocks == 1; | |
7a31a7bd | 3060 | |
6a1cdb4d | 3061 | curr_bb = first_bb; |
3072d30e | 3062 | if (dbg_cnt (sched_block)) |
3063 | { | |
0a15667c | 3064 | edge f; |
fe672ac0 | 3065 | int saved_last_basic_block = last_basic_block_for_fn (cfun); |
0a15667c | 3066 | |
bbd0cfb1 | 3067 | schedule_block (&curr_bb, bb_state[first_bb->index]); |
3068 | gcc_assert (EBB_FIRST_BB (bb) == first_bb); | |
3069 | sched_rgn_n_insns += sched_n_insns; | |
3070 | realloc_bb_state_array (saved_last_basic_block); | |
0a15667c | 3071 | f = find_fallthru_edge (last_bb->succs); |
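	      /* If the fallthru successor is reached often enough (its edge
		 probability, rescaled to a percentage, is at least
		 PARAM_SCHED_STATE_EDGE_PROB_CUTOFF), save the DFA state at
		 the end of this block so scheduling of the successor can
		 start from it.  */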
3072 | if (f && f->probability * 100 / REG_BR_PROB_BASE >= | |
3073 | PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)) | |
3074 | { | |
3075 | memcpy (bb_state[f->dest->index], curr_state, | |
3076 | dfa_state_size); | |
3077 | if (sched_verbose >= 5) | |
3078 | fprintf (sched_dump, "saving state for edge %d->%d\n", | |
3079 | f->src->index, f->dest->index); | |
3080 | } | |
3072d30e | 3081 | } |
3082 | else | |
3083 | { | |
3084 | sched_rgn_n_insns += rgn_n_insns; | |
3085 | } | |
7a31a7bd | 3086 | |
7a31a7bd | 3087 | /* Clean up. */ |
3088 | if (current_nr_blocks > 1) | |
e1ab7874 | 3089 | free_trg_info (); |
7a31a7bd | 3090 | } |
3091 | ||
3092 | /* Sanity check: verify that all region insns were scheduled. */ | |
04e579b6 | 3093 | gcc_assert (sched_rgn_n_insns == rgn_n_insns); |
7a31a7bd | 3094 | |
e1ab7874 | 3095 | sched_finish_ready_list (); |
7a31a7bd | 3096 | |
e1ab7874 | 3097 | /* Done with this region. */ |
3098 | sched_rgn_local_finish (); | |
93f6b030 | 3099 | |
3100 | /* Free dependencies. */ | |
3101 | for (bb = 0; bb < current_nr_blocks; ++bb) | |
3102 | free_block_dependencies (bb); | |
3103 | ||
3104 | gcc_assert (haifa_recovery_bb_ever_added_p | |
3105 | || deps_pools_are_empty_p ()); | |
7a31a7bd | 3106 | } |
3107 | ||
7a31a7bd | 3108 | /* Initialize data structures for region scheduling. */ |
3109 | ||
e1ab7874 | 3110 | void |
3111 | sched_rgn_init (bool single_blocks_p) | |
7a31a7bd | 3112 | { |
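  /* PARAM_MIN_SPEC_PROB is given as a percentage; rescale it into
     REG_BR_PROB_BASE units, the scale used for edge probabilities.  */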
e1ab7874 | 3113 | min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE) |
3114 | / 100); | |
3115 | ||
3116 | nr_inter = 0; | |
3117 | nr_spec = 0; | |
3118 | ||
6a1cdb4d | 3119 | extend_regions (); |
7a31a7bd | 3120 | |
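  /* The entry and exit blocks never belong to any scheduling region.  */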
e1ab7874 | 3121 | CONTAINING_RGN (ENTRY_BLOCK) = -1; |
3122 | CONTAINING_RGN (EXIT_BLOCK) = -1; | |
3123 | ||
bbd0cfb1 | 3124 | realloc_bb_state_array (0); |
0a15667c | 3125 | |
7a31a7bd | 3126 | /* Compute regions for scheduling. */ |
e1ab7874 | 3127 | if (single_blocks_p |
a28770e1 | 3128 | || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1 |
aae97b21 | 3129 | || !flag_schedule_interblock |
3130 | || is_cfg_nonregular ()) | |
7a31a7bd | 3131 | { |
e1ab7874 | 3132 | find_single_block_region (sel_sched_p ()); |
7a31a7bd | 3133 | } |
3134 | else | |
3135 | { | |
aae97b21 | 3136 | /* Compute the dominators and post dominators. */ |
e1ab7874 | 3137 | if (!sel_sched_p ()) |
3138 | calculate_dominance_info (CDI_DOMINATORS); | |
7a31a7bd | 3139 | |
aae97b21 | 3140 | /* Find regions. */ |
3141 | find_rgns (); | |
7a31a7bd | 3142 | |
aae97b21 | 3143 | if (sched_verbose >= 3) |
3144 | debug_regions (); | |
7a31a7bd | 3145 | |
aae97b21 | 3146 | /* For now, free the dominance info here. This will move as more
3072d30e | 3147 | and more of haifa is converted to using the cfg code. */
e1ab7874 | 3148 | if (!sel_sched_p ()) |
3149 | free_dominance_info (CDI_DOMINATORS); | |
7a31a7bd | 3150 | } |
7a31a7bd | 3151 | |
a28770e1 | 3152 | gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun)); |
7a31a7bd | 3153 | |
e1ab7874 | 3154 | RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) + |
3155 | RGN_NR_BLOCKS (nr_regions - 1)); | |
3156 | } | |
3157 | ||
3158 | /* Free data structures for region scheduling. */ | |
7a31a7bd | 3159 | void |
e1ab7874 | 3160 | sched_rgn_finish (void) |
7a31a7bd | 3161 | { |
bbd0cfb1 | 3162 | free_bb_state_array (); |
0a15667c | 3163 | |
7a31a7bd | 3164 | /* Reposition the prologue and epilogue notes in case we moved the |
3165 | prologue/epilogue insns. */ | |
3166 | if (reload_completed) | |
3072d30e | 3167 | reposition_prologue_and_epilogue_notes (); |
7a31a7bd | 3168 | |
7a31a7bd | 3169 | if (sched_verbose) |
3170 | { | |
e1ab7874 | 3171 | if (reload_completed == 0 |
3172 | && flag_schedule_interblock) | |
7a31a7bd | 3173 | { |
3174 | fprintf (sched_dump, | |
3175 | "\n;; Procedure interblock/speculative motions == %d/%d \n", | |
3176 | nr_inter, nr_spec); | |
3177 | } | |
3178 | else | |
04e579b6 | 3179 | gcc_assert (nr_inter <= 0); |
7a31a7bd | 3180 | fprintf (sched_dump, "\n\n"); |
3181 | } | |
3182 | ||
e1ab7874 | 3183 | nr_regions = 0; |
3184 | ||
7a31a7bd | 3185 | free (rgn_table); |
e1ab7874 | 3186 | rgn_table = NULL; |
3187 | ||
7a31a7bd | 3188 | free (rgn_bb_table); |
e1ab7874 | 3189 | rgn_bb_table = NULL; |
3190 | ||
7a31a7bd | 3191 | free (block_to_bb); |
e1ab7874 | 3192 | block_to_bb = NULL; |
3193 | ||
7a31a7bd | 3194 | free (containing_rgn); |
e1ab7874 | 3195 | containing_rgn = NULL; |
3196 | ||
3197 | free (ebb_head); | |
3198 | ebb_head = NULL; | |
3199 | } | |
3200 | ||
3201 | /* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3202 | point to the region RGN. */
3203 | void | |
3204 | rgn_setup_region (int rgn) | |
3205 | { | |
3206 | int bb; | |
3207 | ||
3208 | /* Set variables for the current region. */ | |
3209 | current_nr_blocks = RGN_NR_BLOCKS (rgn); | |
3210 | current_blocks = RGN_BLOCKS (rgn); | |
48e1416a | 3211 | |
e1ab7874 | 3212 | /* EBB_HEAD is a region-scope structure, but we reallocate it for
3213 | each region to save time and memory. See the comments in
3214 | rgn_add_block for why we allocate one extra element. */
3215 | ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1); | |
3216 | for (bb = 0; bb <= current_nr_blocks; bb++) | |
3217 | ebb_head[bb] = current_blocks + bb; | |
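  /* After this loop ebb_head[bb] is the index into rgn_bb_table of the
     first block of the bb-th ebb; the extra trailing element keeps
     "one past the last block of the last ebb" valid to read.  */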
3218 | } | |
3219 | ||
3220 | /* Compute instruction dependencies in region RGN. */ | |
3221 | void | |
3222 | sched_rgn_compute_dependencies (int rgn) | |
3223 | { | |
3224 | if (!RGN_DONT_CALC_DEPS (rgn)) | |
3225 | { | |
3226 | int bb; | |
3227 | ||
3228 | if (sel_sched_p ()) | |
3229 | sched_emulate_haifa_p = 1; | |
3230 | ||
3231 | init_deps_global (); | |
3232 | ||
3233 | /* Initializations for region data dependence analysis. */ | |
68e419a1 | 3234 | bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks); |
e1ab7874 | 3235 | for (bb = 0; bb < current_nr_blocks; bb++) |
d9ab2038 | 3236 | init_deps (bb_deps + bb, false); |
e1ab7874 | 3237 | |
566e7db2 | 3238 | /* Initialize bitmap used in add_branch_dependences. */ |
3239 | insn_referenced = sbitmap_alloc (sched_max_luid); | |
53c5d9d4 | 3240 | bitmap_clear (insn_referenced); |
48e1416a | 3241 | |
e1ab7874 | 3242 | /* Compute backward dependencies. */ |
3243 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3244 | compute_block_dependences (bb); | |
48e1416a | 3245 | |
566e7db2 | 3246 | sbitmap_free (insn_referenced); |
e1ab7874 | 3247 | free_pending_lists (); |
3248 | finish_deps_global (); | |
3249 | free (bb_deps); | |
7a31a7bd | 3250 | |
e1ab7874 | 3251 | /* We don't want to recalculate this twice. */ |
3252 | RGN_DONT_CALC_DEPS (rgn) = 1; | |
3072d30e | 3253 | |
e1ab7874 | 3254 | if (sel_sched_p ()) |
3255 | sched_emulate_haifa_p = 0; | |
3256 | } | |
3257 | else | |
3258 | /* (This is a recovery block. It is always a single block region.) | |
3259 | OR (We use selective scheduling.) */ | |
3260 | gcc_assert (current_nr_blocks == 1 || sel_sched_p ()); | |
3261 | } | |
3262 | ||
3263 | /* Initialize data structures local to scheduling region RGN
3264 | (interblock probabilities, dominators, split edges, etc.). */
3265 | void | |
3266 | sched_rgn_local_init (int rgn) | |
3267 | { | |
3268 | int bb; | |
48e1416a | 3269 | |
e1ab7874 | 3270 | /* Compute interblock info: probabilities, split-edges, dominators, etc. */ |
3271 | if (current_nr_blocks > 1) | |
3272 | { | |
3273 | basic_block block; | |
3274 | edge e; | |
3275 | edge_iterator ei; | |
3276 | ||
3277 | prob = XNEWVEC (int, current_nr_blocks); | |
3278 | ||
3279 | dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks); | |
53c5d9d4 | 3280 | bitmap_vector_clear (dom, current_nr_blocks); |
e1ab7874 | 3281 | |
3282 | /* Use ->aux to implement EDGE_TO_BIT mapping. */ | |
3283 | rgn_nr_edges = 0; | |
fc00614f | 3284 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3285 | { |
3286 | if (CONTAINING_RGN (block->index) != rgn) | |
3287 | continue; | |
3288 | FOR_EACH_EDGE (e, ei, block->succs) | |
3289 | SET_EDGE_TO_BIT (e, rgn_nr_edges++); | |
3290 | } | |
3291 | ||
3292 | rgn_edges = XNEWVEC (edge, rgn_nr_edges); | |
3293 | rgn_nr_edges = 0; | |
fc00614f | 3294 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3295 | { |
3296 | if (CONTAINING_RGN (block->index) != rgn) | |
3297 | continue; | |
3298 | FOR_EACH_EDGE (e, ei, block->succs) | |
3299 | rgn_edges[rgn_nr_edges++] = e; | |
3300 | } | |
3301 | ||
3302 | /* Split edges. */ | |
3303 | pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); | |
53c5d9d4 | 3304 | bitmap_vector_clear (pot_split, current_nr_blocks); |
e1ab7874 | 3305 | ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); |
53c5d9d4 | 3306 | bitmap_vector_clear (ancestor_edges, current_nr_blocks); |
e1ab7874 | 3307 | |
3308 | /* Compute probabilities, dominators, split_edges. */ | |
3309 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3310 | compute_dom_prob_ps (bb); | |
3311 | ||
3312 | /* Clean up ->aux, which was used for the EDGE_TO_BIT mapping. We
3313 | don't need it anymore, and we want to avoid stale values being
3314 | duplicated into the aux fields of newly created edges. */
fc00614f | 3315 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3316 | { |
3317 | if (CONTAINING_RGN (block->index) != rgn) | |
3318 | continue; | |
3319 | FOR_EACH_EDGE (e, ei, block->succs) | |
3320 | e->aux = NULL; | |
3321 | } | |
3322 | } | |
3323 | } | |
3324 | ||
3325 | /* Free data computed for the finished region. */ | |
48e1416a | 3326 | void |
e1ab7874 | 3327 | sched_rgn_local_free (void) |
3328 | { | |
3329 | free (prob); | |
3330 | sbitmap_vector_free (dom); | |
3331 | sbitmap_vector_free (pot_split); | |
3332 | sbitmap_vector_free (ancestor_edges); | |
3333 | free (rgn_edges); | |
3334 | } | |
3335 | ||
3336 | /* Free data computed for the finished region. Only multi-block regions scheduled by the Haifa scheduler free anything here. */
3337 | void | |
3338 | sched_rgn_local_finish (void) | |
3339 | { | |
3340 | if (current_nr_blocks > 1 && !sel_sched_p ()) | |
3341 | { | |
3342 | sched_rgn_local_free (); | |
3343 | } | |
3344 | } | |
3345 | ||
3346 | /* Set up the common scheduler info structure for region scheduling. */
3347 | void | |
3348 | rgn_setup_common_sched_info (void) | |
3349 | { | |
3350 | memcpy (&rgn_common_sched_info, &haifa_common_sched_info, | |
3351 | sizeof (rgn_common_sched_info)); | |
3352 | ||
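  /* Override only the hooks that differ for region scheduling; the rest
     keep the Haifa defaults copied above.  */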
3353 | rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg; | |
3354 | rgn_common_sched_info.add_block = rgn_add_block; | |
3355 | rgn_common_sched_info.estimate_number_of_insns | |
3356 | = rgn_estimate_number_of_insns; | |
3357 | rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS; | |
3358 | ||
3359 | common_sched_info = &rgn_common_sched_info; | |
3360 | } | |
3361 | ||
3362 | /* Set up all *_sched_info structures (for the Haifa frontend
3363 | and for the dependence analysis) in the interblock scheduler. */ | |
3364 | void | |
3365 | rgn_setup_sched_infos (void) | |
3366 | { | |
3367 | if (!sel_sched_p ()) | |
3368 | memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info, | |
3369 | sizeof (rgn_sched_deps_info)); | |
3370 | else | |
3371 | memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info, | |
3372 | sizeof (rgn_sched_deps_info)); | |
3373 | ||
3374 | sched_deps_info = &rgn_sched_deps_info; | |
3375 | ||
3376 | memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info)); | |
3377 | current_sched_info = &rgn_sched_info; | |
3378 | } | |
3379 | ||
3380 | /* The one entry point in this file. */ | |
3381 | void | |
3382 | schedule_insns (void) | |
3383 | { | |
3384 | int rgn; | |
3385 | ||
3386 | /* Taking care of this degenerate case makes the rest of | |
3387 | this code simpler. */ | |
a28770e1 | 3388 | if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS) |
e1ab7874 | 3389 | return; |
3390 | ||
3391 | rgn_setup_common_sched_info (); | |
3392 | rgn_setup_sched_infos (); | |
3393 | ||
3394 | haifa_sched_init (); | |
3395 | sched_rgn_init (reload_completed); | |
3396 | ||
3397 | bitmap_initialize (¬_in_df, 0); | |
3072d30e | 3398 | bitmap_clear (¬_in_df); |
7a31a7bd | 3399 | |
e1ab7874 | 3400 | /* Schedule every region in the subroutine. */ |
3401 | for (rgn = 0; rgn < nr_regions; rgn++) | |
3402 | if (dbg_cnt (sched_region)) | |
3403 | schedule_region (rgn); | |
3404 | ||
3405 | /* Clean up. */ | |
3406 | sched_rgn_finish (); | |
3407 | bitmap_clear (¬_in_df); | |
3408 | ||
3409 | haifa_sched_finish (); | |
7a31a7bd | 3410 | } |
6a1cdb4d | 3411 | |
3412 | /* INSN has been added to/removed from current region. */ | |
3413 | static void | |
e1ab7874 | 3414 | rgn_add_remove_insn (rtx insn, int remove_p) |
6a1cdb4d | 3415 | { |
3416 | if (!remove_p) | |
3417 | rgn_n_insns++; | |
3418 | else | |
3419 | rgn_n_insns--; | |
3420 | ||
3421 | if (INSN_BB (insn) == target_bb) | |
3422 | { | |
3423 | if (!remove_p) | |
3424 | target_n_insns++; | |
3425 | else | |
3426 | target_n_insns--; | |
3427 | } | |
3428 | } | |
3429 | ||
3430 | /* Extend internal data structures. */ | |
e1ab7874 | 3431 | void |
6a1cdb4d | 3432 | extend_regions (void) |
3433 | { | |
a28770e1 | 3434 | rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun)); |
fe672ac0 | 3435 | rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, |
3436 | n_basic_blocks_for_fn (cfun)); | |
3437 | block_to_bb = XRESIZEVEC (int, block_to_bb, | |
3438 | last_basic_block_for_fn (cfun)); | |
3439 | containing_rgn = XRESIZEVEC (int, containing_rgn, | |
3440 | last_basic_block_for_fn (cfun)); | |
6a1cdb4d | 3441 | } |
3442 | ||
e1ab7874 | 3443 | void |
3444 | rgn_make_new_region_out_of_new_block (basic_block bb) | |
3445 | { | |
3446 | int i; | |
3447 | ||
3448 | i = RGN_BLOCKS (nr_regions); | |
3449 | /* I - first free position in rgn_bb_table. */ | |
3450 | ||
3451 | rgn_bb_table[i] = bb->index; | |
3452 | RGN_NR_BLOCKS (nr_regions) = 1; | |
3453 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
3454 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
3455 | CONTAINING_RGN (bb->index) = nr_regions; | |
3456 | BLOCK_TO_BB (bb->index) = 0; | |
3457 | ||
3458 | nr_regions++; | |
48e1416a | 3459 | |
e1ab7874 | 3460 | RGN_BLOCKS (nr_regions) = i + 1; |
3461 | } | |
3462 | ||
6a1cdb4d | 3463 | /* BB was added to ebb after AFTER. */ |
3464 | static void | |
e1ab7874 | 3465 | rgn_add_block (basic_block bb, basic_block after) |
6a1cdb4d | 3466 | { |
3467 | extend_regions (); | |
3072d30e | 3468 | bitmap_set_bit (¬_in_df, bb->index); |
3469 | ||
34154e27 | 3470 | if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
6a1cdb4d | 3471 | { |
e1ab7874 | 3472 | rgn_make_new_region_out_of_new_block (bb); |
34154e27 | 3473 | RGN_DONT_CALC_DEPS (nr_regions - 1) = (after |
3474 | == EXIT_BLOCK_PTR_FOR_FN (cfun)); | |
6a1cdb4d | 3475 | } |
3476 | else | |
48e1416a | 3477 | { |
6a1cdb4d | 3478 | int i, pos; |
3479 | ||
3480 | /* We need to fix rgn_table, block_to_bb, containing_rgn | |
3481 | and ebb_head. */ | |
3482 | ||
3483 | BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index); | |
3484 | ||
3485 | /* We extend ebb_head to one more position to | |
48e1416a | 3486 | easily find the last position of the last ebb in |
6a1cdb4d | 3487 | the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1] |
3488 | is _always_ valid for access. */ | |
3489 | ||
3490 | i = BLOCK_TO_BB (after->index) + 1; | |
90bf0a00 | 3491 | pos = ebb_head[i] - 1; |
3492 | /* Now POS is the index of the last block in the ebb containing AFTER. */
3493 | ||
3494 | /* Find index of basic block AFTER. */ | |
3c802a1e | 3495 | for (; rgn_bb_table[pos] != after->index; pos--) |
3496 | ; | |
90bf0a00 | 3497 | |
6a1cdb4d | 3498 | pos++; |
3499 | gcc_assert (pos > ebb_head[i - 1]); | |
90bf0a00 | 3500 | |
6a1cdb4d | 3501 | /* I is the ebb right after AFTER's ebb, so ebb_head[i] is
3502 | valid to access. */
3503 | ||
3504 | /* Source position: ebb_head[i] | |
9ca2c29a | 3505 | Destination position: ebb_head[i] + 1 |
48e1416a | 3506 | Last position: |
6a1cdb4d | 3507 | RGN_BLOCKS (nr_regions) - 1 |
3508 | Number of elements to copy: (last_position) - (source_position) + 1 | |
3509 | */ | |
48e1416a | 3510 | |
6a1cdb4d | 3511 | memmove (rgn_bb_table + pos + 1, |
3512 | rgn_bb_table + pos, | |
3513 | ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1) | |
3514 | * sizeof (*rgn_bb_table)); | |
3515 | ||
3516 | rgn_bb_table[pos] = bb->index; | |
48e1416a | 3517 | |
6a1cdb4d | 3518 | for (; i <= current_nr_blocks; i++) |
3519 | ebb_head [i]++; | |
3520 | ||
3521 | i = CONTAINING_RGN (after->index); | |
3522 | CONTAINING_RGN (bb->index) = i; | |
48e1416a | 3523 | |
6a1cdb4d | 3524 | RGN_HAS_REAL_EBB (i) = 1; |
3525 | ||
3526 | for (++i; i <= nr_regions; i++) | |
3527 | RGN_BLOCKS (i)++; | |
6a1cdb4d | 3528 | } |
3529 | } | |
3530 | ||
3531 | /* Fix internal data after interblock movement of jump instruction. | |
3532 | For parameter meaning please refer to | |
3533 | sched-int.h: struct sched_info: fix_recovery_cfg. */ | |
3534 | static void | |
e1ab7874 | 3535 | rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti) |
6a1cdb4d | 3536 | { |
3537 | int old_pos, new_pos, i; | |
3538 | ||
3539 | BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi); | |
48e1416a | 3540 | |
6a1cdb4d | 3541 | for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1; |
3542 | rgn_bb_table[old_pos] != check_bb_nexti; | |
3c802a1e | 3543 | old_pos--) |
3544 | ; | |
6a1cdb4d | 3545 | gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]); |
3546 | ||
3547 | for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1; | |
3548 | rgn_bb_table[new_pos] != bbi; | |
3c802a1e | 3549 | new_pos--) |
3550 | ; | |
6a1cdb4d | 3551 | new_pos++; |
3552 | gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]); | |
48e1416a | 3553 | |
6a1cdb4d | 3554 | gcc_assert (new_pos < old_pos); |
3555 | ||
3556 | memmove (rgn_bb_table + new_pos + 1, | |
3557 | rgn_bb_table + new_pos, | |
3558 | (old_pos - new_pos) * sizeof (*rgn_bb_table)); | |
3559 | ||
3560 | rgn_bb_table[new_pos] = check_bb_nexti; | |
3561 | ||
3562 | for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++) | |
3563 | ebb_head[i]++; | |
3564 | } | |
3565 | ||
3566 | /* Return next block in ebb chain. For parameter meaning please refer to | |
3567 | sched-int.h: struct sched_info: advance_target_bb. */ | |
3568 | static basic_block | |
3569 | advance_target_bb (basic_block bb, rtx insn) | |
3570 | { | |
3571 | if (insn) | |
3572 | return 0; | |
3573 | ||
3574 | gcc_assert (BLOCK_TO_BB (bb->index) == target_bb | |
3575 | && BLOCK_TO_BB (bb->next_bb->index) == target_bb); | |
3576 | return bb->next_bb; | |
3577 | } | |
3578 | ||
cda0a5f5 | 3579 | #endif |
77fce4cd | 3580 | \f |
57a8bf1b | 3581 | static bool |
3582 | gate_handle_live_range_shrinkage (void) | |
3583 | { | |
3584 | #ifdef INSN_SCHEDULING | |
3585 | return flag_live_range_shrinkage; | |
3586 | #else | |
3587 | return 0; | |
3588 | #endif | |
3589 | } | |
3590 | ||
3591 | /* Run instruction scheduler. */ | |
3592 | static unsigned int | |
3593 | rest_of_handle_live_range_shrinkage (void) | |
3594 | { | |
3595 | #ifdef INSN_SCHEDULING | |
3596 | int saved; | |
3597 | ||
3598 | initialize_live_range_shrinkage (); | |
3599 | saved = flag_schedule_interblock; | |
3600 | flag_schedule_interblock = false; | |
3601 | schedule_insns (); | |
3602 | flag_schedule_interblock = saved; | |
3603 | finish_live_range_shrinkage (); | |
3604 | #endif | |
3605 | return 0; | |
3606 | } | |
3607 | ||
77fce4cd | 3608 | static bool |
3609 | gate_handle_sched (void) | |
3610 | { | |
3611 | #ifdef INSN_SCHEDULING | |
1c3660b1 | 3612 | return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func); |
77fce4cd | 3613 | #else |
3614 | return 0; | |
3615 | #endif | |
3616 | } | |
3617 | ||
3618 | /* Run instruction scheduler. */ | |
2a1990e9 | 3619 | static unsigned int |
77fce4cd | 3620 | rest_of_handle_sched (void) |
3621 | { | |
3622 | #ifdef INSN_SCHEDULING | |
e1ab7874 | 3623 | if (flag_selective_scheduling |
3624 | && ! maybe_skip_selective_scheduling ()) | |
3625 | run_selective_scheduling (); | |
3626 | else | |
3627 | schedule_insns (); | |
77fce4cd | 3628 | #endif |
2a1990e9 | 3629 | return 0; |
77fce4cd | 3630 | } |
3631 | ||
3632 | static bool | |
3633 | gate_handle_sched2 (void) | |
3634 | { | |
3635 | #ifdef INSN_SCHEDULING | |
48e1416a | 3636 | return optimize > 0 && flag_schedule_insns_after_reload |
8a42230a | 3637 | && !targetm.delay_sched2 && dbg_cnt (sched2_func); |
77fce4cd | 3638 | #else |
3639 | return 0; | |
3640 | #endif | |
3641 | } | |
3642 | ||
3643 | /* Run second scheduling pass after reload. */ | |
2a1990e9 | 3644 | static unsigned int |
77fce4cd | 3645 | rest_of_handle_sched2 (void) |
3646 | { | |
3647 | #ifdef INSN_SCHEDULING | |
e1ab7874 | 3648 | if (flag_selective_scheduling2 |
3649 | && ! maybe_skip_selective_scheduling ()) | |
3650 | run_selective_scheduling (); | |
77fce4cd | 3651 | else |
e1ab7874 | 3652 | { |
3653 | /* Do control and data sched analysis again, | |
3654 | and write some more of the results to the dump file. */
fda153ea | 3655 | if (flag_sched2_use_superblocks) |
e1ab7874 | 3656 | schedule_ebbs (); |
3657 | else | |
3658 | schedule_insns (); | |
3659 | } | |
77fce4cd | 3660 | #endif |
2a1990e9 | 3661 | return 0; |
77fce4cd | 3662 | } |
3663 | ||
cbe8bda8 | 3664 | namespace { |
3665 | ||
57a8bf1b | 3666 | const pass_data pass_data_live_range_shrinkage = |
3667 | { | |
3668 | RTL_PASS, /* type */ | |
3669 | "lr_shrinkage", /* name */ | |
3670 | OPTGROUP_NONE, /* optinfo_flags */ | |
3671 | true, /* has_gate */ | |
3672 | true, /* has_execute */ | |
3673 | TV_LIVE_RANGE_SHRINKAGE, /* tv_id */ | |
3674 | 0, /* properties_required */ | |
3675 | 0, /* properties_provided */ | |
3676 | 0, /* properties_destroyed */ | |
3677 | 0, /* todo_flags_start */ | |
3678 | ( TODO_df_finish | TODO_verify_rtl_sharing | |
3679 | | TODO_verify_flow ), /* todo_flags_finish */ | |
3680 | }; | |
3681 | ||
3682 | class pass_live_range_shrinkage : public rtl_opt_pass | |
3683 | { | |
3684 | public: | |
3685 | pass_live_range_shrinkage (gcc::context *ctxt)
3686 | : rtl_opt_pass (pass_data_live_range_shrinkage, ctxt)
3687 | {} | |
3688 | ||
3689 | /* opt_pass methods: */ | |
3690 | bool gate () { return gate_handle_live_range_shrinkage (); } | |
3691 | unsigned int execute () { return rest_of_handle_live_range_shrinkage (); } | |
3692 | ||
3693 | }; // class pass_live_range_shrinkage | |
3694 | ||
3695 | } // anon namespace | |
3696 | ||
3697 | rtl_opt_pass * | |
3698 | make_pass_live_range_shrinkage (gcc::context *ctxt) | |
3699 | { | |
3700 | return new pass_live_range_shrinkage (ctxt); | |
3701 | } | |
3702 | ||
3703 | namespace { | |
3704 | ||
cbe8bda8 | 3705 | const pass_data pass_data_sched = |
3706 | { | |
3707 | RTL_PASS, /* type */ | |
3708 | "sched1", /* name */ | |
3709 | OPTGROUP_NONE, /* optinfo_flags */ | |
3710 | true, /* has_gate */ | |
3711 | true, /* has_execute */ | |
3712 | TV_SCHED, /* tv_id */ | |
3713 | 0, /* properties_required */ | |
3714 | 0, /* properties_provided */ | |
3715 | 0, /* properties_destroyed */ | |
3716 | 0, /* todo_flags_start */ | |
3717 | ( TODO_df_finish | TODO_verify_rtl_sharing | |
3718 | | TODO_verify_flow ), /* todo_flags_finish */ | |
77fce4cd | 3719 | }; |
3720 | ||
cbe8bda8 | 3721 | class pass_sched : public rtl_opt_pass |
3722 | { | |
3723 | public: | |
9af5ce0c | 3724 | pass_sched (gcc::context *ctxt) |
3725 | : rtl_opt_pass (pass_data_sched, ctxt) | |
cbe8bda8 | 3726 | {} |
3727 | ||
3728 | /* opt_pass methods: */ | |
3729 | bool gate () { return gate_handle_sched (); } | |
3730 | unsigned int execute () { return rest_of_handle_sched (); } | |
3731 | ||
3732 | }; // class pass_sched | |
3733 | ||
3734 | } // anon namespace | |
3735 | ||
3736 | rtl_opt_pass * | |
3737 | make_pass_sched (gcc::context *ctxt) | |
3738 | { | |
3739 | return new pass_sched (ctxt); | |
3740 | } | |
3741 | ||
3742 | namespace { | |
3743 | ||
3744 | const pass_data pass_data_sched2 = | |
3745 | { | |
3746 | RTL_PASS, /* type */ | |
3747 | "sched2", /* name */ | |
3748 | OPTGROUP_NONE, /* optinfo_flags */ | |
3749 | true, /* has_gate */ | |
3750 | true, /* has_execute */ | |
3751 | TV_SCHED2, /* tv_id */ | |
3752 | 0, /* properties_required */ | |
3753 | 0, /* properties_provided */ | |
3754 | 0, /* properties_destroyed */ | |
3755 | 0, /* todo_flags_start */ | |
3756 | ( TODO_df_finish | TODO_verify_rtl_sharing | |
3757 | | TODO_verify_flow ), /* todo_flags_finish */ | |
77fce4cd | 3758 | }; |
cbe8bda8 | 3759 | |
3760 | class pass_sched2 : public rtl_opt_pass | |
3761 | { | |
3762 | public: | |
9af5ce0c | 3763 | pass_sched2 (gcc::context *ctxt) |
3764 | : rtl_opt_pass (pass_data_sched2, ctxt) | |
cbe8bda8 | 3765 | {} |
3766 | ||
3767 | /* opt_pass methods: */ | |
3768 | bool gate () { return gate_handle_sched2 (); } | |
3769 | unsigned int execute () { return rest_of_handle_sched2 (); } | |
3770 | ||
3771 | }; // class pass_sched2 | |
3772 | ||
3773 | } // anon namespace | |
3774 | ||
3775 | rtl_opt_pass * | |
3776 | make_pass_sched2 (gcc::context *ctxt) | |
3777 | { | |
3778 | return new pass_sched2 (ctxt); | |
3779 | } |