/* Instruction scheduling pass.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "recog.h"
#include "profile.h"
#include "insn-attr.h"
#include "except.h"
#include "params.h"
#include "cfganal.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "pretty-print.h"
#include "print-rtl.h"

#ifdef INSN_SCHEDULING

/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)

/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Same as above before adding any new regions.  */
static int nr_regions_initial = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;

/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head[i] is the index in rgn_bb_table of the head basic block of
   the i'th ebb.  Currently an ebb can only arise through splitting of
   the block being scheduled, so we don't need an ebb_head array for
   every region; it is sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;

static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
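/* For instance (illustrative summary; the precise sets are computed by
   compute_trg_info below): when an insn is moved speculatively from a
   source block up to a dominating target block, the split blocks are
   the blocks reachable from the target on paths that bypass the source,
   where registers set by the moved insn must not be live; the update
   blocks lie on the paths from the target to the source and must have
   their live information augmented after the motion.  */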
static basic_block *bblst_table;
static int bblst_size, bblst_last;

/* Arrays that hold the DFA state at the end of a basic block, to re-use
   as the initial state at the start of successor blocks.  The
   BB_STATE_ARRAY array holds the actual DFA state, and BB_STATE[I] is
   a pointer into BB_STATE_ARRAY for basic block I.  FIXME: This should
   be a vec.  */
static char *bb_state_array = NULL;
static state_t *bb_state = NULL;

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
struct bblst
{
  basic_block *first_member;
  int nr_members;
};

struct candidate
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
};

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN)			\
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;

/* List of edges.  */
struct edgelst
{
  edge *first_member;
  int nr_members;
};

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg.  */
#define IS_DOMINATED(bb_src, bb_trg)				\
( bitmap_bit_p (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
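/* The mapping borrows each edge's aux field: while a region is being
   scheduled, aux holds the edge's region-local index.  (haifa_find_rgns
   below likewise borrows aux for its EDGE_PASSED marks, and clears it
   again after its DFS traversal, so the two uses do not overlap.)  */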

/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx_insn *, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx_insn *, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx_insn *, rtx_insn *);
static void compute_block_dependences (int);

static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
				  rtx_insn_list **, rtx_expr_list **);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx_insn *insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB_FN (b, cfun)
    FOR_BB_INSNS (b, insn)
      {
        rtx note, set, dest;
        rtx_insn *next;

        /* If this function has a computed jump, then we consider the cfg
           not well structured.  */
        if (JUMP_P (insn) && computed_jump_p (insn))
          return 1;

        if (!INSN_P (insn))
          continue;

        note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
        if (note == NULL_RTX)
          continue;

        /* For that label not to be seen as a referred-to label, this
           must be a single-set which is feeding a jump *only*.  This
           could be a conditional jump with the label split off for
           machine-specific reasons or a casesi/tablejump.  */
        next = next_nonnote_insn (insn);
        if (next == NULL_RTX
            || !JUMP_P (next)
            || (JUMP_LABEL (next) != XEXP (note, 0)
                && find_reg_note (next, REG_LABEL_TARGET,
                                  XEXP (note, 0)) == NULL_RTX)
            || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
          return 1;

        set = single_set (insn);
        if (set == NULL_RTX)
          return 1;

        dest = SET_DEST (set);
        if (!REG_P (dest) || !dead_or_set_p (next, dest))
          return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB_FN (b, cfun)
    {
      if (EDGE_COUNT (b->preds) == 0
          || (single_pred_p (b)
              && single_pred (b) == b))
        return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}

/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;

  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each set bit in the bitset.  */
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}

/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */
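/* For example, assuming the scheduler's dump file is open (the output
   goes to sched_dump), one can do from gdb:

     (gdb) call debug_regions ()

   after the regions have been computed.  */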
361 | ||
4b987fac | 362 | DEBUG_FUNCTION void |
60b8c5b3 | 363 | debug_regions (void) |
7a31a7bd | 364 | { |
365 | int rgn, bb; | |
366 | ||
367 | fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n"); | |
368 | for (rgn = 0; rgn < nr_regions; rgn++) | |
369 | { | |
370 | fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn, | |
371 | rgn_table[rgn].rgn_nr_blocks); | |
372 | fprintf (sched_dump, ";;\tbb/block: "); | |
373 | ||
6a1cdb4d | 374 | /* We don't have ebb_head initialized yet, so we can't use |
375 | BB_TO_BLOCK (). */ | |
376 | current_blocks = RGN_BLOCKS (rgn); | |
7a31a7bd | 377 | |
6a1cdb4d | 378 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) |
379 | fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]); | |
7a31a7bd | 380 | |
381 | fprintf (sched_dump, "\n\n"); | |
382 | } | |
383 | } | |
384 | ||
e1ab7874 | 385 | /* Print the region's basic blocks. */ |
386 | ||
4b987fac | 387 | DEBUG_FUNCTION void |
e1ab7874 | 388 | debug_region (int rgn) |
389 | { | |
390 | int bb; | |
391 | ||
392 | fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn); | |
393 | fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn, | |
394 | rgn_table[rgn].rgn_nr_blocks); | |
395 | fprintf (stderr, ";;\tbb/block: "); | |
396 | ||
397 | /* We don't have ebb_head initialized yet, so we can't use | |
398 | BB_TO_BLOCK (). */ | |
399 | current_blocks = RGN_BLOCKS (rgn); | |
400 | ||
401 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) | |
402 | fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]); | |
403 | ||
404 | fprintf (stderr, "\n\n"); | |
405 | ||
406 | for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) | |
407 | { | |
f5a6b05f | 408 | dump_bb (stderr, |
409 | BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]), | |
bec2cf98 | 410 | 0, TDF_SLIM | TDF_BLOCKS); |
e1ab7874 | 411 | fprintf (stderr, "\n"); |
412 | } | |
413 | ||
414 | fprintf (stderr, "\n"); | |
415 | ||
416 | } | |
417 | ||
418 | /* True when a bb with index BB_INDEX contained in region RGN. */ | |
419 | static bool | |
420 | bb_in_region_p (int bb_index, int rgn) | |
421 | { | |
422 | int i; | |
423 | ||
424 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) | |
425 | if (rgn_bb_table[current_blocks + i] == bb_index) | |
426 | return true; | |
427 | ||
428 | return false; | |
429 | } | |
430 | ||
431 | /* Dump region RGN to file F using dot syntax. */ | |
432 | void | |
433 | dump_region_dot (FILE *f, int rgn) | |
434 | { | |
435 | int i; | |
436 | ||
437 | fprintf (f, "digraph Region_%d {\n", rgn); | |
438 | ||
439 | /* We don't have ebb_head initialized yet, so we can't use | |
440 | BB_TO_BLOCK (). */ | |
441 | current_blocks = RGN_BLOCKS (rgn); | |
442 | ||
443 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) | |
444 | { | |
445 | edge e; | |
446 | edge_iterator ei; | |
447 | int src_bb_num = rgn_bb_table[current_blocks + i]; | |
f5a6b05f | 448 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num); |
e1ab7874 | 449 | |
450 | FOR_EACH_EDGE (e, ei, bb->succs) | |
451 | if (bb_in_region_p (e->dest->index, rgn)) | |
452 | fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index); | |
453 | } | |
454 | fprintf (f, "}\n"); | |
455 | } | |
456 | ||
/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  if (f == NULL)
    return;
  dump_region_dot (f, rgn);
  fclose (f);
}

/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p)
    {
      int probability_cutoff;
      if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
	probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
      else
	probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
      probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

      FOR_EACH_BB_FN (ebb_start, cfun)
	{
	  RGN_NR_BLOCKS (nr_regions) = 0;
	  RGN_BLOCKS (nr_regions) = i;
	  RGN_DONT_CALC_DEPS (nr_regions) = 0;
	  RGN_HAS_REAL_EBB (nr_regions) = 0;

	  for (bb = ebb_start; ; bb = bb->next_bb)
	    {
	      edge e;

	      rgn_bb_table[i] = bb->index;
	      RGN_NR_BLOCKS (nr_regions)++;
	      CONTAINING_RGN (bb->index) = nr_regions;
	      BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
	      i++;

	      if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		  || LABEL_P (BB_HEAD (bb->next_bb)))
		break;

	      e = find_fallthru_edge (bb->succs);
	      if (! e)
		break;
	      if (e->probability.initialized_p ()
		  && e->probability.to_reg_br_prob_base () <= probability_cutoff)
		break;
	    }

	  ebb_start = bb;
	  nr_regions++;
	}
    }
  else
    FOR_EACH_BB_FN (bb, cfun)
      {
	rgn_bb_table[nr_regions] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = nr_regions;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;

	CONTAINING_RGN (bb->index) = nr_regions;
	BLOCK_TO_BB (bb->index) = 0;
	nr_regions++;
      }
}
533 | ||
534 | /* Estimate number of the insns in the BB. */ | |
535 | static int | |
536 | rgn_estimate_number_of_insns (basic_block bb) | |
537 | { | |
9845d120 | 538 | int count; |
539 | ||
540 | count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb)); | |
541 | ||
542 | if (MAY_HAVE_DEBUG_INSNS) | |
543 | { | |
b24ef467 | 544 | rtx_insn *insn; |
9845d120 | 545 | |
546 | FOR_BB_INSNS (bb, insn) | |
547 | if (DEBUG_INSN_P (insn)) | |
548 | count--; | |
549 | } | |
550 | ||
551 | return count; | |
7a31a7bd | 552 | } |
553 | ||
/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
		   (BASIC_BLOCK_FOR_FN (cfun, block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
	  || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}

/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
{							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    bitmap_clear_bit (inner, hdr);			\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      bitmap_clear_bit (inner, max_hdr[blk]);		\
      max_hdr[blk] = hdr;				\
    }							\
}
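/* The dfs_nr comparison exploits the DFS numbering: of two candidate
   headers, the one with the smaller DFS number was entered earlier and
   therefore encloses the other.  The outer header loses its "inner
   loop" status, and max_hdr[blk] keeps the innermost enclosing header
   seen so far.  */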
584 | ||
585 | /* Find regions for interblock scheduling. | |
586 | ||
587 | A region for scheduling can be: | |
588 | ||
589 | * A loop-free procedure, or | |
590 | ||
591 | * A reducible inner loop, or | |
592 | ||
593 | * A basic block not contained in any other region. | |
594 | ||
595 | ?!? In theory we could build other regions based on extended basic | |
596 | blocks or reverse extended basic blocks. Is it worth the trouble? | |
597 | ||
598 | Loop blocks that form a region are put into the region's block list | |
599 | in topological order. | |
600 | ||
601 | This procedure stores its results into the following global (ick) variables | |
602 | ||
603 | * rgn_nr | |
604 | * rgn_table | |
605 | * rgn_bb_table | |
606 | * block_to_bb | |
607 | * containing region | |
608 | ||
609 | We use dominator relationships to avoid making regions out of non-reducible | |
610 | loops. | |
611 | ||
612 | This procedure needs to be converted to work on pred/succ lists instead | |
613 | of edge tables. That would simplify it somewhat. */ | |
614 | ||
615 | static void | |
e1ab7874 | 616 | haifa_find_rgns (void) |
7a31a7bd | 617 | { |
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
  dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));

  /* Note if a block is a natural inner loop header.  */
  auto_sbitmap inner (last_basic_block_for_fn (cfun));
  bitmap_ones (inner);

  /* Note if a block is a natural loop header.  */
  auto_sbitmap header (last_basic_block_for_fn (cfun));
  bitmap_clear (header);

  /* Note if a block is in the block queue.  */
  auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);

  /* Note if a block is on the DFS stack.  */
  auto_sbitmap in_stack (last_basic_block_for_fn (cfun));
  bitmap_clear (in_stack);

  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
    max_hdr[i] = -1;

#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
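  /* An edge is "passed" once the traversal below has followed it; the
     mark lives in the edge's aux field and is reset for the whole CFG
     right after the traversal.  An end-of-successor-list iterator also
     counts as passed, so a fully processed node pops the stack.  */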
663 | ||
7a31a7bd | 664 | /* DFS traversal to find inner loops in the cfg. */ |
665 | ||
34154e27 | 666 | current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs); |
7a31a7bd | 667 | sp = -1; |
aae97b21 | 668 | |
7a31a7bd | 669 | while (1) |
670 | { | |
aae97b21 | 671 | if (EDGE_PASSED (current_edge)) |
7a31a7bd | 672 | { |
673 | /* We have reached a leaf node or a node that was already | |
674 | processed. Pop edges off the stack until we find | |
675 | an edge that has not yet been processed. */ | |
aae97b21 | 676 | while (sp >= 0 && EDGE_PASSED (current_edge)) |
7a31a7bd | 677 | { |
678 | /* Pop entry off the stack. */ | |
679 | current_edge = stack[sp--]; | |
aae97b21 | 680 | node = ei_edge (current_edge)->src->index; |
681 | gcc_assert (node != ENTRY_BLOCK); | |
682 | child = ei_edge (current_edge)->dest->index; | |
683 | gcc_assert (child != EXIT_BLOCK); | |
08b7917c | 684 | bitmap_clear_bit (in_stack, child); |
685 | if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child])) | |
7a31a7bd | 686 | UPDATE_LOOP_RELATIONS (node, max_hdr[child]); |
aae97b21 | 687 | ei_next (¤t_edge); |
7a31a7bd | 688 | } |
689 | ||
690 | /* See if have finished the DFS tree traversal. */ | |
aae97b21 | 691 | if (sp < 0 && EDGE_PASSED (current_edge)) |
7a31a7bd | 692 | break; |
693 | ||
694 | /* Nope, continue the traversal with the popped node. */ | |
695 | continue; | |
696 | } | |
697 | ||
698 | /* Process a node. */ | |
aae97b21 | 699 | node = ei_edge (current_edge)->src->index; |
700 | gcc_assert (node != ENTRY_BLOCK); | |
08b7917c | 701 | bitmap_set_bit (in_stack, node); |
7a31a7bd | 702 | dfs_nr[node] = ++count; |
703 | ||
aae97b21 | 704 | /* We don't traverse to the exit block. */ |
705 | child = ei_edge (current_edge)->dest->index; | |
706 | if (child == EXIT_BLOCK) | |
707 | { | |
708 | SET_EDGE_PASSED (current_edge); | |
709 | ei_next (¤t_edge); | |
710 | continue; | |
711 | } | |
712 | ||
7a31a7bd | 713 | /* If the successor is in the stack, then we've found a loop. |
714 | Mark the loop, if it is not a natural loop, then it will | |
715 | be rejected during the second traversal. */ | |
08b7917c | 716 | if (bitmap_bit_p (in_stack, child)) |
7a31a7bd | 717 | { |
718 | no_loops = 0; | |
08b7917c | 719 | bitmap_set_bit (header, child); |
7a31a7bd | 720 | UPDATE_LOOP_RELATIONS (node, child); |
aae97b21 | 721 | SET_EDGE_PASSED (current_edge); |
722 | ei_next (¤t_edge); | |
7a31a7bd | 723 | continue; |
724 | } | |
725 | ||
726 | /* If the child was already visited, then there is no need to visit | |
727 | it again. Just update the loop relationships and restart | |
728 | with a new edge. */ | |
729 | if (dfs_nr[child]) | |
730 | { | |
08b7917c | 731 | if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child])) |
7a31a7bd | 732 | UPDATE_LOOP_RELATIONS (node, max_hdr[child]); |
aae97b21 | 733 | SET_EDGE_PASSED (current_edge); |
734 | ei_next (¤t_edge); | |
7a31a7bd | 735 | continue; |
736 | } | |
737 | ||
738 | /* Push an entry on the stack and continue DFS traversal. */ | |
739 | stack[++sp] = current_edge; | |
aae97b21 | 740 | SET_EDGE_PASSED (current_edge); |
741 | current_edge = ei_start (ei_edge (current_edge)->dest->succs); | |
742 | } | |
743 | ||
744 | /* Reset ->aux field used by EDGE_PASSED. */ | |
ed7d889a | 745 | FOR_ALL_BB_FN (bb, cfun) |
aae97b21 | 746 | { |
747 | edge_iterator ei; | |
748 | edge e; | |
749 | FOR_EACH_EDGE (e, ei, bb->succs) | |
750 | e->aux = NULL; | |
7a31a7bd | 751 | } |
752 | ||
  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB_FN (bb, cfun)
    if (dfs_nr[bb->index] == 0)
      {
	unreachable = 1;
	break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB_FN (bb, cfun)
    degree[bb->index] = EDGE_COUNT (bb->preds);

  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER: it records
	 the basic blocks that are forced to be region heads.  This is
	 done to try to assemble several smaller regions instead of one
	 too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;

      if (no_loops)
	bitmap_set_bit (header, 0);

      /* Second traversal: find reducible inner loops and topologically
	 sort the blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
	{
	  degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
	  extended_rgn_header =
	    sbitmap_alloc (last_basic_block_for_fn (cfun));
	  bitmap_clear (extended_rgn_header);
	}

      /* Find blocks which are inner loop headers.  We still have non-reducible
	 loops to consider at this point.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
	    {
	      edge e;
	      edge_iterator ei;
	      basic_block jbb;

	      /* Now check that the loop is reducible.  We do this separate
		 from finding inner loops so that we do not find a reducible
		 loop which contains an inner non-reducible loop.

		 A simple way to find reducible/natural loops is to verify
		 that each block in the loop is dominated by the loop
		 header.

		 If there exists a block that is not dominated by the loop
		 header, then the block is reachable from outside the loop
		 and thus the loop is not a natural loop.  */
	      FOR_EACH_BB_FN (jbb, cfun)
		{
		  /* First identify blocks in the loop, except for the loop
		     entry block.  */
		  if (bb->index == max_hdr[jbb->index] && bb != jbb)
		    {
		      /* Now verify that the block is dominated by the loop
			 header.  */
		      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
			break;
		    }
		}

	      /* If we exited the loop early, then BB is the header of
		 a non-reducible loop and we should quit processing it
		 now.  */
	      if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
		continue;

	      /* BB is the header of an inner loop, or block 0 in a subroutine
		 with no loops at all.  */
	      head = tail = -1;
	      too_large_failure = 0;
	      loop_head = max_hdr[bb->index];

	      if (extend_regions_p)
		/* We save degree in case we meet a too_large region
		   and cancel it.  We need a correct degree later when
		   calling extend_rgns.  */
		memcpy (degree1, degree,
			last_basic_block_for_fn (cfun) * sizeof (int));

	      /* Decrease degree of all BB's successors for topological
		 ordering.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		  --degree[e->dest->index];

	      /* Estimate # insns, and count # blocks in the region.  */
	      num_bbs = 1;
	      num_insns = common_sched_info->estimate_number_of_insns (bb);

	      /* Find all loop latches (blocks with back edges to the loop
		 header) or all the leaf blocks if the cfg has no loops.

		 Place those blocks into the queue.  */
	      if (no_loops)
		{
		  FOR_EACH_BB_FN (jbb, cfun)
		    /* Leaf nodes have only a single successor which must
		       be EXIT_BLOCK.  */
		    if (single_succ_p (jbb)
			&& single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
		      {
			queue[++tail] = jbb->index;
			bitmap_set_bit (in_queue, jbb->index);

			if (too_large (jbb->index, &num_bbs, &num_insns))
			  {
			    too_large_failure = 1;
			    break;
			  }
		      }
		}
	      else
		{
		  edge e;

		  FOR_EACH_EDGE (e, ei, bb->preds)
		    {
		      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
			continue;

		      node = e->src->index;

		      if (max_hdr[node] == loop_head && node != bb->index)
			{
			  /* This is a loop latch.  */
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}
915 | ||
916 | /* Now add all the blocks in the loop to the queue. | |
917 | ||
918 | We know the loop is a natural loop; however the algorithm | |
919 | above will not always mark certain blocks as being in the | |
920 | loop. Consider: | |
921 | node children | |
922 | a b,c | |
923 | b c | |
924 | c a,d | |
925 | d b | |
926 | ||
927 | The algorithm in the DFS traversal may not mark B & D as part | |
0c6d8c36 | 928 | of the loop (i.e. they will not have max_hdr set to A). |
7a31a7bd | 929 | |
930 | We know they can not be loop latches (else they would have | |
931 | had max_hdr set since they'd have a backedge to a dominator | |
932 | block). So we don't need them on the initial queue. | |
933 | ||
934 | We know they are part of the loop because they are dominated | |
935 | by the loop header and can be reached by a backwards walk of | |
936 | the edges starting with nodes on the initial queue. | |
937 | ||
938 | It is safe and desirable to include those nodes in the | |
939 | loop/scheduling region. To do so we would need to decrease | |
940 | the degree of a node if it is the target of a backedge | |
941 | within the loop itself as the node is placed in the queue. | |
942 | ||
943 | We do not do this because I'm not sure that the actual | |
944 | scheduling code will properly handle this case. ?!? */ | |
945 | ||
946 | while (head < tail && !too_large_failure) | |
947 | { | |
948 | edge e; | |
949 | child = queue[++head]; | |
950 | ||
f5a6b05f | 951 | FOR_EACH_EDGE (e, ei, |
952 | BASIC_BLOCK_FOR_FN (cfun, child)->preds) | |
7a31a7bd | 953 | { |
b3d6de89 | 954 | node = e->src->index; |
7a31a7bd | 955 | |
956 | /* See discussion above about nodes not marked as in | |
957 | this loop during the initial DFS traversal. */ | |
34154e27 | 958 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
7a31a7bd | 959 | || max_hdr[node] != loop_head) |
960 | { | |
961 | tail = -1; | |
962 | break; | |
963 | } | |
08b7917c | 964 | else if (!bitmap_bit_p (in_queue, node) && node != bb->index) |
7a31a7bd | 965 | { |
966 | queue[++tail] = node; | |
08b7917c | 967 | bitmap_set_bit (in_queue, node); |
7a31a7bd | 968 | |
969 | if (too_large (node, &num_bbs, &num_insns)) | |
970 | { | |
971 | too_large_failure = 1; | |
972 | break; | |
973 | } | |
974 | } | |
975 | } | |
976 | } | |
977 | ||
	      if (tail >= 0 && !too_large_failure)
		{
		  /* Place the loop header into list of region blocks.  */
		  degree[bb->index] = -1;
		  rgn_bb_table[idx] = bb->index;
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  RGN_BLOCKS (nr_regions) = idx++;
		  RGN_DONT_CALC_DEPS (nr_regions) = 0;
		  RGN_HAS_REAL_EBB (nr_regions) = 0;
		  CONTAINING_RGN (bb->index) = nr_regions;
		  BLOCK_TO_BB (bb->index) = count = 0;

		  /* Remove blocks from queue[] when their in degree
		     becomes zero.  Repeat until no blocks are left on the
		     list.  This produces a topological list of blocks in
		     the region.  */
		  while (tail >= 0)
		    {
		      if (head < 0)
			head = tail;
		      child = queue[head];
		      if (degree[child] == 0)
			{
			  edge e;

			  degree[child] = -1;
			  rgn_bb_table[idx++] = child;
			  BLOCK_TO_BB (child) = ++count;
			  CONTAINING_RGN (child) = nr_regions;
			  queue[head] = queue[tail--];

			  FOR_EACH_EDGE (e, ei,
					 BASIC_BLOCK_FOR_FN (cfun,
							     child)->succs)
			    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
			      --degree[e->dest->index];
			}
		      else
			--head;
		    }
		  ++nr_regions;
		}
	      else if (extend_regions_p)
		{
		  /* Restore DEGREE.  */
		  int *t = degree;

		  degree = degree1;
		  degree1 = t;

		  /* And force successors of BB to be region heads.
		     This may provide several smaller regions instead
		     of one too_large region.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		      bitmap_set_bit (extended_rgn_header, e->dest->index);
		}
	    }
	}
      free (queue);

      if (extend_regions_p)
	{
	  free (degree1);

	  bitmap_ior (header, header, extended_rgn_header);
	  sbitmap_free (extended_rgn_header);

	  extend_rgns (degree, &idx, header, max_hdr);
	}
    }

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
	rgn_bb_table[idx] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = idx++;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;
	CONTAINING_RGN (bb->index) = nr_regions++;
	BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (degree);
  free (stack);
}

/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use a custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}

static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
	{
	  a = XRESIZEVEC (int, a, nr_blocks);
	  do
	    a[a_sz++] = 0;
	  while (a_sz != nr_blocks);
	}

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}

/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
	continue;

      if (i >= s1_sz)
	n1 = 0;
      else
	n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
	       "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}

/* Extend regions.
   DEGREE - Array of incoming edge counts, considering only edges that
   don't yet have their sources in formed regions.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));

  order = XNEWVEC (int, last_basic_block_for_fn (cfun));
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
	{
	  max_hdr[bbn] = bbn;
	  rescan = 1;
	}
      else
	/* This block already was processed in find_rgns.  */
	max_hdr[bbn] = -1;
    }

  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then the current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
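  /* For example (illustrative), in a diamond CFG

	  A
	 / \
	B   C
	 \ /
	  D

     where A is a region head and all four blocks share the same loop
     header, B and C inherit max_hdr = A from their single predecessor,
     and D sees the same max_hdr through both predecessors, so the whole
     diamond lands in A's region.  Had B and C ended up with different
     headers, D would have become a region head itself.  */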

  while (rescan && iter < max_iter)
    {
      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)
	{
	  edge e;
	  edge_iterator ei;
	  int bbn = order[i];

	  if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
	    {
	      int hdr = -1;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
		{
		  int predn = e->src->index;

		  if (predn != ENTRY_BLOCK
		      /* If pred wasn't processed in find_rgns.  */
		      && max_hdr[predn] != -1
		      /* And pred and bb reside in the same loop.
			 (Or out of any loop).  */
		      && loop_hdr[bbn] == loop_hdr[predn])
		    {
		      if (hdr == -1)
			/* Then bb extends the containing region of pred.  */
			hdr = max_hdr[predn];
		      else if (hdr != max_hdr[predn])
			/* Too bad, there are at least two predecessors
			   that reside in different regions.  Thus, BB should
			   begin its own region.  */
			{
			  hdr = bbn;
			  break;
			}
		    }
		  else
		    /* BB starts its own region.  */
		    {
		      hdr = bbn;
		      break;
		    }
		}

	      if (hdr == bbn)
		{
		  /* If BB starts its own region,
		     update set of headers with BB.  */
		  bitmap_set_bit (header, bbn);
		  rescan = 1;
		}
	      else
		gcc_assert (hdr != -1);

	      max_hdr[bbn] = hdr;
	    }
	}

      iter++;
    }

  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with the max-sched-extend-regions-iters
     parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
	     rescan ? "... failed" : "");

  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
	s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
	{
	  int bbn = order[i];

	  if (max_hdr[bbn] == bbn)
	    /* BBN is a region head.  */
	    {
	      edge e;
	      edge_iterator ei;
	      int num_bbs = 0, j, num_insns = 0, large;

	      large = too_large (bbn, &num_bbs, &num_insns);

	      degree[bbn] = -1;
	      rgn_bb_table[idx] = bbn;
	      RGN_BLOCKS (nr_regions) = idx++;
	      RGN_DONT_CALC_DEPS (nr_regions) = 0;
	      RGN_HAS_REAL_EBB (nr_regions) = 0;
	      CONTAINING_RGN (bbn) = nr_regions;
	      BLOCK_TO_BB (bbn) = 0;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
		if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		  degree[e->dest->index]--;

	      if (!large)
		/* Here we check whether the region is too_large.  */
		for (j = i - 1; j >= 0; j--)
		  {
		    int succn = order[j];
		    if (max_hdr[succn] == bbn)
		      {
			if ((large = too_large (succn, &num_bbs, &num_insns)))
			  break;
		      }
		  }

	      if (large)
		/* If the region is too_large, then wrap every block of
		   the region into a single block region.
		   Here we wrap the region head only.  Other blocks are
		   processed in the loop below.  */
		{
		  RGN_NR_BLOCKS (nr_regions) = 1;
		  nr_regions++;
		}

	      num_bbs = 1;

	      for (j = i - 1; j >= 0; j--)
		{
		  int succn = order[j];

		  if (max_hdr[succn] == bbn)
		    /* This loop iterates over all basic blocks that
		       are supposed to be in the region with head BBN,
		       and wraps them into that region (or into a
		       single block region).  */
		    {
		      gcc_assert (degree[succn] == 0);

		      degree[succn] = -1;
		      rgn_bb_table[idx] = succn;
		      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
		      CONTAINING_RGN (succn) = nr_regions;

		      if (large)
			/* Wrap SUCCN into single block region.  */
			{
			  RGN_BLOCKS (nr_regions) = idx;
			  RGN_NR_BLOCKS (nr_regions) = 1;
			  RGN_DONT_CALC_DEPS (nr_regions) = 0;
			  RGN_HAS_REAL_EBB (nr_regions) = 0;
			  nr_regions++;
			}

		      idx++;

		      FOR_EACH_EDGE (e, ei,
				     BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
			if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
			  degree[e->dest->index]--;
		    }
		}

	      if (!large)
		{
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  nr_regions++;
		}
	    }
	}

      if (sched_verbose >= 6)
	{
	  int *s2, s2_sz;

	  /* Get the new statistics and print the comparison with the
	     one before calling this function.  */
	  s2_sz = gather_region_statistics (&s2);
	  print_region_statistics (s1, s1_sz, s2, s2_sz);
	  free (s1);
	  free (s2);
	}
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}

/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */
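/* In effect, for each bb in topological order within the region:

     dom[bb]  = { bb } union ( intersection of dom[pred] over preds )
     prob[bb] = sum over in-edges e = (pred, bb) of
		prob[pred] * probability (e), capped at REG_BR_PROB_BASE

   and pot_split[bb] collects the out-edges of bb's ancestors minus
   bb's own ancestor edges.  */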
1401 | ||
1402 | static void | |
60b8c5b3 | 1403 | compute_dom_prob_ps (int bb) |
7a31a7bd | 1404 | { |
6c0b81cb | 1405 | edge_iterator in_ei; |
1406 | edge in_edge; | |
7a31a7bd | 1407 | |
6a1cdb4d | 1408 | /* We shouldn't have any real ebbs yet. */ |
1409 | gcc_assert (ebb_head [bb] == bb + current_blocks); | |
48e1416a | 1410 | |
7a31a7bd | 1411 | if (IS_RGN_ENTRY (bb)) |
1412 | { | |
08b7917c | 1413 | bitmap_set_bit (dom[bb], 0); |
6c0b81cb | 1414 | prob[bb] = REG_BR_PROB_BASE; |
7a31a7bd | 1415 | return; |
1416 | } | |
1417 | ||
6c0b81cb | 1418 | prob[bb] = 0; |
1419 | ||
4a82352a | 1420 | /* Initialize dom[bb] to '111..1'. */ |
53c5d9d4 | 1421 | bitmap_ones (dom[bb]); |
7a31a7bd | 1422 | |
f5a6b05f | 1423 | FOR_EACH_EDGE (in_edge, in_ei, |
1424 | BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds) | |
7a31a7bd | 1425 | { |
6c0b81cb | 1426 | int pred_bb; |
1427 | edge out_edge; | |
1428 | edge_iterator out_ei; | |
1429 | ||
34154e27 | 1430 | if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
aae97b21 | 1431 | continue; |
7a31a7bd | 1432 | |
aae97b21 | 1433 | pred_bb = BLOCK_TO_BB (in_edge->src->index); |
53c5d9d4 | 1434 | bitmap_and (dom[bb], dom[bb], dom[pred_bb]); |
1435 | bitmap_ior (ancestor_edges[bb], | |
aae97b21 | 1436 | ancestor_edges[bb], ancestor_edges[pred_bb]); |
7a31a7bd | 1437 | |
08b7917c | 1438 | bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge)); |
79cafa9e | 1439 | |
53c5d9d4 | 1440 | bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]); |
7a31a7bd | 1441 | |
aae97b21 | 1442 | FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs) |
08b7917c | 1443 | bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge)); |
aae97b21 | 1444 | |
720cfc43 | 1445 | prob[bb] += combine_probabilities |
1446 | (prob[pred_bb], | |
1447 | in_edge->probability.initialized_p () | |
1448 | ? in_edge->probability.to_reg_br_prob_base () | |
1449 | : 0); | |
df21e330 | 1450 | // The rounding divide in combine_probabilities can result in an extra |
1451 | // probability increment propagating along 50-50 edges. Eventually when | |
1452 | // the edges re-merge, the accumulated probability can go slightly above | |
1453 | // REG_BR_PROB_BASE. | |
1454 | if (prob[bb] > REG_BR_PROB_BASE) | |
1455 | prob[bb] = REG_BR_PROB_BASE; | |
7a31a7bd | 1456 | } |
7a31a7bd | 1457 | |
08b7917c | 1458 | bitmap_set_bit (dom[bb], bb); |
53c5d9d4 | 1459 | bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]); |
7a31a7bd | 1460 | |
1461 | if (sched_verbose >= 2) | |
1462 | fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), | |
6c0b81cb | 1463 | (100 * prob[bb]) / REG_BR_PROB_BASE); |
7a31a7bd | 1464 | } |
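/* Editor's note: a standalone toy (not part of GCC) illustrating the
   rounding effect described above, assuming combine_probabilities is the
   rounding product p1 * p2 / REG_BR_PROB_BASE.  An odd probability
   entering a 50-50 diamond comes out one unit larger, so re-merging
   paths can tip the sum just past REG_BR_PROB_BASE, hence the clamp.  */
#if 0
#include <stdio.h>

#define BASE 10000	/* stand-in for REG_BR_PROB_BASE */

static int
combine (int p1, int p2)
{
  return (p1 * p2 + BASE / 2) / BASE;	/* rounding divide */
}

int
main (void)
{
  int p = 9999;				/* odd probability at the branch */
  int join = combine (p, BASE / 2)	/* then-arm of a 50-50 diamond */
	     + combine (p, BASE / 2);	/* else-arm */
  printf ("%d in, %d out of the diamond\n", p, join);	/* 9999 -> 10000 */
  return 0;
}
#endif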
1465 | ||
1466 | /* Functions for target info. */ | |
1467 | ||
1468 | /* Compute in BL the list of split-edges of bb_src relatively to bb_trg. | |
1469 | Note that bb_trg dominates bb_src. */ | |
1470 | ||
1471 | static void | |
60b8c5b3 | 1472 | split_edges (int bb_src, int bb_trg, edgelst *bl) |
7a31a7bd | 1473 | { |
3c6549f8 | 1474 | auto_sbitmap src (SBITMAP_SIZE (pot_split[bb_src])); |
53c5d9d4 | 1475 | bitmap_copy (src, pot_split[bb_src]); |
79cafa9e | 1476 | |
53c5d9d4 | 1477 | bitmap_and_compl (src, src, pot_split[bb_trg]); |
aae97b21 | 1478 | extract_edgelst (src, bl); |
7a31a7bd | 1479 | } |
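/* Editor's sketch (plain machine words instead of sbitmaps): the
   computation above is a set difference, i.e. the potential split edges
   of BB_SRC minus those that are already potential splits of BB_TRG.  */
#if 0
static unsigned int
split_mask (unsigned int pot_split_src, unsigned int pot_split_trg)
{
  return pot_split_src & ~pot_split_trg;	/* bitmap_and_compl */
}
#endif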
1480 | ||
1481 | /* Find the valid candidate-source-blocks for the target block TRG, compute | |
1482 | their probability, and check if they are speculative or not. | |
1483 | For speculative sources, compute their update-blocks and split-blocks. */ | |
1484 | ||
1485 | static void | |
60b8c5b3 | 1486 | compute_trg_info (int trg) |
7a31a7bd | 1487 | { |
19cb6b50 | 1488 | candidate *sp; |
91020a82 | 1489 | edgelst el = { NULL, 0 }; |
aae97b21 | 1490 | int i, j, k, update_idx; |
1491 | basic_block block; | |
1492 | edge_iterator ei; | |
1493 | edge e; | |
7a31a7bd | 1494 | |
e1ab7874 | 1495 | candidate_table = XNEWVEC (candidate, current_nr_blocks); |
1496 | ||
1497 | bblst_last = 0; | |
1498 | /* bblst_table holds split blocks and update blocks for each block after | |
1499 | the current one in the region. Split blocks and update blocks are | |
1500 | the TO blocks of region edges, so there can be at most rgn_nr_edges | |
1501 | of them. */ | |
1502 | bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; | |
1503 | bblst_table = XNEWVEC (basic_block, bblst_size); | |
1504 | ||
1505 | edgelst_last = 0; | |
1506 | edgelst_table = XNEWVEC (edge, rgn_nr_edges); | |
1507 | ||
7a31a7bd | 1508 | /* Define some of the fields for the target bb as well. */ |
1509 | sp = candidate_table + trg; | |
1510 | sp->is_valid = 1; | |
1511 | sp->is_speculative = 0; | |
6c0b81cb | 1512 | sp->src_prob = REG_BR_PROB_BASE; |
7a31a7bd | 1513 | |
3c6549f8 | 1514 | auto_sbitmap visited (last_basic_block_for_fn (cfun)); |
d3129ae7 | 1515 | |
7a31a7bd | 1516 | for (i = trg + 1; i < current_nr_blocks; i++) |
1517 | { | |
1518 | sp = candidate_table + i; | |
1519 | ||
1520 | sp->is_valid = IS_DOMINATED (i, trg); | |
1521 | if (sp->is_valid) | |
1522 | { | |
6c0b81cb | 1523 | int tf = prob[trg], cf = prob[i]; |
1524 | ||
1525 | /* In CFGs with low-probability edges, TF can possibly be zero. */ | |
70074000 | 1526 | sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0); |
6c0b81cb | 1527 | sp->is_valid = (sp->src_prob >= min_spec_prob); |
7a31a7bd | 1528 | } |
1529 | ||
1530 | if (sp->is_valid) | |
1531 | { | |
1532 | split_edges (i, trg, &el); | |
1533 | sp->is_speculative = (el.nr_members) ? 1 : 0; | |
1534 | if (sp->is_speculative && !flag_schedule_speculative) | |
1535 | sp->is_valid = 0; | |
1536 | } | |
1537 | ||
1538 | if (sp->is_valid) | |
1539 | { | |
7a31a7bd | 1540 | /* Compute split blocks and store them in bblst_table. |
1541 | The TO block of every split edge is a split block. */ | |
1542 | sp->split_bbs.first_member = &bblst_table[bblst_last]; | |
1543 | sp->split_bbs.nr_members = el.nr_members; | |
1544 | for (j = 0; j < el.nr_members; bblst_last++, j++) | |
aae97b21 | 1545 | bblst_table[bblst_last] = el.first_member[j]->dest; |
7a31a7bd | 1546 | sp->update_bbs.first_member = &bblst_table[bblst_last]; |
1547 | ||
1548 | /* Compute update blocks and store them in bblst_table. | |
1549 | For every split edge, look at the FROM block, and check | |
1550 | all out edges. For each out edge that is not a split edge, | |
1551 | add the TO block to the update block list. This list can end | |
1552 | up with a lot of duplicates. We need to weed them out to avoid | |
1553 | overrunning the end of the bblst_table. */ | |
7a31a7bd | 1554 | |
1555 | update_idx = 0; | |
53c5d9d4 | 1556 | bitmap_clear (visited); |
7a31a7bd | 1557 | for (j = 0; j < el.nr_members; j++) |
1558 | { | |
aae97b21 | 1559 | block = el.first_member[j]->src; |
1560 | FOR_EACH_EDGE (e, ei, block->succs) | |
7a31a7bd | 1561 | { |
08b7917c | 1562 | if (!bitmap_bit_p (visited, e->dest->index)) |
7a31a7bd | 1563 | { |
1564 | for (k = 0; k < el.nr_members; k++) | |
aae97b21 | 1565 | if (e == el.first_member[k]) |
7a31a7bd | 1566 | break; |
1567 | ||
1568 | if (k >= el.nr_members) | |
1569 | { | |
aae97b21 | 1570 | bblst_table[bblst_last++] = e->dest; |
08b7917c | 1571 | bitmap_set_bit (visited, e->dest->index); |
7a31a7bd | 1572 | update_idx++; |
1573 | } | |
1574 | } | |
7a31a7bd | 1575 | } |
7a31a7bd | 1576 | } |
1577 | sp->update_bbs.nr_members = update_idx; | |
1578 | ||
1579 | /* Make sure we didn't overrun the end of bblst_table. */ | |
04e579b6 | 1580 | gcc_assert (bblst_last <= bblst_size); |
7a31a7bd | 1581 | } |
1582 | else | |
1583 | { | |
1584 | sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0; | |
1585 | ||
1586 | sp->is_speculative = 0; | |
1587 | sp->src_prob = 0; | |
1588 | } | |
1589 | } | |
1590 | } | |
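/* Editor's note: a standalone sketch (not part of GCC) of the
   deduplication idiom used for the update-block list above; a visited
   set keyed by block index keeps each destination from being appended
   more than once, which is what bounds bblst_last by bblst_size.  */
#if 0
#include <stdbool.h>

#define MAX_BLOCKS 64

static int
collect_unique (const int *dests, int n_dests, int *out)
{
  bool visited[MAX_BLOCKS] = { false };
  int n_out = 0;

  for (int i = 0; i < n_dests; i++)
    if (!visited[dests[i]])
      {
	visited[dests[i]] = true;	/* bitmap_set_bit (visited, ...) */
	out[n_out++] = dests[i];
      }
  return n_out;
}
#endif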
1591 | ||
e1ab7874 | 1592 | /* Free the computed target info. */ |
1593 | static void | |
1594 | free_trg_info (void) | |
1595 | { | |
1596 | free (candidate_table); | |
1597 | free (bblst_table); | |
1598 | free (edgelst_table); | |
1599 | } | |
1600 | ||
7a31a7bd | 1601 | /* Print candidates info, for debugging purposes. Callable from debugger. */ |
1602 | ||
4b987fac | 1603 | DEBUG_FUNCTION void |
60b8c5b3 | 1604 | debug_candidate (int i) |
7a31a7bd | 1605 | { |
1606 | if (!candidate_table[i].is_valid) | |
1607 | return; | |
1608 | ||
1609 | if (candidate_table[i].is_speculative) | |
1610 | { | |
1611 | int j; | |
1612 | fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i); | |
1613 | ||
1614 | fprintf (sched_dump, "split path: "); | |
1615 | for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++) | |
1616 | { | |
aae97b21 | 1617 | int b = candidate_table[i].split_bbs.first_member[j]->index; |
7a31a7bd | 1618 | |
1619 | fprintf (sched_dump, " %d ", b); | |
1620 | } | |
1621 | fprintf (sched_dump, "\n"); | |
1622 | ||
1623 | fprintf (sched_dump, "update path: "); | |
1624 | for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++) | |
1625 | { | |
aae97b21 | 1626 | int b = candidate_table[i].update_bbs.first_member[j]->index; |
7a31a7bd | 1627 | |
1628 | fprintf (sched_dump, " %d ", b); | |
1629 | } | |
1630 | fprintf (sched_dump, "\n"); | |
1631 | } | |
1632 | else | |
1633 | { | |
1634 | fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i)); | |
1635 | } | |
1636 | } | |
1637 | ||
1638 | /* Print candidates info, for debugging purposes. Callable from debugger. */ | |
1639 | ||
4b987fac | 1640 | DEBUG_FUNCTION void |
60b8c5b3 | 1641 | debug_candidates (int trg) |
7a31a7bd | 1642 | { |
1643 | int i; | |
1644 | ||
1645 | fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n", | |
1646 | BB_TO_BLOCK (trg), trg); | |
1647 | for (i = trg + 1; i < current_nr_blocks; i++) | |
1648 | debug_candidate (i); | |
1649 | } | |
1650 | ||
de132707 | 1651 | /* Functions for speculative scheduling. */ |
7a31a7bd | 1652 | |
3072d30e | 1653 | static bitmap_head not_in_df; |
1654 | ||
7a31a7bd | 1655 | /* Return 0 if x is a set of a register alive at the beginning of one | |
1656 | of the split-blocks of src, otherwise return 1. */ | |
1657 | ||
1658 | static int | |
60b8c5b3 | 1659 | check_live_1 (int src, rtx x) |
7a31a7bd | 1660 | { |
19cb6b50 | 1661 | int i; |
1662 | int regno; | |
1663 | rtx reg = SET_DEST (x); | |
7a31a7bd | 1664 | |
1665 | if (reg == 0) | |
1666 | return 1; | |
1667 | ||
476d094d | 1668 | while (GET_CODE (reg) == SUBREG |
1669 | || GET_CODE (reg) == ZERO_EXTRACT | |
7a31a7bd | 1670 | || GET_CODE (reg) == STRICT_LOW_PART) |
1671 | reg = XEXP (reg, 0); | |
1672 | ||
4b303227 | 1673 | if (GET_CODE (reg) == PARALLEL) |
7a31a7bd | 1674 | { |
19cb6b50 | 1675 | int i; |
216b2683 | 1676 | |
7a31a7bd | 1677 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
4b303227 | 1678 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1679 | if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0))) | |
216b2683 | 1680 | return 1; |
216b2683 | 1681 | |
7a31a7bd | 1682 | return 0; |
1683 | } | |
1684 | ||
8ad4c111 | 1685 | if (!REG_P (reg)) |
7a31a7bd | 1686 | return 1; |
1687 | ||
1688 | regno = REGNO (reg); | |
1689 | ||
1690 | if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) | |
1691 | { | |
1692 | /* Global registers are assumed live. */ | |
1693 | return 0; | |
1694 | } | |
1695 | else | |
1696 | { | |
1697 | if (regno < FIRST_PSEUDO_REGISTER) | |
1698 | { | |
1699 | /* Check for hard registers. */ | |
0933f1d9 | 1700 | int j = REG_NREGS (reg); |
7a31a7bd | 1701 | while (--j >= 0) |
1702 | { | |
1703 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) | |
1704 | { | |
aae97b21 | 1705 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
3072d30e | 1706 | int t = bitmap_bit_p (¬_in_df, b->index); |
7a31a7bd | 1707 | |
6a1cdb4d | 1708 | /* We can have split blocks that were recently generated. | |
f0b5f617 | 1709 | Such blocks are always outside the current region. */ | |
3072d30e | 1710 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1711 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); | |
1712 | ||
deb2741b | 1713 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j)) |
3072d30e | 1714 | return 0; |
7a31a7bd | 1715 | } |
1716 | } | |
1717 | } | |
1718 | else | |
1719 | { | |
f024691d | 1720 | /* Check for pseudo registers. */ |
7a31a7bd | 1721 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) |
1722 | { | |
aae97b21 | 1723 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
3072d30e | 1724 | int t = bitmap_bit_p (¬_in_df, b->index); |
7a31a7bd | 1725 | |
3072d30e | 1726 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1727 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); | |
1728 | ||
deb2741b | 1729 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno)) |
3072d30e | 1730 | return 0; |
7a31a7bd | 1731 | } |
1732 | } | |
1733 | } | |
1734 | ||
1735 | return 1; | |
1736 | } | |
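/* Editor's sketch (toy arrays, not GCC's regset API): for a hard
   register value spanning several consecutive registers, the test above
   only permits the motion when every constituent register is dead at
   the head of every split block.  */
#if 0
#include <stdbool.h>

static bool
multi_reg_dead_everywhere (const bool live_in[][128], int n_split_blocks,
			   int regno, int nregs)
{
  for (int j = 0; j < nregs; j++)		/* each constituent reg */
    for (int i = 0; i < n_split_blocks; i++)	/* each split block */
      if (live_in[i][regno + j])
	return false;	/* live somewhere: the speculative set would clobber it */
  return true;
}
#endif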
1737 | ||
1738 | /* If x is a set of a register R, mark that R is alive at the beginning | |
1739 | of every update-block of src. */ | |
1740 | ||
1741 | static void | |
60b8c5b3 | 1742 | update_live_1 (int src, rtx x) |
7a31a7bd | 1743 | { |
19cb6b50 | 1744 | int i; |
1745 | int regno; | |
1746 | rtx reg = SET_DEST (x); | |
7a31a7bd | 1747 | |
1748 | if (reg == 0) | |
1749 | return; | |
1750 | ||
476d094d | 1751 | while (GET_CODE (reg) == SUBREG |
1752 | || GET_CODE (reg) == ZERO_EXTRACT | |
7a31a7bd | 1753 | || GET_CODE (reg) == STRICT_LOW_PART) |
1754 | reg = XEXP (reg, 0); | |
1755 | ||
4b303227 | 1756 | if (GET_CODE (reg) == PARALLEL) |
7a31a7bd | 1757 | { |
19cb6b50 | 1758 | int i; |
216b2683 | 1759 | |
7a31a7bd | 1760 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
4b303227 | 1761 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1762 | update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)); | |
216b2683 | 1763 | |
7a31a7bd | 1764 | return; |
1765 | } | |
1766 | ||
8ad4c111 | 1767 | if (!REG_P (reg)) |
7a31a7bd | 1768 | return; |
1769 | ||
1770 | /* Global registers are always live, so the code below does not apply | |
1771 | to them. */ | |
1772 | ||
1773 | regno = REGNO (reg); | |
1774 | ||
771d4616 | 1775 | if (! HARD_REGISTER_NUM_P (regno) |
1776 | || !global_regs[regno]) | |
7a31a7bd | 1777 | { |
771d4616 | 1778 | for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++) |
7a31a7bd | 1779 | { |
771d4616 | 1780 | basic_block b = candidate_table[src].update_bbs.first_member[i]; |
837d3ea8 | 1781 | bitmap_set_range (df_get_live_in (b), regno, REG_NREGS (reg)); |
7a31a7bd | 1782 | } |
1783 | } | |
1784 | } | |
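/* Editor's sketch (toy bitset): the update above marks all constituent
   registers of the destination live at the head of each update block,
   the mirror image of the test in check_live_1.  */
#if 0
static void
mark_live_range (unsigned char *live_in, int regno, int nregs)
{
  for (int j = 0; j < nregs; j++)	/* bitmap_set_range equivalent */
    live_in[regno + j] = 1;
}
#endif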
1785 | ||
1786 | /* Return 1 if insn can be speculatively moved from block src to trg, | |
1787 | otherwise return 0. Called before first insertion of insn to | |
1788 | ready-list or before the scheduling. */ | |
1789 | ||
1790 | static int | |
b24ef467 | 1791 | check_live (rtx_insn *insn, int src) |
7a31a7bd | 1792 | { |
1793 | /* Find the registers set by instruction. */ | |
1794 | if (GET_CODE (PATTERN (insn)) == SET | |
1795 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
1796 | return check_live_1 (src, PATTERN (insn)); | |
1797 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
1798 | { | |
1799 | int j; | |
1800 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) | |
1801 | if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET | |
1802 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) | |
1803 | && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j))) | |
1804 | return 0; | |
1805 | ||
1806 | return 1; | |
1807 | } | |
1808 | ||
1809 | return 1; | |
1810 | } | |
1811 | ||
1812 | /* Update the live registers info after insn was moved speculatively from | |
1813 | block src to trg. */ | |
1814 | ||
1815 | static void | |
71ce7f59 | 1816 | update_live (rtx_insn *insn, int src) |
7a31a7bd | 1817 | { |
1818 | /* Find the registers set by instruction. */ | |
1819 | if (GET_CODE (PATTERN (insn)) == SET | |
1820 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
1821 | update_live_1 (src, PATTERN (insn)); | |
1822 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
1823 | { | |
1824 | int j; | |
1825 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) | |
1826 | if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET | |
1827 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) | |
1828 | update_live_1 (src, XVECEXP (PATTERN (insn), 0, j)); | |
1829 | } | |
1830 | } | |
1831 | ||
a8b24921 | 1832 | /* Nonzero if block bb_to is equal to, or reachable from, block bb_from. */ | |
7a31a7bd | 1833 | #define IS_REACHABLE(bb_from, bb_to) \ |
40734805 | 1834 | (bb_from == bb_to \ |
7a31a7bd | 1835 | || IS_RGN_ENTRY (bb_from) \ |
08b7917c | 1836 | || (bitmap_bit_p (ancestor_edges[bb_to], \ |
f5a6b05f | 1837 | EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \ |
1838 | BB_TO_BLOCK (bb_from))))))) | |
7a31a7bd | 1839 | |
7a31a7bd | 1840 | /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */ |
1841 | ||
1842 | static void | |
60b8c5b3 | 1843 | set_spec_fed (rtx load_insn) |
7a31a7bd | 1844 | { |
93f6b030 | 1845 | sd_iterator_def sd_it; |
1846 | dep_t dep; | |
7a31a7bd | 1847 | |
93f6b030 | 1848 | FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep) |
1849 | if (DEP_TYPE (dep) == REG_DEP_TRUE) | |
1850 | FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1; | |
9997bd27 | 1851 | } |
7a31a7bd | 1852 | |
1853 | /* On the path from the insn to load_insn_bb, find a conditional | |
1854 | branch depending on insn, that guards the speculative load. */ | |
1855 | ||
1856 | static int | |
71ce7f59 | 1857 | find_conditional_protection (rtx_insn *insn, int load_insn_bb) |
7a31a7bd | 1858 | { |
93f6b030 | 1859 | sd_iterator_def sd_it; |
1860 | dep_t dep; | |
7a31a7bd | 1861 | |
1862 | /* Iterate through DEF-USE forward dependences. */ | |
93f6b030 | 1863 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
7a31a7bd | 1864 | { |
918383b3 | 1865 | rtx_insn *next = DEP_CON (dep); |
9997bd27 | 1866 | |
7a31a7bd | 1867 | if ((CONTAINING_RGN (BLOCK_NUM (next)) == |
1868 | CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb))) | |
1869 | && IS_REACHABLE (INSN_BB (next), load_insn_bb) | |
1870 | && load_insn_bb != INSN_BB (next) | |
93f6b030 | 1871 | && DEP_TYPE (dep) == REG_DEP_TRUE |
6d7dc5b9 | 1872 | && (JUMP_P (next) |
7a31a7bd | 1873 | || find_conditional_protection (next, load_insn_bb))) |
1874 | return 1; | |
1875 | } | |
1876 | return 0; | |
1877 | } /* find_conditional_protection */ | |
1878 | ||
1879 | /* Returns 1 if an insn1 that participates in the computation | |
1880 | of load_insn's address also feeds a conditional branch that is | |
f0b5f617 | 1881 | guarding load_insn. This is true if we find two DEF-USE | |
7a31a7bd | 1882 | chains: |
1883 | insn1 -> ... -> conditional-branch | |
1884 | insn1 -> ... -> load_insn, | |
f0b5f617 | 1885 | and if a flow path exists: |
7a31a7bd | 1886 | insn1 -> ... -> conditional-branch -> ... -> load_insn, |
1887 | and if insn1 is on the path | |
1888 | region-entry -> ... -> bb_trg -> ... load_insn. | |
1889 | ||
9997bd27 | 1890 | Locate insn1 by climbing on INSN_BACK_DEPS from load_insn. |
1891 | Locate the branch by following INSN_FORW_DEPS from insn1. */ | |
7a31a7bd | 1892 | |
1893 | static int | |
60b8c5b3 | 1894 | is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 1895 | { |
93f6b030 | 1896 | sd_iterator_def sd_it; |
1897 | dep_t dep; | |
7a31a7bd | 1898 | |
93f6b030 | 1899 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep) |
7a31a7bd | 1900 | { |
918383b3 | 1901 | rtx_insn *insn1 = DEP_PRO (dep); |
7a31a7bd | 1902 | |
1903 | /* Must be a DEF-USE dependence upon non-branch. */ | |
93f6b030 | 1904 | if (DEP_TYPE (dep) != REG_DEP_TRUE |
6d7dc5b9 | 1905 | || JUMP_P (insn1)) |
7a31a7bd | 1906 | continue; |
1907 | ||
1908 | /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */ | |
1909 | if (INSN_BB (insn1) == bb_src | |
1910 | || (CONTAINING_RGN (BLOCK_NUM (insn1)) | |
1911 | != CONTAINING_RGN (BB_TO_BLOCK (bb_src))) | |
1912 | || (!IS_REACHABLE (bb_trg, INSN_BB (insn1)) | |
1913 | && !IS_REACHABLE (INSN_BB (insn1), bb_trg))) | |
1914 | continue; | |
1915 | ||
1916 | /* Now search for the conditional-branch. */ | |
1917 | if (find_conditional_protection (insn1, bb_src)) | |
1918 | return 1; | |
1919 | ||
1920 | /* Recursive step: search another insn1, "above" current insn1. */ | |
1921 | return is_conditionally_protected (insn1, bb_src, bb_trg); | |
1922 | } | |
1923 | ||
1924 | /* The chain does not exist. */ | |
1925 | return 0; | |
1926 | } /* is_conditionally_protected */ | |
1927 | ||
1928 | /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence | |
1929 | load_insn can move speculatively from bb_src to bb_trg. All the | |
1930 | following must hold: | |
1931 | ||
1932 | (1) both loads have 1 base register (PFREE_CANDIDATEs). | |
1933 | (2) load_insn and the similar load insn2 have a def-use dependence | |
1934 | upon the same insn 'insn1'. | |
1935 | (3) either insn2 is in bb_trg, or: | |
1936 | - there's only one split-block, and | |
1937 | - insn2 is on the escape path. | |
1938 | ||
1939 | From all these we can conclude that the two loads access memory | |
1940 | addresses that differ at most by a constant, and hence if moving | |
1941 | load_insn would cause an exception, it would have been caused by | |
1942 | insn2 anyhow. */ | |
1943 | ||
1944 | static int | |
60b8c5b3 | 1945 | is_pfree (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 1946 | { |
93f6b030 | 1947 | sd_iterator_def back_sd_it; |
1948 | dep_t back_dep; | |
19cb6b50 | 1949 | candidate *candp = candidate_table + bb_src; |
7a31a7bd | 1950 | |
1951 | if (candp->split_bbs.nr_members != 1) | |
1952 | /* Must have exactly one escape block. */ | |
1953 | return 0; | |
1954 | ||
93f6b030 | 1955 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) |
7a31a7bd | 1956 | { |
918383b3 | 1957 | rtx_insn *insn1 = DEP_PRO (back_dep); |
7a31a7bd | 1958 | |
93f6b030 | 1959 | if (DEP_TYPE (back_dep) == REG_DEP_TRUE) |
1960 | /* Found a DEF-USE dependence (insn1, load_insn). */ | |
7a31a7bd | 1961 | { |
93f6b030 | 1962 | sd_iterator_def fore_sd_it; |
1963 | dep_t fore_dep; | |
7a31a7bd | 1964 | |
93f6b030 | 1965 | FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep) |
7a31a7bd | 1966 | { |
918383b3 | 1967 | rtx_insn *insn2 = DEP_CON (fore_dep); |
9997bd27 | 1968 | |
93f6b030 | 1969 | if (DEP_TYPE (fore_dep) == REG_DEP_TRUE) |
7a31a7bd | 1970 | { |
1971 | /* Found a DEF-USE dependence (insn1, insn2). */ | |
1972 | if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) | |
1973 | /* insn2 not guaranteed to be a 1 base reg load. */ | |
1974 | continue; | |
1975 | ||
1976 | if (INSN_BB (insn2) == bb_trg) | |
1977 | /* insn2 is the similar load, in the target block. */ | |
1978 | return 1; | |
1979 | ||
aae97b21 | 1980 | if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2)) |
7a31a7bd | 1981 | /* insn2 is a similar load, in a split-block. */ |
1982 | return 1; | |
1983 | } | |
1984 | } | |
1985 | } | |
1986 | } | |
1987 | ||
1988 | /* Couldn't find a similar load. */ | |
1989 | return 0; | |
1990 | } /* is_pfree */ | |
1991 | ||
7a31a7bd | 1992 | /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by |
1993 | a load moved speculatively, or if load_insn is protected by | |
1994 | a compare on load_insn's address). */ | |
1995 | ||
1996 | static int | |
60b8c5b3 | 1997 | is_prisky (rtx load_insn, int bb_src, int bb_trg) |
7a31a7bd | 1998 | { |
1999 | if (FED_BY_SPEC_LOAD (load_insn)) | |
2000 | return 1; | |
2001 | ||
93f6b030 | 2002 | if (sd_lists_empty_p (load_insn, SD_LIST_BACK)) |
7a31a7bd | 2003 | /* Dependence may 'hide' out of the region. */ |
2004 | return 1; | |
2005 | ||
2006 | if (is_conditionally_protected (load_insn, bb_src, bb_trg)) | |
2007 | return 1; | |
2008 | ||
2009 | return 0; | |
2010 | } | |
2011 | ||
2012 | /* Insn is a candidate to be moved speculatively from bb_src to bb_trg. | |
2013 | Return 1 if insn is exception-free (and the motion is valid) | |
2014 | and 0 otherwise. */ | |
2015 | ||
2016 | static int | |
71ce7f59 | 2017 | is_exception_free (rtx_insn *insn, int bb_src, int bb_trg) |
7a31a7bd | 2018 | { |
2019 | int insn_class = haifa_classify_insn (insn); | |
2020 | ||
2021 | /* Handle non-load insns. */ | |
2022 | switch (insn_class) | |
2023 | { | |
2024 | case TRAP_FREE: | |
2025 | return 1; | |
2026 | case TRAP_RISKY: | |
2027 | return 0; | |
2028 | default:; | |
2029 | } | |
2030 | ||
2031 | /* Handle loads. */ | |
2032 | if (!flag_schedule_speculative_load) | |
2033 | return 0; | |
2034 | IS_LOAD_INSN (insn) = 1; | |
2035 | switch (insn_class) | |
2036 | { | |
2037 | case IFREE: | |
2038 | return 1; | |
2039 | case IRISKY: | |
2040 | return 0; | |
2041 | case PFREE_CANDIDATE: | |
2042 | if (is_pfree (insn, bb_src, bb_trg)) | |
2043 | return 1; | |
2044 | /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */ | |
e3533433 | 2045 | /* FALLTHRU */ |
7a31a7bd | 2046 | case PRISKY_CANDIDATE: |
2047 | if (!flag_schedule_speculative_load_dangerous | |
2048 | || is_prisky (insn, bb_src, bb_trg)) | |
2049 | return 0; | |
2050 | break; | |
2051 | default:; | |
2052 | } | |
2053 | ||
2054 | return flag_schedule_speculative_load_dangerous; | |
2055 | } | |
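/* Editor's summary (informal, derived from the switches above) of how
   the classification maps to the result; every load class additionally
   requires -fsched-spec-load:

     class              result
     -----------------  ----------------------------------------------
     TRAP_FREE          1 (always movable)
     TRAP_RISKY         0 (never)
     IFREE              1
     IRISKY             0
     PFREE_CANDIDATE    1 if is_pfree; otherwise treated as PRISKY
     PRISKY_CANDIDATE   0 unless -fsched-spec-load-dangerous is given
                          and is_prisky does not hold  */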
2056 | \f | |
2057 | /* The number of insns from the current block scheduled so far. */ | |
2058 | static int sched_target_n_insns; | |
2059 | /* The number of insns from the current block to be scheduled in total. */ | |
2060 | static int target_n_insns; | |
2061 | /* The number of insns from the entire region scheduled so far. */ | |
2062 | static int sched_n_insns; | |
2063 | ||
2064 | /* Implementations of the sched_info functions for region scheduling. */ | |
e4897000 | 2065 | static void init_ready_list (void); |
b24ef467 | 2066 | static int can_schedule_ready_p (rtx_insn *); |
2067 | static void begin_schedule_ready (rtx_insn *); | |
2068 | static ds_t new_ready (rtx_insn *, ds_t); | |
60b8c5b3 | 2069 | static int schedule_more_p (void); |
b24ef467 | 2070 | static const char *rgn_print_insn (const rtx_insn *, int); |
2071 | static int rgn_rank (rtx_insn *, rtx_insn *); | |
6aed13f1 | 2072 | static void compute_jump_reg_dependencies (rtx, regset); |
7a31a7bd | 2073 | |
6a1cdb4d | 2074 | /* Functions for speculative scheduling. */ |
b24ef467 | 2075 | static void rgn_add_remove_insn (rtx_insn *, int); |
e1ab7874 | 2076 | static void rgn_add_block (basic_block, basic_block); |
2077 | static void rgn_fix_recovery_cfg (int, int, int); | |
b24ef467 | 2078 | static basic_block advance_target_bb (basic_block, rtx_insn *); |
6a1cdb4d | 2079 | |
7a31a7bd | 2080 | /* Return nonzero if there are more insns that should be scheduled. */ |
2081 | ||
2082 | static int | |
60b8c5b3 | 2083 | schedule_more_p (void) |
7a31a7bd | 2084 | { |
6a1cdb4d | 2085 | return sched_target_n_insns < target_n_insns; |
7a31a7bd | 2086 | } |
2087 | ||
2088 | /* Add all insns that are initially ready to the ready list READY. Called | |
2089 | once before scheduling a set of insns. */ | |
2090 | ||
2091 | static void | |
e4897000 | 2092 | init_ready_list (void) |
7a31a7bd | 2093 | { |
4cd001d5 | 2094 | rtx_insn *prev_head = current_sched_info->prev_head; |
2095 | rtx_insn *next_tail = current_sched_info->next_tail; | |
7a31a7bd | 2096 | int bb_src; |
b24ef467 | 2097 | rtx_insn *insn; |
7a31a7bd | 2098 | |
2099 | target_n_insns = 0; | |
2100 | sched_target_n_insns = 0; | |
2101 | sched_n_insns = 0; | |
2102 | ||
2103 | /* Print debugging information. */ | |
2104 | if (sched_verbose >= 5) | |
a2819fc2 | 2105 | debug_rgn_dependencies (target_bb); |
7a31a7bd | 2106 | |
2107 | /* Prepare current target block info. */ | |
2108 | if (current_nr_blocks > 1) | |
e1ab7874 | 2109 | compute_trg_info (target_bb); |
7a31a7bd | 2110 | |
2111 | /* Initialize ready list with all 'ready' insns in target block. | |
2112 | Count number of insns in the target block being scheduled. */ | |
2113 | for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) | |
48e1416a | 2114 | { |
d452a169 | 2115 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); |
2116 | TODO_SPEC (insn) = HARD_DEP; | |
e4897000 | 2117 | try_ready (insn); |
e26579fc | 2118 | target_n_insns++; |
6a1cdb4d | 2119 | |
2120 | gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL)); | |
7a31a7bd | 2121 | } |
2122 | ||
2123 | /* Add to ready list all 'ready' insns in valid source blocks. | |
2124 | For speculative insns, check liveness, exception-freedom, and | |
2125 | issue delay. */ | |
2126 | for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++) | |
2127 | if (IS_VALID (bb_src)) | |
2128 | { | |
6fe7b8c2 | 2129 | rtx_insn *src_head; |
2130 | rtx_insn *src_next_tail; | |
2131 | rtx_insn *tail, *head; | |
7a31a7bd | 2132 | |
6a1cdb4d | 2133 | get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src), |
2134 | &head, &tail); | |
7a31a7bd | 2135 | src_next_tail = NEXT_INSN (tail); |
2136 | src_head = head; | |
2137 | ||
2138 | for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn)) | |
9221ec44 | 2139 | if (INSN_P (insn)) |
d452a169 | 2140 | { |
2141 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); | |
2142 | TODO_SPEC (insn) = HARD_DEP; | |
2143 | try_ready (insn); | |
2144 | } | |
7a31a7bd | 2145 | } |
2146 | } | |
2147 | ||
2148 | /* Called after taking INSN from the ready list. Returns nonzero if this | |
2149 | insn can be scheduled, zero if we should silently discard it. */ | |
2150 | ||
2151 | static int | |
b24ef467 | 2152 | can_schedule_ready_p (rtx_insn *insn) |
7a31a7bd | 2153 | { |
6a1cdb4d | 2154 | /* An interblock motion? */ |
87417bbc | 2155 | if (INSN_BB (insn) != target_bb && IS_SPECULATIVE_INSN (insn)) |
2156 | { | |
2157 | /* Cannot schedule this insn unless all operands are live. */ | |
2158 | if (!check_live (insn, INSN_BB (insn))) | |
2159 | return 0; | |
2160 | ||
2161 | /* Should not move expensive instructions speculatively. */ | |
2162 | if (GET_CODE (PATTERN (insn)) != CLOBBER | |
2163 | && !targetm.sched.can_speculate_insn (insn)) | |
2164 | return 0; | |
2165 | } | |
2166 | ||
2167 | return 1; | |
6a1cdb4d | 2168 | } |
2295df67 | 2169 | |
9ca2c29a | 2170 | /* Updates counters and other information. Split from can_schedule_ready_p () | |
6a1cdb4d | 2171 | because when we schedule an insn speculatively, the insn passed to | |
2172 | can_schedule_ready_p () differs from the one passed to | |
2173 | begin_schedule_ready (). */ | |
2174 | static void | |
b24ef467 | 2175 | begin_schedule_ready (rtx_insn *insn) |
6a1cdb4d | 2176 | { |
7a31a7bd | 2177 | /* An interblock motion? */ |
2178 | if (INSN_BB (insn) != target_bb) | |
2179 | { | |
7a31a7bd | 2180 | if (IS_SPECULATIVE_INSN (insn)) |
2181 | { | |
6a1cdb4d | 2182 | gcc_assert (check_live (insn, INSN_BB (insn))); |
2183 | ||
7a31a7bd | 2184 | update_live (insn, INSN_BB (insn)); |
2185 | ||
2186 | /* For speculative load, mark insns fed by it. */ | |
2187 | if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn)) | |
2188 | set_spec_fed (insn); | |
2189 | ||
2190 | nr_spec++; | |
2191 | } | |
2192 | nr_inter++; | |
7a31a7bd | 2193 | } |
2194 | else | |
2195 | { | |
2196 | /* In block motion. */ | |
2197 | sched_target_n_insns++; | |
2198 | } | |
2199 | sched_n_insns++; | |
7a31a7bd | 2200 | } |
2201 | ||
6a1cdb4d | 2202 | /* Called after INSN has all its hard dependencies resolved and the speculation |
2203 | of type TS is enough to overcome them all. | |
2204 | Return nonzero if it should be moved to the ready list or the queue, or zero | |
2205 | if we should silently discard it. */ | |
2206 | static ds_t | |
b24ef467 | 2207 | new_ready (rtx_insn *next, ds_t ts) |
7a31a7bd | 2208 | { |
6a1cdb4d | 2209 | if (INSN_BB (next) != target_bb) |
2210 | { | |
2211 | int not_ex_free = 0; | |
2212 | ||
2213 | /* For speculative insns, before inserting to ready/queue, | |
48e1416a | 2214 | check liveness, exception-freedom, and issue delay. */ | |
6a1cdb4d | 2215 | if (!IS_VALID (INSN_BB (next)) |
7a31a7bd | 2216 | || CANT_MOVE (next) |
2217 | || (IS_SPECULATIVE_INSN (next) | |
67900a4f | 2218 | && ((recog_memoized (next) >= 0 |
48e1416a | 2219 | && min_insn_conflict_delay (curr_state, next, next) |
6a1cdb4d | 2220 | > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY)) |
fd27912f | 2221 | || IS_SPECULATION_CHECK_P (next) |
7a31a7bd | 2222 | || !check_live (next, INSN_BB (next)) |
6a1cdb4d | 2223 | || (not_ex_free = !is_exception_free (next, INSN_BB (next), |
2224 | target_bb))))) | |
2225 | { | |
2226 | if (not_ex_free | |
2227 | /* We are here because is_exception_free () == false. | |
2228 | But we possibly can handle that with control speculation. */ | |
e1ab7874 | 2229 | && sched_deps_info->generate_spec_deps |
2230 | && spec_info->mask & BEGIN_CONTROL) | |
42de5f34 | 2231 | { |
2232 | ds_t new_ds; | |
2233 | ||
2234 | /* Add control speculation to NEXT's dependency type. */ | |
2235 | new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK); | |
2236 | ||
2237 | /* Check if NEXT can be speculated with new dependency type. */ | |
2238 | if (sched_insn_is_legitimate_for_speculation_p (next, new_ds)) | |
2239 | /* Here we got new control-speculative instruction. */ | |
2240 | ts = new_ds; | |
2241 | else | |
2242 | /* NEXT isn't ready yet. */ | |
d452a169 | 2243 | ts = DEP_POSTPONED; |
42de5f34 | 2244 | } |
6a1cdb4d | 2245 | else |
42de5f34 | 2246 | /* NEXT isn't ready yet. */ |
d452a169 | 2247 | ts = DEP_POSTPONED; |
6a1cdb4d | 2248 | } |
2249 | } | |
48e1416a | 2250 | |
6a1cdb4d | 2251 | return ts; |
7a31a7bd | 2252 | } |
2253 | ||
2254 | /* Return a string that contains the insn uid and optionally anything else | |
2255 | necessary to identify this insn in an output. It's valid to use a | |
2256 | static buffer for this. The ALIGNED parameter should cause the string | |
2257 | to be formatted so that multiple output lines will line up nicely. */ | |
2258 | ||
2259 | static const char * | |
b24ef467 | 2260 | rgn_print_insn (const rtx_insn *insn, int aligned) |
7a31a7bd | 2261 | { |
2262 | static char tmp[80]; | |
2263 | ||
2264 | if (aligned) | |
2265 | sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn)); | |
2266 | else | |
2267 | { | |
7a31a7bd | 2268 | if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb) |
cda0a5f5 | 2269 | sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn)); |
2270 | else | |
2271 | sprintf (tmp, "%d", INSN_UID (insn)); | |
7a31a7bd | 2272 | } |
2273 | return tmp; | |
2274 | } | |
2275 | ||
2276 | /* Compare priority of two insns. Return a positive number if the second | |
2277 | insn is to be preferred for scheduling, and a negative one if the first | |
2278 | is to be preferred. Zero if they are equally good. */ | |
2279 | ||
2280 | static int | |
b24ef467 | 2281 | rgn_rank (rtx_insn *insn1, rtx_insn *insn2) |
7a31a7bd | 2282 | { |
2283 | /* Some comparisons make sense in interblock scheduling only. */ | |
2284 | if (INSN_BB (insn1) != INSN_BB (insn2)) | |
2285 | { | |
2286 | int spec_val, prob_val; | |
2287 | ||
2288 | /* Prefer an inblock motion over an interblock motion. */ | |
2289 | if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb)) | |
2290 | return 1; | |
2291 | if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb)) | |
2292 | return -1; | |
2293 | ||
2294 | /* Prefer a useful motion over a speculative one. */ | |
2295 | spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2); | |
2296 | if (spec_val) | |
2297 | return spec_val; | |
2298 | ||
2299 | /* Prefer a more probable (speculative) insn. */ | |
2300 | prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1); | |
2301 | if (prob_val) | |
2302 | return prob_val; | |
2303 | } | |
2304 | return 0; | |
2305 | } | |
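/* Editor's note: a standalone toy comparator (hypothetical struct, not
   GCC's rtx_insn) mirroring the three-level preference above: in-block
   before interblock, useful before speculative, likelier before rarer.  */
#if 0
struct toy_insn { int bb, is_speculative, probability; };

static int
toy_rank (const struct toy_insn *a, const struct toy_insn *b, int target_bb)
{
  if (a->bb != b->bb)
    {
      if (b->bb == target_bb && a->bb != target_bb)
	return 1;	/* prefer B: it is an in-block motion */
      if (a->bb == target_bb && b->bb != target_bb)
	return -1;	/* prefer A for the same reason */

      int spec_val = a->is_speculative - b->is_speculative;
      if (spec_val)
	return spec_val;	/* prefer the non-speculative insn */

      int prob_val = b->probability - a->probability;
      if (prob_val)
	return prob_val;	/* prefer the more probable insn */
    }
  return 0;	/* equally good */
}
#endif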
2306 | ||
d6141c0c | 2307 | /* NEXT is an instruction that depends on INSN (a backward dependence); |
2308 | return nonzero if we should include this dependence in priority | |
2309 | calculations. */ | |
2310 | ||
e1ab7874 | 2311 | int |
b24ef467 | 2312 | contributes_to_priority (rtx_insn *next, rtx_insn *insn) |
d6141c0c | 2313 | { |
6a1cdb4d | 2314 | /* NEXT and INSN reside in one ebb. */ |
2315 | return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn)); | |
d6141c0c | 2316 | } |
2317 | ||
6aed13f1 | 2318 | /* INSN is a JUMP_INSN. Store the set of registers that must be |
2319 | considered as used by this jump in USED. */ | |
d6141c0c | 2320 | |
2321 | static void | |
60b8c5b3 | 2322 | compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED, |
6aed13f1 | 2323 | regset used ATTRIBUTE_UNUSED) |
d6141c0c | 2324 | { |
2325 | /* Nothing to do here, since we postprocess jumps in | |
2326 | add_branch_dependences. */ | |
2327 | } | |
2328 | ||
48e1416a | 2329 | /* This variable holds common_sched_info hooks and data relevant to |
e1ab7874 | 2330 | the interblock scheduler. */ |
2331 | static struct common_sched_info_def rgn_common_sched_info; | |
2332 | ||
2333 | ||
2334 | /* This holds data for the dependence analysis relevant to | |
2335 | the interblock scheduler. */ | |
2336 | static struct sched_deps_info_def rgn_sched_deps_info; | |
2337 | ||
2338 | /* This holds constant data used for initializing the above structure | |
2339 | for the Haifa scheduler. */ | |
2340 | static const struct sched_deps_info_def rgn_const_sched_deps_info = | |
2341 | { | |
2342 | compute_jump_reg_dependencies, | |
2343 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2344 | 0, 0, 0 | |
2345 | }; | |
2346 | ||
2347 | /* Same as above, but for the selective scheduler. */ | |
2348 | static const struct sched_deps_info_def rgn_const_sel_sched_deps_info = | |
2349 | { | |
2350 | compute_jump_reg_dependencies, | |
2351 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | |
2352 | 0, 0, 0 | |
2353 | }; | |
2354 | ||
4db82bc9 | 2355 | /* Return true if scheduling INSN will trigger finish of scheduling |
2356 | current block. */ | |
2357 | static bool | |
b24ef467 | 2358 | rgn_insn_finishes_block_p (rtx_insn *insn) |
4db82bc9 | 2359 | { |
2360 | if (INSN_BB (insn) == target_bb | |
2361 | && sched_target_n_insns + 1 == target_n_insns) | |
2362 | /* INSN is the last not-scheduled instruction in the current block. */ | |
2363 | return true; | |
2364 | ||
2365 | return false; | |
2366 | } | |
2367 | ||
7a31a7bd | 2368 | /* Used in schedule_insns to initialize current_sched_info for scheduling |
2369 | regions (or single basic blocks). */ | |
2370 | ||
e1ab7874 | 2371 | static const struct haifa_sched_info rgn_const_sched_info = |
7a31a7bd | 2372 | { |
2373 | init_ready_list, | |
2374 | can_schedule_ready_p, | |
2375 | schedule_more_p, | |
2376 | new_ready, | |
2377 | rgn_rank, | |
2378 | rgn_print_insn, | |
d6141c0c | 2379 | contributes_to_priority, |
4db82bc9 | 2380 | rgn_insn_finishes_block_p, |
7a31a7bd | 2381 | |
2382 | NULL, NULL, | |
2383 | NULL, NULL, | |
e1ab7874 | 2384 | 0, 0, |
4d64d9a4 | 2385 | |
e1ab7874 | 2386 | rgn_add_remove_insn, |
6a1cdb4d | 2387 | begin_schedule_ready, |
d2412f57 | 2388 | NULL, |
6a1cdb4d | 2389 | advance_target_bb, |
e2f4a6ff | 2390 | NULL, NULL, |
3072d30e | 2391 | SCHED_RGN |
7a31a7bd | 2392 | }; |
2393 | ||
e1ab7874 | 2394 | /* This variable holds the data and hooks needed to the Haifa scheduler backend |
2395 | for the interblock scheduler frontend. */ | |
2396 | static struct haifa_sched_info rgn_sched_info; | |
2397 | ||
2398 | /* Returns maximum priority that an insn was assigned to. */ | |
2399 | ||
2400 | int | |
2401 | get_rgn_sched_max_insns_priority (void) | |
2402 | { | |
2403 | return rgn_sched_info.sched_max_insns_priority; | |
2404 | } | |
2405 | ||
24dd0668 | 2406 | /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */ |
cbf780cc | 2407 | |
2408 | static bool | |
60b8c5b3 | 2409 | sets_likely_spilled (rtx pat) |
cbf780cc | 2410 | { |
2411 | bool ret = false; | |
2412 | note_stores (pat, sets_likely_spilled_1, &ret); | |
2413 | return ret; | |
2414 | } | |
2415 | ||
2416 | static void | |
81a410b1 | 2417 | sets_likely_spilled_1 (rtx x, const_rtx pat, void *data) |
cbf780cc | 2418 | { |
2419 | bool *ret = (bool *) data; | |
2420 | ||
2421 | if (GET_CODE (pat) == SET | |
2422 | && REG_P (x) | |
24dd0668 | 2423 | && HARD_REGISTER_P (x) |
2424 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x)))) | |
cbf780cc | 2425 | *ret = true; |
2426 | } | |
2427 | ||
566e7db2 | 2428 | /* A bitmap to note insns that participate in any dependency. Used in |
2429 | add_branch_dependences. */ | |
2430 | static sbitmap insn_referenced; | |
e1ab7874 | 2431 | |
7a31a7bd | 2432 | /* Add dependences so that branches are scheduled to run last in their |
2433 | block. */ | |
7a31a7bd | 2434 | static void |
b24ef467 | 2435 | add_branch_dependences (rtx_insn *head, rtx_insn *tail) |
7a31a7bd | 2436 | { |
b24ef467 | 2437 | rtx_insn *insn, *last; |
7a31a7bd | 2438 | |
cbaab9a3 | 2439 | /* For all branches, calls, uses, clobbers, cc0 setters, and instructions |
2440 | that can throw exceptions, force them to remain in order at the end of | |
2441 | the block by adding dependencies and giving the last a high priority. | |
2442 | There may be notes present, and prev_head may also be a note. | |
7a31a7bd | 2443 | |
2444 | Branches must obviously remain at the end. Calls should remain at the | |
2445 | end since moving them results in worse register allocation. Uses remain | |
cbf780cc | 2446 | at the end to ensure proper register allocation. |
2447 | ||
40e55fbb | 2448 | cc0 setters remain at the end because they can't be moved away from |
cbf780cc | 2449 | their cc0 user. |
2450 | ||
681b9609 | 2451 | Predecessors of SCHED_GROUP_P instructions at the end remain at the end. |
2452 | ||
e6a25dc9 | 2453 | COND_EXEC insns cannot be moved past a branch (see e.g. PR17808). |
2454 | ||
24dd0668 | 2455 | Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return |
2456 | values) are not moved before reload because we can wind up with register | |
cbf780cc | 2457 | allocation failures. */ |
2458 | ||
9845d120 | 2459 | while (tail != head && DEBUG_INSN_P (tail)) |
2460 | tail = PREV_INSN (tail); | |
2461 | ||
7a31a7bd | 2462 | insn = tail; |
2463 | last = 0; | |
6d7dc5b9 | 2464 | while (CALL_P (insn) |
91f71fa3 | 2465 | || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) |
6d7dc5b9 | 2466 | || (NONJUMP_INSN_P (insn) |
7a31a7bd | 2467 | && (GET_CODE (PATTERN (insn)) == USE |
2468 | || GET_CODE (PATTERN (insn)) == CLOBBER | |
cbaab9a3 | 2469 | || can_throw_internal (insn) |
ff900b8e | 2470 | || (HAVE_cc0 && sets_cc0_p (PATTERN (insn))) |
cbf780cc | 2471 | || (!reload_completed |
2472 | && sets_likely_spilled (PATTERN (insn))))) | |
681b9609 | 2473 | || NOTE_P (insn) |
2474 | || (last != 0 && SCHED_GROUP_P (last))) | |
7a31a7bd | 2475 | { |
6d7dc5b9 | 2476 | if (!NOTE_P (insn)) |
7a31a7bd | 2477 | { |
9997bd27 | 2478 | if (last != 0 |
93f6b030 | 2479 | && sd_find_dep_between (insn, last, false) == NULL) |
7a31a7bd | 2480 | { |
e6a25dc9 | 2481 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2482 | add_dependence (last, insn, REG_DEP_ANTI); | |
08b7917c | 2483 | bitmap_set_bit (insn_referenced, INSN_LUID (insn)); |
7a31a7bd | 2484 | } |
2485 | ||
2486 | CANT_MOVE (insn) = 1; | |
2487 | ||
2488 | last = insn; | |
7a31a7bd | 2489 | } |
2490 | ||
2491 | /* Don't overrun the bounds of the basic block. */ | |
2492 | if (insn == head) | |
2493 | break; | |
2494 | ||
9845d120 | 2495 | do |
2496 | insn = PREV_INSN (insn); | |
2497 | while (insn != head && DEBUG_INSN_P (insn)); | |
7a31a7bd | 2498 | } |
2499 | ||
2500 | /* Make sure these insns are scheduled last in their block. */ | |
2501 | insn = last; | |
2502 | if (insn != 0) | |
2503 | while (insn != head) | |
2504 | { | |
2505 | insn = prev_nonnote_insn (insn); | |
2506 | ||
08b7917c | 2507 | if (bitmap_bit_p (insn_referenced, INSN_LUID (insn)) |
9845d120 | 2508 | || DEBUG_INSN_P (insn)) |
7a31a7bd | 2509 | continue; |
2510 | ||
e6a25dc9 | 2511 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2512 | add_dependence (last, insn, REG_DEP_ANTI); | |
7a31a7bd | 2513 | } |
e6a25dc9 | 2514 | |
751d3ba7 | 2515 | if (!targetm.have_conditional_execution ()) |
2516 | return; | |
2517 | ||
e6a25dc9 | 2518 | /* Finally, if the block ends in a jump, and we are doing intra-block |
2519 | scheduling, make sure that the branch depends on any COND_EXEC insns | |
2520 | inside the block to avoid moving the COND_EXECs past the branch insn. | |
2521 | ||
2522 | We only have to do this after reload, because (1) before reload there | |
2523 | are no COND_EXEC insns, and (2) the region scheduler is an intra-block | |
2524 | scheduler after reload. | |
2525 | ||
2526 | FIXME: We could in some cases move COND_EXEC insns past the branch if | |
2527 | this scheduler would be a little smarter. Consider this code: | |
2528 | ||
2529 | T = [addr] | |
2530 | C ? addr += 4 | |
f19b9016 | 2531 | !C ? X += 12 |
e6a25dc9 | 2532 | C ? T += 1 |
f19b9016 | 2533 | C ? jump foo |
e6a25dc9 | 2534 | |
2535 | On a target with a one cycle stall on a memory access the optimal | |
2536 | sequence would be: | |
2537 | ||
2538 | T = [addr] | |
2539 | C ? addr += 4 | |
2540 | C ? T += 1 | |
2541 | C ? jump foo | |
2542 | !C ? X += 12 | |
2543 | ||
2544 | We don't want to put the 'X += 12' before the branch because it just | |
2545 | wastes a cycle of execution time when the branch is taken. | |
2546 | ||
2547 | Note that in the example "!C" will always be true. That is another | |
2548 | possible improvement for handling COND_EXECs in this scheduler: it | |
2549 | could remove always-true predicates. */ | |
2550 | ||
91f71fa3 | 2551 | if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail))) |
e6a25dc9 | 2552 | return; |
2553 | ||
f19b9016 | 2554 | insn = tail; |
e6a25dc9 | 2555 | while (insn != head) |
2556 | { | |
f19b9016 | 2557 | insn = PREV_INSN (insn); |
2558 | ||
e6a25dc9 | 2559 | /* Note that we want to add this dependency even when |
2560 | sched_insns_conditions_mutex_p returns true. The whole point | |
2561 | is that we _want_ this dependency, even if these insns really | |
2562 | are independent. */ | |
2563 | if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC) | |
2564 | add_dependence (tail, insn, REG_DEP_ANTI); | |
e6a25dc9 | 2565 | } |
7a31a7bd | 2566 | } |
2567 | ||
2568 | /* Data structures for the computation of data dependences in a region. We | |
2569 | keep one `deps' structure for every basic block. Before analyzing the | |
2570 | data dependences for a bb, its variables are initialized as a function of | |
2571 | the variables of its predecessors. When the analysis for a bb completes, | |
2572 | we save the contents to the corresponding bb_deps[bb] variable. */ | |
2573 | ||
68e419a1 | 2574 | static struct deps_desc *bb_deps; |
7a31a7bd | 2575 | |
5deaeb50 | 2576 | static void |
8e56831f | 2577 | concat_insn_mem_list (rtx_insn_list *copy_insns, |
2578 | rtx_expr_list *copy_mems, | |
54267fdf | 2579 | rtx_insn_list **old_insns_p, |
8e56831f | 2580 | rtx_expr_list **old_mems_p) |
5deaeb50 | 2581 | { |
54267fdf | 2582 | rtx_insn_list *new_insns = *old_insns_p; |
8e56831f | 2583 | rtx_expr_list *new_mems = *old_mems_p; |
5deaeb50 | 2584 | |
2585 | while (copy_insns) | |
2586 | { | |
54267fdf | 2587 | new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns); |
8e56831f | 2588 | new_mems = alloc_EXPR_LIST (VOIDmode, copy_mems->element (), new_mems); |
54267fdf | 2589 | copy_insns = copy_insns->next (); |
8e56831f | 2590 | copy_mems = copy_mems->next (); |
5deaeb50 | 2591 | } |
2592 | ||
2593 | *old_insns_p = new_insns; | |
2594 | *old_mems_p = new_mems; | |
2595 | } | |
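/* Editor's sketch (toy singly linked list, not INSN_LIST/EXPR_LIST):
   the loop above conses a fresh cell per predecessor entry onto the
   successor's list, so the two blocks never share cells and each list
   can later be freed exactly once.  */
#if 0
#include <stdlib.h>

struct cell { int payload; struct cell *next; };

static struct cell *
copy_onto (const struct cell *copy, struct cell *old)
{
  struct cell *result = old;
  for (; copy != NULL; copy = copy->next)
    {
      struct cell *c = (struct cell *) malloc (sizeof *c);
      c->payload = copy->payload;	/* like alloc_INSN_LIST */
      c->next = result;
      result = c;
    }
  return result;
}
#endif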
2596 | ||
e1ab7874 | 2597 | /* Join PRED_DEPS to SUCC_DEPS. */ | |
2598 | void | |
68e419a1 | 2599 | deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps) |
e1ab7874 | 2600 | { |
2601 | unsigned reg; | |
2602 | reg_set_iterator rsi; | |
2603 | ||
2604 | /* The reg_last lists are inherited by successor. */ | |
2605 | EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi) | |
2606 | { | |
2607 | struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; | |
2608 | struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; | |
2609 | ||
2610 | succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); | |
2611 | succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); | |
a7dcf969 | 2612 | succ_rl->implicit_sets |
2613 | = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets); | |
e1ab7874 | 2614 | succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, |
2615 | succ_rl->clobbers); | |
2616 | succ_rl->uses_length += pred_rl->uses_length; | |
2617 | succ_rl->clobbers_length += pred_rl->clobbers_length; | |
2618 | } | |
2619 | IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); | |
2620 | ||
2621 | /* Mem read/write lists are inherited by successor. */ | |
2622 | concat_insn_mem_list (pred_deps->pending_read_insns, | |
2623 | pred_deps->pending_read_mems, | |
2624 | &succ_deps->pending_read_insns, | |
2625 | &succ_deps->pending_read_mems); | |
2626 | concat_insn_mem_list (pred_deps->pending_write_insns, | |
2627 | pred_deps->pending_write_mems, | |
2628 | &succ_deps->pending_write_insns, | |
2629 | &succ_deps->pending_write_mems); | |
2630 | ||
effd1640 | 2631 | succ_deps->pending_jump_insns |
2632 | = concat_INSN_LIST (pred_deps->pending_jump_insns, | |
2633 | succ_deps->pending_jump_insns); | |
e1ab7874 | 2634 | succ_deps->last_pending_memory_flush |
2635 | = concat_INSN_LIST (pred_deps->last_pending_memory_flush, | |
2636 | succ_deps->last_pending_memory_flush); | |
2637 | ||
2638 | succ_deps->pending_read_list_length += pred_deps->pending_read_list_length; | |
2639 | succ_deps->pending_write_list_length += pred_deps->pending_write_list_length; | |
2640 | succ_deps->pending_flush_length += pred_deps->pending_flush_length; | |
2641 | ||
2642 | /* last_function_call is inherited by successor. */ | |
2643 | succ_deps->last_function_call | |
2644 | = concat_INSN_LIST (pred_deps->last_function_call, | |
2645 | succ_deps->last_function_call); | |
2646 | ||
326d0c19 | 2647 | /* last_function_call_may_noreturn is inherited by successor. */ |
2648 | succ_deps->last_function_call_may_noreturn | |
2649 | = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn, | |
2650 | succ_deps->last_function_call_may_noreturn); | |
2651 | ||
e1ab7874 | 2652 | /* sched_before_next_call is inherited by successor. */ |
2653 | succ_deps->sched_before_next_call | |
2654 | = concat_INSN_LIST (pred_deps->sched_before_next_call, | |
2655 | succ_deps->sched_before_next_call); | |
2656 | } | |
2657 | ||
7a31a7bd | 2658 | /* After computing the dependencies for block BB, propagate the dependencies |
749c6f58 | 2659 | found in PRED_DEPS to the successors of the block. */ | |
7a31a7bd | 2660 | static void |
68e419a1 | 2661 | propagate_deps (int bb, struct deps_desc *pred_deps) |
7a31a7bd | 2662 | { |
f5a6b05f | 2663 | basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb)); |
aae97b21 | 2664 | edge_iterator ei; |
2665 | edge e; | |
7a31a7bd | 2666 | |
2667 | /* bb's structures are inherited by its successors. */ | |
aae97b21 | 2668 | FOR_EACH_EDGE (e, ei, block->succs) |
2669 | { | |
aae97b21 | 2670 | /* Only bbs "below" bb, in the same region, are interesting. */ |
34154e27 | 2671 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun) |
aae97b21 | 2672 | || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index) |
2673 | || BLOCK_TO_BB (e->dest->index) <= bb) | |
2674 | continue; | |
5deaeb50 | 2675 | |
e1ab7874 | 2676 | deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps); |
aae97b21 | 2677 | } |
7a31a7bd | 2678 | |
5deaeb50 | 2679 | /* These lists should point to the right place, for correct |
2680 | freeing later. */ | |
2681 | bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns; | |
2682 | bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems; | |
2683 | bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns; | |
2684 | bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems; | |
effd1640 | 2685 | bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns; |
5deaeb50 | 2686 | |
2687 | /* Can't allow these to be freed twice. */ | |
2688 | pred_deps->pending_read_insns = 0; | |
2689 | pred_deps->pending_read_mems = 0; | |
2690 | pred_deps->pending_write_insns = 0; | |
2691 | pred_deps->pending_write_mems = 0; | |
effd1640 | 2692 | pred_deps->pending_jump_insns = 0; |
7a31a7bd | 2693 | } |
2694 | ||
93f6b030 | 2695 | /* Compute dependences inside bb. In a multiple-block region: | |
7a31a7bd | 2696 | (1) a bb is analyzed after its predecessors, and (2) the lists in |
2697 | effect at the end of bb (after analyzing for bb) are inherited by | |
de132707 | 2698 | bb's successors. |
7a31a7bd | 2699 | |
2700 | Specifically for reg-reg data dependences, the block insns are | |
a7dcf969 | 2701 | scanned by sched_analyze () top-to-bottom. Three lists are |
749c6f58 | 2702 | maintained by sched_analyze (): reg_last[].sets for register DEFs, |
a7dcf969 | 2703 | reg_last[].implicit_sets for implicit hard register DEFs, and |
2704 | reg_last[].uses for register USEs. | |
7a31a7bd | 2705 | |
2706 | When analysis is completed for bb, we update for its successors: | |
2707 | ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) | |
a7dcf969 | 2708 | ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb]) |
7a31a7bd | 2709 | ; - USES[succ] = Union (USES [succ], USES [bb]) | |
2710 | ||
2711 | The mechanism for computing mem-mem data dependence is very | |
2712 | similar, and the result is interblock dependences in the region. */ | |
2713 | ||
2714 | static void | |
93f6b030 | 2715 | compute_block_dependences (int bb) |
7a31a7bd | 2716 | { |
6fe7b8c2 | 2717 | rtx_insn *head, *tail; |
68e419a1 | 2718 | struct deps_desc tmp_deps; |
7a31a7bd | 2719 | |
2720 | tmp_deps = bb_deps[bb]; | |
2721 | ||
2722 | /* Do the analysis for this block. */ | |
6a1cdb4d | 2723 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2724 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
93f6b030 | 2725 | |
7a31a7bd | 2726 | sched_analyze (&tmp_deps, head, tail); |
e1ab7874 | 2727 | |
2728 | /* Selective scheduling handles control dependencies by itself. */ | |
2729 | if (!sel_sched_p ()) | |
2730 | add_branch_dependences (head, tail); | |
7a31a7bd | 2731 | |
2732 | if (current_nr_blocks > 1) | |
749c6f58 | 2733 | propagate_deps (bb, &tmp_deps); |
7a31a7bd | 2734 | |
2735 | /* Free up the INSN_LISTs. */ | |
2736 | free_deps (&tmp_deps); | |
93f6b030 | 2737 | |
2738 | if (targetm.sched.dependencies_evaluation_hook) | |
2739 | targetm.sched.dependencies_evaluation_hook (head, tail); | |
2740 | } | |
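/* Editor's sketch (toy bitsets, not GCC's reg_last lists) of the
   successor update stated in the comment above: each list is inherited
   by union into every later block of the region reachable from bb.  */
#if 0
static void
inherit_into_successor (unsigned int *defs, unsigned int *implicit_defs,
			unsigned int *uses, int succ, int bb)
{
  defs[succ] |= defs[bb];
  implicit_defs[succ] |= implicit_defs[bb];
  uses[succ] |= uses[bb];
}
#endif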
2741 | ||
2742 | /* Free dependencies of instructions inside BB. */ | |
2743 | static void | |
2744 | free_block_dependencies (int bb) | |
2745 | { | |
6fe7b8c2 | 2746 | rtx_insn *head; |
2747 | rtx_insn *tail; | |
93f6b030 | 2748 | |
2749 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
2750 | ||
9845d120 | 2751 | if (no_real_insns_p (head, tail)) |
2752 | return; | |
2753 | ||
93f6b030 | 2754 | sched_free_deps (head, tail, true); |
7a31a7bd | 2755 | } |
749c6f58 | 2756 | |
7a31a7bd | 2757 | /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add |
2758 | them to the unused_*_list variables, so that they can be reused. */ | |
2759 | ||
2760 | static void | |
60b8c5b3 | 2761 | free_pending_lists (void) |
7a31a7bd | 2762 | { |
2763 | int bb; | |
2764 | ||
2765 | for (bb = 0; bb < current_nr_blocks; bb++) | |
2766 | { | |
2767 | free_INSN_LIST_list (&bb_deps[bb].pending_read_insns); | |
2768 | free_INSN_LIST_list (&bb_deps[bb].pending_write_insns); | |
2769 | free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems); | |
2770 | free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems); | |
effd1640 | 2771 | free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns); |
7a31a7bd | 2772 | } |
2773 | } | |
2774 | \f | |
93f6b030 | 2775 | /* Print dependences for debugging starting from FROM_BB. |
2776 | Callable from debugger. */ | |
4b987fac | 2779 | DEBUG_FUNCTION void |
a2819fc2 | 2780 | debug_rgn_dependencies (int from_bb) |
7a31a7bd | 2781 | { |
2782 | int bb; | |
2783 | ||
a2819fc2 | 2784 | fprintf (sched_dump, |
2785 | ";; --------------- forward dependences: ------------ \n"); | |
2786 | ||
2787 | for (bb = from_bb; bb < current_nr_blocks; bb++) | |
7a31a7bd | 2788 | { |
6fe7b8c2 | 2789 | rtx_insn *head, *tail; |
67900a4f | 2790 | |
6a1cdb4d | 2791 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
67900a4f | 2792 | fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n", |
2793 | BB_TO_BLOCK (bb), bb); | |
2794 | ||
a2819fc2 | 2795 | debug_dependencies (head, tail); |
2796 | } | |
2797 | } | |
67900a4f | 2798 | |
a2819fc2 | 2799 | /* Print dependencies information for instructions between HEAD and TAIL. |
2800 | ??? This function would probably fit best in haifa-sched.c. */ | |
73e15687 | 2801 | void
debug_dependencies (rtx_insn *head, rtx_insn *tail)
a2819fc2 | 2802 | { |
73e15687 | 2803 | rtx_insn *insn; |
2804 | rtx_insn *next_tail = NEXT_INSN (tail); | |
a2819fc2 | 2805 | |
2806 | fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", | |
2807 | "insn", "code", "bb", "dep", "prio", "cost", | |
2808 | "reservation"); | |
2809 | fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", | |
2810 | "----", "----", "--", "---", "----", "----", | |
2811 | "-----------"); | |
7a31a7bd | 2812 | |
a2819fc2 | 2813 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
2814 | { | |
a2819fc2 | 2815 | if (! INSN_P (insn)) |
2816 | { | |
2817 | int n; | |
2818 | fprintf (sched_dump, ";; %6d ", INSN_UID (insn)); | |
2819 | if (NOTE_P (insn)) | |
7a31a7bd | 2820 | { |
ad4583d9 | 2821 | n = NOTE_KIND (insn); |
2822 | fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n)); | |
7a31a7bd | 2823 | } |
67900a4f | 2824 | else |
a2819fc2 | 2825 | fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn))); |
2826 | continue; | |
7a31a7bd | 2827 | } |
a2819fc2 | 2828 | |
2829 | fprintf (sched_dump, | |
2830 | ";; %s%5d%6d%6d%6d%6d%6d ", | |
2831 | (SCHED_GROUP_P (insn) ? "+" : " "), | |
2832 | INSN_UID (insn), | |
2833 | INSN_CODE (insn), | |
2834 | BLOCK_NUM (insn), | |
e1ab7874 | 2835 | sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK), |
2836 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 | |
2837 | : INSN_PRIORITY (insn)) | |
2838 | : INSN_PRIORITY (insn)), | |
2839 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 | |
5e53acc3 | 2840 | : insn_sched_cost (insn)) |
2841 | : insn_sched_cost (insn))); | |
a2819fc2 | 2842 | |
2843 | if (recog_memoized (insn) < 0) | |
2844 | fprintf (sched_dump, "nothing"); | |
2845 | else | |
2846 | print_reservation (sched_dump, insn); | |
2847 | ||
2848 | fprintf (sched_dump, "\t: "); | |
93f6b030 | 2849 | { |
2850 | sd_iterator_def sd_it; | |
2851 | dep_t dep; | |
2852 | ||
2853 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) | |
d452a169 | 2854 | fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)), |
2855 | DEP_NONREG (dep) ? "n" : "", | |
2856 | DEP_MULTIPLE (dep) ? "m" : ""); | |
93f6b030 | 2857 | } |
a2819fc2 | 2858 | fprintf (sched_dump, "\n"); |
7a31a7bd | 2859 | } |
a2819fc2 | 2860 | |
7a31a7bd | 2861 | fprintf (sched_dump, "\n"); |
2862 | } | |
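/* A hypothetical line of the resulting dump (illustrative UIDs and values
   only, not taken from a real run):

     ;;   +   42    17     3     2     5     4  nothing	: 45n 47m

   i.e. insn 42 (part of a SCHED_GROUP, hence the '+') has insn code 17,
   lives in block 3, has 2 backward dependences, priority 5 and cost 4,
   no DFA reservation, and forward dependences on insn 45 (non-register)
   and insn 47 (multiple).  */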
eabf74c2 | 2863 | |
2864 | /* Dump dependency graph for the current region to a file using dot syntax. */ | |
2865 | ||
2866 | void | |
2867 | dump_rgn_dependencies_dot (FILE *file) | |
2868 | { | |
2869 | rtx_insn *head, *tail, *con, *pro; | |
2870 | sd_iterator_def sd_it; | |
2871 | dep_t dep; | |
2872 | int bb; | |
2873 | pretty_printer pp; | |
2874 | ||
2875 | pp.buffer->stream = file; | |
2876 | pp_printf (&pp, "digraph SchedDG {\n"); | |
2877 | ||
2878 | for (bb = 0; bb < current_nr_blocks; ++bb) | |
2879 | { | |
2880 | /* Begin subgraph (basic block). */ | |
2881 | pp_printf (&pp, "subgraph cluster_block_%d {\n", bb); | |
2882 | pp_printf (&pp, "\t" "color=blue;" "\n"); | |
2883 | pp_printf (&pp, "\t" "style=bold;" "\n"); | |
2884 | pp_printf (&pp, "\t" "label=\"BB #%d\";\n", BB_TO_BLOCK (bb)); | |
2885 | ||
2886 | /* Set up head and tail (no support for EBBs). */
2887 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); | |
2888 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
2889 | tail = NEXT_INSN (tail); | |
2890 | ||
2891 | /* Dump all insns. */ | |
2892 | for (con = head; con != tail; con = NEXT_INSN (con)) | |
2893 | { | |
2894 | if (!INSN_P (con)) | |
2895 | continue; | |
2896 | ||
2897 | /* Pretty print the insn. */ | |
2898 | pp_printf (&pp, "\t%d [label=\"{", INSN_UID (con)); | |
2899 | pp_write_text_to_stream (&pp); | |
2900 | print_insn (&pp, con, /*verbose=*/false); | |
2901 | pp_write_text_as_dot_label_to_stream (&pp, /*for_record=*/true); | |
2902 | pp_write_text_to_stream (&pp); | |
2903 | ||
2904 | /* Dump instruction attributes. */ | |
2905 | pp_printf (&pp, "|{ uid:%d | luid:%d | prio:%d }}\",shape=record]\n", | |
2906 | INSN_UID (con), INSN_LUID (con), INSN_PRIORITY (con)); | |
2907 | ||
2908 | /* Dump all deps. */ | |
2909 | FOR_EACH_DEP (con, SD_LIST_BACK, sd_it, dep) | |
2910 | { | |
2911 | int weight = 0; | |
2912 | const char *color; | |
2913 | pro = DEP_PRO (dep); | |
2914 | ||
2915 | switch (DEP_TYPE (dep)) | |
2916 | { | |
2917 | case REG_DEP_TRUE: | |
2918 | color = "black"; | |
2919 | weight = 1; | |
2920 | break; | |
2921 | case REG_DEP_OUTPUT: | |
2922 | case REG_DEP_ANTI: | |
2923 | color = "orange"; | |
2924 | break; | |
2925 | case REG_DEP_CONTROL: | |
2926 | color = "blue"; | |
2927 | break; | |
2928 | default: | |
2929 | gcc_unreachable (); | |
2930 | } | |
2931 | ||
2932 | pp_printf (&pp, "\t%d -> %d [color=%s", | |
2933 | INSN_UID (pro), INSN_UID (con), color); | |
2934 | if (int cost = dep_cost (dep)) | |
2935 | pp_printf (&pp, ",label=%d", cost); | |
2936 | pp_printf (&pp, ",weight=%d", weight); | |
2937 | pp_printf (&pp, "];\n"); | |
2938 | } | |
2939 | } | |
2940 | pp_printf (&pp, "}\n"); | |
2941 | } | |
2942 | ||
2943 | pp_printf (&pp, "}\n"); | |
2944 | pp_flush (&pp); | |
2945 | } | |
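/* Illustrative fragment of the generated dot output (made-up insn UIDs):

     subgraph cluster_block_0 {
       ...
       12 -> 15 [color=black,label=2,weight=1];
     }

   i.e. a true dependence of insn 15 on insn 12 with cost 2; output and
   anti dependences come out orange, control dependences blue.  */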
2946 | ||
2947 | /* Same as above, but open the file named FNAME and dump the graph to it. Callable from debugger. */
2948 | ||
2949 | DEBUG_FUNCTION void | |
2950 | dump_rgn_dependencies_dot (const char *fname) | |
2951 | { | |
2952 | FILE *fp; | |
2953 | ||
2954 | fp = fopen (fname, "w"); | |
2955 | if (!fp) | |
2956 | { | |
2957 | perror ("fopen"); | |
2958 | return; | |
2959 | } | |
2960 | ||
2961 | dump_rgn_dependencies_dot (fp); | |
2962 | fclose (fp); | |
2963 | } | |
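/* Illustrative usage (not from the original source): from a debugger,

     (gdb) call dump_rgn_dependencies_dot ("rgn.dot")

   writes the graph for the current region; the file can then be rendered
   with, e.g., "dot -Tpdf rgn.dot -o rgn.pdf".  */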
2964 | ||
7a31a7bd | 2965 | \f |
f045d41d | 2966 | /* Returns true if all the basic blocks of the current region have
2967 | the BB_DISABLE_SCHEDULE flag set, which means the region must not be scheduled. */
e1ab7874 | 2968 | bool |
f045d41d | 2969 | sched_is_disabled_for_current_region_p (void) |
2970 | { | |
f045d41d | 2971 | int bb; |
2972 | ||
2973 | for (bb = 0; bb < current_nr_blocks; bb++) | |
f5a6b05f | 2974 | if (!(BASIC_BLOCK_FOR_FN (cfun, |
2975 | BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE)) | |
7562ed74 | 2976 | return false; |
f045d41d | 2977 | |
2978 | return true; | |
2979 | } | |
2980 | ||
48e1416a | 2981 | /* Free all region dependencies saved in INSN_BACK_DEPS and |
e1ab7874 | 2982 | INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly |
48e1416a | 2983 | when scheduling, so this function should be called only from
e1ab7874 | 2984 | the selective scheduler. */
2985 | void | |
2986 | free_rgn_deps (void) | |
7a31a7bd | 2987 | { |
2988 | int bb; | |
f045d41d | 2989 | |
e1ab7874 | 2990 | for (bb = 0; bb < current_nr_blocks; bb++) |
6a1cdb4d | 2991 | { |
6fe7b8c2 | 2992 | rtx_insn *head, *tail; |
48e1416a | 2993 | |
e1ab7874 | 2994 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2995 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
7a31a7bd | 2996 | |
e1ab7874 | 2997 | sched_free_deps (head, tail, false); |
2998 | } | |
2999 | } | |
7a31a7bd | 3000 | |
e1ab7874 | 3001 | static int rgn_n_insns; |
6a1cdb4d | 3002 | |
e1ab7874 | 3003 | /* Compute insn priorities for the current region. */
3004 | void | |
48e1416a | 3005 | compute_priorities (void) |
e1ab7874 | 3006 | { |
3007 | int bb; | |
6a1cdb4d | 3008 | |
e4897000 | 3009 | current_sched_info->sched_max_insns_priority = 0; |
7a31a7bd | 3010 | for (bb = 0; bb < current_nr_blocks; bb++) |
2295df67 | 3011 | { |
6fe7b8c2 | 3012 | rtx_insn *head, *tail; |
48e1416a | 3013 | |
6a1cdb4d | 3014 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
3015 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); | |
2295df67 | 3016 | |
9845d120 | 3017 | if (no_real_insns_p (head, tail)) |
3018 | continue; | |
3019 | ||
2295df67 | 3020 | rgn_n_insns += set_priorities (head, tail); |
3021 | } | |
e4897000 | 3022 | current_sched_info->sched_max_insns_priority++; |
e1ab7874 | 3023 | } |
7a31a7bd | 3024 | |
bbd0cfb1 | 3025 | /* (Re-)initialize the arrays of DFA states at the end of each basic block. |
3026 | ||
3027 | SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be | |
3028 | zero for the first call to this function, to allocate the arrays for the | |
3029 | first time. | |
3030 | ||
3031 | This function is called once during initialization of the scheduler, and | |
3032 | called again to resize the arrays if new basic blocks have been created, | |
3033 | for example for speculation recovery code. */ | |
3034 | ||
3035 | static void | |
3036 | realloc_bb_state_array (int saved_last_basic_block) | |
3037 | { | |
3038 | char *old_bb_state_array = bb_state_array; | |
fe672ac0 | 3039 | size_t lbb = (size_t) last_basic_block_for_fn (cfun); |
bbd0cfb1 | 3040 | size_t slbb = (size_t) saved_last_basic_block; |
3041 | ||
3042 | /* Nothing to do if nothing changed since the last time this was called. */ | |
fe672ac0 | 3043 | if (saved_last_basic_block == last_basic_block_for_fn (cfun)) |
bbd0cfb1 | 3044 | return; |
3045 | ||
3046 | /* The selective scheduler doesn't use the state arrays. */ | |
3047 | if (sel_sched_p ()) | |
3048 | { | |
3049 | gcc_assert (bb_state_array == NULL && bb_state == NULL); | |
3050 | return; | |
3051 | } | |
3052 | ||
3053 | gcc_checking_assert (saved_last_basic_block == 0 | |
3054 | || (bb_state_array != NULL && bb_state != NULL)); | |
3055 | ||
3056 | bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size); | |
3057 | bb_state = XRESIZEVEC (state_t, bb_state, lbb); | |
3058 | ||
3059 | /* If BB_STATE_ARRAY has moved, fix up all the state pointers.
3060 | Otherwise only fix up the newly allocated ones. For the state
3061 | array itself, only initialize the new entries. */
3062 | bool bb_state_array_moved = (bb_state_array != old_bb_state_array); | |
3063 | for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++) | |
3064 | bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size); | |
3065 | for (size_t i = slbb; i < lbb; i++) | |
3066 | state_reset (bb_state[i]); | |
3067 | } | |
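/* Worked example (hypothetical sizes, for intuition only): growing from
   2 to 3 basic blocks with dfa_state_size == 16 resizes BB_STATE_ARRAY
   to 48 bytes.  If the buffer moved, all three BB_STATE[i] pointers are
   recomputed; otherwise only BB_STATE[2] is.  Either way only the new
   state BB_STATE[2] is reset to the DFA's initial state.  */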
3068 | ||
3069 | /* Free the arrays of DFA states at the end of each basic block. */ | |
3070 | ||
3071 | static void | |
3072 | free_bb_state_array (void) | |
3073 | { | |
3074 | free (bb_state_array); | |
3075 | free (bb_state); | |
3076 | bb_state_array = NULL; | |
3077 | bb_state = NULL; | |
3078 | } | |
3079 | ||
e1ab7874 | 3080 | /* Schedule a region. A region is either an inner loop, a loop-free |
3081 | subroutine, or a single basic block. Each bb in the region is | |
3082 | scheduled after its flow predecessors. */ | |
7a31a7bd | 3083 | |
e1ab7874 | 3084 | static void |
3085 | schedule_region (int rgn) | |
3086 | { | |
3087 | int bb; | |
3088 | int sched_rgn_n_insns = 0; | |
aae97b21 | 3089 | |
e1ab7874 | 3090 | rgn_n_insns = 0; |
7a31a7bd | 3091 | |
c3089433 | 3092 | /* Do not support register-pressure-sensitive scheduling for regions created
3093 | after the initial ones, as we don't update their liveness info. */
91096e87 | 3094 | if (sched_pressure != SCHED_PRESSURE_NONE |
3095 | && rgn >= nr_regions_initial) | |
c3089433 | 3096 | { |
91096e87 | 3097 | free_global_sched_pressure_data (); |
c3089433 | 3098 | sched_pressure = SCHED_PRESSURE_NONE; |
3099 | } | |
3100 | ||
e1ab7874 | 3101 | rgn_setup_region (rgn); |
7a31a7bd | 3102 | |
e1ab7874 | 3103 | /* Don't schedule a region whose blocks are all marked with
3104 | BB_DISABLE_SCHEDULE. */
3105 | if (sched_is_disabled_for_current_region_p ()) | |
3106 | return; | |
7a31a7bd | 3107 | |
e1ab7874 | 3108 | sched_rgn_compute_dependencies (rgn); |
6a1cdb4d | 3109 | |
e1ab7874 | 3110 | sched_rgn_local_init (rgn); |
3111 | ||
3112 | /* Set priorities. */ | |
3113 | compute_priorities (); | |
3114 | ||
3115 | sched_extend_ready_list (rgn_n_insns); | |
7a31a7bd | 3116 | |
11189c7a | 3117 | if (sched_pressure == SCHED_PRESSURE_WEIGHTED) |
a7dcf969 | 3118 | { |
3119 | sched_init_region_reg_pressure_info (); | |
3120 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3121 | { | |
3122 | basic_block first_bb, last_bb; | |
6fe7b8c2 | 3123 | rtx_insn *head, *tail; |
48e1416a | 3124 | |
a7dcf969 | 3125 | first_bb = EBB_FIRST_BB (bb); |
3126 | last_bb = EBB_LAST_BB (bb); | |
48e1416a | 3127 | |
a7dcf969 | 3128 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); |
48e1416a | 3129 | |
a7dcf969 | 3130 | if (no_real_insns_p (head, tail)) |
3131 | { | |
3132 | gcc_assert (first_bb == last_bb); | |
3133 | continue; | |
3134 | } | |
3135 | sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head)); | |
3136 | } | |
3137 | } | |
3138 | ||
7a31a7bd | 3139 | /* Now we can schedule all blocks. */ |
3140 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3141 | { | |
6a1cdb4d | 3142 | basic_block first_bb, last_bb, curr_bb; |
6fe7b8c2 | 3143 | rtx_insn *head, *tail; |
7a31a7bd | 3144 | |
6a1cdb4d | 3145 | first_bb = EBB_FIRST_BB (bb); |
3146 | last_bb = EBB_LAST_BB (bb); | |
3147 | ||
3148 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); | |
7a31a7bd | 3149 | |
3150 | if (no_real_insns_p (head, tail)) | |
6a1cdb4d | 3151 | { |
3152 | gcc_assert (first_bb == last_bb); | |
3153 | continue; | |
3154 | } | |
7a31a7bd | 3155 | |
3156 | current_sched_info->prev_head = PREV_INSN (head); | |
3157 | current_sched_info->next_tail = NEXT_INSN (tail); | |
3158 | ||
e1ab7874 | 3159 | remove_notes (head, tail); |
7a31a7bd | 3160 | |
6a1cdb4d | 3161 | unlink_bb_notes (first_bb, last_bb); |
3162 | ||
7a31a7bd | 3163 | target_bb = bb; |
3164 | ||
e4897000 | 3165 | gcc_assert (flag_schedule_interblock || current_nr_blocks == 1); |
3166 | current_sched_info->queue_must_finish_empty = current_nr_blocks == 1; | |
7a31a7bd | 3167 | |
6a1cdb4d | 3168 | curr_bb = first_bb; |
3072d30e | 3169 | if (dbg_cnt (sched_block)) |
3170 | { | |
0a15667c | 3171 | edge f; |
fe672ac0 | 3172 | int saved_last_basic_block = last_basic_block_for_fn (cfun); |
0a15667c | 3173 | |
bbd0cfb1 | 3174 | schedule_block (&curr_bb, bb_state[first_bb->index]); |
3175 | gcc_assert (EBB_FIRST_BB (bb) == first_bb); | |
3176 | sched_rgn_n_insns += sched_n_insns; | |
3177 | realloc_bb_state_array (saved_last_basic_block); | |
0a15667c | 3178 | f = find_fallthru_edge (last_bb->succs); |
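	  /* Illustrative arithmetic for the cutoff test below: a fall-through
	     probability of 0.4 is represented as 0.4 * REG_BR_PROB_BASE, so
	     after scaling by 100 / REG_BR_PROB_BASE it compares as 40 against
	     the cutoff parameter, which is a percentage.  */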
720cfc43 | 3179 | if (f |
3180 | && (!f->probability.initialized_p () | |
3181 | || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE
3182 | >= PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
0a15667c | 3183 | { |
3184 | memcpy (bb_state[f->dest->index], curr_state, | |
3185 | dfa_state_size); | |
3186 | if (sched_verbose >= 5) | |
3187 | fprintf (sched_dump, "saving state for edge %d->%d\n", | |
3188 | f->src->index, f->dest->index); | |
3189 | } | |
3072d30e | 3190 | } |
3191 | else | |
3192 | { | |
3193 | sched_rgn_n_insns += rgn_n_insns; | |
3194 | } | |
7a31a7bd | 3195 | |
7a31a7bd | 3196 | /* Clean up. */ |
3197 | if (current_nr_blocks > 1) | |
e1ab7874 | 3198 | free_trg_info (); |
7a31a7bd | 3199 | } |
3200 | ||
3201 | /* Sanity check: verify that all region insns were scheduled. */ | |
04e579b6 | 3202 | gcc_assert (sched_rgn_n_insns == rgn_n_insns); |
7a31a7bd | 3203 | |
e1ab7874 | 3204 | sched_finish_ready_list (); |
7a31a7bd | 3205 | |
e1ab7874 | 3206 | /* Done with this region. */ |
3207 | sched_rgn_local_finish (); | |
93f6b030 | 3208 | |
3209 | /* Free dependencies. */ | |
3210 | for (bb = 0; bb < current_nr_blocks; ++bb) | |
3211 | free_block_dependencies (bb); | |
3212 | ||
3213 | gcc_assert (haifa_recovery_bb_ever_added_p | |
3214 | || deps_pools_are_empty_p ()); | |
7a31a7bd | 3215 | } |
3216 | ||
7a31a7bd | 3217 | /* Initialize data structures for region scheduling. */ |
3218 | ||
e1ab7874 | 3219 | void |
3220 | sched_rgn_init (bool single_blocks_p) | |
7a31a7bd | 3221 | { |
e1ab7874 | 3222 | min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE) |
3223 | / 100); | |
3224 | ||
3225 | nr_inter = 0; | |
3226 | nr_spec = 0; | |
3227 | ||
6a1cdb4d | 3228 | extend_regions (); |
7a31a7bd | 3229 | |
e1ab7874 | 3230 | CONTAINING_RGN (ENTRY_BLOCK) = -1; |
3231 | CONTAINING_RGN (EXIT_BLOCK) = -1; | |
3232 | ||
bbd0cfb1 | 3233 | realloc_bb_state_array (0); |
0a15667c | 3234 | |
7a31a7bd | 3235 | /* Compute regions for scheduling. */ |
e1ab7874 | 3236 | if (single_blocks_p |
a28770e1 | 3237 | || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1 |
aae97b21 | 3238 | || !flag_schedule_interblock |
3239 | || is_cfg_nonregular ()) | |
7a31a7bd | 3240 | { |
e1ab7874 | 3241 | find_single_block_region (sel_sched_p ()); |
7a31a7bd | 3242 | } |
3243 | else | |
3244 | { | |
aae97b21 | 3245 | /* Compute the dominators. */
e1ab7874 | 3246 | if (!sel_sched_p ()) |
3247 | calculate_dominance_info (CDI_DOMINATORS); | |
7a31a7bd | 3248 | |
aae97b21 | 3249 | /* Find regions. */ |
3250 | find_rgns (); | |
7a31a7bd | 3251 | |
aae97b21 | 3252 | if (sched_verbose >= 3) |
3253 | debug_regions (); | |
7a31a7bd | 3254 | |
aae97b21 | 3255 | /* For now. This will move as more and more of haifa is converted |
3072d30e | 3256 | to using the cfg code. */ |
e1ab7874 | 3257 | if (!sel_sched_p ()) |
3258 | free_dominance_info (CDI_DOMINATORS); | |
7a31a7bd | 3259 | } |
7a31a7bd | 3260 | |
c9281ef8 | 3261 | gcc_assert (nr_regions > 0 && nr_regions <= n_basic_blocks_for_fn (cfun)); |
7a31a7bd | 3262 | |
c9281ef8 | 3263 | RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) |
3264 | + RGN_NR_BLOCKS (nr_regions - 1)); | |
91096e87 | 3265 | nr_regions_initial = nr_regions; |
e1ab7874 | 3266 | } |
3267 | ||
3268 | /* Free data structures for region scheduling. */ | |
7a31a7bd | 3269 | void |
e1ab7874 | 3270 | sched_rgn_finish (void) |
7a31a7bd | 3271 | { |
bbd0cfb1 | 3272 | free_bb_state_array (); |
0a15667c | 3273 | |
7a31a7bd | 3274 | /* Reposition the prologue and epilogue notes in case we moved the |
3275 | prologue/epilogue insns. */ | |
3276 | if (reload_completed) | |
3072d30e | 3277 | reposition_prologue_and_epilogue_notes (); |
7a31a7bd | 3278 | |
7a31a7bd | 3279 | if (sched_verbose) |
3280 | { | |
e1ab7874 | 3281 | if (reload_completed == 0 |
3282 | && flag_schedule_interblock) | |
7a31a7bd | 3283 | { |
3284 | fprintf (sched_dump, | |
3285 | "\n;; Procedure interblock/speculative motions == %d/%d \n", | |
3286 | nr_inter, nr_spec); | |
3287 | } | |
3288 | else | |
04e579b6 | 3289 | gcc_assert (nr_inter <= 0); |
7a31a7bd | 3290 | fprintf (sched_dump, "\n\n"); |
3291 | } | |
3292 | ||
e1ab7874 | 3293 | nr_regions = 0; |
3294 | ||
7a31a7bd | 3295 | free (rgn_table); |
e1ab7874 | 3296 | rgn_table = NULL; |
3297 | ||
7a31a7bd | 3298 | free (rgn_bb_table); |
e1ab7874 | 3299 | rgn_bb_table = NULL; |
3300 | ||
7a31a7bd | 3301 | free (block_to_bb); |
e1ab7874 | 3302 | block_to_bb = NULL; |
3303 | ||
7a31a7bd | 3304 | free (containing_rgn); |
e1ab7874 | 3305 | containing_rgn = NULL; |
3306 | ||
3307 | free (ebb_head); | |
3308 | ebb_head = NULL; | |
3309 | } | |
3310 | ||
3311 | /* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3312 | point to the region RGN. */
3313 | void | |
3314 | rgn_setup_region (int rgn) | |
3315 | { | |
3316 | int bb; | |
3317 | ||
3318 | /* Set variables for the current region. */ | |
3319 | current_nr_blocks = RGN_NR_BLOCKS (rgn); | |
3320 | current_blocks = RGN_BLOCKS (rgn); | |
48e1416a | 3321 | |
e1ab7874 | 3322 | /* EBB_HEAD is a region-scope structure, but we reallocate it for
3323 | each region to save memory.
3324 | See the comments in rgn_add_block for why we allocate one extra element. */
3325 | ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1); | |
3326 | for (bb = 0; bb <= current_nr_blocks; bb++) | |
3327 | ebb_head[bb] = current_blocks + bb; | |
3328 | } | |
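/* For instance (hypothetical numbers), a region whose blocks start at
   index 5 of rgn_bb_table and spans 3 blocks gets
   ebb_head = {5, 6, 7, 8}; the extra trailing element makes
   ebb_head[current_nr_blocks] a valid end marker.  */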
3329 | ||
3330 | /* Compute instruction dependencies in region RGN. */ | |
3331 | void | |
3332 | sched_rgn_compute_dependencies (int rgn) | |
3333 | { | |
3334 | if (!RGN_DONT_CALC_DEPS (rgn)) | |
3335 | { | |
3336 | int bb; | |
3337 | ||
3338 | if (sel_sched_p ()) | |
3339 | sched_emulate_haifa_p = 1; | |
3340 | ||
3341 | init_deps_global (); | |
3342 | ||
3343 | /* Initializations for region data dependence analysis. */ | |
68e419a1 | 3344 | bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks); |
e1ab7874 | 3345 | for (bb = 0; bb < current_nr_blocks; bb++) |
d9ab2038 | 3346 | init_deps (bb_deps + bb, false); |
e1ab7874 | 3347 | |
566e7db2 | 3348 | /* Initialize bitmap used in add_branch_dependences. */ |
3349 | insn_referenced = sbitmap_alloc (sched_max_luid); | |
53c5d9d4 | 3350 | bitmap_clear (insn_referenced); |
48e1416a | 3351 | |
e1ab7874 | 3352 | /* Compute backward dependencies. */ |
3353 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3354 | compute_block_dependences (bb); | |
48e1416a | 3355 | |
566e7db2 | 3356 | sbitmap_free (insn_referenced); |
e1ab7874 | 3357 | free_pending_lists (); |
3358 | finish_deps_global (); | |
3359 | free (bb_deps); | |
7a31a7bd | 3360 | |
e1ab7874 | 3361 | /* We don't want to calculate these dependencies a second time. */
3362 | RGN_DONT_CALC_DEPS (rgn) = 1; | |
3072d30e | 3363 | |
e1ab7874 | 3364 | if (sel_sched_p ()) |
3365 | sched_emulate_haifa_p = 0; | |
3366 | } | |
3367 | else | |
3368 | /* Either this is a recovery block, which is always a single-block
3369 | region, or we are using selective scheduling. */
3370 | gcc_assert (current_nr_blocks == 1 || sel_sched_p ()); | |
3371 | } | |
3372 | ||
3373 | /* Init data structures (probabilities, dominators, split edges)
3374 | local to the region RGN; only needed when it has multiple blocks. */
3375 | void | |
3376 | sched_rgn_local_init (int rgn) | |
3377 | { | |
3378 | int bb; | |
48e1416a | 3379 | |
e1ab7874 | 3380 | /* Compute interblock info: probabilities, split-edges, dominators, etc. */ |
3381 | if (current_nr_blocks > 1) | |
3382 | { | |
3383 | basic_block block; | |
3384 | edge e; | |
3385 | edge_iterator ei; | |
3386 | ||
3387 | prob = XNEWVEC (int, current_nr_blocks); | |
3388 | ||
3389 | dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks); | |
53c5d9d4 | 3390 | bitmap_vector_clear (dom, current_nr_blocks); |
e1ab7874 | 3391 | |
3392 | /* Use ->aux to implement EDGE_TO_BIT mapping. */ | |
3393 | rgn_nr_edges = 0; | |
fc00614f | 3394 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3395 | { |
3396 | if (CONTAINING_RGN (block->index) != rgn) | |
3397 | continue; | |
3398 | FOR_EACH_EDGE (e, ei, block->succs) | |
3399 | SET_EDGE_TO_BIT (e, rgn_nr_edges++); | |
3400 | } | |
3401 | ||
3402 | rgn_edges = XNEWVEC (edge, rgn_nr_edges); | |
3403 | rgn_nr_edges = 0; | |
fc00614f | 3404 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3405 | { |
3406 | if (CONTAINING_RGN (block->index) != rgn) | |
3407 | continue; | |
3408 | FOR_EACH_EDGE (e, ei, block->succs) | |
3409 | rgn_edges[rgn_nr_edges++] = e; | |
3410 | } | |
3411 | ||
3412 | /* Split edges. */ | |
3413 | pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); | |
53c5d9d4 | 3414 | bitmap_vector_clear (pot_split, current_nr_blocks); |
e1ab7874 | 3415 | ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); |
53c5d9d4 | 3416 | bitmap_vector_clear (ancestor_edges, current_nr_blocks); |
e1ab7874 | 3417 | |
3418 | /* Compute probabilities, dominators, split_edges. */ | |
3419 | for (bb = 0; bb < current_nr_blocks; bb++) | |
3420 | compute_dom_prob_ps (bb); | |
3421 | ||
3422 | /* Clean up the ->aux fields used for the EDGE_TO_BIT mapping. We don't
3423 | need them anymore, and we want to avoid stale aux values being
3424 | duplicated into newly created edges. */
fc00614f | 3425 | FOR_EACH_BB_FN (block, cfun) |
e1ab7874 | 3426 | { |
3427 | if (CONTAINING_RGN (block->index) != rgn) | |
3428 | continue; | |
3429 | FOR_EACH_EDGE (e, ei, block->succs) | |
3430 | e->aux = NULL; | |
3431 | } | |
3432 | } | |
3433 | } | |
3434 | ||
3435 | /* Free data computed for the finished region. */ | |
48e1416a | 3436 | void |
e1ab7874 | 3437 | sched_rgn_local_free (void) |
3438 | { | |
3439 | free (prob); | |
3440 | sbitmap_vector_free (dom); | |
3441 | sbitmap_vector_free (pot_split); | |
3442 | sbitmap_vector_free (ancestor_edges); | |
3443 | free (rgn_edges); | |
3444 | } | |
3445 | ||
3446 | /* Free data computed for the finished region, if any was computed. */
3447 | void | |
3448 | sched_rgn_local_finish (void) | |
3449 | { | |
3450 | if (current_nr_blocks > 1 && !sel_sched_p ()) | |
3451 | { | |
3452 | sched_rgn_local_free (); | |
3453 | } | |
3454 | } | |
3455 | ||
3456 | /* Set up the common scheduler info. */
3457 | void | |
3458 | rgn_setup_common_sched_info (void) | |
3459 | { | |
3460 | memcpy (&rgn_common_sched_info, &haifa_common_sched_info, | |
3461 | sizeof (rgn_common_sched_info)); | |
3462 | ||
3463 | rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg; | |
3464 | rgn_common_sched_info.add_block = rgn_add_block; | |
3465 | rgn_common_sched_info.estimate_number_of_insns | |
3466 | = rgn_estimate_number_of_insns; | |
3467 | rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS; | |
3468 | ||
3469 | common_sched_info = &rgn_common_sched_info; | |
3470 | } | |
3471 | ||
3472 | /* Set up all *_sched_info structures (for the Haifa frontend
3473 | and for the dependence analysis) in the interblock scheduler. */ | |
3474 | void | |
3475 | rgn_setup_sched_infos (void) | |
3476 | { | |
3477 | if (!sel_sched_p ()) | |
3478 | memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info, | |
3479 | sizeof (rgn_sched_deps_info)); | |
3480 | else | |
3481 | memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info, | |
3482 | sizeof (rgn_sched_deps_info)); | |
3483 | ||
3484 | sched_deps_info = &rgn_sched_deps_info; | |
3485 | ||
3486 | memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info)); | |
3487 | current_sched_info = &rgn_sched_info; | |
3488 | } | |
3489 | ||
3490 | /* The one entry point in this file. */ | |
3491 | void | |
3492 | schedule_insns (void) | |
3493 | { | |
3494 | int rgn; | |
3495 | ||
3496 | /* Taking care of this degenerate case makes the rest of | |
3497 | this code simpler. */ | |
a28770e1 | 3498 | if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS) |
e1ab7874 | 3499 | return; |
3500 | ||
3501 | rgn_setup_common_sched_info (); | |
3502 | rgn_setup_sched_infos (); | |
3503 | ||
3504 | haifa_sched_init (); | |
3505 | sched_rgn_init (reload_completed); | |
3506 | ||
3507 | bitmap_initialize (¬_in_df, 0); | |
3072d30e | 3508 | bitmap_clear (¬_in_df); |
7a31a7bd | 3509 | |
e1ab7874 | 3510 | /* Schedule every region in the subroutine. */ |
3511 | for (rgn = 0; rgn < nr_regions; rgn++) | |
3512 | if (dbg_cnt (sched_region)) | |
3513 | schedule_region (rgn); | |
3514 | ||
3515 | /* Clean up. */ | |
3516 | sched_rgn_finish (); | |
3517 | bitmap_clear (¬_in_df); | |
3518 | ||
3519 | haifa_sched_finish (); | |
7a31a7bd | 3520 | } |
6a1cdb4d | 3521 | |
3522 | /* INSN has been added to or removed from the current region. */
3523 | static void | |
b24ef467 | 3524 | rgn_add_remove_insn (rtx_insn *insn, int remove_p) |
6a1cdb4d | 3525 | { |
3526 | if (!remove_p) | |
3527 | rgn_n_insns++; | |
3528 | else | |
3529 | rgn_n_insns--; | |
3530 | ||
3531 | if (INSN_BB (insn) == target_bb) | |
3532 | { | |
3533 | if (!remove_p) | |
3534 | target_n_insns++; | |
3535 | else | |
3536 | target_n_insns--; | |
3537 | } | |
3538 | } | |
3539 | ||
3540 | /* Extend internal data structures. */ | |
e1ab7874 | 3541 | void |
6a1cdb4d | 3542 | extend_regions (void) |
3543 | { | |
a28770e1 | 3544 | rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun)); |
fe672ac0 | 3545 | rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, |
3546 | n_basic_blocks_for_fn (cfun)); | |
3547 | block_to_bb = XRESIZEVEC (int, block_to_bb, | |
3548 | last_basic_block_for_fn (cfun)); | |
3549 | containing_rgn = XRESIZEVEC (int, containing_rgn, | |
3550 | last_basic_block_for_fn (cfun)); | |
6a1cdb4d | 3551 | } |
3552 | ||
e1ab7874 | 3553 | void |
3554 | rgn_make_new_region_out_of_new_block (basic_block bb) | |
3555 | { | |
3556 | int i; | |
3557 | ||
3558 | i = RGN_BLOCKS (nr_regions); | |
3559 | /* I - first free position in rgn_bb_table. */ | |
3560 | ||
3561 | rgn_bb_table[i] = bb->index; | |
3562 | RGN_NR_BLOCKS (nr_regions) = 1; | |
3563 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
3564 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
3565 | CONTAINING_RGN (bb->index) = nr_regions; | |
3566 | BLOCK_TO_BB (bb->index) = 0; | |
3567 | ||
3568 | nr_regions++; | |
48e1416a | 3569 | |
e1ab7874 | 3570 | RGN_BLOCKS (nr_regions) = i + 1; |
3571 | } | |
3572 | ||
6a1cdb4d | 3573 | /* BB was added to the ebb after AFTER. */
3574 | static void | |
e1ab7874 | 3575 | rgn_add_block (basic_block bb, basic_block after) |
6a1cdb4d | 3576 | { |
3577 | extend_regions (); | |
3072d30e | 3578 | bitmap_set_bit (¬_in_df, bb->index); |
3579 | ||
34154e27 | 3580 | if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
6a1cdb4d | 3581 | { |
e1ab7874 | 3582 | rgn_make_new_region_out_of_new_block (bb); |
34154e27 | 3583 | RGN_DONT_CALC_DEPS (nr_regions - 1) = (after |
3584 | == EXIT_BLOCK_PTR_FOR_FN (cfun)); | |
6a1cdb4d | 3585 | } |
3586 | else | |
48e1416a | 3587 | { |
6a1cdb4d | 3588 | int i, pos; |
3589 | ||
3590 | /* We need to fix rgn_table, block_to_bb, containing_rgn | |
3591 | and ebb_head. */ | |
3592 | ||
3593 | BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index); | |
3594 | ||
3595 | /* We extend ebb_head to one more position to | |
48e1416a | 3596 | easily find the last position of the last ebb in |
6a1cdb4d | 3597 | the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1] |
3598 | is _always_ valid for access. */ | |
3599 | ||
3600 | i = BLOCK_TO_BB (after->index) + 1; | |
90bf0a00 | 3601 | pos = ebb_head[i] - 1; |
3602 | /* Now POS is the index of the last block in AFTER's ebb. */
3603 | ||
3604 | /* Find index of basic block AFTER. */ | |
3c802a1e | 3605 | for (; rgn_bb_table[pos] != after->index; pos--) |
3606 | ; | |
90bf0a00 | 3607 | |
6a1cdb4d | 3608 | pos++; |
3609 | gcc_assert (pos > ebb_head[i - 1]); | |
90bf0a00 | 3610 | |
6a1cdb4d | 3611 | /* I is the index of the ebb right after AFTER's; ebb_head[i] is
3612 | always valid to access thanks to the extra element. */
3613 | ||
3614 | /* Source position: ebb_head[i] | |
9ca2c29a | 3615 | Destination position: ebb_head[i] + 1 |
48e1416a | 3616 | Last position: |
6a1cdb4d | 3617 | RGN_BLOCKS (nr_regions) - 1 |
3618 | Number of elements to copy: (last_position) - (source_position) + 1 | |
3619 | */ | |
48e1416a | 3620 | |
6a1cdb4d | 3621 | memmove (rgn_bb_table + pos + 1, |
3622 | rgn_bb_table + pos, | |
3623 | ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1) | |
3624 | * sizeof (*rgn_bb_table)); | |
3625 | ||
3626 | rgn_bb_table[pos] = bb->index; | |
48e1416a | 3627 | |
6a1cdb4d | 3628 | for (; i <= current_nr_blocks; i++) |
3629 | ebb_head [i]++; | |
3630 | ||
3631 | i = CONTAINING_RGN (after->index); | |
3632 | CONTAINING_RGN (bb->index) = i; | |
48e1416a | 3633 | |
6a1cdb4d | 3634 | RGN_HAS_REAL_EBB (i) = 1; |
3635 | ||
3636 | for (++i; i <= nr_regions; i++) | |
3637 | RGN_BLOCKS (i)++; | |
6a1cdb4d | 3638 | } |
3639 | } | |
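/* Worked example (hypothetical block indices): if the region occupies
   rgn_bb_table = {4, 7, 9} and AFTER is block 7, the scan leaves POS at
   the slot holding block 9; block 9 (and every table entry after it) is
   shifted right by the memmove, BB lands in that slot giving
   {4, 7, BB, 9}, and each ebb_head[] and RGN_BLOCKS () entry past the
   insertion point is bumped by one.  */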
3640 | ||
3641 | /* Fix internal data after interblock movement of a jump instruction.
3642 | For parameter meanings, please refer to
3643 | sched-int.h: struct sched_info: fix_recovery_cfg. */ | |
3644 | static void | |
e1ab7874 | 3645 | rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti) |
6a1cdb4d | 3646 | { |
3647 | int old_pos, new_pos, i; | |
3648 | ||
3649 | BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi); | |
48e1416a | 3650 | |
6a1cdb4d | 3651 | for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1; |
3652 | rgn_bb_table[old_pos] != check_bb_nexti; | |
3c802a1e | 3653 | old_pos--) |
3654 | ; | |
6a1cdb4d | 3655 | gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]); |
3656 | ||
3657 | for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1; | |
3658 | rgn_bb_table[new_pos] != bbi; | |
3c802a1e | 3659 | new_pos--) |
3660 | ; | |
6a1cdb4d | 3661 | new_pos++; |
3662 | gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]); | |
48e1416a | 3663 | |
6a1cdb4d | 3664 | gcc_assert (new_pos < old_pos); |
3665 | ||
3666 | memmove (rgn_bb_table + new_pos + 1, | |
3667 | rgn_bb_table + new_pos, | |
3668 | (old_pos - new_pos) * sizeof (*rgn_bb_table)); | |
3669 | ||
3670 | rgn_bb_table[new_pos] = check_bb_nexti; | |
3671 | ||
3672 | for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++) | |
3673 | ebb_head[i]++; | |
3674 | } | |
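/* Worked example (hypothetical indices): with rgn_bb_table = {2, 5, 8, 11},
   BBI == 2 and CHECK_BB_NEXTI == 11, NEW_POS is 1 and OLD_POS is 3;
   blocks 5 and 8 shift right and block 11 is placed right after block 2,
   giving {2, 11, 5, 8}.  */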
3675 | ||
3676 | /* Return the next block in the ebb chain. For parameter meanings, please
3677 | refer to sched-int.h: struct sched_info: advance_target_bb. */
3678 | static basic_block | |
b24ef467 | 3679 | advance_target_bb (basic_block bb, rtx_insn *insn) |
6a1cdb4d | 3680 | { |
3681 | if (insn) | |
3682 | return 0; | |
3683 | ||
3684 | gcc_assert (BLOCK_TO_BB (bb->index) == target_bb | |
3685 | && BLOCK_TO_BB (bb->next_bb->index) == target_bb); | |
3686 | return bb->next_bb; | |
3687 | } | |
3688 | ||
cda0a5f5 | 3689 | #endif |
77fce4cd | 3690 | \f |
57a8bf1b | 3691 | /* Run the instruction scheduler for live range shrinkage, with interblock scheduling disabled. */
3692 | static unsigned int | |
3693 | rest_of_handle_live_range_shrinkage (void) | |
3694 | { | |
3695 | #ifdef INSN_SCHEDULING | |
3696 | int saved; | |
3697 | ||
3698 | initialize_live_range_shrinkage (); | |
3699 | saved = flag_schedule_interblock; | |
3700 | flag_schedule_interblock = false; | |
3701 | schedule_insns (); | |
3702 | flag_schedule_interblock = saved; | |
3703 | finish_live_range_shrinkage (); | |
3704 | #endif | |
3705 | return 0; | |
3706 | } | |
3707 | ||
77fce4cd | 3708 | /* Run the first instruction scheduling pass (or selective scheduling). */
2a1990e9 | 3709 | static unsigned int |
77fce4cd | 3710 | rest_of_handle_sched (void) |
3711 | { | |
3712 | #ifdef INSN_SCHEDULING | |
e1ab7874 | 3713 | if (flag_selective_scheduling |
3714 | && ! maybe_skip_selective_scheduling ()) | |
3715 | run_selective_scheduling (); | |
3716 | else | |
3717 | schedule_insns (); | |
77fce4cd | 3718 | #endif |
2a1990e9 | 3719 | return 0; |
77fce4cd | 3720 | } |
3721 | ||
77fce4cd | 3722 | /* Run second scheduling pass after reload. */ |
2a1990e9 | 3723 | static unsigned int |
77fce4cd | 3724 | rest_of_handle_sched2 (void) |
3725 | { | |
3726 | #ifdef INSN_SCHEDULING | |
e1ab7874 | 3727 | if (flag_selective_scheduling2 |
3728 | && ! maybe_skip_selective_scheduling ()) | |
3729 | run_selective_scheduling (); | |
77fce4cd | 3730 | else |
e1ab7874 | 3731 | { |
3732 | /* Do control and data sched analysis again, | |
3733 | and write some more of the results to the dump file. */
fda153ea | 3734 | if (flag_sched2_use_superblocks) |
e1ab7874 | 3735 | schedule_ebbs (); |
3736 | else | |
3737 | schedule_insns (); | |
3738 | } | |
77fce4cd | 3739 | #endif |
2a1990e9 | 3740 | return 0; |
77fce4cd | 3741 | } |
3742 | ||
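/* Run the scheduler in fusion mode, to bring fusible insns (e.g. load/store
   pairs) next to each other for the following peephole2 pass.  */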
012ad66c | 3743 | static unsigned int |
3744 | rest_of_handle_sched_fusion (void) | |
3745 | { | |
3746 | #ifdef INSN_SCHEDULING | |
3747 | sched_fusion = true; | |
3748 | schedule_insns (); | |
3749 | sched_fusion = false; | |
3750 | #endif | |
3751 | return 0; | |
3752 | } | |
3753 | ||
cbe8bda8 | 3754 | namespace { |
3755 | ||
57a8bf1b | 3756 | const pass_data pass_data_live_range_shrinkage = |
3757 | { | |
3758 | RTL_PASS, /* type */ | |
3759 | "lr_shrinkage", /* name */ | |
3760 | OPTGROUP_NONE, /* optinfo_flags */ | |
57a8bf1b | 3761 | TV_LIVE_RANGE_SHRINKAGE, /* tv_id */ |
3762 | 0, /* properties_required */ | |
3763 | 0, /* properties_provided */ | |
3764 | 0, /* properties_destroyed */ | |
3765 | 0, /* todo_flags_start */ | |
8b88439e | 3766 | TODO_df_finish, /* todo_flags_finish */ |
57a8bf1b | 3767 | }; |
3768 | ||
3769 | class pass_live_range_shrinkage : public rtl_opt_pass | |
3770 | { | |
3771 | public: | |
3772 | pass_live_range_shrinkage (gcc::context *ctxt)
3773 | : rtl_opt_pass (pass_data_live_range_shrinkage, ctxt)
3774 | {} | |
3775 | ||
3776 | /* opt_pass methods: */ | |
31315c24 | 3777 | virtual bool gate (function *) |
3778 | { | |
3779 | #ifdef INSN_SCHEDULING | |
3780 | return flag_live_range_shrinkage; | |
3781 | #else | |
3782 | return 0; | |
3783 | #endif | |
3784 | } | |
3785 | ||
65b0537f | 3786 | virtual unsigned int execute (function *) |
3787 | { | |
3788 | return rest_of_handle_live_range_shrinkage (); | |
3789 | } | |
57a8bf1b | 3790 | |
3791 | }; // class pass_live_range_shrinkage | |
3792 | ||
3793 | } // anon namespace | |
3794 | ||
3795 | rtl_opt_pass * | |
3796 | make_pass_live_range_shrinkage (gcc::context *ctxt) | |
3797 | { | |
3798 | return new pass_live_range_shrinkage (ctxt); | |
3799 | } | |
3800 | ||
3801 | namespace { | |
3802 | ||
cbe8bda8 | 3803 | const pass_data pass_data_sched = |
3804 | { | |
3805 | RTL_PASS, /* type */ | |
3806 | "sched1", /* name */ | |
3807 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3808 | TV_SCHED, /* tv_id */ |
3809 | 0, /* properties_required */ | |
3810 | 0, /* properties_provided */ | |
3811 | 0, /* properties_destroyed */ | |
3812 | 0, /* todo_flags_start */ | |
8b88439e | 3813 | TODO_df_finish, /* todo_flags_finish */ |
77fce4cd | 3814 | }; |
3815 | ||
cbe8bda8 | 3816 | class pass_sched : public rtl_opt_pass |
3817 | { | |
3818 | public: | |
9af5ce0c | 3819 | pass_sched (gcc::context *ctxt) |
3820 | : rtl_opt_pass (pass_data_sched, ctxt) | |
cbe8bda8 | 3821 | {} |
3822 | ||
3823 | /* opt_pass methods: */ | |
31315c24 | 3824 | virtual bool gate (function *); |
65b0537f | 3825 | virtual unsigned int execute (function *) { return rest_of_handle_sched (); } |
cbe8bda8 | 3826 | |
3827 | }; // class pass_sched | |
3828 | ||
31315c24 | 3829 | bool |
3830 | pass_sched::gate (function *) | |
3831 | { | |
3832 | #ifdef INSN_SCHEDULING | |
3833 | return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func); | |
3834 | #else | |
3835 | return 0; | |
3836 | #endif | |
3837 | } | |
3838 | ||
cbe8bda8 | 3839 | } // anon namespace |
3840 | ||
3841 | rtl_opt_pass * | |
3842 | make_pass_sched (gcc::context *ctxt) | |
3843 | { | |
3844 | return new pass_sched (ctxt); | |
3845 | } | |
3846 | ||
3847 | namespace { | |
3848 | ||
3849 | const pass_data pass_data_sched2 = | |
3850 | { | |
3851 | RTL_PASS, /* type */ | |
3852 | "sched2", /* name */ | |
3853 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3854 | TV_SCHED2, /* tv_id */ |
3855 | 0, /* properties_required */ | |
3856 | 0, /* properties_provided */ | |
3857 | 0, /* properties_destroyed */ | |
3858 | 0, /* todo_flags_start */ | |
8b88439e | 3859 | TODO_df_finish, /* todo_flags_finish */ |
77fce4cd | 3860 | }; |
cbe8bda8 | 3861 | |
3862 | class pass_sched2 : public rtl_opt_pass | |
3863 | { | |
3864 | public: | |
9af5ce0c | 3865 | pass_sched2 (gcc::context *ctxt) |
3866 | : rtl_opt_pass (pass_data_sched2, ctxt) | |
cbe8bda8 | 3867 | {} |
3868 | ||
3869 | /* opt_pass methods: */ | |
31315c24 | 3870 | virtual bool gate (function *); |
65b0537f | 3871 | virtual unsigned int execute (function *) |
3872 | { | |
3873 | return rest_of_handle_sched2 (); | |
3874 | } | |
cbe8bda8 | 3875 | |
3876 | }; // class pass_sched2 | |
3877 | ||
31315c24 | 3878 | bool |
3879 | pass_sched2::gate (function *) | |
3880 | { | |
3881 | #ifdef INSN_SCHEDULING | |
3882 | return optimize > 0 && flag_schedule_insns_after_reload | |
3883 | && !targetm.delay_sched2 && dbg_cnt (sched2_func); | |
3884 | #else | |
3885 | return 0; | |
3886 | #endif | |
3887 | } | |
3888 | ||
cbe8bda8 | 3889 | } // anon namespace |
3890 | ||
3891 | rtl_opt_pass * | |
3892 | make_pass_sched2 (gcc::context *ctxt) | |
3893 | { | |
3894 | return new pass_sched2 (ctxt); | |
3895 | } | |
012ad66c | 3896 | |
3897 | namespace { | |
3898 | ||
3899 | const pass_data pass_data_sched_fusion = | |
3900 | { | |
3901 | RTL_PASS, /* type */ | |
3902 | "sched_fusion", /* name */ | |
3903 | OPTGROUP_NONE, /* optinfo_flags */ | |
3904 | TV_SCHED_FUSION, /* tv_id */ | |
3905 | 0, /* properties_required */ | |
3906 | 0, /* properties_provided */ | |
3907 | 0, /* properties_destroyed */ | |
3908 | 0, /* todo_flags_start */ | |
3909 | TODO_df_finish, /* todo_flags_finish */ | |
3910 | }; | |
3911 | ||
3912 | class pass_sched_fusion : public rtl_opt_pass | |
3913 | { | |
3914 | public: | |
3915 | pass_sched_fusion (gcc::context *ctxt) | |
3916 | : rtl_opt_pass (pass_data_sched_fusion, ctxt) | |
3917 | {} | |
3918 | ||
3919 | /* opt_pass methods: */ | |
3920 | virtual bool gate (function *); | |
3921 | virtual unsigned int execute (function *) | |
3922 | { | |
3923 | return rest_of_handle_sched_fusion (); | |
3924 | } | |
3925 | ||
3926 | }; // class pass_sched_fusion
3927 | ||
3928 | bool | |
3929 | pass_sched_fusion::gate (function *) | |
3930 | { | |
3931 | #ifdef INSN_SCHEDULING | |
3932 | /* Scheduling fusion relies on peephole2 to do real fusion work, | |
3933 | so only enable it if peephole2 is in effect. */ | |
3934 | return (optimize > 0 && flag_peephole2 | |
3935 | && flag_schedule_fusion && targetm.sched.fusion_priority != NULL); | |
3936 | #else | |
3937 | return 0; | |
3938 | #endif | |
3939 | } | |
3940 | ||
3941 | } // anon namespace | |
3942 | ||
3943 | rtl_opt_pass * | |
3944 | make_pass_sched_fusion (gcc::context *ctxt) | |
3945 | { | |
3946 | return new pass_sched_fusion (ctxt); | |
3947 | } |