/* Instruction scheduling pass.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
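/* This file implements instruction scheduling over extended basic blocks
   (ebbs): chains of basic blocks linked by likely-taken fall-through edges.
   schedule_ebbs () builds each chain (see the loop near the end of this
   file) and schedule_ebb () hands it to the haifa scheduler as a single
   scheduling region.  */
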
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

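/* State save/restore hooks.  These are registered in ebb_sched_info below;
   haifa-sched.c invokes them when it saves and later restores a backtrack
   point, and the only ebb-local state that needs preserving is the count
   of insns scheduled so far.  */
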
/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially-dead
         instruction scheduled after the last control-flow instruction.
         In this case we can create a new basic block.  It is always
         exactly one basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that this
         operation extends the scheduling region to the end of BB.  Hence,
         we need to shift NEXT_TAIL so that haifa-sched.c won't go outside
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */
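
/* For illustration, a hypothetical pattern (not taken from real RTL dumps):

     insn1:      r1 = r2 + 8          ; defines the common base register
     insn2:      r3 = [r1]            ; load in an already-processed block
     load_insn:  r4 = [r1 + 16]       ; candidate for speculative motion

   Both loads use the single base register defined by insn1, so their
   addresses differ only by a constant, and load_insn can only fault where
   insn2 would already have faulted.  */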

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */
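
/* A "risky" insn here is one that haifa_classify_insn () reports as possibly
   trapping.  Each such insn is given a dependence on a preceding jump so it
   cannot be moved above it; when control speculation (BEGIN_CONTROL) is
   available, the dependence is recorded as a speculative anti dependence
   rather than a hard control dependence.  */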

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs,
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                           BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);
                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

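  /* The tracer parameters are expressed in percent, while edge probabilities
     are measured out of REG_BR_PROB_BASE, so scale the cutoff to the same
     base before comparing it against e->probability below.  */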
  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

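      /* Grow the ebb along fall-through edges: stop at the exit block, at a
         label (a new entry point), when there is no fall-through edge, when
         the edge is considered unlikely (probability at or below the cutoff),
         or when the successor is excluded from scheduling.  */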
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore they always
     form a single-block EBB, and we can use rec->index to identify such
     EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next non-empty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */