/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "df.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "regs.h"
#include "profile.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "target.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list.  Called once
   before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction has been
         scheduled after the last control flow instruction.  In this case
         we can create a new basic block; it is always exactly one basic
         block, the last one in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

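      /* Sanity-check what follows the jump: with a fallthru edge the next
         insn must begin the successor block (a note or a label); without
         one it must be a barrier.  */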
      {
        rtx_insn *x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that this
         operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't go
         out of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

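  /* Prefer the insn from the more frequently executed block (higher
     profile count or frequency).  */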
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

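  /* Every register live on entry to a non-fallthru successor must be
     treated as used by this jump.  */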
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_ebbs_init to initialize current_sched_info for
   scheduling extended basic blocks (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_move_insn ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already-processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

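                  /* See whether insn2's block is on the list of blocks
                     already processed in this ebb (chained through
                     bb->aux).  */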
                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

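  /* Walk the ebb from HEAD to TAIL, chaining the blocks whose jump has
     already been seen through bb->aux and remembering the last jump seen,
     so that risky insns can be made dependent on the jumps they must not
     freely move across.  */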
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs,
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);

                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

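  /* Detach the basic block notes from the insn chain so that scheduling
     is free to reorder insns across block boundaries; the notes are
     restored after scheduling.  */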
  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks, so we may have made
     some of them empty.  Can't assert (target_bb == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
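  /* The parameter value is a percentage; rescale it to REG_BR_PROB_BASE
     units so that it can be compared against edge probabilities.  */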
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

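      /* Grow the extended basic block: keep following the fallthru edge
         while it is likely enough, its destination is not a jump target
         (it carries no label) and scheduling is not disabled for it.  */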
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by barriers; therefore they always
     form a single-block ebb, and we can use bb->index to identify such
     ebbs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next non-empty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */