/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "profile.h"
#include "flags.h"
#include "insn-attr.h"
#include "except.h"
#include "params.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

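/* The two functions below back the save_state/restore_state hooks of
   ebb_sched_info; presumably the haifa scheduler invokes them when it
   backtracks over already-issued insns, so the only frontend state worth
   saving here is the number of insns scheduled so far.  (This note is an
   editorial reading of the code, not a comment from the original
   sources.)  */
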
/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
	   ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
	   BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
	 instruction scheduled after the last control flow instruction.
	 In this case we can create a new basic block.  It is
	 always exactly one basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	rtx_insn *x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	{
	  /* Create an empty unreachable block after the INSN.  */
	  rtx_insn *next = NEXT_INSN (insn);
	  if (next && BARRIER_P (next))
	    next = NEXT_INSN (next);
	  bb = create_basic_block (next, NULL_RTX, last_bb);
	}

      /* split_edge () creates BB before E->DEST.  Keep in mind that
	 this operation extends the scheduling region to the end of BB.
	 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
	 of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

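/* A sketch of the special case above (illustrative, not from the original
   sources): if LAST_BB ends in jump J and the scheduler moves J up over a
   partially dead insn A, then A ends up after the block-ending jump.  The
   code above gives such trailing insns a block of their own: split_edge ()
   on the fallthru edge when there is one, otherwise a fresh unreachable
   block after the barrier.  */
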
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

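/* For instance (illustrative numbers, not from the original sources): if
   INSN1's block has frequency 1000 and INSN2's block has frequency 10,
   rank () returns -1 and INSN1 is preferred, i.e. insns from the hotter
   blocks of the ebb win in the ready list.  */
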
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
			     rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx_insn *insn2 = DEP_CON (fore_dep);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  for (bb = last_block; bb; bb = (basic_block) bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    /* insn2 is the similar load.  */
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}

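/* An illustration (assumed RTL, not from the original sources): if insn1
   sets r2, load_insn is "r1 = [r2]", and an earlier, already-processed
   block contains insn2 "r3 = [r2+8]", then both loads are PFREE_CANDIDATEs
   with a true dependence on insn1, so their addresses differ only by a
   constant; had speculating load_insn been able to trap, insn2 would have
   trapped already, which is what makes the upward move safe.  */
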
/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
	{
	  bb = BLOCK_FOR_INSN (insn);
	  bb->aux = last_block;
	  last_block = bb;
	  /* Ensure blocks stay in the same order.  */
	  if (last_jump)
	    add_dependence (insn, last_jump, REG_DEP_ANTI);
	  last_jump = insn;
	}
      else if (INSN_P (insn) && last_jump != NULL_RTX)
	{
	  classification = haifa_classify_insn (insn);
	  prev = last_jump;

	  switch (classification)
	    {
	    case PFREE_CANDIDATE:
	      if (flag_schedule_speculative_load)
		{
		  bb = earliest_block_with_similiar_load (last_block, insn);
		  if (bb)
		    {
		      bb = (basic_block) bb->aux;
		      if (!bb)
			break;
		      prev = BB_END (bb);
		    }
		}
	      /* Fall through.  */
	    case TRAP_RISKY:
	    case IRISKY:
	    case PRISKY_CANDIDATE:
	      /* ??? We could implement better checking of PRISKY_CANDIDATEs,
		 analogous to sched-rgn.c.  */
	      /* We cannot change the mode of the backward
		 dependency because REG_DEP_ANTI has the lowest
		 rank.  */
	      if (! sched_insns_conditions_mutex_p (insn, prev))
		{
		  if ((current_sched_info->flags & DO_SPECULATION)
		      && (spec_info->mask & BEGIN_CONTROL))
		    {
		      dep_def _dep, *dep = &_dep;

		      init_dep (dep, prev, insn, REG_DEP_ANTI);

		      if (current_sched_info->flags & USE_DEPS_LIST)
			{
			  DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
							   BEGIN_CONTROL,
							   MAX_DEP_WEAK);
			}
		      sd_add_or_update_dep (dep, false);
		    }
		  else
		    add_dependence (insn, prev, REG_DEP_CONTROL);
		}

	      break;

	    default:
	      break;
	    }
	}
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

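/* An illustration (assumed example, not from the original sources): in an
   ebb "bb1: ... jump L1; bb2: r1 = [r2]" where the load may trap, the loop
   above makes the load depend on the jump (REG_DEP_CONTROL, or a weak anti
   dependence when control speculation is enabled), so the scheduler cannot
   simply hoist the risky load above the jump.  */
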
/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so some of them may end up empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
	      || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
	    sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
	continue;

      for (;;)
	{
	  edge e;
	  tail = BB_END (bb);
	  if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
	      || LABEL_P (BB_HEAD (bb->next_bb)))
	    break;
	  e = find_fallthru_edge (bb->succs);
	  if (! e)
	    break;
	  if (e->probability <= probability_cutoff)
	    break;
	  if (e->dest->flags & BB_DISABLE_SCHEDULE)
	    break;
	  bb = bb->next_bb;
	}

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS,
     therefore, they always form a single-block EBB,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
	  && control_flow_insn_p (insn)
	  /* We handle interblock movement of the speculation check
	     or over a speculation check in
	     haifa-sched.c: move_block_after_check ().  */
	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
	{
	  /* Assert that we don't move jumps across blocks.  */
	  gcc_assert (!control_flow_insn_p (BB_END (bb))
		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
	  return bb;
	}
      else
	return 0;
    }
  else
    /* Return next non-empty block.  */
    {
      do
	{
	  gcc_assert (bb != last_bb);

	  bb = bb->next_bb;
	}
      /* A block is empty if its only insn is its basic block note.  */
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
		      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */