/* Instruction scheduling pass.
   Copyright (C) 1992-2020 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"

\f
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
         instruction scheduled after the last control flow instruction.
         In this case we can create a new basic block.  It is
         always exactly one basic block, the last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx_insn *x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append the new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

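  /* Prefer the insn that lies in the more frequently executed block;
     a negative return value means INSN1 is preferred.  */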
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

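  /* Registers live on entry to any successor reached through a
     non-fallthru edge are treated as used by this jump.  */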
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

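/* Only the jump register dependence hook is provided for ebb scheduling;
   the remaining callbacks are left NULL.  */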
static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
\f
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 is not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
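      /* Remember the blocks that contain control flow insns by chaining
         them through bb->aux; earliest_block_with_similiar_load walks
         this list.  */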
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                         MAX_DEP_WEAK);

                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  class deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, &bitmap_default_obstack);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_release (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = param_tracer_min_branch_probability_feedback;
  else
    probability_cutoff = param_tracer_min_branch_probability;
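  /* The cutoff parameter is a percentage; scale it to REG_BR_PROB_BASE
     units so it can be compared against edge probabilities below.  */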
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

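      /* Extend the ebb along fallthru edges: stop at the exit block, at a
         block that begins with a label (it can be entered from elsewhere),
         when there is no fallthru edge, when the edge is colder than the
         probability cutoff, or when scheduling of the destination block
         is disabled.  */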
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability.initialized_p ()
              && e->probability.to_reg_br_prob_base () <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore they
     always form single-block ebbs, and we can use rec->index to
     identify such ebbs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next non-empty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of a jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */