]>
Commit | Line | Data |
---|---|---|
8c660648 | 1 | /* Instruction scheduling pass. |
62e5bf5d | 2 | Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, |
efc0b2bd | 3 | 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
63642d5a | 4 | Free Software Foundation, Inc. |
8c660648 JL |
5 | Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, |
6 | and currently maintained by, Jim Wilson (wilson@cygnus.com) | |
7 | ||
1322177d | 8 | This file is part of GCC. |
5d14e356 | 9 | |
1322177d LB |
10 | GCC is free software; you can redistribute it and/or modify it under |
11 | the terms of the GNU General Public License as published by the Free | |
9dcd6f09 | 12 | Software Foundation; either version 3, or (at your option) any later |
1322177d | 13 | version. |
5d14e356 | 14 | |
1322177d LB |
15 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
16 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
5d14e356 RK |
17 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
18 | for more details. | |
19 | ||
20 | You should have received a copy of the GNU General Public License | |
9dcd6f09 NC |
21 | along with GCC; see the file COPYING3. If not see |
22 | <http://www.gnu.org/licenses/>. */ | |
8c660648 | 23 | |
b4ead7d4 BS |
24 | /* Instruction scheduling pass. This file, along with sched-deps.c, |
25 | contains the generic parts. The actual entry point is found for | |
26 | the normal instruction scheduling pass is found in sched-rgn.c. | |
8c660648 JL |
27 | |
28 | We compute insn priorities based on data dependencies. Flow | |
29 | analysis only creates a fraction of the data-dependencies we must | |
30 | observe: namely, only those dependencies which the combiner can be | |
31 | expected to use. For this pass, we must therefore create the | |
32 | remaining dependencies we need to observe: register dependencies, | |
33 | memory dependencies, dependencies to keep function calls in order, | |
34 | and the dependence between a conditional branch and the setting of | |
35 | condition codes are all dealt with here. | |
36 | ||
37 | The scheduler first traverses the data flow graph, starting with | |
38 | the last instruction, and proceeding to the first, assigning values | |
39 | to insn_priority as it goes. This sorts the instructions | |
40 | topologically by data dependence. | |
41 | ||
42 | Once priorities have been established, we order the insns using | |
43 | list scheduling. This works as follows: starting with a list of | |
44 | all the ready insns, and sorted according to priority number, we | |
45 | schedule the insn from the end of the list by placing its | |
46 | predecessors in the list according to their priority order. We | |
47 | consider this insn scheduled by setting the pointer to the "end" of | |
48 | the list to point to the previous insn. When an insn has no | |
49 | predecessors, we either queue it until sufficient time has elapsed | |
50 | or add it to the ready list. As the instructions are scheduled or | |
51 | when stalls are introduced, the queue advances and dumps insns into | |
52 | the ready list. When all insns down to the lowest priority have | |
53 | been scheduled, the critical path of the basic block has been made | |
54 | as short as possible. The remaining insns are then scheduled in | |
55 | remaining slots. | |
56 | ||
8c660648 JL |
57 | The following list shows the order in which we want to break ties |
58 | among insns in the ready list: | |
59 | ||
60 | 1. choose insn with the longest path to end of bb, ties | |
61 | broken by | |
62 | 2. choose insn with least contribution to register pressure, | |
63 | ties broken by | |
64 | 3. prefer in-block upon interblock motion, ties broken by | |
65 | 4. prefer useful upon speculative motion, ties broken by | |
66 | 5. choose insn with largest control flow probability, ties | |
67 | broken by | |
68 | 6. choose insn with the least dependences upon the previously | |
69 | scheduled insn, or finally | |
2db45993 JL |
70 | 7 choose the insn which has the most insns dependent on it. |
71 | 8. choose insn with lowest UID. | |
8c660648 JL |
72 | |
73 | Memory references complicate matters. Only if we can be certain | |
74 | that memory references are not part of the data dependency graph | |
75 | (via true, anti, or output dependence), can we move operations past | |
76 | memory references. To first approximation, reads can be done | |
77 | independently, while writes introduce dependencies. Better | |
78 | approximations will yield fewer dependencies. | |
79 | ||
80 | Before reload, an extended analysis of interblock data dependences | |
81 | is required for interblock scheduling. This is performed in | |
82 | compute_block_backward_dependences (). | |
83 | ||
84 | Dependencies set up by memory references are treated in exactly the | |
b198261f MK |
85 | same way as other dependencies, by using insn backward dependences |
86 | INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences | |
87 | INSN_FORW_DEPS the purpose of forward list scheduling. | |
8c660648 JL |
88 | |
89 | Having optimized the critical path, we may have also unduly | |
90 | extended the lifetimes of some registers. If an operation requires | |
91 | that constants be loaded into registers, it is certainly desirable | |
92 | to load those constants as early as necessary, but no earlier. | |
93 | I.e., it will not do to load up a bunch of registers at the | |
94 | beginning of a basic block only to use them at the end, if they | |
95 | could be loaded later, since this may result in excessive register | |
96 | utilization. | |
97 | ||
98 | Note that since branches are never in basic blocks, but only end | |
99 | basic blocks, this pass will not move branches. But that is ok, | |
100 | since we can use GNU's delayed branch scheduling pass to take care | |
101 | of this case. | |
102 | ||
103 | Also note that no further optimizations based on algebraic | |
104 | identities are performed, so this pass would be a good one to | |
105 | perform instruction splitting, such as breaking up a multiply | |
106 | instruction into shifts and adds where that is profitable. | |
107 | ||
108 | Given the memory aliasing analysis that this pass should perform, | |
109 | it should be possible to remove redundant stores to memory, and to | |
110 | load values from registers instead of hitting memory. | |
111 | ||
112 | Before reload, speculative insns are moved only if a 'proof' exists | |
113 | that no exception will be caused by this, and if no live registers | |
114 | exist that inhibit the motion (live registers constraints are not | |
115 | represented by data dependence edges). | |
116 | ||
117 | This pass must update information that subsequent passes expect to | |
118 | be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths, | |
a813c111 | 119 | reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END. |
8c660648 JL |
120 | |
121 | The information in the line number notes is carefully retained by | |
122 | this pass. Notes that refer to the starting and ending of | |
123 | exception regions are also carefully retained by this pass. All | |
124 | other NOTE insns are grouped in their same relative order at the | |
b4ead7d4 | 125 | beginning of basic blocks and regions that have been scheduled. */ |
8c660648 | 126 | \f |
8c660648 | 127 | #include "config.h" |
5835e573 | 128 | #include "system.h" |
4977bab6 ZW |
129 | #include "coretypes.h" |
130 | #include "tm.h" | |
01198c2f | 131 | #include "toplev.h" |
8c660648 | 132 | #include "rtl.h" |
6baf1cc8 | 133 | #include "tm_p.h" |
efc9bd41 | 134 | #include "hard-reg-set.h" |
8c660648 | 135 | #include "regs.h" |
49ad7cfa | 136 | #include "function.h" |
8c660648 JL |
137 | #include "flags.h" |
138 | #include "insn-config.h" | |
139 | #include "insn-attr.h" | |
140 | #include "except.h" | |
487a6e06 | 141 | #include "toplev.h" |
79c9824e | 142 | #include "recog.h" |
1708fd40 | 143 | #include "sched-int.h" |
c237e94a | 144 | #include "target.h" |
10d22567 | 145 | #include "output.h" |
496d7bb0 | 146 | #include "params.h" |
e855c69d | 147 | #include "vecprim.h" |
6fb5fa3c | 148 | #include "dbgcnt.h" |
e855c69d | 149 | #include "cfgloop.h" |
ce18efcb | 150 | #include "ira.h" |
8c660648 | 151 | |
8c660648 JL |
152 | #ifdef INSN_SCHEDULING |
153 | ||
8c660648 JL |
154 | /* issue_rate is the number of insns that can be scheduled in the same |
155 | machine cycle. It can be defined in the config/mach/mach.h file, | |
156 | otherwise we set it to 1. */ | |
157 | ||
e855c69d | 158 | int issue_rate; |
8c660648 | 159 | |
cc132865 | 160 | /* sched-verbose controls the amount of debugging output the |
409f8483 | 161 | scheduler prints. It is controlled by -fsched-verbose=N: |
8c660648 JL |
162 | N>0 and no -DSR : the output is directed to stderr. |
163 | N>=10 will direct the printouts to stderr (regardless of -dSR). | |
164 | N=1: same as -dSR. | |
165 | N=2: bb's probabilities, detailed ready list info, unit/insn info. | |
166 | N=3: rtl at abort point, control-flow, regions info. | |
cc132865 | 167 | N=5: dependences info. */ |
8c660648 | 168 | |
8c660648 | 169 | static int sched_verbose_param = 0; |
b4ead7d4 | 170 | int sched_verbose = 0; |
8c660648 | 171 | |
63de6c74 | 172 | /* Debugging file. All printouts are sent to dump, which is always set, |
8c660648 | 173 | either to stderr, or to the dump listing file (-dRS). */ |
c62c2659 | 174 | FILE *sched_dump = 0; |
a88f02e7 | 175 | |
8c660648 | 176 | /* fix_sched_param() is called from toplev.c upon detection |
409f8483 | 177 | of the -fsched-verbose=N option. */ |
8c660648 JL |
178 | |
179 | void | |
1d088dee | 180 | fix_sched_param (const char *param, const char *val) |
8c660648 | 181 | { |
cc132865 | 182 | if (!strcmp (param, "verbose")) |
8c660648 | 183 | sched_verbose_param = atoi (val); |
8c660648 | 184 | else |
d4ee4d25 | 185 | warning (0, "fix_sched_param: unknown param: %s", param); |
8c660648 JL |
186 | } |
187 | ||
b8698a0f | 188 | /* This is a placeholder for the scheduler parameters common |
e855c69d AB |
189 | to all schedulers. */ |
190 | struct common_sched_info_def *common_sched_info; | |
f66d83e1 | 191 | |
e855c69d AB |
192 | #define INSN_TICK(INSN) (HID (INSN)->tick) |
193 | #define INTER_TICK(INSN) (HID (INSN)->inter_tick) | |
63f54b1a MK |
194 | |
195 | /* If INSN_TICK of an instruction is equal to INVALID_TICK, | |
196 | then it should be recalculated from scratch. */ | |
197 | #define INVALID_TICK (-(max_insn_queue_index + 1)) | |
198 | /* The minimal value of the INSN_TICK of an instruction. */ | |
199 | #define MIN_TICK (-max_insn_queue_index) | |
8c660648 | 200 | |
496d7bb0 MK |
201 | /* Issue points are used to distinguish between instructions in max_issue (). |
202 | For now, all instructions are equally good. */ | |
203 | #define ISSUE_POINTS(INSN) 1 | |
204 | ||
8c660648 JL |
205 | /* List of important notes we must keep around. This is a pointer to the |
206 | last element in the list. */ | |
e855c69d | 207 | rtx note_list; |
8c660648 | 208 | |
496d7bb0 MK |
209 | static struct spec_info_def spec_info_var; |
210 | /* Description of the speculative part of the scheduling. | |
211 | If NULL - no speculation. */ | |
e855c69d | 212 | spec_info_t spec_info = NULL; |
496d7bb0 MK |
213 | |
214 | /* True, if recovery block was added during scheduling of current block. | |
215 | Used to determine, if we need to fix INSN_TICKs. */ | |
e2f6ff94 MK |
216 | static bool haifa_recovery_bb_recently_added_p; |
217 | ||
218 | /* True, if recovery block was added during this scheduling pass. | |
219 | Used to determine if we should have empty memory pools of dependencies | |
220 | after finishing current region. */ | |
221 | bool haifa_recovery_bb_ever_added_p; | |
496d7bb0 | 222 | |
917f1b7e | 223 | /* Counters of different types of speculative instructions. */ |
496d7bb0 MK |
224 | static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control; |
225 | ||
496d7bb0 MK |
226 | /* Array used in {unlink, restore}_bb_notes. */ |
227 | static rtx *bb_header = 0; | |
228 | ||
496d7bb0 MK |
229 | /* Basic block after which recovery blocks will be created. */ |
230 | static basic_block before_recovery; | |
231 | ||
e855c69d AB |
232 | /* Basic block just before the EXIT_BLOCK and after recovery, if we have |
233 | created it. */ | |
234 | basic_block after_recovery; | |
235 | ||
236 | /* FALSE if we add bb to another region, so we don't need to initialize it. */ | |
237 | bool adding_bb_to_current_region_p = true; | |
238 | ||
8c660648 JL |
239 | /* Queues, etc. */ |
240 | ||
241 | /* An instruction is ready to be scheduled when all insns preceding it | |
242 | have already been scheduled. It is important to ensure that all | |
243 | insns which use its result will not be executed until its result | |
244 | has been computed. An insn is maintained in one of four structures: | |
245 | ||
246 | (P) the "Pending" set of insns which cannot be scheduled until | |
247 | their dependencies have been satisfied. | |
248 | (Q) the "Queued" set of insns that can be scheduled when sufficient | |
249 | time has passed. | |
250 | (R) the "Ready" list of unscheduled, uncommitted insns. | |
251 | (S) the "Scheduled" list of insns. | |
252 | ||
253 | Initially, all insns are either "Pending" or "Ready" depending on | |
254 | whether their dependencies are satisfied. | |
255 | ||
256 | Insns move from the "Ready" list to the "Scheduled" list as they | |
257 | are committed to the schedule. As this occurs, the insns in the | |
258 | "Pending" list have their dependencies satisfied and move to either | |
259 | the "Ready" list or the "Queued" set depending on whether | |
260 | sufficient time has passed to make them ready. As time passes, | |
fa0aee89 | 261 | insns move from the "Queued" set to the "Ready" list. |
8c660648 | 262 | |
b198261f MK |
263 | The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the |
264 | unscheduled insns, i.e., those that are ready, queued, and pending. | |
8c660648 JL |
265 | The "Queued" set (Q) is implemented by the variable `insn_queue'. |
266 | The "Ready" list (R) is implemented by the variables `ready' and | |
267 | `n_ready'. | |
268 | The "Scheduled" list (S) is the new insn chain built by this pass. | |
269 | ||
270 | The transition (R->S) is implemented in the scheduling loop in | |
271 | `schedule_block' when the best insn to schedule is chosen. | |
8c660648 JL |
272 | The transitions (P->R and P->Q) are implemented in `schedule_insn' as |
273 | insns move from the ready list to the scheduled list. | |
274 | The transition (Q->R) is implemented in 'queue_to_insn' as time | |
275 | passes or stalls are introduced. */ | |
276 | ||
277 | /* Implement a circular buffer to delay instructions until sufficient | |
fa0aee89 | 278 | time has passed. For the new pipeline description interface, |
63f54b1a | 279 | MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less |
fa0aee89 PB |
280 | than maximal time of instruction execution computed by genattr.c on |
281 | the base maximal time of functional unit reservations and getting a | |
282 | result. This is the longest time an insn may be queued. */ | |
fae15c93 VM |
283 | |
284 | static rtx *insn_queue; | |
8c660648 JL |
285 | static int q_ptr = 0; |
286 | static int q_size = 0; | |
fa0aee89 PB |
287 | #define NEXT_Q(X) (((X)+1) & max_insn_queue_index) |
288 | #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index) | |
fae15c93 | 289 | |
63f54b1a MK |
290 | #define QUEUE_SCHEDULED (-3) |
291 | #define QUEUE_NOWHERE (-2) | |
292 | #define QUEUE_READY (-1) | |
293 | /* QUEUE_SCHEDULED - INSN is scheduled. | |
294 | QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in | |
295 | queue or ready list. | |
296 | QUEUE_READY - INSN is in ready list. | |
297 | N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */ | |
b8698a0f | 298 | |
e855c69d | 299 | #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index) |
63f54b1a | 300 | |
fae15c93 VM |
301 | /* The following variable value refers for all current and future |
302 | reservations of the processor units. */ | |
303 | state_t curr_state; | |
304 | ||
305 | /* The following variable value is size of memory representing all | |
fa0aee89 | 306 | current and future reservations of the processor units. */ |
e855c69d | 307 | size_t dfa_state_size; |
fae15c93 VM |
308 | |
309 | /* The following array is used to find the best insn from ready when | |
310 | the automaton pipeline interface is used. */ | |
e855c69d | 311 | char *ready_try = NULL; |
8c660648 | 312 | |
e855c69d | 313 | /* The ready list. */ |
b5b8b0ac | 314 | struct ready_list ready = {NULL, 0, 0, 0, 0}; |
176f9a7b | 315 | |
e855c69d AB |
316 | /* The pointer to the ready list (to be removed). */ |
317 | static struct ready_list *readyp = &ready; | |
63f54b1a MK |
318 | |
319 | /* Scheduling clock. */ | |
320 | static int clock_var; | |
321 | ||
9678086d | 322 | static int may_trap_exp (const_rtx, int); |
15aab9c0 VM |
323 | |
324 | /* Nonzero iff the address is comprised from at most 1 register. */ | |
325 | #define CONST_BASED_ADDRESS_P(x) \ | |
f8cfc6aa | 326 | (REG_P (x) \ |
15aab9c0 VM |
327 | || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \ |
328 | || (GET_CODE (x) == LO_SUM)) \ | |
329 | && (CONSTANT_P (XEXP (x, 0)) \ | |
330 | || CONSTANT_P (XEXP (x, 1))))) | |
331 | ||
332 | /* Returns a class that insn with GET_DEST(insn)=x may belong to, | |
333 | as found by analyzing insn's expression. */ | |
334 | ||
e855c69d AB |
335 | \f |
336 | static int haifa_luid_for_non_insn (rtx x); | |
337 | ||
338 | /* Haifa version of sched_info hooks common to all headers. */ | |
b8698a0f | 339 | const struct common_sched_info_def haifa_common_sched_info = |
e855c69d AB |
340 | { |
341 | NULL, /* fix_recovery_cfg */ | |
342 | NULL, /* add_block */ | |
343 | NULL, /* estimate_number_of_insns */ | |
344 | haifa_luid_for_non_insn, /* luid_for_non_insn */ | |
345 | SCHED_PASS_UNKNOWN /* sched_pass_id */ | |
346 | }; | |
347 | ||
348 | const struct sched_scan_info_def *sched_scan_info; | |
349 | ||
350 | /* Mapping from instruction UID to its Logical UID. */ | |
351 | VEC (int, heap) *sched_luids = NULL; | |
352 | ||
353 | /* Next LUID to assign to an instruction. */ | |
354 | int sched_max_luid = 1; | |
355 | ||
356 | /* Haifa Instruction Data. */ | |
357 | VEC (haifa_insn_data_def, heap) *h_i_d = NULL; | |
358 | ||
359 | void (* sched_init_only_bb) (basic_block, basic_block); | |
360 | ||
361 | /* Split block function. Different schedulers might use different functions | |
362 | to handle their internal data consistent. */ | |
363 | basic_block (* sched_split_block) (basic_block, rtx); | |
364 | ||
365 | /* Create empty basic block after the specified block. */ | |
366 | basic_block (* sched_create_empty_bb) (basic_block); | |
367 | ||
15aab9c0 | 368 | static int |
9678086d | 369 | may_trap_exp (const_rtx x, int is_store) |
15aab9c0 VM |
370 | { |
371 | enum rtx_code code; | |
372 | ||
373 | if (x == 0) | |
374 | return TRAP_FREE; | |
375 | code = GET_CODE (x); | |
376 | if (is_store) | |
377 | { | |
378 | if (code == MEM && may_trap_p (x)) | |
379 | return TRAP_RISKY; | |
380 | else | |
381 | return TRAP_FREE; | |
382 | } | |
383 | if (code == MEM) | |
384 | { | |
385 | /* The insn uses memory: a volatile load. */ | |
386 | if (MEM_VOLATILE_P (x)) | |
387 | return IRISKY; | |
388 | /* An exception-free load. */ | |
389 | if (!may_trap_p (x)) | |
390 | return IFREE; | |
391 | /* A load with 1 base register, to be further checked. */ | |
392 | if (CONST_BASED_ADDRESS_P (XEXP (x, 0))) | |
393 | return PFREE_CANDIDATE; | |
394 | /* No info on the load, to be further checked. */ | |
395 | return PRISKY_CANDIDATE; | |
396 | } | |
397 | else | |
398 | { | |
399 | const char *fmt; | |
400 | int i, insn_class = TRAP_FREE; | |
401 | ||
402 | /* Neither store nor load, check if it may cause a trap. */ | |
403 | if (may_trap_p (x)) | |
404 | return TRAP_RISKY; | |
405 | /* Recursive step: walk the insn... */ | |
406 | fmt = GET_RTX_FORMAT (code); | |
407 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
408 | { | |
409 | if (fmt[i] == 'e') | |
410 | { | |
411 | int tmp_class = may_trap_exp (XEXP (x, i), is_store); | |
412 | insn_class = WORST_CLASS (insn_class, tmp_class); | |
413 | } | |
414 | else if (fmt[i] == 'E') | |
415 | { | |
416 | int j; | |
417 | for (j = 0; j < XVECLEN (x, i); j++) | |
418 | { | |
419 | int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store); | |
420 | insn_class = WORST_CLASS (insn_class, tmp_class); | |
421 | if (insn_class == TRAP_RISKY || insn_class == IRISKY) | |
422 | break; | |
423 | } | |
424 | } | |
425 | if (insn_class == TRAP_RISKY || insn_class == IRISKY) | |
426 | break; | |
427 | } | |
428 | return insn_class; | |
429 | } | |
430 | } | |
431 | ||
ac4a7e21 AM |
432 | /* Classifies rtx X of an insn for the purpose of verifying that X can be |
433 | executed speculatively (and consequently the insn can be moved | |
434 | speculatively), by examining X, returning: | |
15aab9c0 VM |
435 | TRAP_RISKY: store, or risky non-load insn (e.g. division by variable). |
436 | TRAP_FREE: non-load insn. | |
a98ebe2e | 437 | IFREE: load from a globally safe location. |
15aab9c0 VM |
438 | IRISKY: volatile load. |
439 | PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for | |
440 | being either PFREE or PRISKY. */ | |
441 | ||
ac4a7e21 AM |
442 | static int |
443 | haifa_classify_rtx (const_rtx x) | |
15aab9c0 | 444 | { |
15aab9c0 VM |
445 | int tmp_class = TRAP_FREE; |
446 | int insn_class = TRAP_FREE; | |
447 | enum rtx_code code; | |
448 | ||
ac4a7e21 | 449 | if (GET_CODE (x) == PARALLEL) |
15aab9c0 | 450 | { |
ac4a7e21 | 451 | int i, len = XVECLEN (x, 0); |
15aab9c0 VM |
452 | |
453 | for (i = len - 1; i >= 0; i--) | |
454 | { | |
ac4a7e21 | 455 | tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i)); |
15aab9c0 VM |
456 | insn_class = WORST_CLASS (insn_class, tmp_class); |
457 | if (insn_class == TRAP_RISKY || insn_class == IRISKY) | |
458 | break; | |
459 | } | |
460 | } | |
461 | else | |
462 | { | |
ac4a7e21 | 463 | code = GET_CODE (x); |
15aab9c0 VM |
464 | switch (code) |
465 | { | |
466 | case CLOBBER: | |
467 | /* Test if it is a 'store'. */ | |
ac4a7e21 | 468 | tmp_class = may_trap_exp (XEXP (x, 0), 1); |
15aab9c0 VM |
469 | break; |
470 | case SET: | |
471 | /* Test if it is a store. */ | |
ac4a7e21 | 472 | tmp_class = may_trap_exp (SET_DEST (x), 1); |
15aab9c0 VM |
473 | if (tmp_class == TRAP_RISKY) |
474 | break; | |
475 | /* Test if it is a load. */ | |
476 | tmp_class = | |
477 | WORST_CLASS (tmp_class, | |
ac4a7e21 | 478 | may_trap_exp (SET_SRC (x), 0)); |
15aab9c0 VM |
479 | break; |
480 | case COND_EXEC: | |
ac4a7e21 AM |
481 | tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x)); |
482 | if (tmp_class == TRAP_RISKY) | |
483 | break; | |
484 | tmp_class = WORST_CLASS (tmp_class, | |
485 | may_trap_exp (COND_EXEC_TEST (x), 0)); | |
486 | break; | |
15aab9c0 VM |
487 | case TRAP_IF: |
488 | tmp_class = TRAP_RISKY; | |
489 | break; | |
490 | default:; | |
491 | } | |
492 | insn_class = tmp_class; | |
493 | } | |
494 | ||
495 | return insn_class; | |
496 | } | |
497 | ||
ac4a7e21 AM |
498 | int |
499 | haifa_classify_insn (const_rtx insn) | |
500 | { | |
501 | return haifa_classify_rtx (PATTERN (insn)); | |
502 | } | |
503 | ||
8c660648 | 504 | /* Forward declarations. */ |
fae15c93 | 505 | |
1d088dee AJ |
506 | static int priority (rtx); |
507 | static int rank_for_schedule (const void *, const void *); | |
508 | static void swap_sort (rtx *, int); | |
509 | static void queue_insn (rtx, int); | |
63f54b1a | 510 | static int schedule_insn (rtx); |
1d088dee AJ |
511 | static void adjust_priority (rtx); |
512 | static void advance_one_cycle (void); | |
e855c69d AB |
513 | static void extend_h_i_d (void); |
514 | ||
8c660648 | 515 | |
8c660648 JL |
516 | /* Notes handling mechanism: |
517 | ========================= | |
518 | Generally, NOTES are saved before scheduling and restored after scheduling. | |
07c02828 | 519 | The scheduler distinguishes between two types of notes: |
8c660648 | 520 | |
6039a0c7 | 521 | (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes: |
8c660648 JL |
522 | Before scheduling a region, a pointer to the note is added to the insn |
523 | that follows or precedes it. (This happens as part of the data dependence | |
524 | computation). After scheduling an insn, the pointer contained in it is | |
525 | used for regenerating the corresponding note (in reemit_notes). | |
526 | ||
6039a0c7 | 527 | (2) All other notes (e.g. INSN_DELETED): Before scheduling a block, |
8c660648 JL |
528 | these notes are put in a list (in rm_other_notes() and |
529 | unlink_other_notes ()). After scheduling the block, these notes are | |
530 | inserted at the beginning of the block (in schedule_block()). */ | |
531 | ||
63f54b1a | 532 | static void ready_add (struct ready_list *, rtx, bool); |
1d088dee | 533 | static rtx ready_remove_first (struct ready_list *); |
3fe41456 | 534 | |
1d088dee | 535 | static void queue_to_ready (struct ready_list *); |
569fa502 | 536 | static int early_queue_to_ready (state_t, struct ready_list *); |
176f9a7b | 537 | |
1d088dee | 538 | static void debug_ready_list (struct ready_list *); |
3fe41456 | 539 | |
fae15c93 | 540 | /* The following functions are used to implement multi-pass scheduling |
fa0aee89 | 541 | on the first cycle. */ |
1d088dee | 542 | static rtx ready_remove (struct ready_list *, int); |
63f54b1a | 543 | static void ready_remove_insn (rtx); |
fae15c93 | 544 | |
b631c5f7 | 545 | static int choose_ready (struct ready_list *, rtx *); |
fae15c93 | 546 | |
63f54b1a MK |
547 | static void fix_inter_tick (rtx, rtx); |
548 | static int fix_tick_ready (rtx); | |
549 | static void change_queue_index (rtx, int); | |
63f54b1a | 550 | |
496d7bb0 MK |
551 | /* The following functions are used to implement scheduling of data/control |
552 | speculative instructions. */ | |
553 | ||
554 | static void extend_h_i_d (void); | |
496d7bb0 MK |
555 | static void init_h_i_d (rtx); |
556 | static void generate_recovery_code (rtx); | |
e2f6ff94 | 557 | static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t); |
496d7bb0 MK |
558 | static void begin_speculative_block (rtx); |
559 | static void add_to_speculative_block (rtx); | |
e855c69d | 560 | static void init_before_recovery (basic_block *); |
496d7bb0 MK |
561 | static void create_check_block_twin (rtx, bool); |
562 | static void fix_recovery_deps (basic_block); | |
e855c69d | 563 | static void haifa_change_pattern (rtx, rtx); |
496d7bb0 MK |
564 | static void dump_new_block_header (int, basic_block, rtx, rtx); |
565 | static void restore_bb_notes (basic_block); | |
496d7bb0 MK |
566 | static void fix_jump_move (rtx); |
567 | static void move_block_after_check (rtx); | |
568 | static void move_succs (VEC(edge,gc) **, basic_block); | |
496d7bb0 | 569 | static void sched_remove_insn (rtx); |
916fa4f0 MK |
570 | static void clear_priorities (rtx, rtx_vec_t *); |
571 | static void calc_priorities (rtx_vec_t); | |
496d7bb0 | 572 | static void add_jump_dependencies (rtx, rtx); |
496d7bb0 MK |
573 | #ifdef ENABLE_CHECKING |
574 | static int has_edge_p (VEC(edge,gc) *, int); | |
575 | static void check_cfg (rtx, rtx); | |
496d7bb0 MK |
576 | #endif |
577 | ||
8c660648 JL |
578 | #endif /* INSN_SCHEDULING */ |
579 | \f | |
1708fd40 | 580 | /* Point to state used for the current scheduling pass. */ |
e855c69d | 581 | struct haifa_sched_info *current_sched_info; |
8c660648 JL |
582 | \f |
583 | #ifndef INSN_SCHEDULING | |
584 | void | |
10d22567 | 585 | schedule_insns (void) |
8c660648 JL |
586 | { |
587 | } | |
588 | #else | |
cbb13457 | 589 | |
ce18efcb VM |
590 | /* Do register pressure sensitive insn scheduling if the flag is set |
591 | up. */ | |
592 | bool sched_pressure_p; | |
593 | ||
594 | /* Map regno -> its cover class. The map defined only when | |
595 | SCHED_PRESSURE_P is true. */ | |
596 | enum reg_class *sched_regno_cover_class; | |
597 | ||
598 | /* The current register pressure. Only elements corresponding cover | |
599 | classes are defined. */ | |
600 | static int curr_reg_pressure[N_REG_CLASSES]; | |
601 | ||
602 | /* Saved value of the previous array. */ | |
603 | static int saved_reg_pressure[N_REG_CLASSES]; | |
604 | ||
605 | /* Register living at given scheduling point. */ | |
606 | static bitmap curr_reg_live; | |
607 | ||
608 | /* Saved value of the previous array. */ | |
609 | static bitmap saved_reg_live; | |
610 | ||
611 | /* Registers mentioned in the current region. */ | |
612 | static bitmap region_ref_regs; | |
613 | ||
614 | /* Initiate register pressure relative info for scheduling the current | |
615 | region. Currently it is only clearing register mentioned in the | |
616 | current region. */ | |
617 | void | |
618 | sched_init_region_reg_pressure_info (void) | |
619 | { | |
620 | bitmap_clear (region_ref_regs); | |
621 | } | |
622 | ||
623 | /* Update current register pressure related info after birth (if | |
624 | BIRTH_P) or death of register REGNO. */ | |
625 | static void | |
626 | mark_regno_birth_or_death (int regno, bool birth_p) | |
627 | { | |
628 | enum reg_class cover_class; | |
629 | ||
630 | cover_class = sched_regno_cover_class[regno]; | |
631 | if (regno >= FIRST_PSEUDO_REGISTER) | |
632 | { | |
633 | if (cover_class != NO_REGS) | |
634 | { | |
635 | if (birth_p) | |
636 | { | |
637 | bitmap_set_bit (curr_reg_live, regno); | |
638 | curr_reg_pressure[cover_class] | |
639 | += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)]; | |
640 | } | |
641 | else | |
642 | { | |
643 | bitmap_clear_bit (curr_reg_live, regno); | |
644 | curr_reg_pressure[cover_class] | |
645 | -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)]; | |
646 | } | |
647 | } | |
648 | } | |
649 | else if (cover_class != NO_REGS | |
650 | && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) | |
651 | { | |
652 | if (birth_p) | |
653 | { | |
654 | bitmap_set_bit (curr_reg_live, regno); | |
655 | curr_reg_pressure[cover_class]++; | |
656 | } | |
657 | else | |
658 | { | |
659 | bitmap_clear_bit (curr_reg_live, regno); | |
660 | curr_reg_pressure[cover_class]--; | |
661 | } | |
662 | } | |
663 | } | |
664 | ||
665 | /* Initiate current register pressure related info from living | |
666 | registers given by LIVE. */ | |
667 | static void | |
668 | initiate_reg_pressure_info (bitmap live) | |
669 | { | |
670 | int i; | |
671 | unsigned int j; | |
672 | bitmap_iterator bi; | |
673 | ||
674 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
675 | curr_reg_pressure[ira_reg_class_cover[i]] = 0; | |
676 | bitmap_clear (curr_reg_live); | |
677 | EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi) | |
678 | if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j)) | |
679 | mark_regno_birth_or_death (j, true); | |
680 | } | |
681 | ||
682 | /* Mark registers in X as mentioned in the current region. */ | |
683 | static void | |
684 | setup_ref_regs (rtx x) | |
685 | { | |
686 | int i, j, regno; | |
687 | const RTX_CODE code = GET_CODE (x); | |
688 | const char *fmt; | |
689 | ||
690 | if (REG_P (x)) | |
691 | { | |
692 | regno = REGNO (x); | |
693 | if (regno >= FIRST_PSEUDO_REGISTER) | |
694 | bitmap_set_bit (region_ref_regs, REGNO (x)); | |
695 | else | |
696 | for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--) | |
697 | bitmap_set_bit (region_ref_regs, regno + i); | |
698 | return; | |
699 | } | |
700 | fmt = GET_RTX_FORMAT (code); | |
701 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
702 | if (fmt[i] == 'e') | |
703 | setup_ref_regs (XEXP (x, i)); | |
704 | else if (fmt[i] == 'E') | |
705 | { | |
706 | for (j = 0; j < XVECLEN (x, i); j++) | |
707 | setup_ref_regs (XVECEXP (x, i, j)); | |
708 | } | |
709 | } | |
710 | ||
711 | /* Initiate current register pressure related info at the start of | |
712 | basic block BB. */ | |
713 | static void | |
714 | initiate_bb_reg_pressure_info (basic_block bb) | |
715 | { | |
716 | unsigned int i; | |
717 | rtx insn; | |
718 | ||
719 | if (current_nr_blocks > 1) | |
720 | FOR_BB_INSNS (bb, insn) | |
721 | if (INSN_P (insn)) | |
722 | setup_ref_regs (PATTERN (insn)); | |
723 | initiate_reg_pressure_info (df_get_live_in (bb)); | |
724 | #ifdef EH_RETURN_DATA_REGNO | |
725 | if (bb_has_eh_pred (bb)) | |
726 | for (i = 0; ; ++i) | |
727 | { | |
728 | unsigned int regno = EH_RETURN_DATA_REGNO (i); | |
b8698a0f | 729 | |
ce18efcb VM |
730 | if (regno == INVALID_REGNUM) |
731 | break; | |
732 | if (! bitmap_bit_p (df_get_live_in (bb), regno)) | |
733 | mark_regno_birth_or_death (regno, true); | |
734 | } | |
735 | #endif | |
736 | } | |
737 | ||
738 | /* Save current register pressure related info. */ | |
739 | static void | |
740 | save_reg_pressure (void) | |
741 | { | |
742 | int i; | |
b8698a0f | 743 | |
ce18efcb VM |
744 | for (i = 0; i < ira_reg_class_cover_size; i++) |
745 | saved_reg_pressure[ira_reg_class_cover[i]] | |
746 | = curr_reg_pressure[ira_reg_class_cover[i]]; | |
747 | bitmap_copy (saved_reg_live, curr_reg_live); | |
748 | } | |
749 | ||
750 | /* Restore saved register pressure related info. */ | |
751 | static void | |
752 | restore_reg_pressure (void) | |
753 | { | |
754 | int i; | |
b8698a0f | 755 | |
ce18efcb VM |
756 | for (i = 0; i < ira_reg_class_cover_size; i++) |
757 | curr_reg_pressure[ira_reg_class_cover[i]] | |
758 | = saved_reg_pressure[ira_reg_class_cover[i]]; | |
759 | bitmap_copy (curr_reg_live, saved_reg_live); | |
760 | } | |
761 | ||
762 | /* Return TRUE if the register is dying after its USE. */ | |
763 | static bool | |
764 | dying_use_p (struct reg_use_data *use) | |
765 | { | |
766 | struct reg_use_data *next; | |
767 | ||
768 | for (next = use->next_regno_use; next != use; next = next->next_regno_use) | |
769 | if (QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED) | |
770 | return false; | |
771 | return true; | |
772 | } | |
773 | ||
774 | /* Print info about the current register pressure and its excess for | |
775 | each cover class. */ | |
776 | static void | |
777 | print_curr_reg_pressure (void) | |
778 | { | |
779 | int i; | |
780 | enum reg_class cl; | |
781 | ||
782 | fprintf (sched_dump, ";;\t"); | |
783 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
784 | { | |
785 | cl = ira_reg_class_cover[i]; | |
786 | gcc_assert (curr_reg_pressure[cl] >= 0); | |
787 | fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl], | |
788 | curr_reg_pressure[cl], | |
789 | curr_reg_pressure[cl] - ira_available_class_regs[cl]); | |
790 | } | |
791 | fprintf (sched_dump, "\n"); | |
792 | } | |
793 | ||
8c660648 JL |
794 | /* Pointer to the last instruction scheduled. Used by rank_for_schedule, |
795 | so that insns independent of the last scheduled insn will be preferred | |
796 | over dependent instructions. */ | |
797 | ||
798 | static rtx last_scheduled_insn; | |
799 | ||
b198261f MK |
800 | /* Cached cost of the instruction. Use below function to get cost of the |
801 | insn. -1 here means that the field is not initialized. */ | |
e855c69d | 802 | #define INSN_COST(INSN) (HID (INSN)->cost) |
8c660648 | 803 | |
b198261f | 804 | /* Compute cost of executing INSN. |
496d7bb0 MK |
805 | This is the number of cycles between instruction issue and |
806 | instruction results. */ | |
5fd8300b | 807 | int |
b198261f | 808 | insn_cost (rtx insn) |
8c660648 | 809 | { |
e855c69d AB |
810 | int cost; |
811 | ||
812 | if (sel_sched_p ()) | |
813 | { | |
814 | if (recog_memoized (insn) < 0) | |
815 | return 0; | |
816 | ||
817 | cost = insn_default_latency (insn); | |
818 | if (cost < 0) | |
819 | cost = 0; | |
820 | ||
821 | return cost; | |
822 | } | |
823 | ||
824 | cost = INSN_COST (insn); | |
8c660648 | 825 | |
fae15c93 | 826 | if (cost < 0) |
8c660648 | 827 | { |
fae15c93 VM |
828 | /* A USE insn, or something else we don't need to |
829 | understand. We can't pass these directly to | |
830 | result_ready_cost or insn_default_latency because it will | |
831 | trigger a fatal error for unrecognizable insns. */ | |
832 | if (recog_memoized (insn) < 0) | |
8c660648 | 833 | { |
fae15c93 VM |
834 | INSN_COST (insn) = 0; |
835 | return 0; | |
8c660648 JL |
836 | } |
837 | else | |
838 | { | |
fa0aee89 | 839 | cost = insn_default_latency (insn); |
fae15c93 VM |
840 | if (cost < 0) |
841 | cost = 0; | |
1d088dee | 842 | |
8c660648 JL |
843 | INSN_COST (insn) = cost; |
844 | } | |
845 | } | |
846 | ||
b198261f MK |
847 | return cost; |
848 | } | |
849 | ||
850 | /* Compute cost of dependence LINK. | |
851 | This is the number of cycles between instruction issue and | |
078a70a1 AN |
852 | instruction results. |
853 | ??? We also use this function to call recog_memoized on all insns. */ | |
b198261f | 854 | int |
e855c69d | 855 | dep_cost_1 (dep_t link, dw_t dw) |
b198261f | 856 | { |
078a70a1 | 857 | rtx insn = DEP_PRO (link); |
b198261f MK |
858 | rtx used = DEP_CON (link); |
859 | int cost; | |
8c660648 | 860 | |
fae15c93 VM |
861 | /* A USE insn should never require the value used to be computed. |
862 | This allows the computation of a function's result and parameter | |
ce18efcb VM |
863 | values to overlap the return and call. We don't care about the |
864 | the dependence cost when only decreasing register pressure. */ | |
fae15c93 | 865 | if (recog_memoized (used) < 0) |
078a70a1 AN |
866 | { |
867 | cost = 0; | |
868 | recog_memoized (insn); | |
869 | } | |
fae15c93 | 870 | else |
8c660648 | 871 | { |
e2f6ff94 | 872 | enum reg_note dep_type = DEP_TYPE (link); |
b198261f MK |
873 | |
874 | cost = insn_cost (insn); | |
496d7bb0 | 875 | |
fa0aee89 | 876 | if (INSN_CODE (insn) >= 0) |
197043f5 | 877 | { |
496d7bb0 | 878 | if (dep_type == REG_DEP_ANTI) |
fa0aee89 | 879 | cost = 0; |
496d7bb0 | 880 | else if (dep_type == REG_DEP_OUTPUT) |
fae15c93 | 881 | { |
fa0aee89 PB |
882 | cost = (insn_default_latency (insn) |
883 | - insn_default_latency (used)); | |
884 | if (cost <= 0) | |
885 | cost = 1; | |
fae15c93 | 886 | } |
fa0aee89 PB |
887 | else if (bypass_p (insn)) |
888 | cost = insn_latency (insn, used); | |
197043f5 | 889 | } |
b8698a0f | 890 | |
b8ec5764 | 891 | |
e855c69d | 892 | if (targetm.sched.adjust_cost_2) |
ce18efcb VM |
893 | cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost, |
894 | dw); | |
e855c69d | 895 | else if (targetm.sched.adjust_cost != NULL) |
496d7bb0 | 896 | { |
b198261f MK |
897 | /* This variable is used for backward compatibility with the |
898 | targets. */ | |
899 | rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX); | |
900 | ||
901 | /* Make it self-cycled, so that if some tries to walk over this | |
9f5ed61a | 902 | incomplete list he/she will be caught in an endless loop. */ |
b198261f MK |
903 | XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link; |
904 | ||
905 | /* Targets use only REG_NOTE_KIND of the link. */ | |
e2f6ff94 | 906 | PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link)); |
b198261f MK |
907 | |
908 | cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link, | |
909 | insn, cost); | |
910 | ||
911 | free_INSN_LIST_node (dep_cost_rtx_link); | |
496d7bb0 | 912 | } |
fae15c93 VM |
913 | |
914 | if (cost < 0) | |
915 | cost = 0; | |
916 | } | |
1d088dee | 917 | |
8c660648 JL |
918 | return cost; |
919 | } | |
920 | ||
e855c69d AB |
921 | /* Compute cost of dependence LINK. |
922 | This is the number of cycles between instruction issue and | |
923 | instruction results. */ | |
924 | int | |
925 | dep_cost (dep_t link) | |
926 | { | |
927 | return dep_cost_1 (link, 0); | |
928 | } | |
929 | ||
930 | /* Use this sel-sched.c friendly function in reorder2 instead of increasing | |
931 | INSN_PRIORITY explicitly. */ | |
932 | void | |
933 | increase_insn_priority (rtx insn, int amount) | |
934 | { | |
935 | if (!sel_sched_p ()) | |
936 | { | |
937 | /* We're dealing with haifa-sched.c INSN_PRIORITY. */ | |
938 | if (INSN_PRIORITY_KNOWN (insn)) | |
939 | INSN_PRIORITY (insn) += amount; | |
940 | } | |
941 | else | |
942 | { | |
b8698a0f | 943 | /* In sel-sched.c INSN_PRIORITY is not kept up to date. |
e855c69d AB |
944 | Use EXPR_PRIORITY instead. */ |
945 | sel_add_to_insn_priority (insn, amount); | |
946 | } | |
947 | } | |
948 | ||
916fa4f0 MK |
949 | /* Return 'true' if DEP should be included in priority calculations. */ |
950 | static bool | |
951 | contributes_to_priority_p (dep_t dep) | |
952 | { | |
b5b8b0ac AO |
953 | if (DEBUG_INSN_P (DEP_CON (dep)) |
954 | || DEBUG_INSN_P (DEP_PRO (dep))) | |
955 | return false; | |
956 | ||
916fa4f0 MK |
957 | /* Critical path is meaningful in block boundaries only. */ |
958 | if (!current_sched_info->contributes_to_priority (DEP_CON (dep), | |
959 | DEP_PRO (dep))) | |
960 | return false; | |
961 | ||
962 | /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set, | |
963 | then speculative instructions will less likely be | |
964 | scheduled. That is because the priority of | |
965 | their producers will increase, and, thus, the | |
966 | producers will more likely be scheduled, thus, | |
967 | resolving the dependence. */ | |
e855c69d | 968 | if (sched_deps_info->generate_spec_deps |
916fa4f0 MK |
969 | && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH) |
970 | && (DEP_STATUS (dep) & SPECULATIVE)) | |
971 | return false; | |
8c660648 | 972 | |
916fa4f0 MK |
973 | return true; |
974 | } | |
975 | ||
b5b8b0ac AO |
976 | /* Compute the number of nondebug forward deps of an insn. */ |
977 | ||
978 | static int | |
979 | dep_list_size (rtx insn) | |
980 | { | |
981 | sd_iterator_def sd_it; | |
982 | dep_t dep; | |
983 | int dbgcount = 0, nodbgcount = 0; | |
984 | ||
985 | if (!MAY_HAVE_DEBUG_INSNS) | |
986 | return sd_lists_size (insn, SD_LIST_FORW); | |
987 | ||
988 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) | |
989 | { | |
990 | if (DEBUG_INSN_P (DEP_CON (dep))) | |
991 | dbgcount++; | |
f49b295a | 992 | else if (!DEBUG_INSN_P (DEP_PRO (dep))) |
b5b8b0ac AO |
993 | nodbgcount++; |
994 | } | |
995 | ||
996 | gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW)); | |
997 | ||
998 | return nodbgcount; | |
999 | } | |
1000 | ||
916fa4f0 | 1001 | /* Compute the priority number for INSN. */ |
8c660648 | 1002 | static int |
1d088dee | 1003 | priority (rtx insn) |
8c660648 | 1004 | { |
2c3c49de | 1005 | if (! INSN_P (insn)) |
8c660648 JL |
1006 | return 0; |
1007 | ||
c80b4100 | 1008 | /* We should not be interested in priority of an already scheduled insn. */ |
916fa4f0 MK |
1009 | gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED); |
1010 | ||
1011 | if (!INSN_PRIORITY_KNOWN (insn)) | |
8c660648 | 1012 | { |
e855c69d | 1013 | int this_priority = -1; |
21e4c9a8 | 1014 | |
b5b8b0ac | 1015 | if (dep_list_size (insn) == 0) |
b198261f MK |
1016 | /* ??? We should set INSN_PRIORITY to insn_cost when and insn has |
1017 | some forward deps but all of them are ignored by | |
1018 | contributes_to_priority hook. At the moment we set priority of | |
1019 | such insn to 0. */ | |
1020 | this_priority = insn_cost (insn); | |
8c660648 | 1021 | else |
21e4c9a8 | 1022 | { |
496d7bb0 MK |
1023 | rtx prev_first, twin; |
1024 | basic_block rec; | |
8c660648 | 1025 | |
496d7bb0 MK |
1026 | /* For recovery check instructions we calculate priority slightly |
1027 | different than that of normal instructions. Instead of walking | |
b198261f MK |
1028 | through INSN_FORW_DEPS (check) list, we walk through |
1029 | INSN_FORW_DEPS list of each instruction in the corresponding | |
b8698a0f | 1030 | recovery block. */ |
8c660648 | 1031 | |
e855c69d AB |
1032 | /* Selective scheduling does not define RECOVERY_BLOCK macro. */ |
1033 | rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn); | |
496d7bb0 MK |
1034 | if (!rec || rec == EXIT_BLOCK_PTR) |
1035 | { | |
1036 | prev_first = PREV_INSN (insn); | |
1037 | twin = insn; | |
1038 | } | |
1039 | else | |
1040 | { | |
1041 | prev_first = NEXT_INSN (BB_HEAD (rec)); | |
1042 | twin = PREV_INSN (BB_END (rec)); | |
1043 | } | |
8c660648 | 1044 | |
496d7bb0 MK |
1045 | do |
1046 | { | |
e2f6ff94 MK |
1047 | sd_iterator_def sd_it; |
1048 | dep_t dep; | |
1049 | ||
1050 | FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep) | |
496d7bb0 MK |
1051 | { |
1052 | rtx next; | |
1053 | int next_priority; | |
b198261f MK |
1054 | |
1055 | next = DEP_CON (dep); | |
1056 | ||
496d7bb0 MK |
1057 | if (BLOCK_FOR_INSN (next) != rec) |
1058 | { | |
b198261f MK |
1059 | int cost; |
1060 | ||
916fa4f0 | 1061 | if (!contributes_to_priority_p (dep)) |
496d7bb0 | 1062 | continue; |
b198261f MK |
1063 | |
1064 | if (twin == insn) | |
1065 | cost = dep_cost (dep); | |
1066 | else | |
1067 | { | |
1068 | struct _dep _dep1, *dep1 = &_dep1; | |
1069 | ||
1070 | init_dep (dep1, insn, next, REG_DEP_ANTI); | |
1071 | ||
1072 | cost = dep_cost (dep1); | |
1073 | } | |
1074 | ||
1075 | next_priority = cost + priority (next); | |
496d7bb0 MK |
1076 | |
1077 | if (next_priority > this_priority) | |
1078 | this_priority = next_priority; | |
1079 | } | |
1080 | } | |
b8698a0f | 1081 | |
496d7bb0 | 1082 | twin = PREV_INSN (twin); |
21e4c9a8 | 1083 | } |
496d7bb0 | 1084 | while (twin != prev_first); |
21e4c9a8 | 1085 | } |
e855c69d AB |
1086 | |
1087 | if (this_priority < 0) | |
1088 | { | |
1089 | gcc_assert (this_priority == -1); | |
1090 | ||
1091 | this_priority = insn_cost (insn); | |
1092 | } | |
1093 | ||
8c660648 | 1094 | INSN_PRIORITY (insn) = this_priority; |
916fa4f0 | 1095 | INSN_PRIORITY_STATUS (insn) = 1; |
8c660648 | 1096 | } |
21e4c9a8 BS |
1097 | |
1098 | return INSN_PRIORITY (insn); | |
8c660648 JL |
1099 | } |
1100 | \f | |
8c660648 | 1101 | /* Macros and functions for keeping the priority queue sorted, and |
d91edf86 | 1102 | dealing with queuing and dequeuing of instructions. */ |
8c660648 JL |
1103 | |
1104 | #define SCHED_SORT(READY, N_READY) \ | |
1105 | do { if ((N_READY) == 2) \ | |
1106 | swap_sort (READY, N_READY); \ | |
1107 | else if ((N_READY) > 2) \ | |
1108 | qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \ | |
1109 | while (0) | |
1110 | ||
ce18efcb VM |
1111 | /* Setup info about the current register pressure impact of scheduling |
1112 | INSN at the current scheduling point. */ | |
1113 | static void | |
1114 | setup_insn_reg_pressure_info (rtx insn) | |
1115 | { | |
1116 | int i, change, before, after, hard_regno; | |
1117 | int excess_cost_change; | |
1118 | enum machine_mode mode; | |
1119 | enum reg_class cl; | |
1120 | struct reg_pressure_data *pressure_info; | |
1121 | int *max_reg_pressure; | |
1122 | struct reg_use_data *use; | |
1123 | static int death[N_REG_CLASSES]; | |
1124 | ||
1125 | excess_cost_change = 0; | |
1126 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1127 | death[ira_reg_class_cover[i]] = 0; | |
1128 | for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) | |
1129 | if (dying_use_p (use)) | |
1130 | { | |
1131 | cl = sched_regno_cover_class[use->regno]; | |
1132 | if (use->regno < FIRST_PSEUDO_REGISTER) | |
1133 | death[cl]++; | |
1134 | else | |
1135 | death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)]; | |
1136 | } | |
1137 | pressure_info = INSN_REG_PRESSURE (insn); | |
1138 | max_reg_pressure = INSN_MAX_REG_PRESSURE (insn); | |
1139 | gcc_assert (pressure_info != NULL && max_reg_pressure != NULL); | |
1140 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1141 | { | |
1142 | cl = ira_reg_class_cover[i]; | |
1143 | gcc_assert (curr_reg_pressure[cl] >= 0); | |
1144 | change = (int) pressure_info[i].set_increase - death[cl]; | |
1145 | before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]); | |
1146 | after = MAX (0, max_reg_pressure[i] + change | |
1147 | - ira_available_class_regs[cl]); | |
1148 | hard_regno = ira_class_hard_regs[cl][0]; | |
1149 | gcc_assert (hard_regno >= 0); | |
1150 | mode = reg_raw_mode[hard_regno]; | |
1151 | excess_cost_change += ((after - before) | |
1152 | * (ira_memory_move_cost[mode][cl][0] | |
1153 | + ira_memory_move_cost[mode][cl][1])); | |
1154 | } | |
1155 | INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change; | |
1156 | } | |
1157 | ||
8c660648 JL |
1158 | /* Returns a positive value if x is preferred; returns a negative value if |
1159 | y is preferred. Should never return 0, since that will make the sort | |
1160 | unstable. */ | |
1161 | ||
1162 | static int | |
1d088dee | 1163 | rank_for_schedule (const void *x, const void *y) |
8c660648 | 1164 | { |
7a403706 KH |
1165 | rtx tmp = *(const rtx *) y; |
1166 | rtx tmp2 = *(const rtx *) x; | |
b5b8b0ac | 1167 | rtx last; |
b198261f | 1168 | int tmp_class, tmp2_class; |
ce18efcb | 1169 | int val, priority_val, info_val; |
8c660648 | 1170 | |
b5b8b0ac AO |
1171 | if (MAY_HAVE_DEBUG_INSNS) |
1172 | { | |
1173 | /* Schedule debug insns as early as possible. */ | |
1174 | if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2)) | |
1175 | return -1; | |
1176 | else if (DEBUG_INSN_P (tmp2)) | |
1177 | return 1; | |
1178 | } | |
1179 | ||
58fb7809 | 1180 | /* The insn in a schedule group should be issued the first. */ |
b8698a0f | 1181 | if (flag_sched_group_heuristic && |
ee4764a8 | 1182 | SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2)) |
58fb7809 VM |
1183 | return SCHED_GROUP_P (tmp2) ? 1 : -1; |
1184 | ||
916fa4f0 MK |
1185 | /* Make sure that priority of TMP and TMP2 are initialized. */ |
1186 | gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2)); | |
1187 | ||
ce18efcb VM |
1188 | if (sched_pressure_p) |
1189 | { | |
1190 | int diff; | |
1191 | ||
1192 | /* Prefer insn whose scheduling results in the smallest register | |
1193 | pressure excess. */ | |
1194 | if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) | |
1195 | + (INSN_TICK (tmp) > clock_var | |
1196 | ? INSN_TICK (tmp) - clock_var : 0) | |
1197 | - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) | |
1198 | - (INSN_TICK (tmp2) > clock_var | |
1199 | ? INSN_TICK (tmp2) - clock_var : 0))) != 0) | |
1200 | return diff; | |
1201 | } | |
1202 | ||
1203 | ||
1204 | if (sched_pressure_p | |
1205 | && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)) | |
1206 | { | |
1207 | if (INSN_TICK (tmp) <= clock_var) | |
1208 | return -1; | |
1209 | else if (INSN_TICK (tmp2) <= clock_var) | |
1210 | return 1; | |
1211 | else | |
1212 | return INSN_TICK (tmp) - INSN_TICK (tmp2); | |
1213 | } | |
63de6c74 | 1214 | /* Prefer insn with higher priority. */ |
8c660648 | 1215 | priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp); |
30028c85 | 1216 | |
ee4764a8 | 1217 | if (flag_sched_critical_path_heuristic && priority_val) |
8c660648 | 1218 | return priority_val; |
b8698a0f | 1219 | |
496d7bb0 | 1220 | /* Prefer speculative insn with greater dependencies weakness. */ |
ee4764a8 | 1221 | if (flag_sched_spec_insn_heuristic && spec_info) |
496d7bb0 MK |
1222 | { |
1223 | ds_t ds1, ds2; | |
1224 | dw_t dw1, dw2; | |
1225 | int dw; | |
1226 | ||
1227 | ds1 = TODO_SPEC (tmp) & SPECULATIVE; | |
1228 | if (ds1) | |
e855c69d | 1229 | dw1 = ds_weak (ds1); |
496d7bb0 MK |
1230 | else |
1231 | dw1 = NO_DEP_WEAK; | |
b8698a0f | 1232 | |
496d7bb0 MK |
1233 | ds2 = TODO_SPEC (tmp2) & SPECULATIVE; |
1234 | if (ds2) | |
e855c69d | 1235 | dw2 = ds_weak (ds2); |
496d7bb0 MK |
1236 | else |
1237 | dw2 = NO_DEP_WEAK; | |
1238 | ||
1239 | dw = dw2 - dw1; | |
1240 | if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8)) | |
1241 | return dw; | |
1242 | } | |
1243 | ||
1708fd40 | 1244 | info_val = (*current_sched_info->rank) (tmp, tmp2); |
ee4764a8 | 1245 | if(flag_sched_rank_heuristic && info_val) |
1708fd40 | 1246 | return info_val; |
8c660648 | 1247 | |
b5b8b0ac AO |
1248 | if (flag_sched_last_insn_heuristic) |
1249 | { | |
1250 | last = last_scheduled_insn; | |
1251 | ||
1252 | if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head) | |
1253 | do | |
1254 | last = PREV_INSN (last); | |
1255 | while (!NONDEBUG_INSN_P (last) | |
1256 | && last != current_sched_info->prev_head); | |
1257 | } | |
1258 | ||
1259 | /* Compare insns based on their relation to the last scheduled | |
1260 | non-debug insn. */ | |
1261 | if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last)) | |
8c660648 | 1262 | { |
e2f6ff94 MK |
1263 | dep_t dep1; |
1264 | dep_t dep2; | |
1265 | ||
8c660648 JL |
1266 | /* Classify the instructions into three classes: |
1267 | 1) Data dependent on last schedule insn. | |
1268 | 2) Anti/Output dependent on last scheduled insn. | |
1269 | 3) Independent of last scheduled insn, or has latency of one. | |
1270 | Choose the insn from the highest numbered class if different. */ | |
b5b8b0ac | 1271 | dep1 = sd_find_dep_between (last, tmp, true); |
b198261f | 1272 | |
e2f6ff94 | 1273 | if (dep1 == NULL || dep_cost (dep1) == 1) |
8c660648 | 1274 | tmp_class = 3; |
b198261f | 1275 | else if (/* Data dependence. */ |
e2f6ff94 | 1276 | DEP_TYPE (dep1) == REG_DEP_TRUE) |
8c660648 JL |
1277 | tmp_class = 1; |
1278 | else | |
1279 | tmp_class = 2; | |
1280 | ||
b5b8b0ac | 1281 | dep2 = sd_find_dep_between (last, tmp2, true); |
b198261f | 1282 | |
e2f6ff94 | 1283 | if (dep2 == NULL || dep_cost (dep2) == 1) |
8c660648 | 1284 | tmp2_class = 3; |
b198261f | 1285 | else if (/* Data dependence. */ |
e2f6ff94 | 1286 | DEP_TYPE (dep2) == REG_DEP_TRUE) |
8c660648 JL |
1287 | tmp2_class = 1; |
1288 | else | |
1289 | tmp2_class = 2; | |
1290 | ||
1291 | if ((val = tmp2_class - tmp_class)) | |
1292 | return val; | |
1293 | } | |
1294 | ||
7a403706 | 1295 | /* Prefer the insn which has more later insns that depend on it. |
2db45993 JL |
1296 | This gives the scheduler more freedom when scheduling later |
1297 | instructions at the expense of added register pressure. */ | |
2db45993 | 1298 | |
b5b8b0ac | 1299 | val = (dep_list_size (tmp2) - dep_list_size (tmp)); |
2db45993 | 1300 | |
ee4764a8 | 1301 | if (flag_sched_dep_count_heuristic && val != 0) |
e2f6ff94 | 1302 | return val; |
7a403706 | 1303 | |
8c660648 JL |
1304 | /* If insns are equally good, sort by INSN_LUID (original insn order), |
1305 | so that we make the sort stable. This minimizes instruction movement, | |
1306 | thus minimizing sched's effect on debugging and cross-jumping. */ | |
1307 | return INSN_LUID (tmp) - INSN_LUID (tmp2); | |
1308 | } | |
1309 | ||
1310 | /* Resort the array A in which only element at index N may be out of order. */ | |
1311 | ||
cbb13457 | 1312 | HAIFA_INLINE static void |
1d088dee | 1313 | swap_sort (rtx *a, int n) |
8c660648 JL |
1314 | { |
1315 | rtx insn = a[n - 1]; | |
1316 | int i = n - 2; | |
1317 | ||
1318 | while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0) | |
1319 | { | |
1320 | a[i + 1] = a[i]; | |
1321 | i -= 1; | |
1322 | } | |
1323 | a[i + 1] = insn; | |
1324 | } | |
1325 | ||
8c660648 JL |
1326 | /* Add INSN to the insn queue so that it can be executed at least |
1327 | N_CYCLES after the currently executing insn. Preserve insns | |
1328 | chain for debugging purposes. */ | |
1329 | ||
cbb13457 | 1330 | HAIFA_INLINE static void |
1d088dee | 1331 | queue_insn (rtx insn, int n_cycles) |
8c660648 JL |
1332 | { |
1333 | int next_q = NEXT_Q_AFTER (q_ptr, n_cycles); | |
ebb7b10b | 1334 | rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]); |
63f54b1a MK |
1335 | |
1336 | gcc_assert (n_cycles <= max_insn_queue_index); | |
b5b8b0ac | 1337 | gcc_assert (!DEBUG_INSN_P (insn)); |
63f54b1a | 1338 | |
8c660648 JL |
1339 | insn_queue[next_q] = link; |
1340 | q_size += 1; | |
1341 | ||
1342 | if (sched_verbose >= 2) | |
1343 | { | |
1708fd40 BS |
1344 | fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ", |
1345 | (*current_sched_info->print_insn) (insn, 0)); | |
8c660648 | 1346 | |
a88f02e7 | 1347 | fprintf (sched_dump, "queued for %d cycles.\n", n_cycles); |
8c660648 | 1348 | } |
e855c69d | 1349 | |
63f54b1a MK |
1350 | QUEUE_INDEX (insn) = next_q; |
1351 | } | |
1352 | ||
1353 | /* Remove INSN from queue. */ | |
1354 | static void | |
1355 | queue_remove (rtx insn) | |
1356 | { | |
1357 | gcc_assert (QUEUE_INDEX (insn) >= 0); | |
1358 | remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]); | |
1359 | q_size--; | |
1360 | QUEUE_INDEX (insn) = QUEUE_NOWHERE; | |
176f9a7b BS |
1361 | } |
1362 | ||
1363 | /* Return a pointer to the bottom of the ready list, i.e. the insn | |
1364 | with the lowest priority. */ | |
1365 | ||
e855c69d | 1366 | rtx * |
1d088dee | 1367 | ready_lastpos (struct ready_list *ready) |
176f9a7b | 1368 | { |
63f54b1a | 1369 | gcc_assert (ready->n_ready >= 1); |
176f9a7b BS |
1370 | return ready->vec + ready->first - ready->n_ready + 1; |
1371 | } | |
1372 | ||
63f54b1a | 1373 | /* Add an element INSN to the ready list so that it ends up with the |
917f1b7e | 1374 | lowest/highest priority depending on FIRST_P. */ |
176f9a7b | 1375 | |
63f54b1a MK |
1376 | HAIFA_INLINE static void |
1377 | ready_add (struct ready_list *ready, rtx insn, bool first_p) | |
176f9a7b | 1378 | { |
63f54b1a | 1379 | if (!first_p) |
176f9a7b | 1380 | { |
63f54b1a MK |
1381 | if (ready->first == ready->n_ready) |
1382 | { | |
1383 | memmove (ready->vec + ready->veclen - ready->n_ready, | |
1384 | ready_lastpos (ready), | |
1385 | ready->n_ready * sizeof (rtx)); | |
1386 | ready->first = ready->veclen - 1; | |
1387 | } | |
1388 | ready->vec[ready->first - ready->n_ready] = insn; | |
176f9a7b | 1389 | } |
63f54b1a MK |
1390 | else |
1391 | { | |
1392 | if (ready->first == ready->veclen - 1) | |
1393 | { | |
1394 | if (ready->n_ready) | |
1395 | /* ready_lastpos() fails when called with (ready->n_ready == 0). */ | |
1396 | memmove (ready->vec + ready->veclen - ready->n_ready - 1, | |
1397 | ready_lastpos (ready), | |
1398 | ready->n_ready * sizeof (rtx)); | |
1399 | ready->first = ready->veclen - 2; | |
1400 | } | |
1401 | ready->vec[++(ready->first)] = insn; | |
1402 | } | |
1403 | ||
176f9a7b | 1404 | ready->n_ready++; |
b5b8b0ac AO |
1405 | if (DEBUG_INSN_P (insn)) |
1406 | ready->n_debug++; | |
63f54b1a MK |
1407 | |
1408 | gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY); | |
1409 | QUEUE_INDEX (insn) = QUEUE_READY; | |
176f9a7b | 1410 | } |
8c660648 | 1411 | |
176f9a7b BS |
1412 | /* Remove the element with the highest priority from the ready list and |
1413 | return it. */ | |
1414 | ||
1415 | HAIFA_INLINE static rtx | |
1d088dee | 1416 | ready_remove_first (struct ready_list *ready) |
176f9a7b BS |
1417 | { |
1418 | rtx t; | |
b8698a0f | 1419 | |
535a42b1 | 1420 | gcc_assert (ready->n_ready); |
176f9a7b BS |
1421 | t = ready->vec[ready->first--]; |
1422 | ready->n_ready--; | |
b5b8b0ac AO |
1423 | if (DEBUG_INSN_P (t)) |
1424 | ready->n_debug--; | |
176f9a7b BS |
1425 | /* If the queue becomes empty, reset it. */ |
1426 | if (ready->n_ready == 0) | |
1427 | ready->first = ready->veclen - 1; | |
63f54b1a MK |
1428 | |
1429 | gcc_assert (QUEUE_INDEX (t) == QUEUE_READY); | |
1430 | QUEUE_INDEX (t) = QUEUE_NOWHERE; | |
1431 | ||
176f9a7b BS |
1432 | return t; |
1433 | } | |
1434 | ||
fae15c93 VM |
1435 | /* The following code implements multi-pass scheduling for the first |
1436 | cycle. In other words, we will try to choose ready insn which | |
1437 | permits to start maximum number of insns on the same cycle. */ | |
1438 | ||
1439 | /* Return a pointer to the element INDEX from the ready. INDEX for | |
1440 | insn with the highest priority is 0, and the lowest priority has | |
1441 | N_READY - 1. */ | |
1442 | ||
e855c69d | 1443 | rtx |
1d088dee | 1444 | ready_element (struct ready_list *ready, int index) |
fae15c93 | 1445 | { |
535a42b1 | 1446 | gcc_assert (ready->n_ready && index < ready->n_ready); |
b8698a0f | 1447 | |
fae15c93 VM |
1448 | return ready->vec[ready->first - index]; |
1449 | } | |
1450 | ||
1451 | /* Remove the element INDEX from the ready list and return it. INDEX | |
1452 | for insn with the highest priority is 0, and the lowest priority | |
1453 | has N_READY - 1. */ | |
1454 | ||
1455 | HAIFA_INLINE static rtx | |
1d088dee | 1456 | ready_remove (struct ready_list *ready, int index) |
fae15c93 VM |
1457 | { |
1458 | rtx t; | |
1459 | int i; | |
1460 | ||
1461 | if (index == 0) | |
1462 | return ready_remove_first (ready); | |
535a42b1 | 1463 | gcc_assert (ready->n_ready && index < ready->n_ready); |
fae15c93 VM |
1464 | t = ready->vec[ready->first - index]; |
1465 | ready->n_ready--; | |
b5b8b0ac AO |
1466 | if (DEBUG_INSN_P (t)) |
1467 | ready->n_debug--; | |
fae15c93 VM |
1468 | for (i = index; i < ready->n_ready; i++) |
1469 | ready->vec[ready->first - i] = ready->vec[ready->first - i - 1]; | |
63f54b1a | 1470 | QUEUE_INDEX (t) = QUEUE_NOWHERE; |
fae15c93 VM |
1471 | return t; |
1472 | } | |
1473 | ||
63f54b1a MK |
1474 | /* Remove INSN from the ready list. */ |
1475 | static void | |
1476 | ready_remove_insn (rtx insn) | |
1477 | { | |
1478 | int i; | |
1479 | ||
1480 | for (i = 0; i < readyp->n_ready; i++) | |
1481 | if (ready_element (readyp, i) == insn) | |
1482 | { | |
1483 | ready_remove (readyp, i); | |
1484 | return; | |
1485 | } | |
1486 | gcc_unreachable (); | |
1487 | } | |
fae15c93 | 1488 | |
176f9a7b BS |
1489 | /* Sort the ready list READY by ascending priority, using the SCHED_SORT |
1490 | macro. */ | |
1491 | ||
e855c69d | 1492 | void |
1d088dee | 1493 | ready_sort (struct ready_list *ready) |
176f9a7b | 1494 | { |
ce18efcb | 1495 | int i; |
176f9a7b | 1496 | rtx *first = ready_lastpos (ready); |
ce18efcb VM |
1497 | |
1498 | if (sched_pressure_p) | |
1499 | { | |
1500 | for (i = 0; i < ready->n_ready; i++) | |
1501 | setup_insn_reg_pressure_info (first[i]); | |
1502 | } | |
176f9a7b | 1503 | SCHED_SORT (first, ready->n_ready); |
8c660648 JL |
1504 | } |
1505 | ||
8c660648 | 1506 | /* PREV is an insn that is ready to execute. Adjust its priority if that |
c46a37c4 | 1507 | will help shorten or lengthen register lifetimes as appropriate. Also |
fa10beec | 1508 | provide a hook for the target to tweak itself. */ |
8c660648 | 1509 | |
cbb13457 | 1510 | HAIFA_INLINE static void |
1d088dee | 1511 | adjust_priority (rtx prev) |
8c660648 | 1512 | { |
c46a37c4 RH |
1513 | /* ??? There used to be code here to try and estimate how an insn |
1514 | affected register lifetimes, but it did it by looking at REG_DEAD | |
7a403706 | 1515 | notes, which we removed in schedule_region. Nor did it try to |
c46a37c4 | 1516 | take into account register pressure or anything useful like that. |
8c660648 | 1517 | |
c46a37c4 | 1518 | Revisit when we have a machine model to work with and not before. */ |
197043f5 | 1519 | |
c237e94a ZW |
1520 | if (targetm.sched.adjust_priority) |
1521 | INSN_PRIORITY (prev) = | |
5fd9b178 | 1522 | targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev)); |
8c660648 JL |
1523 | } |
1524 | ||
e855c69d AB |
1525 | /* Advance DFA state STATE on one cycle. */ |
1526 | void | |
1527 | advance_state (state_t state) | |
fae15c93 | 1528 | { |
1c3d0d93 MK |
1529 | if (targetm.sched.dfa_pre_advance_cycle) |
1530 | targetm.sched.dfa_pre_advance_cycle (); | |
1531 | ||
fa0aee89 | 1532 | if (targetm.sched.dfa_pre_cycle_insn) |
e855c69d | 1533 | state_transition (state, |
fa0aee89 PB |
1534 | targetm.sched.dfa_pre_cycle_insn ()); |
1535 | ||
e855c69d | 1536 | state_transition (state, NULL); |
b8698a0f | 1537 | |
fa0aee89 | 1538 | if (targetm.sched.dfa_post_cycle_insn) |
e855c69d | 1539 | state_transition (state, |
fa0aee89 | 1540 | targetm.sched.dfa_post_cycle_insn ()); |
1c3d0d93 MK |
1541 | |
1542 | if (targetm.sched.dfa_post_advance_cycle) | |
1543 | targetm.sched.dfa_post_advance_cycle (); | |
fae15c93 VM |
1544 | } |
1545 | ||
e855c69d AB |
1546 | /* Advance time on one cycle. */ |
1547 | HAIFA_INLINE static void | |
1548 | advance_one_cycle (void) | |
1549 | { | |
1550 | advance_state (curr_state); | |
1551 | if (sched_verbose >= 6) | |
9c575182 | 1552 | fprintf (sched_dump, ";;\tAdvanced a state.\n"); |
e855c69d AB |
1553 | } |
1554 | ||
4bdc8810 RH |
1555 | /* Clock at which the previous instruction was issued. */ |
1556 | static int last_clock_var; | |
1557 | ||
ce18efcb VM |
1558 | /* Update register pressure after scheduling INSN. */ |
1559 | static void | |
1560 | update_register_pressure (rtx insn) | |
1561 | { | |
1562 | struct reg_use_data *use; | |
1563 | struct reg_set_data *set; | |
1564 | ||
1565 | for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) | |
1566 | if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno)) | |
1567 | mark_regno_birth_or_death (use->regno, false); | |
1568 | for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set) | |
1569 | mark_regno_birth_or_death (set->regno, true); | |
1570 | } | |
1571 | ||
1572 | /* Set up or update (if UPDATE_P) max register pressure (see its | |
1573 | meaning in sched-int.h::_haifa_insn_data) for all current BB insns | |
1574 | after insn AFTER. */ | |
1575 | static void | |
1576 | setup_insn_max_reg_pressure (rtx after, bool update_p) | |
1577 | { | |
1578 | int i, p; | |
1579 | bool eq_p; | |
1580 | rtx insn; | |
1581 | static int max_reg_pressure[N_REG_CLASSES]; | |
1582 | ||
1583 | save_reg_pressure (); | |
1584 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1585 | max_reg_pressure[ira_reg_class_cover[i]] | |
1586 | = curr_reg_pressure[ira_reg_class_cover[i]]; | |
1587 | for (insn = NEXT_INSN (after); | |
1588 | insn != NULL_RTX && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after); | |
1589 | insn = NEXT_INSN (insn)) | |
1590 | if (NONDEBUG_INSN_P (insn)) | |
1591 | { | |
1592 | eq_p = true; | |
1593 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1594 | { | |
1595 | p = max_reg_pressure[ira_reg_class_cover[i]]; | |
1596 | if (INSN_MAX_REG_PRESSURE (insn)[i] != p) | |
1597 | { | |
1598 | eq_p = false; | |
1599 | INSN_MAX_REG_PRESSURE (insn)[i] | |
1600 | = max_reg_pressure[ira_reg_class_cover[i]]; | |
1601 | } | |
1602 | } | |
1603 | if (update_p && eq_p) | |
1604 | break; | |
1605 | update_register_pressure (insn); | |
1606 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1607 | if (max_reg_pressure[ira_reg_class_cover[i]] | |
1608 | < curr_reg_pressure[ira_reg_class_cover[i]]) | |
1609 | max_reg_pressure[ira_reg_class_cover[i]] | |
1610 | = curr_reg_pressure[ira_reg_class_cover[i]]; | |
1611 | } | |
1612 | restore_reg_pressure (); | |
1613 | } | |
1614 | ||
1615 | /* Update the current register pressure after scheduling INSN. Update | |
1616 | also max register pressure for unscheduled insns of the current | |
1617 | BB. */ | |
1618 | static void | |
1619 | update_reg_and_insn_max_reg_pressure (rtx insn) | |
1620 | { | |
1621 | int i; | |
1622 | int before[N_REG_CLASSES]; | |
1623 | ||
1624 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1625 | before[i] = curr_reg_pressure[ira_reg_class_cover[i]]; | |
1626 | update_register_pressure (insn); | |
1627 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1628 | if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i]) | |
1629 | break; | |
1630 | if (i < ira_reg_class_cover_size) | |
1631 | setup_insn_max_reg_pressure (insn, true); | |
1632 | } | |
1633 | ||
1634 | /* Set up register pressure at the beginning of basic block BB whose | |
1635 | insns starting after insn AFTER. Set up also max register pressure | |
1636 | for all insns of the basic block. */ | |
1637 | void | |
1638 | sched_setup_bb_reg_pressure_info (basic_block bb, rtx after) | |
1639 | { | |
1640 | gcc_assert (sched_pressure_p); | |
1641 | initiate_bb_reg_pressure_info (bb); | |
1642 | setup_insn_max_reg_pressure (after, false); | |
1643 | } | |
1644 | ||
8c660648 | 1645 | /* INSN is the "currently executing insn". Launch each insn which was |
176f9a7b | 1646 | waiting on INSN. READY is the ready list which contains the insns |
58fb7809 VM |
1647 | that are ready to fire. CLOCK is the current cycle. The function |
1648 | returns necessary cycle advance after issuing the insn (it is not | |
1649 | zero for insns in a schedule group). */ | |
8c660648 | 1650 | |
58fb7809 | 1651 | static int |
63f54b1a | 1652 | schedule_insn (rtx insn) |
8c660648 | 1653 | { |
e2f6ff94 MK |
1654 | sd_iterator_def sd_it; |
1655 | dep_t dep; | |
ce18efcb | 1656 | int i; |
58fb7809 | 1657 | int advance = 0; |
8c660648 | 1658 | |
fa0aee89 | 1659 | if (sched_verbose >= 1) |
8c660648 | 1660 | { |
ce18efcb | 1661 | struct reg_pressure_data *pressure_info; |
6d0de005 | 1662 | char buf[2048]; |
fae15c93 | 1663 | |
6d0de005 | 1664 | print_insn (buf, insn, 0); |
6a87d634 | 1665 | buf[40] = 0; |
63f54b1a | 1666 | fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf); |
fae15c93 | 1667 | |
6d0de005 JH |
1668 | if (recog_memoized (insn) < 0) |
1669 | fprintf (sched_dump, "nothing"); | |
1670 | else | |
1671 | print_reservation (sched_dump, insn); | |
ce18efcb VM |
1672 | pressure_info = INSN_REG_PRESSURE (insn); |
1673 | if (pressure_info != NULL) | |
1674 | { | |
1675 | fputc (':', sched_dump); | |
1676 | for (i = 0; i < ira_reg_class_cover_size; i++) | |
1677 | fprintf (sched_dump, "%s%+d(%d)", | |
1678 | reg_class_names[ira_reg_class_cover[i]], | |
1679 | pressure_info[i].set_increase, pressure_info[i].change); | |
1680 | } | |
6d0de005 JH |
1681 | fputc ('\n', sched_dump); |
1682 | } | |
8c660648 | 1683 | |
ce18efcb VM |
1684 | if (sched_pressure_p) |
1685 | update_reg_and_insn_max_reg_pressure (insn); | |
1686 | ||
63f54b1a MK |
1687 | /* Scheduling instruction should have all its dependencies resolved and |
1688 | should have been removed from the ready list. */ | |
e2f6ff94 | 1689 | gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK)); |
569fa502 | 1690 | |
f49b295a AO |
1691 | /* Reset debug insns invalidated by moving this insn. */ |
1692 | if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn)) | |
1693 | for (sd_it = sd_iterator_start (insn, SD_LIST_BACK); | |
1694 | sd_iterator_cond (&sd_it, &dep);) | |
1695 | { | |
1696 | rtx dbg = DEP_PRO (dep); | |
1697 | ||
1698 | gcc_assert (DEBUG_INSN_P (dbg)); | |
1699 | ||
1700 | if (sched_verbose >= 6) | |
1701 | fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n", | |
1702 | INSN_UID (dbg)); | |
1703 | ||
1704 | /* ??? Rather than resetting the debug insn, we might be able | |
1705 | to emit a debug temp before the just-scheduled insn, but | |
1706 | this would involve checking that the expression at the | |
1707 | point of the debug insn is equivalent to the expression | |
1708 | before the just-scheduled insn. They might not be: the | |
1709 | expression in the debug insn may depend on other insns not | |
1710 | yet scheduled that set MEMs, REGs or even other debug | |
1711 | insns. It's not clear that attempting to preserve debug | |
1712 | information in these cases is worth the effort, given how | |
1713 | uncommon these resets are and the likelihood that the debug | |
1714 | temps introduced won't survive the schedule change. */ | |
1715 | INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC (); | |
1716 | df_insn_rescan (dbg); | |
1717 | ||
1718 | /* We delete rather than resolve these deps, otherwise we | |
1719 | crash in sched_free_deps(), because forward deps are | |
1720 | expected to be released before backward deps. */ | |
1721 | sd_delete_dep (sd_it); | |
1722 | } | |
1723 | ||
b198261f | 1724 | gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE); |
63f54b1a | 1725 | QUEUE_INDEX (insn) = QUEUE_SCHEDULED; |
b198261f | 1726 | |
63f54b1a MK |
1727 | gcc_assert (INSN_TICK (insn) >= MIN_TICK); |
1728 | if (INSN_TICK (insn) > clock_var) | |
1729 | /* INSN has been prematurely moved from the queue to the ready list. | |
1730 | This is possible only if following flag is set. */ | |
b8698a0f | 1731 | gcc_assert (flag_sched_stalled_insns); |
63f54b1a MK |
1732 | |
1733 | /* ??? Probably, if INSN is scheduled prematurely, we should leave | |
1734 | INSN_TICK untouched. This is a machine-dependent issue, actually. */ | |
1735 | INSN_TICK (insn) = clock_var; | |
1736 | ||
1737 | /* Update dependent instructions. */ | |
e2f6ff94 MK |
1738 | for (sd_it = sd_iterator_start (insn, SD_LIST_FORW); |
1739 | sd_iterator_cond (&sd_it, &dep);) | |
8c660648 | 1740 | { |
e2f6ff94 | 1741 | rtx next = DEP_CON (dep); |
8c660648 | 1742 | |
e2f6ff94 MK |
1743 | /* Resolve the dependence between INSN and NEXT. |
1744 | sd_resolve_dep () moves current dep to another list thus | |
1745 | advancing the iterator. */ | |
1746 | sd_resolve_dep (sd_it); | |
8c660648 | 1747 | |
f49b295a AO |
1748 | /* Don't bother trying to mark next as ready if insn is a debug |
1749 | insn. If insn is the last hard dependency, it will have | |
1750 | already been discounted. */ | |
1751 | if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next)) | |
1752 | continue; | |
1753 | ||
d7bfd907 | 1754 | if (!IS_SPECULATION_BRANCHY_CHECK_P (insn)) |
496d7bb0 | 1755 | { |
b8698a0f L |
1756 | int effective_cost; |
1757 | ||
496d7bb0 | 1758 | effective_cost = try_ready (next); |
b8698a0f | 1759 | |
496d7bb0 MK |
1760 | if (effective_cost >= 0 |
1761 | && SCHED_GROUP_P (next) | |
1762 | && advance < effective_cost) | |
1763 | advance = effective_cost; | |
1764 | } | |
1765 | else | |
1766 | /* Check always has only one forward dependence (to the first insn in | |
1767 | the recovery block), therefore, this will be executed only once. */ | |
1768 | { | |
e2f6ff94 | 1769 | gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW)); |
496d7bb0 MK |
1770 | fix_recovery_deps (RECOVERY_BLOCK (insn)); |
1771 | } | |
8c660648 JL |
1772 | } |
1773 | ||
e2f6ff94 MK |
1774 | /* This is the place where scheduler doesn't *basically* need backward and |
1775 | forward dependencies for INSN anymore. Nevertheless they are used in | |
1776 | heuristics in rank_for_schedule (), early_queue_to_ready () and in | |
1777 | some targets (e.g. rs6000). Thus the earliest place where we *can* | |
1778 | remove dependencies is after targetm.sched.md_finish () call in | |
1779 | schedule_block (). But, on the other side, the safest place to remove | |
1780 | dependencies is when we are finishing scheduling entire region. As we | |
1781 | don't generate [many] dependencies during scheduling itself, we won't | |
1782 | need memory until beginning of next region. | |
1783 | Bottom line: Dependencies are removed for all insns in the end of | |
1784 | scheduling the region. */ | |
1785 | ||
7a403706 | 1786 | /* Annotate the instruction with issue information -- TImode |
4bdc8810 RH |
1787 | indicates that the instruction is expected not to be able |
1788 | to issue on the same cycle as the previous insn. A machine | |
1789 | may use this information to decide how the instruction should | |
1790 | be aligned. */ | |
30028c85 | 1791 | if (issue_rate > 1 |
fae15c93 | 1792 | && GET_CODE (PATTERN (insn)) != USE |
b5b8b0ac AO |
1793 | && GET_CODE (PATTERN (insn)) != CLOBBER |
1794 | && !DEBUG_INSN_P (insn)) | |
4bdc8810 | 1795 | { |
30028c85 | 1796 | if (reload_completed) |
63f54b1a MK |
1797 | PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode); |
1798 | last_clock_var = clock_var; | |
4bdc8810 | 1799 | } |
63f54b1a | 1800 | |
58fb7809 | 1801 | return advance; |
8c660648 JL |
1802 | } |
1803 | ||
63de6c74 | 1804 | /* Functions for handling of notes. */ |
8c660648 | 1805 | |
e855c69d | 1806 | /* Insert the INSN note at the end of the notes list. */ |
b8698a0f | 1807 | static void |
e855c69d AB |
1808 | add_to_note_list (rtx insn, rtx *note_list_end_p) |
1809 | { | |
1810 | PREV_INSN (insn) = *note_list_end_p; | |
1811 | if (*note_list_end_p) | |
1812 | NEXT_INSN (*note_list_end_p) = insn; | |
1813 | *note_list_end_p = insn; | |
1814 | } | |
1815 | ||
1816 | /* Add note list that ends on FROM_END to the end of TO_ENDP. */ | |
1817 | void | |
1818 | concat_note_lists (rtx from_end, rtx *to_endp) | |
1819 | { | |
1820 | rtx from_start; | |
1821 | ||
1822 | if (from_end == NULL) | |
1823 | /* It's easy when have nothing to concat. */ | |
1824 | return; | |
1825 | ||
1826 | if (*to_endp == NULL) | |
1827 | /* It's also easy when destination is empty. */ | |
1828 | { | |
1829 | *to_endp = from_end; | |
1830 | return; | |
1831 | } | |
1832 | ||
1833 | from_start = from_end; | |
1834 | /* A note list should be traversed via PREV_INSN. */ | |
b8698a0f | 1835 | while (PREV_INSN (from_start) != NULL) |
e855c69d AB |
1836 | from_start = PREV_INSN (from_start); |
1837 | ||
1838 | add_to_note_list (from_start, to_endp); | |
1839 | *to_endp = from_end; | |
1840 | } | |
1841 | ||
8c660648 JL |
1842 | /* Delete notes beginning with INSN and put them in the chain |
1843 | of notes ended by NOTE_LIST. | |
1844 | Returns the insn following the notes. */ | |
8c660648 | 1845 | static rtx |
1d088dee | 1846 | unlink_other_notes (rtx insn, rtx tail) |
8c660648 JL |
1847 | { |
1848 | rtx prev = PREV_INSN (insn); | |
1849 | ||
496d7bb0 | 1850 | while (insn != tail && NOTE_NOT_BB_P (insn)) |
8c660648 JL |
1851 | { |
1852 | rtx next = NEXT_INSN (insn); | |
f70b22c9 MK |
1853 | basic_block bb = BLOCK_FOR_INSN (insn); |
1854 | ||
8c660648 JL |
1855 | /* Delete the note from its current position. */ |
1856 | if (prev) | |
1857 | NEXT_INSN (prev) = next; | |
1858 | if (next) | |
1859 | PREV_INSN (next) = prev; | |
1860 | ||
6f2ba390 MK |
1861 | if (bb) |
1862 | { | |
1863 | /* Basic block can begin with either LABEL or | |
1864 | NOTE_INSN_BASIC_BLOCK. */ | |
1865 | gcc_assert (BB_HEAD (bb) != insn); | |
f70b22c9 | 1866 | |
6f2ba390 MK |
1867 | /* Check if we are removing last insn in the BB. */ |
1868 | if (BB_END (bb) == insn) | |
1869 | BB_END (bb) = prev; | |
1870 | } | |
f70b22c9 | 1871 | |
c46a37c4 | 1872 | /* See sched_analyze to see how these are handled. */ |
a38e7aa5 JH |
1873 | if (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG |
1874 | && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END) | |
e855c69d | 1875 | add_to_note_list (insn, ¬e_list); |
8c660648 JL |
1876 | |
1877 | insn = next; | |
1878 | } | |
e855c69d AB |
1879 | |
1880 | if (insn == tail) | |
1881 | { | |
1882 | gcc_assert (sel_sched_p ()); | |
1883 | return prev; | |
1884 | } | |
1885 | ||
8c660648 JL |
1886 | return insn; |
1887 | } | |
1888 | ||
496d7bb0 MK |
1889 | /* Return the head and tail pointers of ebb starting at BEG and ending |
1890 | at END. */ | |
b4ead7d4 | 1891 | void |
496d7bb0 MK |
1892 | get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp) |
1893 | { | |
1894 | rtx beg_head = BB_HEAD (beg); | |
1895 | rtx beg_tail = BB_END (beg); | |
1896 | rtx end_head = BB_HEAD (end); | |
1897 | rtx end_tail = BB_END (end); | |
1898 | ||
1899 | /* Don't include any notes or labels at the beginning of the BEG | |
1900 | basic block, or notes at the end of the END basic blocks. */ | |
1901 | ||
1902 | if (LABEL_P (beg_head)) | |
1903 | beg_head = NEXT_INSN (beg_head); | |
1904 | ||
1905 | while (beg_head != beg_tail) | |
b5b8b0ac | 1906 | if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head)) |
496d7bb0 MK |
1907 | beg_head = NEXT_INSN (beg_head); |
1908 | else | |
1909 | break; | |
1910 | ||
1911 | *headp = beg_head; | |
1912 | ||
1913 | if (beg == end) | |
1914 | end_head = beg_head; | |
1915 | else if (LABEL_P (end_head)) | |
1916 | end_head = NEXT_INSN (end_head); | |
1917 | ||
1918 | while (end_head != end_tail) | |
b5b8b0ac | 1919 | if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail)) |
496d7bb0 MK |
1920 | end_tail = PREV_INSN (end_tail); |
1921 | else | |
1922 | break; | |
8c660648 | 1923 | |
496d7bb0 | 1924 | *tailp = end_tail; |
8c660648 JL |
1925 | } |
1926 | ||
1708fd40 BS |
1927 | /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */ |
1928 | ||
b4ead7d4 | 1929 | int |
9678086d | 1930 | no_real_insns_p (const_rtx head, const_rtx tail) |
1708fd40 BS |
1931 | { |
1932 | while (head != NEXT_INSN (tail)) | |
1933 | { | |
b5b8b0ac AO |
1934 | if (!NOTE_P (head) && !LABEL_P (head) |
1935 | && !BOUNDARY_DEBUG_INSN_P (head)) | |
1708fd40 BS |
1936 | return 0; |
1937 | head = NEXT_INSN (head); | |
1938 | } | |
1939 | return 1; | |
1940 | } | |
1941 | ||
79c2ffde | 1942 | /* Delete notes between HEAD and TAIL and put them in the chain |
8c660648 | 1943 | of notes ended by NOTE_LIST. */ |
e855c69d | 1944 | static void |
1d088dee | 1945 | rm_other_notes (rtx head, rtx tail) |
8c660648 JL |
1946 | { |
1947 | rtx next_tail; | |
1948 | rtx insn; | |
1949 | ||
b4ead7d4 | 1950 | note_list = 0; |
2c3c49de | 1951 | if (head == tail && (! INSN_P (head))) |
8c660648 JL |
1952 | return; |
1953 | ||
1954 | next_tail = NEXT_INSN (tail); | |
1955 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) | |
1956 | { | |
1957 | rtx prev; | |
1958 | ||
1959 | /* Farm out notes, and maybe save them in NOTE_LIST. | |
1960 | This is needed to keep the debugger from | |
1961 | getting completely deranged. */ | |
496d7bb0 | 1962 | if (NOTE_NOT_BB_P (insn)) |
8c660648 JL |
1963 | { |
1964 | prev = insn; | |
8c660648 JL |
1965 | insn = unlink_other_notes (insn, next_tail); |
1966 | ||
e855c69d AB |
1967 | gcc_assert ((sel_sched_p () |
1968 | || prev != tail) && prev != head && insn != next_tail); | |
1969 | } | |
1970 | } | |
1971 | } | |
1972 | ||
1973 | /* Same as above, but also process REG_SAVE_NOTEs of HEAD. */ | |
1974 | void | |
1975 | remove_notes (rtx head, rtx tail) | |
1976 | { | |
1977 | /* rm_other_notes only removes notes which are _inside_ the | |
1978 | block---that is, it won't remove notes before the first real insn | |
1979 | or after the last real insn of the block. So if the first insn | |
1980 | has a REG_SAVE_NOTE which would otherwise be emitted before the | |
1981 | insn, it is redundant with the note before the start of the | |
1982 | block, and so we have to take it out. */ | |
1983 | if (INSN_P (head)) | |
1984 | { | |
1985 | rtx note; | |
1986 | ||
1987 | for (note = REG_NOTES (head); note; note = XEXP (note, 1)) | |
1988 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) | |
1989 | remove_note (head, note); | |
1990 | } | |
1991 | ||
1992 | /* Remove remaining note insns from the block, save them in | |
1993 | note_list. These notes are restored at the end of | |
1994 | schedule_block (). */ | |
1995 | rm_other_notes (head, tail); | |
1996 | } | |
1997 | ||
1998 | /* Restore-other-notes: NOTE_LIST is the end of a chain of notes | |
1999 | previously found among the insns. Insert them just before HEAD. */ | |
2000 | rtx | |
2001 | restore_other_notes (rtx head, basic_block head_bb) | |
2002 | { | |
2003 | if (note_list != 0) | |
2004 | { | |
2005 | rtx note_head = note_list; | |
2006 | ||
2007 | if (head) | |
2008 | head_bb = BLOCK_FOR_INSN (head); | |
2009 | else | |
2010 | head = NEXT_INSN (bb_note (head_bb)); | |
2011 | ||
2012 | while (PREV_INSN (note_head)) | |
2013 | { | |
2014 | set_block_for_insn (note_head, head_bb); | |
2015 | note_head = PREV_INSN (note_head); | |
8c660648 | 2016 | } |
e855c69d AB |
2017 | /* In the above cycle we've missed this note. */ |
2018 | set_block_for_insn (note_head, head_bb); | |
2019 | ||
2020 | PREV_INSN (note_head) = PREV_INSN (head); | |
2021 | NEXT_INSN (PREV_INSN (head)) = note_head; | |
2022 | PREV_INSN (head) = note_list; | |
2023 | NEXT_INSN (note_list) = head; | |
2024 | ||
2025 | if (BLOCK_FOR_INSN (head) != head_bb) | |
2026 | BB_END (head_bb) = note_list; | |
2027 | ||
2028 | head = note_head; | |
8c660648 | 2029 | } |
e855c69d AB |
2030 | |
2031 | return head; | |
8c660648 JL |
2032 | } |
2033 | ||
8c660648 JL |
2034 | /* Move insns that became ready to fire from queue to ready list. */ |
2035 | ||
176f9a7b | 2036 | static void |
1d088dee | 2037 | queue_to_ready (struct ready_list *ready) |
8c660648 JL |
2038 | { |
2039 | rtx insn; | |
2040 | rtx link; | |
b631c5f7 | 2041 | rtx skip_insn; |
8c660648 JL |
2042 | |
2043 | q_ptr = NEXT_Q (q_ptr); | |
2044 | ||
b631c5f7 | 2045 | if (dbg_cnt (sched_insn) == false) |
b5b8b0ac AO |
2046 | { |
2047 | /* If debug counter is activated do not requeue insn next after | |
2048 | last_scheduled_insn. */ | |
2049 | skip_insn = next_nonnote_insn (last_scheduled_insn); | |
2050 | while (skip_insn && DEBUG_INSN_P (skip_insn)) | |
2051 | skip_insn = next_nonnote_insn (skip_insn); | |
2052 | } | |
b631c5f7 SP |
2053 | else |
2054 | skip_insn = NULL_RTX; | |
2055 | ||
8c660648 | 2056 | /* Add all pending insns that can be scheduled without stalls to the |
b4ead7d4 BS |
2057 | ready list. */ |
2058 | for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1)) | |
2059 | { | |
2060 | insn = XEXP (link, 0); | |
2061 | q_size -= 1; | |
1708fd40 | 2062 | |
b4ead7d4 BS |
2063 | if (sched_verbose >= 2) |
2064 | fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ", | |
2065 | (*current_sched_info->print_insn) (insn, 0)); | |
1708fd40 | 2066 | |
6f8dd94b EB |
2067 | /* If the ready list is full, delay the insn for 1 cycle. |
2068 | See the comment in schedule_block for the rationale. */ | |
2069 | if (!reload_completed | |
b5b8b0ac | 2070 | && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS |
b631c5f7 SP |
2071 | && !SCHED_GROUP_P (insn) |
2072 | && insn != skip_insn) | |
6f8dd94b EB |
2073 | { |
2074 | if (sched_verbose >= 2) | |
2075 | fprintf (sched_dump, "requeued because ready full\n"); | |
2076 | queue_insn (insn, 1); | |
2077 | } | |
2078 | else | |
2079 | { | |
2080 | ready_add (ready, insn, false); | |
2081 | if (sched_verbose >= 2) | |
2082 | fprintf (sched_dump, "moving to ready without stalls\n"); | |
2083 | } | |
1708fd40 | 2084 | } |
63f54b1a | 2085 | free_INSN_LIST_list (&insn_queue[q_ptr]); |
b4ead7d4 BS |
2086 | |
2087 | /* If there are no ready insns, stall until one is ready and add all | |
2088 | of the pending insns at that point to the ready list. */ | |
2089 | if (ready->n_ready == 0) | |
1708fd40 | 2090 | { |
b3694847 | 2091 | int stalls; |
1708fd40 | 2092 | |
fa0aee89 | 2093 | for (stalls = 1; stalls <= max_insn_queue_index; stalls++) |
b4ead7d4 BS |
2094 | { |
2095 | if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])) | |
2096 | { | |
2097 | for (; link; link = XEXP (link, 1)) | |
2098 | { | |
2099 | insn = XEXP (link, 0); | |
2100 | q_size -= 1; | |
1708fd40 | 2101 | |
b4ead7d4 BS |
2102 | if (sched_verbose >= 2) |
2103 | fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ", | |
2104 | (*current_sched_info->print_insn) (insn, 0)); | |
1708fd40 | 2105 | |
63f54b1a | 2106 | ready_add (ready, insn, false); |
b4ead7d4 BS |
2107 | if (sched_verbose >= 2) |
2108 | fprintf (sched_dump, "moving to ready with %d stalls\n", stalls); | |
2109 | } | |
63f54b1a | 2110 | free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]); |
1708fd40 | 2111 | |
fae15c93 VM |
2112 | advance_one_cycle (); |
2113 | ||
2114 | break; | |
b4ead7d4 | 2115 | } |
fae15c93 VM |
2116 | |
2117 | advance_one_cycle (); | |
b4ead7d4 | 2118 | } |
1708fd40 | 2119 | |
b4ead7d4 BS |
2120 | q_ptr = NEXT_Q_AFTER (q_ptr, stalls); |
2121 | clock_var += stalls; | |
1708fd40 | 2122 | } |
1708fd40 BS |
2123 | } |
2124 | ||
569fa502 | 2125 | /* Used by early_queue_to_ready. Determines whether it is "ok" to |
b8698a0f L |
2126 | prematurely move INSN from the queue to the ready list. Currently, |
2127 | if a target defines the hook 'is_costly_dependence', this function | |
569fa502 | 2128 | uses the hook to check whether there exist any dependences which are |
b8698a0f | 2129 | considered costly by the target, between INSN and other insns that |
569fa502 DN |
2130 | have already been scheduled. Dependences are checked up to Y cycles |
2131 | back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows | |
b8698a0f L |
2132 | controlling this value. |
2133 | (Other considerations could be taken into account instead (or in | |
569fa502 DN |
2134 | addition) depending on user flags and target hooks. */ |
2135 | ||
b8698a0f | 2136 | static bool |
569fa502 DN |
2137 | ok_for_early_queue_removal (rtx insn) |
2138 | { | |
2139 | int n_cycles; | |
2140 | rtx prev_insn = last_scheduled_insn; | |
2141 | ||
2142 | if (targetm.sched.is_costly_dependence) | |
2143 | { | |
2144 | for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--) | |
2145 | { | |
2146 | for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn)) | |
2147 | { | |
b198261f | 2148 | int cost; |
569fa502 | 2149 | |
ec8628e8 JJ |
2150 | if (prev_insn == current_sched_info->prev_head) |
2151 | { | |
2152 | prev_insn = NULL; | |
2153 | break; | |
2154 | } | |
2155 | ||
4b4bf941 | 2156 | if (!NOTE_P (prev_insn)) |
569fa502 | 2157 | { |
e2f6ff94 | 2158 | dep_t dep; |
b198261f | 2159 | |
e2f6ff94 | 2160 | dep = sd_find_dep_between (prev_insn, insn, true); |
b198261f | 2161 | |
e2f6ff94 | 2162 | if (dep != NULL) |
569fa502 | 2163 | { |
b198261f MK |
2164 | cost = dep_cost (dep); |
2165 | ||
2166 | if (targetm.sched.is_costly_dependence (dep, cost, | |
569fa502 DN |
2167 | flag_sched_stalled_insns_dep - n_cycles)) |
2168 | return false; | |
2169 | } | |
2170 | } | |
2171 | ||
2172 | if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */ | |
2173 | break; | |
2174 | } | |
2175 | ||
b8698a0f | 2176 | if (!prev_insn) |
569fa502 | 2177 | break; |
b8698a0f | 2178 | prev_insn = PREV_INSN (prev_insn); |
569fa502 DN |
2179 | } |
2180 | } | |
2181 | ||
2182 | return true; | |
2183 | } | |
2184 | ||
2185 | ||
2186 | /* Remove insns from the queue, before they become "ready" with respect | |
6614fd40 | 2187 | to FU latency considerations. */ |
569fa502 | 2188 | |
b8698a0f | 2189 | static int |
569fa502 DN |
2190 | early_queue_to_ready (state_t state, struct ready_list *ready) |
2191 | { | |
2192 | rtx insn; | |
2193 | rtx link; | |
2194 | rtx next_link; | |
2195 | rtx prev_link; | |
2196 | bool move_to_ready; | |
2197 | int cost; | |
2198 | state_t temp_state = alloca (dfa_state_size); | |
2199 | int stalls; | |
2200 | int insns_removed = 0; | |
2201 | ||
2202 | /* | |
b8698a0f L |
2203 | Flag '-fsched-stalled-insns=X' determines the aggressiveness of this |
2204 | function: | |
569fa502 | 2205 | |
b8698a0f | 2206 | X == 0: There is no limit on how many queued insns can be removed |
569fa502 DN |
2207 | prematurely. (flag_sched_stalled_insns = -1). |
2208 | ||
b8698a0f | 2209 | X >= 1: Only X queued insns can be removed prematurely in each |
569fa502 DN |
2210 | invocation. (flag_sched_stalled_insns = X). |
2211 | ||
2212 | Otherwise: Early queue removal is disabled. | |
2213 | (flag_sched_stalled_insns = 0) | |
2214 | */ | |
2215 | ||
b8698a0f | 2216 | if (! flag_sched_stalled_insns) |
569fa502 DN |
2217 | return 0; |
2218 | ||
fa0aee89 | 2219 | for (stalls = 0; stalls <= max_insn_queue_index; stalls++) |
569fa502 DN |
2220 | { |
2221 | if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])) | |
2222 | { | |
2223 | if (sched_verbose > 6) | |
2224 | fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls); | |
2225 | ||
2226 | prev_link = 0; | |
2227 | while (link) | |
2228 | { | |
2229 | next_link = XEXP (link, 1); | |
2230 | insn = XEXP (link, 0); | |
2231 | if (insn && sched_verbose > 6) | |
2232 | print_rtl_single (sched_dump, insn); | |
2233 | ||
2234 | memcpy (temp_state, state, dfa_state_size); | |
b8698a0f | 2235 | if (recog_memoized (insn) < 0) |
569fa502 DN |
2236 | /* non-negative to indicate that it's not ready |
2237 | to avoid infinite Q->R->Q->R... */ | |
2238 | cost = 0; | |
2239 | else | |
2240 | cost = state_transition (temp_state, insn); | |
2241 | ||
2242 | if (sched_verbose >= 6) | |
2243 | fprintf (sched_dump, "transition cost = %d\n", cost); | |
2244 | ||
2245 | move_to_ready = false; | |
b8698a0f | 2246 | if (cost < 0) |
569fa502 DN |
2247 | { |
2248 | move_to_ready = ok_for_early_queue_removal (insn); | |
2249 | if (move_to_ready == true) | |
2250 | { | |
2251 | /* move from Q to R */ | |
2252 | q_size -= 1; | |
63f54b1a | 2253 | ready_add (ready, insn, false); |
569fa502 | 2254 | |
b8698a0f | 2255 | if (prev_link) |
569fa502 DN |
2256 | XEXP (prev_link, 1) = next_link; |
2257 | else | |
2258 | insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link; | |
2259 | ||
2260 | free_INSN_LIST_node (link); | |
2261 | ||
2262 | if (sched_verbose >= 2) | |
2263 | fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n", | |
2264 | (*current_sched_info->print_insn) (insn, 0)); | |
2265 | ||
2266 | insns_removed++; | |
2267 | if (insns_removed == flag_sched_stalled_insns) | |
63f54b1a MK |
2268 | /* Remove no more than flag_sched_stalled_insns insns |
2269 | from Q at a time. */ | |
569fa502 DN |
2270 | return insns_removed; |
2271 | } | |
2272 | } | |
2273 | ||
2274 | if (move_to_ready == false) | |
2275 | prev_link = link; | |
2276 | ||
2277 | link = next_link; | |
2278 | } /* while link */ | |
b8698a0f | 2279 | } /* if link */ |
569fa502 DN |
2280 | |
2281 | } /* for stalls.. */ | |
2282 | ||
b8698a0f | 2283 | return insns_removed; |
569fa502 DN |
2284 | } |
2285 | ||
2286 | ||
b4ead7d4 | 2287 | /* Print the ready list for debugging purposes. Callable from debugger. */ |
1708fd40 | 2288 | |
b4ead7d4 | 2289 | static void |
1d088dee | 2290 | debug_ready_list (struct ready_list *ready) |
1708fd40 | 2291 | { |
b4ead7d4 BS |
2292 | rtx *p; |
2293 | int i; | |
1708fd40 | 2294 | |
b4ead7d4 | 2295 | if (ready->n_ready == 0) |
fae15c93 VM |
2296 | { |
2297 | fprintf (sched_dump, "\n"); | |
2298 | return; | |
2299 | } | |
1708fd40 | 2300 | |
b4ead7d4 BS |
2301 | p = ready_lastpos (ready); |
2302 | for (i = 0; i < ready->n_ready; i++) | |
ce18efcb VM |
2303 | { |
2304 | fprintf (sched_dump, " %s:%d", | |
2305 | (*current_sched_info->print_insn) (p[i], 0), | |
2306 | INSN_LUID (p[i])); | |
2307 | if (sched_pressure_p) | |
2308 | fprintf (sched_dump, "(cost=%d", | |
2309 | INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i])); | |
2310 | if (INSN_TICK (p[i]) > clock_var) | |
2311 | fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var); | |
2312 | if (sched_pressure_p) | |
2313 | fprintf (sched_dump, ")"); | |
2314 | } | |
b4ead7d4 BS |
2315 | fprintf (sched_dump, "\n"); |
2316 | } | |
1708fd40 | 2317 | |
570a98eb | 2318 | /* Search INSN for REG_SAVE_NOTE note pairs for |
8b96512f | 2319 | NOTE_INSN_EHREGION_{BEG,END}; and convert them back into |
c46a37c4 RH |
2320 | NOTEs. The REG_SAVE_NOTE note following first one is contains the |
2321 | saved value for NOTE_BLOCK_NUMBER which is useful for | |
496d7bb0 | 2322 | NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */ |
e855c69d | 2323 | void |
496d7bb0 | 2324 | reemit_notes (rtx insn) |
8c660648 | 2325 | { |
496d7bb0 | 2326 | rtx note, last = insn; |
8c660648 | 2327 | |
8c660648 JL |
2328 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) |
2329 | { | |
c46a37c4 | 2330 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) |
8c660648 | 2331 | { |
81f40b79 | 2332 | enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0)); |
b3b42a4d | 2333 | |
63f4a88e JH |
2334 | last = emit_note_before (note_type, last); |
2335 | remove_note (insn, note); | |
8c660648 JL |
2336 | } |
2337 | } | |
8c660648 JL |
2338 | } |
2339 | ||
496d7bb0 MK |
2340 | /* Move INSN. Reemit notes if needed. Update CFG, if needed. */ |
2341 | static void | |
e855c69d | 2342 | move_insn (rtx insn, rtx last, rtx nt) |
496d7bb0 | 2343 | { |
496d7bb0 MK |
2344 | if (PREV_INSN (insn) != last) |
2345 | { | |
2346 | basic_block bb; | |
2347 | rtx note; | |
2348 | int jump_p = 0; | |
8c660648 | 2349 | |
496d7bb0 | 2350 | bb = BLOCK_FOR_INSN (insn); |
b8698a0f | 2351 | |
496d7bb0 | 2352 | /* BB_HEAD is either LABEL or NOTE. */ |
b8698a0f | 2353 | gcc_assert (BB_HEAD (bb) != insn); |
496d7bb0 MK |
2354 | |
2355 | if (BB_END (bb) == insn) | |
2356 | /* If this is last instruction in BB, move end marker one | |
2357 | instruction up. */ | |
2358 | { | |
2359 | /* Jumps are always placed at the end of basic block. */ | |
2360 | jump_p = control_flow_insn_p (insn); | |
2361 | ||
2362 | gcc_assert (!jump_p | |
e855c69d | 2363 | || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS) |
d7bfd907 | 2364 | && IS_SPECULATION_BRANCHY_CHECK_P (insn)) |
e855c69d AB |
2365 | || (common_sched_info->sched_pass_id |
2366 | == SCHED_EBB_PASS)); | |
b8698a0f | 2367 | |
496d7bb0 MK |
2368 | gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb); |
2369 | ||
2370 | BB_END (bb) = PREV_INSN (insn); | |
2371 | } | |
8c660648 | 2372 | |
496d7bb0 | 2373 | gcc_assert (BB_END (bb) != last); |
c9e03727 | 2374 | |
496d7bb0 MK |
2375 | if (jump_p) |
2376 | /* We move the block note along with jump. */ | |
2377 | { | |
e855c69d | 2378 | gcc_assert (nt); |
496d7bb0 MK |
2379 | |
2380 | note = NEXT_INSN (insn); | |
2381 | while (NOTE_NOT_BB_P (note) && note != nt) | |
2382 | note = NEXT_INSN (note); | |
2383 | ||
2384 | if (note != nt | |
2385 | && (LABEL_P (note) | |
2386 | || BARRIER_P (note))) | |
2387 | note = NEXT_INSN (note); | |
b8698a0f | 2388 | |
496d7bb0 MK |
2389 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); |
2390 | } | |
2391 | else | |
2392 | note = insn; | |
2393 | ||
2394 | NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note); | |
2395 | PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn); | |
2396 | ||
2397 | NEXT_INSN (note) = NEXT_INSN (last); | |
2398 | PREV_INSN (NEXT_INSN (last)) = note; | |
c9e03727 | 2399 | |
496d7bb0 MK |
2400 | NEXT_INSN (last) = insn; |
2401 | PREV_INSN (insn) = last; | |
2402 | ||
2403 | bb = BLOCK_FOR_INSN (last); | |
2404 | ||
2405 | if (jump_p) | |
2406 | { | |
2407 | fix_jump_move (insn); | |
2408 | ||
2409 | if (BLOCK_FOR_INSN (insn) != bb) | |
2410 | move_block_after_check (insn); | |
2411 | ||
2412 | gcc_assert (BB_END (bb) == last); | |
2413 | } | |
2414 | ||
63642d5a | 2415 | df_insn_change_bb (insn, bb); |
b8698a0f | 2416 | |
496d7bb0 MK |
2417 | /* Update BB_END, if needed. */ |
2418 | if (BB_END (bb) == last) | |
b8698a0f | 2419 | BB_END (bb) = insn; |
496d7bb0 | 2420 | } |
58fb7809 | 2421 | |
b8698a0f | 2422 | SCHED_GROUP_P (insn) = 0; |
8c660648 JL |
2423 | } |
2424 | ||
356c23b3 MK |
2425 | /* Return true if scheduling INSN will finish current clock cycle. */ |
2426 | static bool | |
2427 | insn_finishes_cycle_p (rtx insn) | |
2428 | { | |
2429 | if (SCHED_GROUP_P (insn)) | |
2430 | /* After issuing INSN, rest of the sched_group will be forced to issue | |
2431 | in order. Don't make any plans for the rest of cycle. */ | |
2432 | return true; | |
2433 | ||
2434 | /* Finishing the block will, apparently, finish the cycle. */ | |
2435 | if (current_sched_info->insn_finishes_block_p | |
2436 | && current_sched_info->insn_finishes_block_p (insn)) | |
2437 | return true; | |
2438 | ||
2439 | return false; | |
2440 | } | |
2441 | ||
30028c85 VM |
2442 | /* The following structure describe an entry of the stack of choices. */ |
2443 | struct choice_entry | |
2444 | { | |
2445 | /* Ordinal number of the issued insn in the ready queue. */ | |
2446 | int index; | |
2447 | /* The number of the rest insns whose issues we should try. */ | |
2448 | int rest; | |
2449 | /* The number of issued essential insns. */ | |
2450 | int n; | |
2451 | /* State after issuing the insn. */ | |
2452 | state_t state; | |
2453 | }; | |
2454 | ||
2455 | /* The following array is used to implement a stack of choices used in | |
2456 | function max_issue. */ | |
2457 | static struct choice_entry *choice_stack; | |
2458 | ||
2459 | /* The following variable value is number of essential insns issued on | |
2460 | the current cycle. An insn is essential one if it changes the | |
2461 | processors state. */ | |
e855c69d AB |
2462 | int cycle_issued_insns; |
2463 | ||
2464 | /* This holds the value of the target dfa_lookahead hook. */ | |
2465 | int dfa_lookahead; | |
30028c85 | 2466 | |
880efc46 VM |
2467 | /* The following variable value is maximal number of tries of issuing |
2468 | insns for the first cycle multipass insn scheduling. We define | |
2469 | this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not | |
2470 | need this constraint if all real insns (with non-negative codes) | |
2471 | had reservations because in this case the algorithm complexity is | |
2472 | O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions | |
2473 | might be incomplete and such insn might occur. For such | |
2474 | descriptions, the complexity of algorithm (without the constraint) | |
2475 | could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */ | |
2476 | static int max_lookahead_tries; | |
2477 | ||
2478 | /* The following value is value of hook | |
2479 | `first_cycle_multipass_dfa_lookahead' at the last call of | |
2480 | `max_issue'. */ | |
2481 | static int cached_first_cycle_multipass_dfa_lookahead = 0; | |
2482 | ||
2483 | /* The following value is value of `issue_rate' at the last call of | |
2484 | `sched_init'. */ | |
2485 | static int cached_issue_rate = 0; | |
2486 | ||
fae15c93 VM |
2487 | /* The following function returns maximal (or close to maximal) number |
2488 | of insns which can be issued on the same cycle and one of which | |
30028c85 | 2489 | insns is insns with the best rank (the first insn in READY). To |
fae15c93 VM |
2490 | make this function tries different samples of ready insns. READY |
2491 | is current queue `ready'. Global array READY_TRY reflects what | |
496d7bb0 | 2492 | insns are already issued in this try. MAX_POINTS is the sum of points |
917f1b7e | 2493 | of all instructions in READY. The function stops immediately, |
496d7bb0 MK |
2494 | if it reached the such a solution, that all instruction can be issued. |
2495 | INDEX will contain index of the best insn in READY. The following | |
e855c69d AB |
2496 | function is used only for first cycle multipass scheduling. |
2497 | ||
2498 | PRIVILEGED_N >= 0 | |
2499 | ||
2500 | This function expects recognized insns only. All USEs, | |
2501 | CLOBBERs, etc must be filtered elsewhere. */ | |
2502 | int | |
2503 | max_issue (struct ready_list *ready, int privileged_n, state_t state, | |
2504 | int *index) | |
fae15c93 | 2505 | { |
038dc49a | 2506 | int n, i, all, n_ready, best, delay, tries_num, max_points; |
e855c69d | 2507 | int more_issue; |
30028c85 | 2508 | struct choice_entry *top; |
fae15c93 | 2509 | rtx insn; |
fae15c93 | 2510 | |
e855c69d AB |
2511 | n_ready = ready->n_ready; |
2512 | gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0 | |
2513 | && privileged_n <= n_ready); | |
2514 | ||
2515 | /* Init MAX_LOOKAHEAD_TRIES. */ | |
2516 | if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead) | |
2517 | { | |
2518 | cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead; | |
2519 | max_lookahead_tries = 100; | |
2520 | for (i = 0; i < issue_rate; i++) | |
2521 | max_lookahead_tries *= dfa_lookahead; | |
2522 | } | |
2523 | ||
2524 | /* Init max_points. */ | |
2525 | max_points = 0; | |
2526 | more_issue = issue_rate - cycle_issued_insns; | |
bff5a22d AB |
2527 | |
2528 | /* ??? We used to assert here that we never issue more insns than issue_rate. | |
2529 | However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be | |
2530 | achieved to get better performance. Until these targets are fixed to use | |
b8698a0f L |
2531 | scheduler hooks to manipulate insns priority instead, the assert should |
2532 | be disabled. | |
bff5a22d AB |
2533 | |
2534 | gcc_assert (more_issue >= 0); */ | |
e855c69d AB |
2535 | |
2536 | for (i = 0; i < n_ready; i++) | |
2537 | if (!ready_try [i]) | |
2538 | { | |
2539 | if (more_issue-- > 0) | |
2540 | max_points += ISSUE_POINTS (ready_element (ready, i)); | |
2541 | else | |
2542 | break; | |
2543 | } | |
2544 | ||
2545 | /* The number of the issued insns in the best solution. */ | |
fae15c93 | 2546 | best = 0; |
e855c69d | 2547 | |
30028c85 | 2548 | top = choice_stack; |
e855c69d AB |
2549 | |
2550 | /* Set initial state of the search. */ | |
2551 | memcpy (top->state, state, dfa_state_size); | |
2552 | top->rest = dfa_lookahead; | |
30028c85 | 2553 | top->n = 0; |
e855c69d AB |
2554 | |
2555 | /* Count the number of the insns to search among. */ | |
30028c85 | 2556 | for (all = i = 0; i < n_ready; i++) |
fae15c93 | 2557 | if (!ready_try [i]) |
30028c85 | 2558 | all++; |
e855c69d AB |
2559 | |
2560 | /* I is the index of the insn to try next. */ | |
30028c85 | 2561 | i = 0; |
880efc46 | 2562 | tries_num = 0; |
30028c85 VM |
2563 | for (;;) |
2564 | { | |
e855c69d AB |
2565 | if (/* If we've reached a dead end or searched enough of what we have |
2566 | been asked... */ | |
2567 | top->rest == 0 | |
2568 | /* Or have nothing else to try. */ | |
2569 | || i >= n_ready) | |
30028c85 | 2570 | { |
e855c69d AB |
2571 | /* ??? (... || i == n_ready). */ |
2572 | gcc_assert (i <= n_ready); | |
2573 | ||
30028c85 VM |
2574 | if (top == choice_stack) |
2575 | break; | |
e855c69d AB |
2576 | |
2577 | if (best < top - choice_stack) | |
30028c85 | 2578 | { |
e855c69d AB |
2579 | if (privileged_n) |
2580 | { | |
2581 | n = privileged_n; | |
2582 | /* Try to find issued privileged insn. */ | |
2583 | while (n && !ready_try[--n]); | |
2584 | } | |
2585 | ||
2586 | if (/* If all insns are equally good... */ | |
2587 | privileged_n == 0 | |
2588 | /* Or a privileged insn will be issued. */ | |
2589 | || ready_try[n]) | |
2590 | /* Then we have a solution. */ | |
2591 | { | |
2592 | best = top - choice_stack; | |
2593 | /* This is the index of the insn issued first in this | |
2594 | solution. */ | |
2595 | *index = choice_stack [1].index; | |
e855c69d AB |
2596 | if (top->n == max_points || best == all) |
2597 | break; | |
2598 | } | |
30028c85 | 2599 | } |
e855c69d AB |
2600 | |
2601 | /* Set ready-list index to point to the last insn | |
2602 | ('i++' below will advance it to the next insn). */ | |
30028c85 | 2603 | i = top->index; |
e855c69d AB |
2604 | |
2605 | /* Backtrack. */ | |
30028c85 VM |
2606 | ready_try [i] = 0; |
2607 | top--; | |
e855c69d | 2608 | memcpy (state, top->state, dfa_state_size); |
30028c85 VM |
2609 | } |
2610 | else if (!ready_try [i]) | |
2611 | { | |
880efc46 VM |
2612 | tries_num++; |
2613 | if (tries_num > max_lookahead_tries) | |
2614 | break; | |
30028c85 | 2615 | insn = ready_element (ready, i); |
e855c69d | 2616 | delay = state_transition (state, insn); |
30028c85 VM |
2617 | if (delay < 0) |
2618 | { | |
356c23b3 MK |
2619 | if (state_dead_lock_p (state) |
2620 | || insn_finishes_cycle_p (insn)) | |
2621 | /* We won't issue any more instructions in the next | |
2622 | choice_state. */ | |
30028c85 VM |
2623 | top->rest = 0; |
2624 | else | |
2625 | top->rest--; | |
e855c69d | 2626 | |
30028c85 | 2627 | n = top->n; |
e855c69d | 2628 | if (memcmp (top->state, state, dfa_state_size) != 0) |
496d7bb0 | 2629 | n += ISSUE_POINTS (insn); |
e855c69d AB |
2630 | |
2631 | /* Advance to the next choice_entry. */ | |
30028c85 | 2632 | top++; |
e855c69d AB |
2633 | /* Initialize it. */ |
2634 | top->rest = dfa_lookahead; | |
30028c85 VM |
2635 | top->index = i; |
2636 | top->n = n; | |
e855c69d AB |
2637 | memcpy (top->state, state, dfa_state_size); |
2638 | ||
30028c85 VM |
2639 | ready_try [i] = 1; |
2640 | i = -1; | |
2641 | } | |
2642 | } | |
e855c69d AB |
2643 | |
2644 | /* Increase ready-list index. */ | |
30028c85 VM |
2645 | i++; |
2646 | } | |
496d7bb0 | 2647 | |
e855c69d | 2648 | /* Restore the original state of the DFA. */ |
b8698a0f | 2649 | memcpy (state, choice_stack->state, dfa_state_size); |
e855c69d | 2650 | |
fae15c93 VM |
2651 | return best; |
2652 | } | |
2653 | ||
2654 | /* The following function chooses insn from READY and modifies | |
e855c69d | 2655 | READY. The following function is used only for first |
b631c5f7 SP |
2656 | cycle multipass scheduling. |
2657 | Return: | |
2658 | -1 if cycle should be advanced, | |
2659 | 0 if INSN_PTR is set to point to the desirable insn, | |
2660 | 1 if choose_ready () should be restarted without advancing the cycle. */ | |
2661 | static int | |
2662 | choose_ready (struct ready_list *ready, rtx *insn_ptr) | |
fae15c93 | 2663 | { |
b631c5f7 SP |
2664 | int lookahead; |
2665 | ||
2666 | if (dbg_cnt (sched_insn) == false) | |
2667 | { | |
2668 | rtx insn; | |
2669 | ||
2670 | insn = next_nonnote_insn (last_scheduled_insn); | |
2671 | ||
2672 | if (QUEUE_INDEX (insn) == QUEUE_READY) | |
2673 | /* INSN is in the ready_list. */ | |
2674 | { | |
2675 | ready_remove_insn (insn); | |
2676 | *insn_ptr = insn; | |
2677 | return 0; | |
2678 | } | |
2679 | ||
2680 | /* INSN is in the queue. Advance cycle to move it to the ready list. */ | |
2681 | return -1; | |
2682 | } | |
2683 | ||
2684 | lookahead = 0; | |
880efc46 VM |
2685 | |
2686 | if (targetm.sched.first_cycle_multipass_dfa_lookahead) | |
5fd9b178 | 2687 | lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead (); |
b5b8b0ac AO |
2688 | if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)) |
2689 | || DEBUG_INSN_P (ready_element (ready, 0))) | |
b631c5f7 SP |
2690 | { |
2691 | *insn_ptr = ready_remove_first (ready); | |
2692 | return 0; | |
2693 | } | |
fae15c93 VM |
2694 | else |
2695 | { | |
2696 | /* Try to choose the better insn. */ | |
496d7bb0 | 2697 | int index = 0, i, n; |
30028c85 | 2698 | rtx insn; |
e855c69d AB |
2699 | int try_data = 1, try_control = 1; |
2700 | ds_t ts; | |
b8698a0f | 2701 | |
30028c85 VM |
2702 | insn = ready_element (ready, 0); |
2703 | if (INSN_CODE (insn) < 0) | |
b631c5f7 SP |
2704 | { |
2705 | *insn_ptr = ready_remove_first (ready); | |
2706 | return 0; | |
2707 | } | |
496d7bb0 MK |
2708 | |
2709 | if (spec_info | |
2710 | && spec_info->flags & (PREFER_NON_DATA_SPEC | |
2711 | | PREFER_NON_CONTROL_SPEC)) | |
2712 | { | |
496d7bb0 MK |
2713 | for (i = 0, n = ready->n_ready; i < n; i++) |
2714 | { | |
a57aee2a MK |
2715 | rtx x; |
2716 | ds_t s; | |
2717 | ||
496d7bb0 MK |
2718 | x = ready_element (ready, i); |
2719 | s = TODO_SPEC (x); | |
b8698a0f | 2720 | |
496d7bb0 MK |
2721 | if (spec_info->flags & PREFER_NON_DATA_SPEC |
2722 | && !(s & DATA_SPEC)) | |
b8698a0f | 2723 | { |
496d7bb0 MK |
2724 | try_data = 0; |
2725 | if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC) | |
2726 | || !try_control) | |
2727 | break; | |
2728 | } | |
b8698a0f | 2729 | |
496d7bb0 MK |
2730 | if (spec_info->flags & PREFER_NON_CONTROL_SPEC |
2731 | && !(s & CONTROL_SPEC)) | |
2732 | { | |
2733 | try_control = 0; | |
2734 | if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data) | |
2735 | break; | |
2736 | } | |
2737 | } | |
2738 | } | |
2739 | ||
e855c69d AB |
2740 | ts = TODO_SPEC (insn); |
2741 | if ((ts & SPECULATIVE) | |
2742 | && (((!try_data && (ts & DATA_SPEC)) | |
2743 | || (!try_control && (ts & CONTROL_SPEC))) | |
2744 | || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec | |
2745 | && !targetm.sched | |
2746 | .first_cycle_multipass_dfa_lookahead_guard_spec (insn)))) | |
a57aee2a MK |
2747 | /* Discard speculative instruction that stands first in the ready |
2748 | list. */ | |
496d7bb0 MK |
2749 | { |
2750 | change_queue_index (insn, 1); | |
b631c5f7 | 2751 | return 1; |
496d7bb0 MK |
2752 | } |
2753 | ||
e855c69d | 2754 | ready_try[0] = 0; |
496d7bb0 | 2755 | |
30028c85 VM |
2756 | for (i = 1; i < ready->n_ready; i++) |
2757 | { | |
2758 | insn = ready_element (ready, i); | |
e855c69d | 2759 | |
30028c85 | 2760 | ready_try [i] |
e855c69d AB |
2761 | = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC)) |
2762 | || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))); | |
30028c85 | 2763 | } |
496d7bb0 | 2764 | |
e855c69d AB |
2765 | /* Let the target filter the search space. */ |
2766 | for (i = 1; i < ready->n_ready; i++) | |
2767 | if (!ready_try[i]) | |
2768 | { | |
2769 | insn = ready_element (ready, i); | |
2770 | ||
078a70a1 AN |
2771 | #ifdef ENABLE_CHECKING |
2772 | /* If this insn is recognizable we should have already | |
2773 | recognized it earlier. | |
2774 | ??? Not very clear where this is supposed to be done. | |
2775 | See dep_cost_1. */ | |
e855c69d AB |
2776 | gcc_assert (INSN_CODE (insn) >= 0 |
2777 | || recog_memoized (insn) < 0); | |
078a70a1 | 2778 | #endif |
e855c69d AB |
2779 | |
2780 | ready_try [i] | |
2781 | = (/* INSN_CODE check can be omitted here as it is also done later | |
2782 | in max_issue (). */ | |
2783 | INSN_CODE (insn) < 0 | |
2784 | || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard | |
2785 | && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard | |
2786 | (insn))); | |
2787 | } | |
2788 | ||
2789 | if (max_issue (ready, 1, curr_state, &index) == 0) | |
b631c5f7 SP |
2790 | { |
2791 | *insn_ptr = ready_remove_first (ready); | |
9c575182 | 2792 | if (sched_verbose >= 4) |
b8698a0f | 2793 | fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n", |
9c575182 | 2794 | (*current_sched_info->print_insn) (*insn_ptr, 0)); |
b631c5f7 SP |
2795 | return 0; |
2796 | } | |
fae15c93 | 2797 | else |
b631c5f7 | 2798 | { |
b8698a0f | 2799 | if (sched_verbose >= 4) |
e855c69d AB |
2800 | fprintf (sched_dump, ";;\t\tChosen insn : %s\n", |
2801 | (*current_sched_info->print_insn) | |
2802 | (ready_element (ready, index), 0)); | |
b8698a0f | 2803 | |
b631c5f7 SP |
2804 | *insn_ptr = ready_remove (ready, index); |
2805 | return 0; | |
2806 | } | |
fae15c93 VM |
2807 | } |
2808 | } | |
2809 | ||
496d7bb0 MK |
2810 | /* Use forward list scheduling to rearrange insns of block pointed to by |
2811 | TARGET_BB, possibly bringing insns from subsequent blocks in the same | |
2812 | region. */ | |
8c660648 | 2813 | |
b4ead7d4 | 2814 | void |
e855c69d | 2815 | schedule_block (basic_block *target_bb) |
8c660648 | 2816 | { |
30028c85 | 2817 | int i, first_cycle_insn_p; |
8c660648 | 2818 | int can_issue_more; |
fae15c93 | 2819 | state_t temp_state = NULL; /* It is used for multipass scheduling. */ |
58fb7809 | 2820 | int sort_p, advance, start_clock_var; |
8c660648 | 2821 | |
63de6c74 | 2822 | /* Head/tail info for this block. */ |
1708fd40 BS |
2823 | rtx prev_head = current_sched_info->prev_head; |
2824 | rtx next_tail = current_sched_info->next_tail; | |
2825 | rtx head = NEXT_INSN (prev_head); | |
2826 | rtx tail = PREV_INSN (next_tail); | |
8c660648 | 2827 | |
484df988 JL |
2828 | /* We used to have code to avoid getting parameters moved from hard |
2829 | argument registers into pseudos. | |
8c660648 | 2830 | |
484df988 JL |
2831 | However, it was removed when it proved to be of marginal benefit |
2832 | and caused problems because schedule_block and compute_forward_dependences | |
2833 | had different notions of what the "head" insn was. */ | |
8c660648 | 2834 | |
535a42b1 | 2835 | gcc_assert (head != tail || INSN_P (head)); |
8c660648 | 2836 | |
e2f6ff94 | 2837 | haifa_recovery_bb_recently_added_p = false; |
496d7bb0 | 2838 | |
63de6c74 | 2839 | /* Debug info. */ |
8c660648 | 2840 | if (sched_verbose) |
496d7bb0 | 2841 | dump_new_block_header (0, *target_bb, head, tail); |
8c660648 | 2842 | |
fa0aee89 | 2843 | state_reset (curr_state); |
8c660648 | 2844 | |
e855c69d | 2845 | /* Clear the ready list. */ |
176f9a7b | 2846 | ready.first = ready.veclen - 1; |
176f9a7b | 2847 | ready.n_ready = 0; |
b5b8b0ac | 2848 | ready.n_debug = 0; |
8c660648 | 2849 | |
fa0aee89 PB |
2850 | /* It is used for first cycle multipass scheduling. */ |
2851 | temp_state = alloca (dfa_state_size); | |
fae15c93 | 2852 | |
c237e94a | 2853 | if (targetm.sched.md_init) |
5fd9b178 | 2854 | targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen); |
e4da5f6d | 2855 | |
89076bb3 RH |
2856 | /* We start inserting insns after PREV_HEAD. */ |
2857 | last_scheduled_insn = prev_head; | |
8c660648 | 2858 | |
b5b8b0ac AO |
2859 | gcc_assert ((NOTE_P (last_scheduled_insn) |
2860 | || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn)) | |
496d7bb0 MK |
2861 | && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb); |
2862 | ||
1708fd40 BS |
2863 | /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the |
2864 | queue. */ | |
8c660648 JL |
2865 | q_ptr = 0; |
2866 | q_size = 0; | |
fae15c93 | 2867 | |
d3bfe4de | 2868 | insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1); |
fa0aee89 | 2869 | memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx)); |
8c660648 | 2870 | |
197043f5 RH |
2871 | /* Start just before the beginning of time. */ |
2872 | clock_var = -1; | |
63f54b1a | 2873 | |
b8698a0f | 2874 | /* We need queue and ready lists and clock_var be initialized |
63f54b1a MK |
2875 | in try_ready () (which is called through init_ready_list ()). */ |
2876 | (*current_sched_info->init_ready_list) (); | |
2877 | ||
6f8dd94b EB |
2878 | /* The algorithm is O(n^2) in the number of ready insns at any given |
2879 | time in the worst case. Before reload we are more likely to have | |
2880 | big lists so truncate them to a reasonable size. */ | |
b5b8b0ac AO |
2881 | if (!reload_completed |
2882 | && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS) | |
6f8dd94b EB |
2883 | { |
2884 | ready_sort (&ready); | |
2885 | ||
b5b8b0ac AO |
2886 | /* Find first free-standing insn past MAX_SCHED_READY_INSNS. |
2887 | If there are debug insns, we know they're first. */ | |
2888 | for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++) | |
6f8dd94b EB |
2889 | if (!SCHED_GROUP_P (ready_element (&ready, i))) |
2890 | break; | |
2891 | ||
2892 | if (sched_verbose >= 2) | |
2893 | { | |
2894 | fprintf (sched_dump, | |
2895 | ";;\t\tReady list on entry: %d insns\n", ready.n_ready); | |
2896 | fprintf (sched_dump, | |
2897 | ";;\t\t before reload => truncated to %d insns\n", i); | |
2898 | } | |
2899 | ||
b631c5f7 SP |
2900 | /* Delay all insns past it for 1 cycle. If debug counter is |
2901 | activated make an exception for the insn right after | |
2902 | last_scheduled_insn. */ | |
2903 | { | |
2904 | rtx skip_insn; | |
2905 | ||
2906 | if (dbg_cnt (sched_insn) == false) | |
2907 | skip_insn = next_nonnote_insn (last_scheduled_insn); | |
2908 | else | |
2909 | skip_insn = NULL_RTX; | |
2910 | ||
2911 | while (i < ready.n_ready) | |
2912 | { | |
2913 | rtx insn; | |
2914 | ||
2915 | insn = ready_remove (&ready, i); | |
2916 | ||
2917 | if (insn != skip_insn) | |
2918 | queue_insn (insn, 1); | |
2919 | } | |
2920 | } | |
6f8dd94b EB |
2921 | } |
2922 | ||
496d7bb0 MK |
2923 | /* Now we can restore basic block notes and maintain precise cfg. */ |
2924 | restore_bb_notes (*target_bb); | |
2925 | ||
63f54b1a MK |
2926 | last_clock_var = -1; |
2927 | ||
58fb7809 | 2928 | advance = 0; |
197043f5 | 2929 | |
30028c85 | 2930 | sort_p = TRUE; |
63de6c74 | 2931 | /* Loop until all the insns in BB are scheduled. */ |
1708fd40 | 2932 | while ((*current_sched_info->schedule_more_p) ()) |
8c660648 | 2933 | { |
58fb7809 | 2934 | do |
8c660648 | 2935 | { |
58fb7809 VM |
2936 | start_clock_var = clock_var; |
2937 | ||
2938 | clock_var++; | |
1d088dee | 2939 | |
58fb7809 | 2940 | advance_one_cycle (); |
1d088dee | 2941 | |
58fb7809 VM |
2942 | /* Add to the ready list all pending insns that can be issued now. |
2943 | If there are no ready insns, increment clock until one | |
2944 | is ready and add all pending insns at that point to the ready | |
2945 | list. */ | |
2946 | queue_to_ready (&ready); | |
1d088dee | 2947 | |
535a42b1 | 2948 | gcc_assert (ready.n_ready); |
1d088dee | 2949 | |
58fb7809 VM |
2950 | if (sched_verbose >= 2) |
2951 | { | |
2952 | fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: "); | |
2953 | debug_ready_list (&ready); | |
2954 | } | |
2955 | advance -= clock_var - start_clock_var; | |
8c660648 | 2956 | } |
58fb7809 | 2957 | while (advance > 0); |
8c660648 | 2958 | |
30028c85 VM |
2959 | if (sort_p) |
2960 | { | |
2961 | /* Sort the ready list based on priority. */ | |
2962 | ready_sort (&ready); | |
1d088dee | 2963 | |
30028c85 VM |
2964 | if (sched_verbose >= 2) |
2965 | { | |
2966 | fprintf (sched_dump, ";;\t\tReady list after ready_sort: "); | |
2967 | debug_ready_list (&ready); | |
2968 | } | |
2969 | } | |
197043f5 | 2970 | |
b5b8b0ac AO |
2971 | /* We don't want md sched reorder to even see debug isns, so put |
2972 | them out right away. */ | |
2973 | if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))) | |
2974 | { | |
2975 | if (control_flow_insn_p (last_scheduled_insn)) | |
2976 | { | |
2977 | *target_bb = current_sched_info->advance_target_bb | |
2978 | (*target_bb, 0); | |
2979 | ||
2980 | if (sched_verbose) | |
2981 | { | |
2982 | rtx x; | |
2983 | ||
2984 | x = next_real_insn (last_scheduled_insn); | |
2985 | gcc_assert (x); | |
2986 | dump_new_block_header (1, *target_bb, x, tail); | |
2987 | } | |
2988 | ||
2989 | last_scheduled_insn = bb_note (*target_bb); | |
2990 | } | |
2991 | ||
2992 | while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))) | |
2993 | { | |
2994 | rtx insn = ready_remove_first (&ready); | |
2995 | gcc_assert (DEBUG_INSN_P (insn)); | |
2996 | (*current_sched_info->begin_schedule_ready) (insn, | |
2997 | last_scheduled_insn); | |
2998 | move_insn (insn, last_scheduled_insn, | |
2999 | current_sched_info->next_tail); | |
3000 | last_scheduled_insn = insn; | |
3001 | advance = schedule_insn (insn); | |
3002 | gcc_assert (advance == 0); | |
3003 | if (ready.n_ready > 0) | |
3004 | ready_sort (&ready); | |
3005 | } | |
3006 | ||
3007 | if (!ready.n_ready) | |
3008 | continue; | |
3009 | } | |
3010 | ||
b4ead7d4 BS |
3011 | /* Allow the target to reorder the list, typically for |
3012 | better instruction bundling. */ | |
14484a78 | 3013 | if (sort_p && targetm.sched.reorder |
58fb7809 VM |
3014 | && (ready.n_ready == 0 |
3015 | || !SCHED_GROUP_P (ready_element (&ready, 0)))) | |
c237e94a | 3016 | can_issue_more = |
5fd9b178 KH |
3017 | targetm.sched.reorder (sched_dump, sched_verbose, |
3018 | ready_lastpos (&ready), | |
3019 | &ready.n_ready, clock_var); | |
c237e94a ZW |
3020 | else |
3021 | can_issue_more = issue_rate; | |
e1306f49 | 3022 | |
fae15c93 | 3023 | first_cycle_insn_p = 1; |
30028c85 | 3024 | cycle_issued_insns = 0; |
fae15c93 | 3025 | for (;;) |
e1306f49 | 3026 | { |
fae15c93 VM |
3027 | rtx insn; |
3028 | int cost; | |
d57f1617 | 3029 | bool asm_p = false; |
fae15c93 | 3030 | |
6d0de005 | 3031 | if (sched_verbose >= 2) |
fae15c93 | 3032 | { |
496d7bb0 | 3033 | fprintf (sched_dump, ";;\tReady list (t = %3d): ", |
fae15c93 VM |
3034 | clock_var); |
3035 | debug_ready_list (&ready); | |
ce18efcb VM |
3036 | if (sched_pressure_p) |
3037 | print_curr_reg_pressure (); | |
fae15c93 VM |
3038 | } |
3039 | ||
b8698a0f L |
3040 | if (ready.n_ready == 0 |
3041 | && can_issue_more | |
3042 | && reload_completed) | |
fae15c93 | 3043 | { |
fa0aee89 PB |
3044 | /* Allow scheduling insns directly from the queue in case |
3045 | there's nothing better to do (ready list is empty) but | |
3046 | there are still vacant dispatch slots in the current cycle. */ | |
3047 | if (sched_verbose >= 6) | |
62e5bf5d | 3048 | fprintf (sched_dump,";;\t\tSecond chance\n"); |
fa0aee89 PB |
3049 | memcpy (temp_state, curr_state, dfa_state_size); |
3050 | if (early_queue_to_ready (temp_state, &ready)) | |
3051 | ready_sort (&ready); | |
fae15c93 | 3052 | } |
569fa502 | 3053 | |
b5b8b0ac AO |
3054 | if (ready.n_ready == 0 |
3055 | || !can_issue_more | |
fa0aee89 PB |
3056 | || state_dead_lock_p (curr_state) |
3057 | || !(*current_sched_info->schedule_more_p) ()) | |
3058 | break; | |
1d088dee | 3059 | |
fa0aee89 PB |
3060 | /* Select and remove the insn from the ready list. */ |
3061 | if (sort_p) | |
496d7bb0 | 3062 | { |
b631c5f7 SP |
3063 | int res; |
3064 | ||
3065 | insn = NULL_RTX; | |
3066 | res = choose_ready (&ready, &insn); | |
3067 | ||
3068 | if (res < 0) | |
3069 | /* Finish cycle. */ | |
3070 | break; | |
3071 | if (res > 0) | |
3072 | /* Restart choose_ready (). */ | |
496d7bb0 | 3073 | continue; |
b631c5f7 SP |
3074 | |
3075 | gcc_assert (insn != NULL_RTX); | |
496d7bb0 | 3076 | } |
fa0aee89 PB |
3077 | else |
3078 | insn = ready_remove_first (&ready); | |
1d088dee | 3079 | |
ce18efcb VM |
3080 | if (sched_pressure_p && INSN_TICK (insn) > clock_var) |
3081 | { | |
3082 | ready_add (&ready, insn, true); | |
3083 | advance = 1; | |
3084 | break; | |
3085 | } | |
3086 | ||
fa0aee89 PB |
3087 | if (targetm.sched.dfa_new_cycle |
3088 | && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, | |
3089 | insn, last_clock_var, | |
3090 | clock_var, &sort_p)) | |
63f54b1a MK |
3091 | /* SORT_P is used by the target to override sorting |
3092 | of the ready list. This is needed when the target | |
3093 | has modified its internal structures expecting that | |
3094 | the insn will be issued next. As we need the insn | |
3095 | to have the highest priority (so it will be returned by | |
3096 | the ready_remove_first call above), we invoke | |
3097 | ready_add (&ready, insn, true). | |
b8698a0f L |
3098 | But, still, there is one issue: INSN can be later |
3099 | discarded by scheduler's front end through | |
63f54b1a | 3100 | current_sched_info->can_schedule_ready_p, hence, won't |
b8698a0f | 3101 | be issued next. */ |
fa0aee89 | 3102 | { |
63f54b1a MK |
3103 | ready_add (&ready, insn, true); |
3104 | break; | |
fa0aee89 | 3105 | } |
1d088dee | 3106 | |
fa0aee89 PB |
3107 | sort_p = TRUE; |
3108 | memcpy (temp_state, curr_state, dfa_state_size); | |
3109 | if (recog_memoized (insn) < 0) | |
3110 | { | |
3111 | asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT | |
3112 | || asm_noperands (PATTERN (insn)) >= 0); | |
3113 | if (!first_cycle_insn_p && asm_p) | |
fa10beec | 3114 | /* This is asm insn which is tried to be issued on the |
fa0aee89 PB |
3115 | cycle not first. Issue it on the next cycle. */ |
3116 | cost = 1; | |
fae15c93 | 3117 | else |
fa0aee89 PB |
3118 | /* A USE insn, or something else we don't need to |
3119 | understand. We can't pass these directly to | |
3120 | state_transition because it will trigger a | |
3121 | fatal error for unrecognizable insns. */ | |
3122 | cost = 0; | |
3123 | } | |
ce18efcb VM |
3124 | else if (sched_pressure_p) |
3125 | cost = 0; | |
fa0aee89 PB |
3126 | else |
3127 | { | |
3128 | cost = state_transition (temp_state, insn); | |
3129 | if (cost < 0) | |
3130 | cost = 0; | |
3131 | else if (cost == 0) | |
3132 | cost = 1; | |
fae15c93 | 3133 | } |
e1306f49 | 3134 | |
b4ead7d4 BS |
3135 | if (cost >= 1) |
3136 | { | |
3137 | queue_insn (insn, cost); | |
dab80c81 AK |
3138 | if (SCHED_GROUP_P (insn)) |
3139 | { | |
3140 | advance = cost; | |
3141 | break; | |
3142 | } | |
b8698a0f | 3143 | |
b4ead7d4 BS |
3144 | continue; |
3145 | } | |
e1306f49 | 3146 | |
496d7bb0 MK |
3147 | if (current_sched_info->can_schedule_ready_p |
3148 | && ! (*current_sched_info->can_schedule_ready_p) (insn)) | |
3149 | /* We normally get here only if we don't want to move | |
3150 | insn from the split block. */ | |
3151 | { | |
3152 | TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP; | |
3153 | continue; | |
3154 | } | |
3155 | ||
b8698a0f L |
3156 | /* DECISION is made. */ |
3157 | ||
496d7bb0 MK |
3158 | if (TODO_SPEC (insn) & SPECULATIVE) |
3159 | generate_recovery_code (insn); | |
3160 | ||
b8698a0f | 3161 | if (control_flow_insn_p (last_scheduled_insn) |
6fb5fa3c | 3162 | /* This is used to switch basic blocks by request |
496d7bb0 MK |
3163 | from scheduler front-end (actually, sched-ebb.c only). |
3164 | This is used to process blocks with single fallthru | |
917f1b7e | 3165 | edge. If succeeding block has jump, it [jump] will try |
496d7bb0 MK |
3166 | move at the end of current bb, thus corrupting CFG. */ |
3167 | || current_sched_info->advance_target_bb (*target_bb, insn)) | |
3168 | { | |
3169 | *target_bb = current_sched_info->advance_target_bb | |
3170 | (*target_bb, 0); | |
b8698a0f | 3171 | |
496d7bb0 MK |
3172 | if (sched_verbose) |
3173 | { | |
3174 | rtx x; | |
8c660648 | 3175 | |
496d7bb0 MK |
3176 | x = next_real_insn (last_scheduled_insn); |
3177 | gcc_assert (x); | |
3178 | dump_new_block_header (1, *target_bb, x, tail); | |
3179 | } | |
8c660648 | 3180 | |
496d7bb0 MK |
3181 | last_scheduled_insn = bb_note (*target_bb); |
3182 | } | |
b8698a0f | 3183 | |
496d7bb0 MK |
3184 | /* Update counters, etc in the scheduler's front end. */ |
3185 | (*current_sched_info->begin_schedule_ready) (insn, | |
3186 | last_scheduled_insn); | |
b8698a0f | 3187 | |
e855c69d AB |
3188 | move_insn (insn, last_scheduled_insn, current_sched_info->next_tail); |
3189 | reemit_notes (insn); | |
496d7bb0 | 3190 | last_scheduled_insn = insn; |
b8698a0f | 3191 | |
fa0aee89 | 3192 | if (memcmp (curr_state, temp_state, dfa_state_size) != 0) |
63f54b1a MK |
3193 | { |
3194 | cycle_issued_insns++; | |
3195 | memcpy (curr_state, temp_state, dfa_state_size); | |
3196 | } | |
1d088dee | 3197 | |
c237e94a ZW |
3198 | if (targetm.sched.variable_issue) |
3199 | can_issue_more = | |
5fd9b178 | 3200 | targetm.sched.variable_issue (sched_dump, sched_verbose, |
b5b8b0ac | 3201 | insn, can_issue_more); |
85d69216 JL |
3202 | /* A naked CLOBBER or USE generates no instruction, so do |
3203 | not count them against the issue rate. */ | |
3204 | else if (GET_CODE (PATTERN (insn)) != USE | |
3205 | && GET_CODE (PATTERN (insn)) != CLOBBER) | |
c237e94a | 3206 | can_issue_more--; |
63f54b1a | 3207 | advance = schedule_insn (insn); |
d57f1617 VM |
3208 | |
3209 | /* After issuing an asm insn we should start a new cycle. */ | |
3210 | if (advance == 0 && asm_p) | |
3211 | advance = 1; | |
58fb7809 VM |
3212 | if (advance != 0) |
3213 | break; | |
8c660648 | 3214 | |
fae15c93 VM |
3215 | first_cycle_insn_p = 0; |
3216 | ||
30028c85 VM |
3217 | /* Sort the ready list based on priority. This must be |
3218 | redone here, as schedule_insn may have readied additional | |
32dd366d | 3219 | insns that will not be sorted correctly. */ |
30028c85 VM |
3220 | if (ready.n_ready > 0) |
3221 | ready_sort (&ready); | |
3222 | ||
b5b8b0ac AO |
3223 | /* Quickly go through debug insns such that md sched |
3224 | reorder2 doesn't have to deal with debug insns. */ | |
3225 | if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)) | |
3226 | && (*current_sched_info->schedule_more_p) ()) | |
3227 | { | |
3228 | if (control_flow_insn_p (last_scheduled_insn)) | |
3229 | { | |
3230 | *target_bb = current_sched_info->advance_target_bb | |
3231 | (*target_bb, 0); | |
3232 | ||
3233 | if (sched_verbose) | |
3234 | { | |
3235 | rtx x; | |
3236 | ||
3237 | x = next_real_insn (last_scheduled_insn); | |
3238 | gcc_assert (x); | |
3239 | dump_new_block_header (1, *target_bb, x, tail); | |
3240 | } | |
3241 | ||
3242 | last_scheduled_insn = bb_note (*target_bb); | |
3243 | } | |
3244 | ||
3245 | while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))) | |
3246 | { | |
3247 | insn = ready_remove_first (&ready); | |
3248 | gcc_assert (DEBUG_INSN_P (insn)); | |
3249 | (*current_sched_info->begin_schedule_ready) | |
3250 | (insn, last_scheduled_insn); | |
3251 | move_insn (insn, last_scheduled_insn, | |
3252 | current_sched_info->next_tail); | |
3253 | advance = schedule_insn (insn); | |
3254 | last_scheduled_insn = insn; | |
3255 | gcc_assert (advance == 0); | |
3256 | if (ready.n_ready > 0) | |
3257 | ready_sort (&ready); | |
3258 | } | |
3259 | } | |
3260 | ||
58fb7809 VM |
3261 | if (targetm.sched.reorder2 |
3262 | && (ready.n_ready == 0 | |
3263 | || !SCHED_GROUP_P (ready_element (&ready, 0)))) | |
c237e94a | 3264 | { |
c237e94a | 3265 | can_issue_more = |
5fd9b178 KH |
3266 | targetm.sched.reorder2 (sched_dump, sched_verbose, |
3267 | ready.n_ready | |
3268 | ? ready_lastpos (&ready) : NULL, | |
3269 | &ready.n_ready, clock_var); | |
c237e94a | 3270 | } |
b4ead7d4 | 3271 | } |
b4ead7d4 | 3272 | } |
8c660648 | 3273 | |
b4ead7d4 BS |
3274 | /* Debug info. */ |
3275 | if (sched_verbose) | |
3276 | { | |
3277 | fprintf (sched_dump, ";;\tReady list (final): "); | |
3278 | debug_ready_list (&ready); | |
b4ead7d4 | 3279 | } |
8c660648 | 3280 | |
63f54b1a MK |
3281 | if (current_sched_info->queue_must_finish_empty) |
3282 | /* Sanity check -- queue must be empty now. Meaningless if region has | |
3283 | multiple bbs. */ | |
b5b8b0ac | 3284 | gcc_assert (!q_size && !ready.n_ready && !ready.n_debug); |
b8698a0f | 3285 | else |
30028c85 | 3286 | { |
63f54b1a MK |
3287 | /* We must maintain QUEUE_INDEX between blocks in region. */ |
3288 | for (i = ready.n_ready - 1; i >= 0; i--) | |
496d7bb0 MK |
3289 | { |
3290 | rtx x; | |
b8698a0f | 3291 | |
496d7bb0 MK |
3292 | x = ready_element (&ready, i); |
3293 | QUEUE_INDEX (x) = QUEUE_NOWHERE; | |
3294 | TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP; | |
3295 | } | |
63f54b1a | 3296 | |
b8698a0f | 3297 | if (q_size) |
63f54b1a MK |
3298 | for (i = 0; i <= max_insn_queue_index; i++) |
3299 | { | |
3300 | rtx link; | |
3301 | for (link = insn_queue[i]; link; link = XEXP (link, 1)) | |
496d7bb0 MK |
3302 | { |
3303 | rtx x; | |
3304 | ||
3305 | x = XEXP (link, 0); | |
3306 | QUEUE_INDEX (x) = QUEUE_NOWHERE; | |
3307 | TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP; | |
3308 | } | |
63f54b1a MK |
3309 | free_INSN_LIST_list (&insn_queue[i]); |
3310 | } | |
496d7bb0 | 3311 | } |
1d088dee | 3312 | |
e855c69d AB |
3313 | if (sched_verbose) |
3314 | fprintf (sched_dump, ";; total time = %d\n", clock_var); | |
3315 | ||
496d7bb0 | 3316 | if (!current_sched_info->queue_must_finish_empty |
e2f6ff94 | 3317 | || haifa_recovery_bb_recently_added_p) |
496d7bb0 | 3318 | { |
30028c85 VM |
3319 | /* INSN_TICK (minimum clock tick at which the insn becomes |
3320 | ready) may be not correct for the insn in the subsequent | |
3321 | blocks of the region. We should use a correct value of | |
3322 | `clock_var' or modify INSN_TICK. It is better to keep | |
3323 | clock_var value equal to 0 at the start of a basic block. | |
3324 | Therefore we modify INSN_TICK here. */ | |
63f54b1a | 3325 | fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn); |
30028c85 VM |
3326 | } |
3327 | ||
63f54b1a | 3328 | if (targetm.sched.md_finish) |
e2f6ff94 MK |
3329 | { |
3330 | targetm.sched.md_finish (sched_dump, sched_verbose); | |
fa10beec | 3331 | /* Target might have added some instructions to the scheduled block |
e2f6ff94 MK |
3332 | in its md_finish () hook. These new insns don't have any data |
3333 | initialized and to identify them we extend h_i_d so that they'll | |
e855c69d AB |
3334 | get zero luids. */ |
3335 | sched_init_luids (NULL, NULL, NULL, NULL); | |
e2f6ff94 | 3336 | } |
63f54b1a | 3337 | |
e855c69d AB |
3338 | if (sched_verbose) |
3339 | fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n", | |
3340 | INSN_UID (head), INSN_UID (tail)); | |
3341 | ||
63f54b1a MK |
3342 | /* Update head/tail boundaries. */ |
3343 | head = NEXT_INSN (prev_head); | |
3344 | tail = last_scheduled_insn; | |
3345 | ||
e855c69d | 3346 | head = restore_other_notes (head, NULL); |
8c660648 | 3347 | |
b4ead7d4 BS |
3348 | current_sched_info->head = head; |
3349 | current_sched_info->tail = tail; | |
8c660648 | 3350 | } |
b4ead7d4 | 3351 | \f |
63de6c74 | 3352 | /* Set_priorities: compute priority of each insn in the block. */ |
8c660648 | 3353 | |
b4ead7d4 | 3354 | int |
1d088dee | 3355 | set_priorities (rtx head, rtx tail) |
8c660648 JL |
3356 | { |
3357 | rtx insn; | |
3358 | int n_insn; | |
b8698a0f | 3359 | int sched_max_insns_priority = |
79ae11c4 | 3360 | current_sched_info->sched_max_insns_priority; |
8c660648 | 3361 | rtx prev_head; |
8c660648 | 3362 | |
b5b8b0ac AO |
3363 | if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head))) |
3364 | gcc_unreachable (); | |
8c660648 JL |
3365 | |
3366 | n_insn = 0; | |
63f54b1a MK |
3367 | |
3368 | prev_head = PREV_INSN (head); | |
8c660648 JL |
3369 | for (insn = tail; insn != prev_head; insn = PREV_INSN (insn)) |
3370 | { | |
63f54b1a | 3371 | if (!INSN_P (insn)) |
8c660648 JL |
3372 | continue; |
3373 | ||
58fb7809 | 3374 | n_insn++; |
8c660648 | 3375 | (void) priority (insn); |
79ae11c4 | 3376 | |
916fa4f0 MK |
3377 | gcc_assert (INSN_PRIORITY_KNOWN (insn)); |
3378 | ||
3379 | sched_max_insns_priority = MAX (sched_max_insns_priority, | |
3380 | INSN_PRIORITY (insn)); | |
8c660648 | 3381 | } |
63f54b1a MK |
3382 | |
3383 | current_sched_info->sched_max_insns_priority = sched_max_insns_priority; | |
8c660648 JL |
3384 | |
3385 | return n_insn; | |
3386 | } | |
3387 | ||
e855c69d AB |
3388 | /* Set dump and sched_verbose for the desired debugging output. If no |
3389 | dump-file was specified, but -fsched-verbose=N (any N), print to stderr. | |
3390 | For -fsched-verbose=N, N>=10, print everything to stderr. */ | |
3391 | void | |
3392 | setup_sched_dump (void) | |
3393 | { | |
3394 | sched_verbose = sched_verbose_param; | |
3395 | if (sched_verbose_param == 0 && dump_file) | |
3396 | sched_verbose = 1; | |
3397 | sched_dump = ((sched_verbose_param >= 10 || !dump_file) | |
3398 | ? stderr : dump_file); | |
3399 | } | |
496d7bb0 | 3400 | |
b8698a0f | 3401 | /* Initialize some global state for the scheduler. This function works |
e855c69d AB |
3402 | with the common data shared between all the schedulers. It is called |
3403 | from the scheduler specific initialization routine. */ | |
8c660648 | 3404 | |
b4ead7d4 | 3405 | void |
10d22567 | 3406 | sched_init (void) |
8c660648 | 3407 | { |
63de6c74 | 3408 | /* Disable speculative loads in their presence if cc0 defined. */ |
8c660648 JL |
3409 | #ifdef HAVE_cc0 |
3410 | flag_schedule_speculative_load = 0; | |
3411 | #endif | |
3412 | ||
ce18efcb VM |
3413 | sched_pressure_p = (flag_sched_pressure && ! reload_completed |
3414 | && common_sched_info->sched_pass_id == SCHED_RGN_PASS); | |
3415 | if (sched_pressure_p) | |
3416 | ira_setup_eliminable_regset (); | |
3417 | ||
496d7bb0 MK |
3418 | /* Initialize SPEC_INFO. */ |
3419 | if (targetm.sched.set_sched_flags) | |
3420 | { | |
3421 | spec_info = &spec_info_var; | |
3422 | targetm.sched.set_sched_flags (spec_info); | |
e855c69d AB |
3423 | |
3424 | if (spec_info->mask != 0) | |
3425 | { | |
3426 | spec_info->data_weakness_cutoff = | |
3427 | (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100; | |
3428 | spec_info->control_weakness_cutoff = | |
3429 | (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) | |
3430 | * REG_BR_PROB_BASE) / 100; | |
3431 | } | |
496d7bb0 | 3432 | else |
6fc0bb99 | 3433 | /* So we won't read anything accidentally. */ |
e855c69d AB |
3434 | spec_info = NULL; |
3435 | ||
496d7bb0 MK |
3436 | } |
3437 | else | |
6fc0bb99 | 3438 | /* So we won't read anything accidentally. */ |
496d7bb0 MK |
3439 | spec_info = 0; |
3440 | ||
63de6c74 | 3441 | /* Initialize issue_rate. */ |
c237e94a | 3442 | if (targetm.sched.issue_rate) |
5fd9b178 | 3443 | issue_rate = targetm.sched.issue_rate (); |
c237e94a ZW |
3444 | else |
3445 | issue_rate = 1; | |
8c660648 | 3446 | |
880efc46 VM |
3447 | if (cached_issue_rate != issue_rate) |
3448 | { | |
3449 | cached_issue_rate = issue_rate; | |
3450 | /* To invalidate max_lookahead_tries: */ | |
3451 | cached_first_cycle_multipass_dfa_lookahead = 0; | |
3452 | } | |
3453 | ||
e855c69d AB |
3454 | if (targetm.sched.first_cycle_multipass_dfa_lookahead) |
3455 | dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead (); | |
3456 | else | |
3457 | dfa_lookahead = 0; | |
fae15c93 | 3458 | |
fa0aee89 PB |
3459 | if (targetm.sched.init_dfa_pre_cycle_insn) |
3460 | targetm.sched.init_dfa_pre_cycle_insn (); | |
1d088dee | 3461 | |
fa0aee89 PB |
3462 | if (targetm.sched.init_dfa_post_cycle_insn) |
3463 | targetm.sched.init_dfa_post_cycle_insn (); | |
1d088dee | 3464 | |
fa0aee89 PB |
3465 | dfa_start (); |
3466 | dfa_state_size = state_size (); | |
fae15c93 | 3467 | |
e855c69d | 3468 | init_alias_analysis (); |
f77e39fc | 3469 | |
e855c69d AB |
3470 | df_set_flags (DF_LR_RUN_DCE); |
3471 | df_note_add_problem (); | |
f77e39fc | 3472 | |
e855c69d AB |
3473 | /* More problems needed for interloop dep calculation in SMS. */ |
3474 | if (common_sched_info->sched_pass_id == SCHED_SMS_PASS) | |
3475 | { | |
3476 | df_rd_add_problem (); | |
3477 | df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN); | |
3478 | } | |
7a403706 | 3479 | |
e855c69d | 3480 | df_analyze (); |
b8698a0f L |
3481 | |
3482 | /* Do not run DCE after reload, as this can kill nops inserted | |
e855c69d AB |
3483 | by bundling. */ |
3484 | if (reload_completed) | |
3485 | df_clear_flags (DF_LR_RUN_DCE); | |
a88f02e7 | 3486 | |
e855c69d | 3487 | regstat_compute_calls_crossed (); |
a88f02e7 | 3488 | |
e855c69d AB |
3489 | if (targetm.sched.md_init_global) |
3490 | targetm.sched.md_init_global (sched_dump, sched_verbose, | |
3491 | get_max_uid () + 1); | |
a88f02e7 | 3492 | |
ce18efcb VM |
3493 | if (sched_pressure_p) |
3494 | { | |
3495 | int i, max_regno = max_reg_num (); | |
3496 | ||
3497 | ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL); | |
3498 | sched_regno_cover_class | |
3499 | = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class)); | |
3500 | for (i = 0; i < max_regno; i++) | |
3501 | sched_regno_cover_class[i] | |
3502 | = (i < FIRST_PSEUDO_REGISTER | |
3503 | ? ira_class_translate[REGNO_REG_CLASS (i)] | |
3504 | : reg_cover_class (i)); | |
3505 | curr_reg_live = BITMAP_ALLOC (NULL); | |
3506 | saved_reg_live = BITMAP_ALLOC (NULL); | |
3507 | region_ref_regs = BITMAP_ALLOC (NULL); | |
3508 | } | |
b8698a0f | 3509 | |
e855c69d AB |
3510 | curr_state = xmalloc (dfa_state_size); |
3511 | } | |
58565a33 | 3512 | |
e855c69d | 3513 | static void haifa_init_only_bb (basic_block, basic_block); |
496d7bb0 | 3514 | |
e855c69d AB |
3515 | /* Initialize data structures specific to the Haifa scheduler. */ |
3516 | void | |
3517 | haifa_sched_init (void) | |
3518 | { | |
3519 | setup_sched_dump (); | |
3520 | sched_init (); | |
3521 | ||
3522 | if (spec_info != NULL) | |
3523 | { | |
3524 | sched_deps_info->use_deps_list = 1; | |
3525 | sched_deps_info->generate_spec_deps = 1; | |
3526 | } | |
3527 | ||
3528 | /* Initialize luids, dependency caches, target and h_i_d for the | |
3529 | whole function. */ | |
3530 | { | |
3531 | bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks); | |
3532 | basic_block bb; | |
496d7bb0 | 3533 | |
e855c69d AB |
3534 | sched_init_bbs (); |
3535 | ||
3536 | FOR_EACH_BB (bb) | |
3537 | VEC_quick_push (basic_block, bbs, bb); | |
3538 | sched_init_luids (bbs, NULL, NULL, NULL); | |
3539 | sched_deps_init (true); | |
3540 | sched_extend_target (); | |
3541 | haifa_init_h_i_d (bbs, NULL, NULL, NULL); | |
3542 | ||
3543 | VEC_free (basic_block, heap, bbs); | |
3544 | } | |
3545 | ||
3546 | sched_init_only_bb = haifa_init_only_bb; | |
3547 | sched_split_block = sched_split_block_1; | |
3548 | sched_create_empty_bb = sched_create_empty_bb_1; | |
e2f6ff94 MK |
3549 | haifa_recovery_bb_ever_added_p = false; |
3550 | ||
496d7bb0 | 3551 | #ifdef ENABLE_CHECKING |
e855c69d AB |
3552 | /* This is used preferably for finding bugs in check_cfg () itself. |
3553 | We must call sched_bbs_init () before check_cfg () because check_cfg () | |
3554 | assumes that the last insn in the last bb has a non-null successor. */ | |
496d7bb0 MK |
3555 | check_cfg (0, 0); |
3556 | #endif | |
a88f02e7 | 3557 | |
e855c69d AB |
3558 | nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0; |
3559 | before_recovery = 0; | |
3560 | after_recovery = 0; | |
3561 | } | |
8c660648 | 3562 | |
e855c69d | 3563 | /* Finish work with the data specific to the Haifa scheduler. */ |
a88f02e7 | 3564 | void |
e855c69d | 3565 | haifa_sched_finish (void) |
a88f02e7 | 3566 | { |
e855c69d AB |
3567 | sched_create_empty_bb = NULL; |
3568 | sched_split_block = NULL; | |
3569 | sched_init_only_bb = NULL; | |
58565a33 | 3570 | |
496d7bb0 MK |
3571 | if (spec_info && spec_info->dump) |
3572 | { | |
3573 | char c = reload_completed ? 'a' : 'b'; | |
3574 | ||
3575 | fprintf (spec_info->dump, | |
3576 | ";; %s:\n", current_function_name ()); | |
3577 | ||
3578 | fprintf (spec_info->dump, | |
3579 | ";; Procedure %cr-begin-data-spec motions == %d\n", | |
3580 | c, nr_begin_data); | |
3581 | fprintf (spec_info->dump, | |
3582 | ";; Procedure %cr-be-in-data-spec motions == %d\n", | |
3583 | c, nr_be_in_data); | |
3584 | fprintf (spec_info->dump, | |
3585 | ";; Procedure %cr-begin-control-spec motions == %d\n", | |
3586 | c, nr_begin_control); | |
3587 | fprintf (spec_info->dump, | |
3588 | ";; Procedure %cr-be-in-control-spec motions == %d\n", | |
3589 | c, nr_be_in_control); | |
3590 | } | |
3591 | ||
e855c69d AB |
3592 | /* Finalize h_i_d, dependency caches, and luids for the whole |
3593 | function. Target will be finalized in md_global_finish (). */ | |
3594 | sched_deps_finish (); | |
3595 | sched_finish_luids (); | |
3596 | current_sched_info = NULL; | |
3597 | sched_finish (); | |
3598 | } | |
3599 | ||
b8698a0f | 3600 | /* Free global data used during insn scheduling. This function works with |
e855c69d AB |
3601 | the common data shared between the schedulers. */ |
3602 | ||
3603 | void | |
3604 | sched_finish (void) | |
3605 | { | |
3606 | haifa_finish_h_i_d (); | |
ce18efcb VM |
3607 | if (sched_pressure_p) |
3608 | { | |
3609 | free (sched_regno_cover_class); | |
3610 | BITMAP_FREE (region_ref_regs); | |
3611 | BITMAP_FREE (saved_reg_live); | |
3612 | BITMAP_FREE (curr_reg_live); | |
3613 | } | |
e855c69d AB |
3614 | free (curr_state); |
3615 | ||
3616 | if (targetm.sched.md_finish_global) | |
3617 | targetm.sched.md_finish_global (sched_dump, sched_verbose); | |
3618 | ||
3619 | end_alias_analysis (); | |
3620 | ||
3621 | regstat_free_calls_crossed (); | |
3622 | ||
3623 | dfa_finish (); | |
3624 | ||
496d7bb0 MK |
3625 | #ifdef ENABLE_CHECKING |
3626 | /* After reload ia64 backend clobbers CFG, so can't check anything. */ | |
3627 | if (!reload_completed) | |
3628 | check_cfg (0, 0); | |
3629 | #endif | |
8c660648 | 3630 | } |
63f54b1a MK |
3631 | |
3632 | /* Fix INSN_TICKs of the instructions in the current block as well as | |
917f1b7e | 3633 | INSN_TICKs of their dependents. |
63f54b1a MK |
3634 | HEAD and TAIL are the begin and the end of the current scheduled block. */ |
3635 | static void | |
3636 | fix_inter_tick (rtx head, rtx tail) | |
3637 | { | |
3638 | /* Set of instructions with corrected INSN_TICK. */ | |
3639 | bitmap_head processed; | |
e2f6ff94 MK |
3640 | /* ??? It is doubtful if we should assume that cycle advance happens on |
3641 | basic block boundaries. Basically insns that are unconditionally ready | |
3642 | on the start of the block are more preferable then those which have | |
3643 | a one cycle dependency over insn from the previous block. */ | |
63f54b1a MK |
3644 | int next_clock = clock_var + 1; |
3645 | ||
3646 | bitmap_initialize (&processed, 0); | |
b8698a0f | 3647 | |
63f54b1a MK |
3648 | /* Iterates over scheduled instructions and fix their INSN_TICKs and |
3649 | INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent | |
3650 | across different blocks. */ | |
3651 | for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head)) | |
3652 | { | |
3653 | if (INSN_P (head)) | |
3654 | { | |
3655 | int tick; | |
e2f6ff94 MK |
3656 | sd_iterator_def sd_it; |
3657 | dep_t dep; | |
b8698a0f | 3658 | |
63f54b1a MK |
3659 | tick = INSN_TICK (head); |
3660 | gcc_assert (tick >= MIN_TICK); | |
b8698a0f | 3661 | |
63f54b1a MK |
3662 | /* Fix INSN_TICK of instruction from just scheduled block. */ |
3663 | if (!bitmap_bit_p (&processed, INSN_LUID (head))) | |
3664 | { | |
3665 | bitmap_set_bit (&processed, INSN_LUID (head)); | |
3666 | tick -= next_clock; | |
b8698a0f | 3667 | |
63f54b1a MK |
3668 | if (tick < MIN_TICK) |
3669 | tick = MIN_TICK; | |
b8698a0f L |
3670 | |
3671 | INSN_TICK (head) = tick; | |
63f54b1a | 3672 | } |
b8698a0f | 3673 | |
e2f6ff94 | 3674 | FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep) |
63f54b1a MK |
3675 | { |
3676 | rtx next; | |
b8698a0f | 3677 | |
e2f6ff94 | 3678 | next = DEP_CON (dep); |
63f54b1a MK |
3679 | tick = INSN_TICK (next); |
3680 | ||
3681 | if (tick != INVALID_TICK | |
3682 | /* If NEXT has its INSN_TICK calculated, fix it. | |
3683 | If not - it will be properly calculated from | |
3684 | scratch later in fix_tick_ready. */ | |
3685 | && !bitmap_bit_p (&processed, INSN_LUID (next))) | |
3686 | { | |
3687 | bitmap_set_bit (&processed, INSN_LUID (next)); | |
3688 | tick -= next_clock; | |
b8698a0f | 3689 | |
63f54b1a MK |
3690 | if (tick < MIN_TICK) |
3691 | tick = MIN_TICK; | |
b8698a0f | 3692 | |
63f54b1a MK |
3693 | if (tick > INTER_TICK (next)) |
3694 | INTER_TICK (next) = tick; | |
3695 | else | |
3696 | tick = INTER_TICK (next); | |
e2f6ff94 | 3697 | |
63f54b1a MK |
3698 | INSN_TICK (next) = tick; |
3699 | } | |
3700 | } | |
3701 | } | |
3702 | } | |
3703 | bitmap_clear (&processed); | |
3704 | } | |
e855c69d AB |
3705 | |
3706 | static int haifa_speculate_insn (rtx, ds_t, rtx *); | |
b8698a0f | 3707 | |
63f54b1a MK |
3708 | /* Check if NEXT is ready to be added to the ready or queue list. |
3709 | If "yes", add it to the proper list. | |
3710 | Returns: | |
3711 | -1 - is not ready yet, | |
3712 | 0 - added to the ready list, | |
3713 | 0 < N - queued for N cycles. */ | |
3714 | int | |
3715 | try_ready (rtx next) | |
b8698a0f | 3716 | { |
496d7bb0 | 3717 | ds_t old_ts, *ts; |
63f54b1a | 3718 | |
496d7bb0 MK |
3719 | ts = &TODO_SPEC (next); |
3720 | old_ts = *ts; | |
63f54b1a | 3721 | |
496d7bb0 MK |
3722 | gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP)) |
3723 | && ((old_ts & HARD_DEP) | |
3724 | || (old_ts & SPECULATIVE))); | |
b8698a0f | 3725 | |
e2f6ff94 MK |
3726 | if (sd_lists_empty_p (next, SD_LIST_BACK)) |
3727 | /* NEXT has all its dependencies resolved. */ | |
496d7bb0 | 3728 | { |
e2f6ff94 MK |
3729 | /* Remove HARD_DEP bit from NEXT's status. */ |
3730 | *ts &= ~HARD_DEP; | |
3731 | ||
3732 | if (current_sched_info->flags & DO_SPECULATION) | |
3733 | /* Remove all speculative bits from NEXT's status. */ | |
3734 | *ts &= ~SPECULATIVE; | |
496d7bb0 MK |
3735 | } |
3736 | else | |
3737 | { | |
e2f6ff94 | 3738 | /* One of the NEXT's dependencies has been resolved. |
15dc95cb | 3739 | Recalculate NEXT's status. */ |
e2f6ff94 | 3740 | |
b198261f MK |
3741 | *ts &= ~SPECULATIVE & ~HARD_DEP; |
3742 | ||
e2f6ff94 MK |
3743 | if (sd_lists_empty_p (next, SD_LIST_HARD_BACK)) |
3744 | /* Now we've got NEXT with speculative deps only. | |
3745 | 1. Look at the deps to see what we have to do. | |
3746 | 2. Check if we can do 'todo'. */ | |
3747 | { | |
3748 | sd_iterator_def sd_it; | |
3749 | dep_t dep; | |
3750 | bool first_p = true; | |
b198261f | 3751 | |
e2f6ff94 MK |
3752 | FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep) |
3753 | { | |
3754 | ds_t ds = DEP_STATUS (dep) & SPECULATIVE; | |
3755 | ||
8ee2bec9 AO |
3756 | if (DEBUG_INSN_P (DEP_PRO (dep)) |
3757 | && !DEBUG_INSN_P (next)) | |
3758 | continue; | |
3759 | ||
e2f6ff94 | 3760 | if (first_p) |
b198261f | 3761 | { |
e2f6ff94 | 3762 | first_p = false; |
496d7bb0 | 3763 | |
e2f6ff94 MK |
3764 | *ts = ds; |
3765 | } | |
3766 | else | |
3767 | *ts = ds_merge (*ts, ds); | |
496d7bb0 | 3768 | } |
e2f6ff94 | 3769 | |
e855c69d | 3770 | if (ds_weak (*ts) < spec_info->data_weakness_cutoff) |
e2f6ff94 MK |
3771 | /* Too few points. */ |
3772 | *ts = (*ts & ~SPECULATIVE) | HARD_DEP; | |
3773 | } | |
3774 | else | |
3775 | *ts |= HARD_DEP; | |
496d7bb0 | 3776 | } |
b198261f | 3777 | |
496d7bb0 MK |
3778 | if (*ts & HARD_DEP) |
3779 | gcc_assert (*ts == old_ts | |
3780 | && QUEUE_INDEX (next) == QUEUE_NOWHERE); | |
3781 | else if (current_sched_info->new_ready) | |
b198261f | 3782 | *ts = current_sched_info->new_ready (next, *ts); |
496d7bb0 | 3783 | |
b198261f | 3784 | /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might |
496d7bb0 MK |
3785 | have its original pattern or changed (speculative) one. This is due |
3786 | to changing ebb in region scheduling. | |
3787 | * But if (old_ts & SPECULATIVE), then we are pretty sure that insn | |
3788 | has speculative pattern. | |
b198261f | 3789 | |
496d7bb0 MK |
3790 | We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because |
3791 | control-speculative NEXT could have been discarded by sched-rgn.c | |
3792 | (the same case as when discarded by can_schedule_ready_p ()). */ | |
b198261f | 3793 | |
496d7bb0 | 3794 | if ((*ts & SPECULATIVE) |
b198261f | 3795 | /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't |
496d7bb0 MK |
3796 | need to change anything. */ |
3797 | && *ts != old_ts) | |
3798 | { | |
3799 | int res; | |
3800 | rtx new_pat; | |
b8698a0f | 3801 | |
496d7bb0 | 3802 | gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE)); |
b8698a0f | 3803 | |
e855c69d | 3804 | res = haifa_speculate_insn (next, *ts, &new_pat); |
b8698a0f | 3805 | |
496d7bb0 MK |
3806 | switch (res) |
3807 | { | |
3808 | case -1: | |
3809 | /* It would be nice to change DEP_STATUS of all dependences, | |
3810 | which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP, | |
3811 | so we won't reanalyze anything. */ | |
3812 | *ts = (*ts & ~SPECULATIVE) | HARD_DEP; | |
3813 | break; | |
b8698a0f | 3814 | |
496d7bb0 MK |
3815 | case 0: |
3816 | /* We follow the rule, that every speculative insn | |
3817 | has non-null ORIG_PAT. */ | |
3818 | if (!ORIG_PAT (next)) | |
3819 | ORIG_PAT (next) = PATTERN (next); | |
3820 | break; | |
b8698a0f L |
3821 | |
3822 | case 1: | |
496d7bb0 MK |
3823 | if (!ORIG_PAT (next)) |
3824 | /* If we gonna to overwrite the original pattern of insn, | |
3825 | save it. */ | |
3826 | ORIG_PAT (next) = PATTERN (next); | |
b8698a0f | 3827 | |
e855c69d | 3828 | haifa_change_pattern (next, new_pat); |
496d7bb0 | 3829 | break; |
b8698a0f | 3830 | |
496d7bb0 MK |
3831 | default: |
3832 | gcc_unreachable (); | |
3833 | } | |
3834 | } | |
b8698a0f | 3835 | |
496d7bb0 MK |
3836 | /* We need to restore pattern only if (*ts == 0), because otherwise it is |
3837 | either correct (*ts & SPECULATIVE), | |
3838 | or we simply don't care (*ts & HARD_DEP). */ | |
b8698a0f | 3839 | |
496d7bb0 | 3840 | gcc_assert (!ORIG_PAT (next) |
d7bfd907 | 3841 | || !IS_SPECULATION_BRANCHY_CHECK_P (next)); |
b8698a0f | 3842 | |
496d7bb0 MK |
3843 | if (*ts & HARD_DEP) |
3844 | { | |
3845 | /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because | |
3846 | control-speculative NEXT could have been discarded by sched-rgn.c | |
3847 | (the same case as when discarded by can_schedule_ready_p ()). */ | |
3848 | /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/ | |
b8698a0f | 3849 | |
496d7bb0 MK |
3850 | change_queue_index (next, QUEUE_NOWHERE); |
3851 | return -1; | |
3852 | } | |
d7bfd907 | 3853 | else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next)) |
b8698a0f | 3854 | /* We should change pattern of every previously speculative |
682b6a9e | 3855 | instruction - and we determine if NEXT was speculative by using |
d7bfd907 MK |
3856 | ORIG_PAT field. Except one case - speculation checks have ORIG_PAT |
3857 | pat too, so skip them. */ | |
682b6a9e | 3858 | { |
e855c69d | 3859 | haifa_change_pattern (next, ORIG_PAT (next)); |
682b6a9e MK |
3860 | ORIG_PAT (next) = 0; |
3861 | } | |
496d7bb0 MK |
3862 | |
3863 | if (sched_verbose >= 2) | |
b8698a0f | 3864 | { |
496d7bb0 | 3865 | int s = TODO_SPEC (next); |
b8698a0f | 3866 | |
496d7bb0 MK |
3867 | fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s", |
3868 | (*current_sched_info->print_insn) (next, 0)); | |
b8698a0f | 3869 | |
496d7bb0 MK |
3870 | if (spec_info && spec_info->dump) |
3871 | { | |
3872 | if (s & BEGIN_DATA) | |
3873 | fprintf (spec_info->dump, "; data-spec;"); | |
3874 | if (s & BEGIN_CONTROL) | |
3875 | fprintf (spec_info->dump, "; control-spec;"); | |
3876 | if (s & BE_IN_CONTROL) | |
3877 | fprintf (spec_info->dump, "; in-control-spec;"); | |
3878 | } | |
3879 | ||
3880 | fprintf (sched_dump, "\n"); | |
b8698a0f L |
3881 | } |
3882 | ||
496d7bb0 | 3883 | adjust_priority (next); |
b8698a0f | 3884 | |
496d7bb0 MK |
3885 | return fix_tick_ready (next); |
3886 | } | |
3887 | ||
3888 | /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */ | |
3889 | static int | |
63f54b1a MK |
3890 | fix_tick_ready (rtx next) |
3891 | { | |
63f54b1a MK |
3892 | int tick, delay; |
3893 | ||
e2f6ff94 | 3894 | if (!sd_lists_empty_p (next, SD_LIST_RES_BACK)) |
63f54b1a MK |
3895 | { |
3896 | int full_p; | |
e2f6ff94 MK |
3897 | sd_iterator_def sd_it; |
3898 | dep_t dep; | |
63f54b1a MK |
3899 | |
3900 | tick = INSN_TICK (next); | |
496d7bb0 | 3901 | /* if tick is not equal to INVALID_TICK, then update |
63f54b1a | 3902 | INSN_TICK of NEXT with the most recent resolved dependence |
917f1b7e | 3903 | cost. Otherwise, recalculate from scratch. */ |
b198261f MK |
3904 | full_p = (tick == INVALID_TICK); |
3905 | ||
e2f6ff94 | 3906 | FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep) |
b8698a0f | 3907 | { |
b198261f | 3908 | rtx pro = DEP_PRO (dep); |
63f54b1a | 3909 | int tick1; |
b8698a0f | 3910 | |
63f54b1a | 3911 | gcc_assert (INSN_TICK (pro) >= MIN_TICK); |
496d7bb0 | 3912 | |
b198261f | 3913 | tick1 = INSN_TICK (pro) + dep_cost (dep); |
63f54b1a MK |
3914 | if (tick1 > tick) |
3915 | tick = tick1; | |
b198261f MK |
3916 | |
3917 | if (!full_p) | |
3918 | break; | |
63f54b1a | 3919 | } |
63f54b1a MK |
3920 | } |
3921 | else | |
3922 | tick = -1; | |
3923 | ||
3924 | INSN_TICK (next) = tick; | |
3925 | ||
3926 | delay = tick - clock_var; | |
ce18efcb | 3927 | if (delay <= 0 || sched_pressure_p) |
63f54b1a MK |
3928 | delay = QUEUE_READY; |
3929 | ||
3930 | change_queue_index (next, delay); | |
496d7bb0 | 3931 | |
63f54b1a MK |
3932 | return delay; |
3933 | } | |
3934 | ||
3935 | /* Move NEXT to the proper queue list with (DELAY >= 1), | |
3936 | or add it to the ready list (DELAY == QUEUE_READY), | |
3937 | or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */ | |
3938 | static void | |
3939 | change_queue_index (rtx next, int delay) | |
3940 | { | |
3941 | int i = QUEUE_INDEX (next); | |
3942 | ||
b8698a0f | 3943 | gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index |
63f54b1a MK |
3944 | && delay != 0); |
3945 | gcc_assert (i != QUEUE_SCHEDULED); | |
b8698a0f | 3946 | |
63f54b1a MK |
3947 | if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i) |
3948 | || (delay < 0 && delay == i)) | |
3949 | /* We have nothing to do. */ | |
3950 | return; | |
3951 | ||
917f1b7e | 3952 | /* Remove NEXT from wherever it is now. */ |
63f54b1a MK |
3953 | if (i == QUEUE_READY) |
3954 | ready_remove_insn (next); | |
3955 | else if (i >= 0) | |
3956 | queue_remove (next); | |
b8698a0f | 3957 | |
63f54b1a MK |
3958 | /* Add it to the proper place. */ |
3959 | if (delay == QUEUE_READY) | |
3960 | ready_add (readyp, next, false); | |
3961 | else if (delay >= 1) | |
3962 | queue_insn (next, delay); | |
b8698a0f | 3963 | |
63f54b1a | 3964 | if (sched_verbose >= 2) |
b8698a0f | 3965 | { |
63f54b1a MK |
3966 | fprintf (sched_dump, ";;\t\ttick updated: insn %s", |
3967 | (*current_sched_info->print_insn) (next, 0)); | |
b8698a0f | 3968 | |
63f54b1a MK |
3969 | if (delay == QUEUE_READY) |
3970 | fprintf (sched_dump, " into ready\n"); | |
3971 | else if (delay >= 1) | |
3972 | fprintf (sched_dump, " into queue with cost=%d\n", delay); | |
3973 | else | |
3974 | fprintf (sched_dump, " removed from ready or queue lists\n"); | |
3975 | } | |
3976 | } | |
3977 | ||
e855c69d | 3978 | static int sched_ready_n_insns = -1; |
496d7bb0 | 3979 | |
e855c69d AB |
3980 | /* Initialize per region data structures. */ |
3981 | void | |
3982 | sched_extend_ready_list (int new_sched_ready_n_insns) | |
496d7bb0 MK |
3983 | { |
3984 | int i; | |
3985 | ||
e855c69d AB |
3986 | if (sched_ready_n_insns == -1) |
3987 | /* At the first call we need to initialize one more choice_stack | |
3988 | entry. */ | |
3989 | { | |
3990 | i = 0; | |
3991 | sched_ready_n_insns = 0; | |
3992 | } | |
3993 | else | |
3994 | i = sched_ready_n_insns + 1; | |
496d7bb0 | 3995 | |
e855c69d AB |
3996 | ready.veclen = new_sched_ready_n_insns + issue_rate; |
3997 | ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen); | |
496d7bb0 | 3998 | |
e855c69d | 3999 | gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns); |
496d7bb0 | 4000 | |
e855c69d AB |
4001 | ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns, |
4002 | sched_ready_n_insns, sizeof (*ready_try)); | |
496d7bb0 | 4003 | |
e855c69d AB |
4004 | /* We allocate +1 element to save initial state in the choice_stack[0] |
4005 | entry. */ | |
4006 | choice_stack = XRESIZEVEC (struct choice_entry, choice_stack, | |
4007 | new_sched_ready_n_insns + 1); | |
e2f6ff94 | 4008 | |
e855c69d AB |
4009 | for (; i <= new_sched_ready_n_insns; i++) |
4010 | choice_stack[i].state = xmalloc (dfa_state_size); | |
496d7bb0 | 4011 | |
e855c69d | 4012 | sched_ready_n_insns = new_sched_ready_n_insns; |
496d7bb0 MK |
4013 | } |
4014 | ||
e855c69d AB |
4015 | /* Free per region data structures. */ |
4016 | void | |
4017 | sched_finish_ready_list (void) | |
a0d33ff8 | 4018 | { |
e855c69d | 4019 | int i; |
a0d33ff8 | 4020 | |
e855c69d AB |
4021 | free (ready.vec); |
4022 | ready.vec = NULL; | |
4023 | ready.veclen = 0; | |
a0d33ff8 | 4024 | |
e855c69d AB |
4025 | free (ready_try); |
4026 | ready_try = NULL; | |
496d7bb0 | 4027 | |
e855c69d AB |
4028 | for (i = 0; i <= sched_ready_n_insns; i++) |
4029 | free (choice_stack [i].state); | |
4030 | free (choice_stack); | |
4031 | choice_stack = NULL; | |
a0d33ff8 | 4032 | |
e855c69d | 4033 | sched_ready_n_insns = -1; |
496d7bb0 MK |
4034 | } |
4035 | ||
e855c69d AB |
4036 | static int |
4037 | haifa_luid_for_non_insn (rtx x) | |
496d7bb0 | 4038 | { |
e855c69d AB |
4039 | gcc_assert (NOTE_P (x) || LABEL_P (x)); |
4040 | ||
4041 | return 0; | |
496d7bb0 MK |
4042 | } |
4043 | ||
4044 | /* Generates recovery code for INSN. */ | |
4045 | static void | |
4046 | generate_recovery_code (rtx insn) | |
4047 | { | |
4048 | if (TODO_SPEC (insn) & BEGIN_SPEC) | |
4049 | begin_speculative_block (insn); | |
b8698a0f | 4050 | |
496d7bb0 MK |
4051 | /* Here we have insn with no dependencies to |
4052 | instructions other then CHECK_SPEC ones. */ | |
b8698a0f | 4053 | |
496d7bb0 MK |
4054 | if (TODO_SPEC (insn) & BE_IN_SPEC) |
4055 | add_to_speculative_block (insn); | |
4056 | } | |
4057 | ||
4058 | /* Helper function. | |
4059 | Tries to add speculative dependencies of type FS between instructions | |
b198261f | 4060 | in deps_list L and TWIN. */ |
496d7bb0 | 4061 | static void |
e2f6ff94 | 4062 | process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs) |
496d7bb0 | 4063 | { |
e2f6ff94 MK |
4064 | sd_iterator_def sd_it; |
4065 | dep_t dep; | |
b198261f | 4066 | |
e2f6ff94 | 4067 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
496d7bb0 MK |
4068 | { |
4069 | ds_t ds; | |
4070 | rtx consumer; | |
4071 | ||
e2f6ff94 | 4072 | consumer = DEP_CON (dep); |
496d7bb0 | 4073 | |
e2f6ff94 | 4074 | ds = DEP_STATUS (dep); |
496d7bb0 | 4075 | |
682b6a9e MK |
4076 | if (/* If we want to create speculative dep. */ |
4077 | fs | |
4078 | /* And we can do that because this is a true dep. */ | |
4079 | && (ds & DEP_TYPES) == DEP_TRUE) | |
4080 | { | |
4081 | gcc_assert (!(ds & BE_IN_SPEC)); | |
4082 | ||
917f1b7e | 4083 | if (/* If this dep can be overcome with 'begin speculation'. */ |
682b6a9e MK |
4084 | ds & BEGIN_SPEC) |
4085 | /* Then we have a choice: keep the dep 'begin speculative' | |
4086 | or transform it into 'be in speculative'. */ | |
4087 | { | |
4088 | if (/* In try_ready we assert that if insn once became ready | |
4089 | it can be removed from the ready (or queue) list only | |
4090 | due to backend decision. Hence we can't let the | |
4091 | probability of the speculative dep to decrease. */ | |
e855c69d | 4092 | ds_weak (ds) <= ds_weak (fs)) |
93b4b4cc MK |
4093 | { |
4094 | ds_t new_ds; | |
4095 | ||
4096 | new_ds = (ds & ~BEGIN_SPEC) | fs; | |
b8698a0f | 4097 | |
93b4b4cc MK |
4098 | if (/* consumer can 'be in speculative'. */ |
4099 | sched_insn_is_legitimate_for_speculation_p (consumer, | |
4100 | new_ds)) | |
4101 | /* Transform it to be in speculative. */ | |
4102 | ds = new_ds; | |
4103 | } | |
682b6a9e MK |
4104 | } |
4105 | else | |
4106 | /* Mark the dep as 'be in speculative'. */ | |
4107 | ds |= fs; | |
4108 | } | |
496d7bb0 | 4109 | |
e2f6ff94 MK |
4110 | { |
4111 | dep_def _new_dep, *new_dep = &_new_dep; | |
4112 | ||
4113 | init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds); | |
4114 | sd_add_dep (new_dep, false); | |
4115 | } | |
496d7bb0 MK |
4116 | } |
4117 | } | |
4118 | ||
4119 | /* Generates recovery code for BEGIN speculative INSN. */ | |
4120 | static void | |
4121 | begin_speculative_block (rtx insn) | |
4122 | { | |
4123 | if (TODO_SPEC (insn) & BEGIN_DATA) | |
b8698a0f | 4124 | nr_begin_data++; |
496d7bb0 MK |
4125 | if (TODO_SPEC (insn) & BEGIN_CONTROL) |
4126 | nr_begin_control++; | |
4127 | ||
4128 | create_check_block_twin (insn, false); | |
4129 | ||
4130 | TODO_SPEC (insn) &= ~BEGIN_SPEC; | |
4131 | } | |
4132 | ||
e855c69d AB |
4133 | static void haifa_init_insn (rtx); |
4134 | ||
496d7bb0 MK |
4135 | /* Generates recovery code for BE_IN speculative INSN. */ |
4136 | static void | |
4137 | add_to_speculative_block (rtx insn) | |
4138 | { | |
4139 | ds_t ts; | |
e2f6ff94 MK |
4140 | sd_iterator_def sd_it; |
4141 | dep_t dep; | |
b198261f | 4142 | rtx twins = NULL; |
916fa4f0 | 4143 | rtx_vec_t priorities_roots; |
496d7bb0 MK |
4144 | |
4145 | ts = TODO_SPEC (insn); | |
4146 | gcc_assert (!(ts & ~BE_IN_SPEC)); | |
4147 | ||
4148 | if (ts & BE_IN_DATA) | |
4149 | nr_be_in_data++; | |
4150 | if (ts & BE_IN_CONTROL) | |
4151 | nr_be_in_control++; | |
4152 | ||
4153 | TODO_SPEC (insn) &= ~BE_IN_SPEC; | |
4154 | gcc_assert (!TODO_SPEC (insn)); | |
b8698a0f | 4155 | |
496d7bb0 MK |
4156 | DONE_SPEC (insn) |= ts; |
4157 | ||
4158 | /* First we convert all simple checks to branchy. */ | |
e2f6ff94 MK |
4159 | for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); |
4160 | sd_iterator_cond (&sd_it, &dep);) | |
496d7bb0 | 4161 | { |
e2f6ff94 | 4162 | rtx check = DEP_PRO (dep); |
496d7bb0 | 4163 | |
d7bfd907 | 4164 | if (IS_SPECULATION_SIMPLE_CHECK_P (check)) |
496d7bb0 MK |
4165 | { |
4166 | create_check_block_twin (check, true); | |
b198261f MK |
4167 | |
4168 | /* Restart search. */ | |
e2f6ff94 | 4169 | sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); |
496d7bb0 MK |
4170 | } |
4171 | else | |
b198261f | 4172 | /* Continue search. */ |
e2f6ff94 | 4173 | sd_iterator_next (&sd_it); |
496d7bb0 MK |
4174 | } |
4175 | ||
916fa4f0 MK |
4176 | priorities_roots = NULL; |
4177 | clear_priorities (insn, &priorities_roots); | |
e2f6ff94 MK |
4178 | |
4179 | while (1) | |
496d7bb0 | 4180 | { |
b198261f | 4181 | rtx check, twin; |
496d7bb0 MK |
4182 | basic_block rec; |
4183 | ||
e2f6ff94 MK |
4184 | /* Get the first backward dependency of INSN. */ |
4185 | sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); | |
4186 | if (!sd_iterator_cond (&sd_it, &dep)) | |
4187 | /* INSN has no backward dependencies left. */ | |
4188 | break; | |
496d7bb0 | 4189 | |
e2f6ff94 MK |
4190 | gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0 |
4191 | && (DEP_STATUS (dep) & BE_IN_SPEC) != 0 | |
4192 | && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE); | |
b198261f | 4193 | |
e2f6ff94 | 4194 | check = DEP_PRO (dep); |
d7bfd907 MK |
4195 | |
4196 | gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check) | |
496d7bb0 | 4197 | && QUEUE_INDEX (check) == QUEUE_NOWHERE); |
e2f6ff94 | 4198 | |
496d7bb0 | 4199 | rec = BLOCK_FOR_INSN (check); |
e2f6ff94 | 4200 | |
05742530 | 4201 | twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec)); |
e855c69d | 4202 | haifa_init_insn (twin); |
496d7bb0 | 4203 | |
e2f6ff94 | 4204 | sd_copy_back_deps (twin, insn, true); |
496d7bb0 MK |
4205 | |
4206 | if (sched_verbose && spec_info->dump) | |
4207 | /* INSN_BB (insn) isn't determined for twin insns yet. | |
4208 | So we can't use current_sched_info->print_insn. */ | |
4209 | fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n", | |
4210 | INSN_UID (twin), rec->index); | |
4211 | ||
4212 | twins = alloc_INSN_LIST (twin, twins); | |
4213 | ||
917f1b7e | 4214 | /* Add dependences between TWIN and all appropriate |
496d7bb0 | 4215 | instructions from REC. */ |
e2f6ff94 MK |
4216 | FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep) |
4217 | { | |
4218 | rtx pro = DEP_PRO (dep); | |
b198261f | 4219 | |
e2f6ff94 MK |
4220 | gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE); |
4221 | ||
4222 | /* INSN might have dependencies from the instructions from | |
4223 | several recovery blocks. At this iteration we process those | |
4224 | producers that reside in REC. */ | |
4225 | if (BLOCK_FOR_INSN (pro) == rec) | |
4226 | { | |
4227 | dep_def _new_dep, *new_dep = &_new_dep; | |
4228 | ||
4229 | init_dep (new_dep, pro, twin, REG_DEP_TRUE); | |
4230 | sd_add_dep (new_dep, false); | |
496d7bb0 | 4231 | } |
496d7bb0 | 4232 | } |
496d7bb0 | 4233 | |
e2f6ff94 | 4234 | process_insn_forw_deps_be_in_spec (insn, twin, ts); |
496d7bb0 | 4235 | |
b198261f | 4236 | /* Remove all dependencies between INSN and insns in REC. */ |
e2f6ff94 MK |
4237 | for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); |
4238 | sd_iterator_cond (&sd_it, &dep);) | |
496d7bb0 | 4239 | { |
e2f6ff94 | 4240 | rtx pro = DEP_PRO (dep); |
b198261f | 4241 | |
e2f6ff94 MK |
4242 | if (BLOCK_FOR_INSN (pro) == rec) |
4243 | sd_delete_dep (sd_it); | |
496d7bb0 | 4244 | else |
e2f6ff94 | 4245 | sd_iterator_next (&sd_it); |
496d7bb0 MK |
4246 | } |
4247 | } | |
496d7bb0 | 4248 | |
b198261f MK |
4249 | /* We couldn't have added the dependencies between INSN and TWINS earlier |
4250 | because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */ | |
496d7bb0 MK |
4251 | while (twins) |
4252 | { | |
4253 | rtx twin; | |
4254 | ||
4255 | twin = XEXP (twins, 0); | |
e2f6ff94 MK |
4256 | |
4257 | { | |
4258 | dep_def _new_dep, *new_dep = &_new_dep; | |
4259 | ||
4260 | init_dep (new_dep, insn, twin, REG_DEP_OUTPUT); | |
4261 | sd_add_dep (new_dep, false); | |
4262 | } | |
496d7bb0 MK |
4263 | |
4264 | twin = XEXP (twins, 1); | |
4265 | free_INSN_LIST_node (twins); | |
b8698a0f | 4266 | twins = twin; |
496d7bb0 | 4267 | } |
916fa4f0 MK |
4268 | |
4269 | calc_priorities (priorities_roots); | |
4270 | VEC_free (rtx, heap, priorities_roots); | |
496d7bb0 MK |
4271 | } |
4272 | ||
4273 | /* Extends and fills with zeros (only the new part) array pointed to by P. */ | |
4274 | void * | |
4275 | xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size) | |
4276 | { | |
4277 | gcc_assert (new_nmemb >= old_nmemb); | |
4278 | p = XRESIZEVAR (void, p, new_nmemb * size); | |
4279 | memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size); | |
4280 | return p; | |
4281 | } | |
4282 | ||
496d7bb0 MK |
4283 | /* Helper function. |
4284 | Find fallthru edge from PRED. */ | |
e855c69d | 4285 | edge |
496d7bb0 MK |
4286 | find_fallthru_edge (basic_block pred) |
4287 | { | |
4288 | edge e; | |
4289 | edge_iterator ei; | |
4290 | basic_block succ; | |
4291 | ||
4292 | succ = pred->next_bb; | |
4293 | gcc_assert (succ->prev_bb == pred); | |
4294 | ||
4295 | if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds)) | |
4296 | { | |
4297 | FOR_EACH_EDGE (e, ei, pred->succs) | |
4298 | if (e->flags & EDGE_FALLTHRU) | |
4299 | { | |
4300 | gcc_assert (e->dest == succ); | |
4301 | return e; | |
4302 | } | |
4303 | } | |
4304 | else | |
4305 | { | |
4306 | FOR_EACH_EDGE (e, ei, succ->preds) | |
4307 | if (e->flags & EDGE_FALLTHRU) | |
4308 | { | |
4309 | gcc_assert (e->src == pred); | |
4310 | return e; | |
4311 | } | |
4312 | } | |
4313 | ||
4314 | return NULL; | |
4315 | } | |
4316 | ||
e855c69d AB |
4317 | /* Extend per basic block data structures. */ |
4318 | static void | |
4319 | sched_extend_bb (void) | |
4320 | { | |
4321 | rtx insn; | |
4322 | ||
4323 | /* The following is done to keep current_sched_info->next_tail non null. */ | |
4324 | insn = BB_END (EXIT_BLOCK_PTR->prev_bb); | |
4325 | if (NEXT_INSN (insn) == 0 | |
4326 | || (!NOTE_P (insn) | |
4327 | && !LABEL_P (insn) | |
4328 | /* Don't emit a NOTE if it would end up before a BARRIER. */ | |
4329 | && !BARRIER_P (NEXT_INSN (insn)))) | |
4330 | { | |
4331 | rtx note = emit_note_after (NOTE_INSN_DELETED, insn); | |
4332 | /* Make insn appear outside BB. */ | |
4333 | set_block_for_insn (note, NULL); | |
4334 | BB_END (EXIT_BLOCK_PTR->prev_bb) = insn; | |
4335 | } | |
4336 | } | |
4337 | ||
4338 | /* Init per basic block data structures. */ | |
4339 | void | |
4340 | sched_init_bbs (void) | |
4341 | { | |
4342 | sched_extend_bb (); | |
4343 | } | |
4344 | ||
496d7bb0 MK |
4345 | /* Initialize BEFORE_RECOVERY variable. */ |
4346 | static void | |
e855c69d | 4347 | init_before_recovery (basic_block *before_recovery_ptr) |
496d7bb0 MK |
4348 | { |
4349 | basic_block last; | |
4350 | edge e; | |
4351 | ||
4352 | last = EXIT_BLOCK_PTR->prev_bb; | |
4353 | e = find_fallthru_edge (last); | |
4354 | ||
4355 | if (e) | |
4356 | { | |
b8698a0f | 4357 | /* We create two basic blocks: |
496d7bb0 | 4358 | 1. Single instruction block is inserted right after E->SRC |
b8698a0f | 4359 | and has jump to |
496d7bb0 MK |
4360 | 2. Empty block right before EXIT_BLOCK. |
4361 | Between these two blocks recovery blocks will be emitted. */ | |
4362 | ||
4363 | basic_block single, empty; | |
4364 | rtx x, label; | |
4365 | ||
b8698a0f | 4366 | /* If the fallthrough edge to exit we've found is from the block we've |
e855c69d AB |
4367 | created before, don't do anything more. */ |
4368 | if (last == after_recovery) | |
4369 | return; | |
496d7bb0 | 4370 | |
e855c69d AB |
4371 | adding_bb_to_current_region_p = false; |
4372 | ||
4373 | single = sched_create_empty_bb (last); | |
4374 | empty = sched_create_empty_bb (single); | |
4375 | ||
4376 | /* Add new blocks to the root loop. */ | |
4377 | if (current_loops != NULL) | |
4378 | { | |
4379 | add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0)); | |
4380 | add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0)); | |
4381 | } | |
4382 | ||
4383 | single->count = last->count; | |
496d7bb0 MK |
4384 | empty->count = last->count; |
4385 | single->frequency = last->frequency; | |
4386 | empty->frequency = last->frequency; | |
4387 | BB_COPY_PARTITION (single, last); | |
4388 | BB_COPY_PARTITION (empty, last); | |
4389 | ||
4390 | redirect_edge_succ (e, single); | |
4391 | make_single_succ_edge (single, empty, 0); | |
4392 | make_single_succ_edge (empty, EXIT_BLOCK_PTR, | |
4393 | EDGE_FALLTHRU | EDGE_CAN_FALLTHRU); | |
4394 | ||
4395 | label = block_label (empty); | |
4396 | x = emit_jump_insn_after (gen_jump (label), BB_END (single)); | |
4397 | JUMP_LABEL (x) = label; | |
4398 | LABEL_NUSES (label)++; | |
e855c69d | 4399 | haifa_init_insn (x); |
b8698a0f | 4400 | |
496d7bb0 MK |
4401 | emit_barrier_after (x); |
4402 | ||
e855c69d AB |
4403 | sched_init_only_bb (empty, NULL); |
4404 | sched_init_only_bb (single, NULL); | |
4405 | sched_extend_bb (); | |
496d7bb0 | 4406 | |
e855c69d | 4407 | adding_bb_to_current_region_p = true; |
496d7bb0 | 4408 | before_recovery = single; |
e855c69d AB |
4409 | after_recovery = empty; |
4410 | ||
4411 | if (before_recovery_ptr) | |
4412 | *before_recovery_ptr = before_recovery; | |
496d7bb0 MK |
4413 | |
4414 | if (sched_verbose >= 2 && spec_info->dump) | |
4415 | fprintf (spec_info->dump, | |
b8698a0f L |
4416 | ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n", |
4417 | last->index, single->index, empty->index); | |
496d7bb0 MK |
4418 | } |
4419 | else | |
4420 | before_recovery = last; | |
4421 | } | |
4422 | ||
4423 | /* Returns new recovery block. */ | |
e855c69d AB |
4424 | basic_block |
4425 | sched_create_recovery_block (basic_block *before_recovery_ptr) | |
496d7bb0 MK |
4426 | { |
4427 | rtx label; | |
96370780 | 4428 | rtx barrier; |
496d7bb0 | 4429 | basic_block rec; |
b8698a0f | 4430 | |
e2f6ff94 MK |
4431 | haifa_recovery_bb_recently_added_p = true; |
4432 | haifa_recovery_bb_ever_added_p = true; | |
496d7bb0 | 4433 | |
e855c69d | 4434 | init_before_recovery (before_recovery_ptr); |
496d7bb0 | 4435 | |
96370780 MK |
4436 | barrier = get_last_bb_insn (before_recovery); |
4437 | gcc_assert (BARRIER_P (barrier)); | |
4438 | ||
4439 | label = emit_label_after (gen_label_rtx (), barrier); | |
4440 | ||
4441 | rec = create_basic_block (label, label, before_recovery); | |
4442 | ||
e855c69d | 4443 | /* A recovery block always ends with an unconditional jump. */ |
496d7bb0 MK |
4444 | emit_barrier_after (BB_END (rec)); |
4445 | ||
4446 | if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED) | |
4447 | BB_SET_PARTITION (rec, BB_COLD_PARTITION); | |
b8698a0f L |
4448 | |
4449 | if (sched_verbose && spec_info->dump) | |
496d7bb0 MK |
4450 | fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n", |
4451 | rec->index); | |
4452 | ||
496d7bb0 MK |
4453 | return rec; |
4454 | } | |
4455 | ||
e855c69d AB |
4456 | /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB |
4457 | and emit necessary jumps. */ | |
4458 | void | |
4459 | sched_create_recovery_edges (basic_block first_bb, basic_block rec, | |
4460 | basic_block second_bb) | |
4461 | { | |
4462 | rtx label; | |
4463 | rtx jump; | |
e855c69d AB |
4464 | int edge_flags; |
4465 | ||
4466 | /* This is fixing of incoming edge. */ | |
b8698a0f | 4467 | /* ??? Which other flags should be specified? */ |
e855c69d AB |
4468 | if (BB_PARTITION (first_bb) != BB_PARTITION (rec)) |
4469 | /* Partition type is the same, if it is "unpartitioned". */ | |
4470 | edge_flags = EDGE_CROSSING; | |
4471 | else | |
4472 | edge_flags = 0; | |
b8698a0f | 4473 | |
038dc49a | 4474 | make_edge (first_bb, rec, edge_flags); |
e855c69d AB |
4475 | label = block_label (second_bb); |
4476 | jump = emit_jump_insn_after (gen_jump (label), BB_END (rec)); | |
4477 | JUMP_LABEL (jump) = label; | |
4478 | LABEL_NUSES (label)++; | |
4479 | ||
4480 | if (BB_PARTITION (second_bb) != BB_PARTITION (rec)) | |
4481 | /* Partition type is the same, if it is "unpartitioned". */ | |
4482 | { | |
4483 | /* Rewritten from cfgrtl.c. */ | |
4484 | if (flag_reorder_blocks_and_partition | |
4485 | && targetm.have_named_sections) | |
e855c69d | 4486 | { |
efc0b2bd ILT |
4487 | /* We don't need the same note for the check because |
4488 | any_condjump_p (check) == true. */ | |
4489 | add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX); | |
e855c69d AB |
4490 | } |
4491 | edge_flags = EDGE_CROSSING; | |
4492 | } | |
4493 | else | |
b8698a0f | 4494 | edge_flags = 0; |
e855c69d | 4495 | |
b8698a0f | 4496 | make_single_succ_edge (rec, second_bb, edge_flags); |
e855c69d AB |
4497 | } |
4498 | ||
496d7bb0 MK |
4499 | /* This function creates recovery code for INSN. If MUTATE_P is nonzero, |
4500 | INSN is a simple check, that should be converted to branchy one. */ | |
4501 | static void | |
4502 | create_check_block_twin (rtx insn, bool mutate_p) | |
4503 | { | |
4504 | basic_block rec; | |
b198261f | 4505 | rtx label, check, twin; |
496d7bb0 | 4506 | ds_t fs; |
e2f6ff94 MK |
4507 | sd_iterator_def sd_it; |
4508 | dep_t dep; | |
4509 | dep_def _new_dep, *new_dep = &_new_dep; | |
e855c69d | 4510 | ds_t todo_spec; |
496d7bb0 | 4511 | |
e855c69d AB |
4512 | gcc_assert (ORIG_PAT (insn) != NULL_RTX); |
4513 | ||
4514 | if (!mutate_p) | |
4515 | todo_spec = TODO_SPEC (insn); | |
4516 | else | |
4517 | { | |
4518 | gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn) | |
4519 | && (TODO_SPEC (insn) & SPECULATIVE) == 0); | |
4520 | ||
4521 | todo_spec = CHECK_SPEC (insn); | |
4522 | } | |
4523 | ||
4524 | todo_spec &= SPECULATIVE; | |
496d7bb0 MK |
4525 | |
4526 | /* Create recovery block. */ | |
388092d5 | 4527 | if (mutate_p || targetm.sched.needs_block_p (todo_spec)) |
496d7bb0 | 4528 | { |
e855c69d | 4529 | rec = sched_create_recovery_block (NULL); |
496d7bb0 MK |
4530 | label = BB_HEAD (rec); |
4531 | } | |
4532 | else | |
4533 | { | |
4534 | rec = EXIT_BLOCK_PTR; | |
e855c69d | 4535 | label = NULL_RTX; |
496d7bb0 MK |
4536 | } |
4537 | ||
4538 | /* Emit CHECK. */ | |
388092d5 | 4539 | check = targetm.sched.gen_spec_check (insn, label, todo_spec); |
496d7bb0 MK |
4540 | |
4541 | if (rec != EXIT_BLOCK_PTR) | |
4542 | { | |
4543 | /* To have mem_reg alive at the beginning of second_bb, | |
b8698a0f | 4544 | we emit check BEFORE insn, so insn after splitting |
496d7bb0 MK |
4545 | insn will be at the beginning of second_bb, which will |
4546 | provide us with the correct life information. */ | |
4547 | check = emit_jump_insn_before (check, insn); | |
4548 | JUMP_LABEL (check) = label; | |
4549 | LABEL_NUSES (label)++; | |
4550 | } | |
4551 | else | |
4552 | check = emit_insn_before (check, insn); | |
4553 | ||
4554 | /* Extend data structures. */ | |
e855c69d AB |
4555 | haifa_init_insn (check); |
4556 | ||
4557 | /* CHECK is being added to current region. Extend ready list. */ | |
4558 | gcc_assert (sched_ready_n_insns != -1); | |
4559 | sched_extend_ready_list (sched_ready_n_insns + 1); | |
4560 | ||
4561 | if (current_sched_info->add_remove_insn) | |
4562 | current_sched_info->add_remove_insn (insn, 0); | |
4563 | ||
496d7bb0 MK |
4564 | RECOVERY_BLOCK (check) = rec; |
4565 | ||
4566 | if (sched_verbose && spec_info->dump) | |
4567 | fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n", | |
4568 | (*current_sched_info->print_insn) (check, 0)); | |
4569 | ||
4570 | gcc_assert (ORIG_PAT (insn)); | |
4571 | ||
917f1b7e | 4572 | /* Initialize TWIN (twin is a duplicate of original instruction |
496d7bb0 MK |
4573 | in the recovery block). */ |
4574 | if (rec != EXIT_BLOCK_PTR) | |
4575 | { | |
e2f6ff94 MK |
4576 | sd_iterator_def sd_it; |
4577 | dep_t dep; | |
4578 | ||
4579 | FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep) | |
4580 | if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0) | |
496d7bb0 | 4581 | { |
e2f6ff94 | 4582 | struct _dep _dep2, *dep2 = &_dep2; |
b198261f | 4583 | |
e2f6ff94 | 4584 | init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE); |
b198261f | 4585 | |
e2f6ff94 | 4586 | sd_add_dep (dep2, true); |
496d7bb0 MK |
4587 | } |
4588 | ||
4589 | twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec)); | |
e855c69d | 4590 | haifa_init_insn (twin); |
496d7bb0 MK |
4591 | |
4592 | if (sched_verbose && spec_info->dump) | |
4593 | /* INSN_BB (insn) isn't determined for twin insns yet. | |
4594 | So we can't use current_sched_info->print_insn. */ | |
4595 | fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n", | |
4596 | INSN_UID (twin), rec->index); | |
4597 | } | |
4598 | else | |
4599 | { | |
4600 | ORIG_PAT (check) = ORIG_PAT (insn); | |
4601 | HAS_INTERNAL_DEP (check) = 1; | |
4602 | twin = check; | |
4603 | /* ??? We probably should change all OUTPUT dependencies to | |
4604 | (TRUE | OUTPUT). */ | |
4605 | } | |
4606 | ||
e2f6ff94 MK |
4607 | /* Copy all resolved back dependencies of INSN to TWIN. This will |
4608 | provide correct value for INSN_TICK (TWIN). */ | |
4609 | sd_copy_back_deps (twin, insn, true); | |
496d7bb0 MK |
4610 | |
4611 | if (rec != EXIT_BLOCK_PTR) | |
4612 | /* In case of branchy check, fix CFG. */ | |
4613 | { | |
4614 | basic_block first_bb, second_bb; | |
4615 | rtx jump; | |
496d7bb0 MK |
4616 | |
4617 | first_bb = BLOCK_FOR_INSN (check); | |
e855c69d | 4618 | second_bb = sched_split_block (first_bb, check); |
496d7bb0 | 4619 | |
e855c69d | 4620 | sched_create_recovery_edges (first_bb, rec, second_bb); |
496d7bb0 | 4621 | |
b8698a0f | 4622 | sched_init_only_bb (second_bb, first_bb); |
e855c69d AB |
4623 | sched_init_only_bb (rec, EXIT_BLOCK_PTR); |
4624 | ||
4625 | jump = BB_END (rec); | |
4626 | haifa_init_insn (jump); | |
496d7bb0 MK |
4627 | } |
4628 | ||
b8698a0f | 4629 | /* Move backward dependences from INSN to CHECK and |
496d7bb0 | 4630 | move forward dependences from INSN to TWIN. */ |
e2f6ff94 MK |
4631 | |
4632 | /* First, create dependencies between INSN's producers and CHECK & TWIN. */ | |
4633 | FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep) | |
496d7bb0 | 4634 | { |
e2f6ff94 | 4635 | rtx pro = DEP_PRO (dep); |
496d7bb0 MK |
4636 | ds_t ds; |
4637 | ||
4638 | /* If BEGIN_DATA: [insn ~~TRUE~~> producer]: | |
4639 | check --TRUE--> producer ??? or ANTI ??? | |
4640 | twin --TRUE--> producer | |
4641 | twin --ANTI--> check | |
b8698a0f | 4642 | |
496d7bb0 MK |
4643 | If BEGIN_CONTROL: [insn ~~ANTI~~> producer]: |
4644 | check --ANTI--> producer | |
4645 | twin --ANTI--> producer | |
4646 | twin --ANTI--> check | |
4647 | ||
4648 | If BE_IN_SPEC: [insn ~~TRUE~~> producer]: | |
4649 | check ~~TRUE~~> producer | |
4650 | twin ~~TRUE~~> producer | |
b8698a0f | 4651 | twin --ANTI--> check */ |
496d7bb0 | 4652 | |
e2f6ff94 | 4653 | ds = DEP_STATUS (dep); |
496d7bb0 MK |
4654 | |
4655 | if (ds & BEGIN_SPEC) | |
4656 | { | |
4657 | gcc_assert (!mutate_p); | |
4658 | ds &= ~BEGIN_SPEC; | |
4659 | } | |
4660 | ||
e2f6ff94 MK |
4661 | init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds); |
4662 | sd_add_dep (new_dep, false); | |
4663 | ||
496d7bb0 MK |
4664 | if (rec != EXIT_BLOCK_PTR) |
4665 | { | |
e2f6ff94 MK |
4666 | DEP_CON (new_dep) = twin; |
4667 | sd_add_dep (new_dep, false); | |
b8698a0f | 4668 | } |
496d7bb0 MK |
4669 | } |
4670 | ||
e2f6ff94 MK |
4671 | /* Second, remove backward dependencies of INSN. */ |
4672 | for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); | |
4673 | sd_iterator_cond (&sd_it, &dep);) | |
4674 | { | |
4675 | if ((DEP_STATUS (dep) & BEGIN_SPEC) | |
4676 | || mutate_p) | |
4677 | /* We can delete this dep because we overcome it with | |
4678 | BEGIN_SPECULATION. */ | |
4679 | sd_delete_dep (sd_it); | |
4680 | else | |
4681 | sd_iterator_next (&sd_it); | |
4682 | } | |
496d7bb0 | 4683 | |
e2f6ff94 | 4684 | /* Future Speculations. Determine what BE_IN speculations will be like. */ |
496d7bb0 MK |
4685 | fs = 0; |
4686 | ||
4687 | /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only | |
4688 | here. */ | |
b8698a0f | 4689 | |
496d7bb0 | 4690 | gcc_assert (!DONE_SPEC (insn)); |
b8698a0f | 4691 | |
496d7bb0 | 4692 | if (!mutate_p) |
b8698a0f | 4693 | { |
496d7bb0 MK |
4694 | ds_t ts = TODO_SPEC (insn); |
4695 | ||
4696 | DONE_SPEC (insn) = ts & BEGIN_SPEC; | |
4697 | CHECK_SPEC (check) = ts & BEGIN_SPEC; | |
4698 | ||
15dc95cb | 4699 | /* Luckiness of future speculations solely depends upon initial |
e2f6ff94 | 4700 | BEGIN speculation. */ |
496d7bb0 MK |
4701 | if (ts & BEGIN_DATA) |
4702 | fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA)); | |
4703 | if (ts & BEGIN_CONTROL) | |
e2f6ff94 MK |
4704 | fs = set_dep_weak (fs, BE_IN_CONTROL, |
4705 | get_dep_weak (ts, BEGIN_CONTROL)); | |
496d7bb0 MK |
4706 | } |
4707 | else | |
4708 | CHECK_SPEC (check) = CHECK_SPEC (insn); | |
4709 | ||
4710 | /* Future speculations: call the helper. */ | |
e2f6ff94 | 4711 | process_insn_forw_deps_be_in_spec (insn, twin, fs); |
496d7bb0 MK |
4712 | |
4713 | if (rec != EXIT_BLOCK_PTR) | |
4714 | { | |
4715 | /* Which types of dependencies should we use here is, | |
4716 | generally, machine-dependent question... But, for now, | |
4717 | it is not. */ | |
4718 | ||
4719 | if (!mutate_p) | |
4720 | { | |
e2f6ff94 MK |
4721 | init_dep (new_dep, insn, check, REG_DEP_TRUE); |
4722 | sd_add_dep (new_dep, false); | |
4723 | ||
4724 | init_dep (new_dep, insn, twin, REG_DEP_OUTPUT); | |
4725 | sd_add_dep (new_dep, false); | |
496d7bb0 MK |
4726 | } |
4727 | else | |
4728 | { | |
b8698a0f | 4729 | if (spec_info->dump) |
496d7bb0 MK |
4730 | fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n", |
4731 | (*current_sched_info->print_insn) (insn, 0)); | |
4732 | ||
e2f6ff94 MK |
4733 | /* Remove all dependencies of the INSN. */ |
4734 | { | |
4735 | sd_it = sd_iterator_start (insn, (SD_LIST_FORW | |
4736 | | SD_LIST_BACK | |
4737 | | SD_LIST_RES_BACK)); | |
4738 | while (sd_iterator_cond (&sd_it, &dep)) | |
4739 | sd_delete_dep (sd_it); | |
4740 | } | |
496d7bb0 | 4741 | |
e2f6ff94 MK |
4742 | /* If former check (INSN) already was moved to the ready (or queue) |
4743 | list, add new check (CHECK) there too. */ | |
496d7bb0 MK |
4744 | if (QUEUE_INDEX (insn) != QUEUE_NOWHERE) |
4745 | try_ready (check); | |
4746 | ||
e2f6ff94 MK |
4747 | /* Remove old check from instruction stream and free its |
4748 | data. */ | |
496d7bb0 MK |
4749 | sched_remove_insn (insn); |
4750 | } | |
4751 | ||
e2f6ff94 MK |
4752 | init_dep (new_dep, check, twin, REG_DEP_ANTI); |
4753 | sd_add_dep (new_dep, false); | |
496d7bb0 MK |
4754 | } |
4755 | else | |
e2f6ff94 MK |
4756 | { |
4757 | init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT); | |
4758 | sd_add_dep (new_dep, false); | |
4759 | } | |
496d7bb0 MK |
4760 | |
4761 | if (!mutate_p) | |
917f1b7e | 4762 | /* Fix priorities. If MUTATE_P is nonzero, this is not necessary, |
496d7bb0 MK |
4763 | because it'll be done later in add_to_speculative_block. */ |
4764 | { | |
916fa4f0 MK |
4765 | rtx_vec_t priorities_roots = NULL; |
4766 | ||
4767 | clear_priorities (twin, &priorities_roots); | |
4768 | calc_priorities (priorities_roots); | |
4769 | VEC_free (rtx, heap, priorities_roots); | |
496d7bb0 MK |
4770 | } |
4771 | } | |
4772 | ||
4773 | /* Removes dependency between instructions in the recovery block REC | |
4774 | and usual region instructions. It keeps inner dependences so it | |
917f1b7e | 4775 | won't be necessary to recompute them. */ |
496d7bb0 MK |
4776 | static void |
4777 | fix_recovery_deps (basic_block rec) | |
4778 | { | |
b198261f | 4779 | rtx note, insn, jump, ready_list = 0; |
496d7bb0 | 4780 | bitmap_head in_ready; |
e2f6ff94 | 4781 | rtx link; |
496d7bb0 MK |
4782 | |
4783 | bitmap_initialize (&in_ready, 0); | |
b8698a0f | 4784 | |
496d7bb0 MK |
4785 | /* NOTE - a basic block note. */ |
4786 | note = NEXT_INSN (BB_HEAD (rec)); | |
4787 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); | |
4788 | insn = BB_END (rec); | |
4789 | gcc_assert (JUMP_P (insn)); | |
4790 | insn = PREV_INSN (insn); | |
4791 | ||
4792 | do | |
e2f6ff94 MK |
4793 | { |
4794 | sd_iterator_def sd_it; | |
4795 | dep_t dep; | |
496d7bb0 | 4796 | |
e2f6ff94 MK |
4797 | for (sd_it = sd_iterator_start (insn, SD_LIST_FORW); |
4798 | sd_iterator_cond (&sd_it, &dep);) | |
4799 | { | |
4800 | rtx consumer = DEP_CON (dep); | |
496d7bb0 MK |
4801 | |
4802 | if (BLOCK_FOR_INSN (consumer) != rec) | |
4803 | { | |
e2f6ff94 | 4804 | sd_delete_dep (sd_it); |
496d7bb0 MK |
4805 | |
4806 | if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer))) | |
4807 | { | |
4808 | ready_list = alloc_INSN_LIST (consumer, ready_list); | |
4809 | bitmap_set_bit (&in_ready, INSN_LUID (consumer)); | |
4810 | } | |
496d7bb0 MK |
4811 | } |
4812 | else | |
4813 | { | |
e2f6ff94 | 4814 | gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE); |
496d7bb0 | 4815 | |
e2f6ff94 | 4816 | sd_iterator_next (&sd_it); |
496d7bb0 MK |
4817 | } |
4818 | } | |
b8698a0f | 4819 | |
496d7bb0 MK |
4820 | insn = PREV_INSN (insn); |
4821 | } | |
4822 | while (insn != note); | |
4823 | ||
4824 | bitmap_clear (&in_ready); | |
4825 | ||
4826 | /* Try to add instructions to the ready or queue list. */ | |
e2f6ff94 MK |
4827 | for (link = ready_list; link; link = XEXP (link, 1)) |
4828 | try_ready (XEXP (link, 0)); | |
496d7bb0 MK |
4829 | free_INSN_LIST_list (&ready_list); |
4830 | ||
4831 | /* Fixing jump's dependences. */ | |
4832 | insn = BB_HEAD (rec); | |
4833 | jump = BB_END (rec); | |
b8698a0f | 4834 | |
496d7bb0 MK |
4835 | gcc_assert (LABEL_P (insn)); |
4836 | insn = NEXT_INSN (insn); | |
b8698a0f | 4837 | |
496d7bb0 MK |
4838 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn)); |
4839 | add_jump_dependencies (insn, jump); | |
4840 | } | |
4841 | ||
e855c69d AB |
4842 | /* Change pattern of INSN to NEW_PAT. */ |
4843 | void | |
4844 | sched_change_pattern (rtx insn, rtx new_pat) | |
496d7bb0 MK |
4845 | { |
4846 | int t; | |
4847 | ||
4848 | t = validate_change (insn, &PATTERN (insn), new_pat, 0); | |
4849 | gcc_assert (t); | |
e855c69d AB |
4850 | dfa_clear_single_insn_cache (insn); |
4851 | } | |
4852 | ||
4853 | /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa | |
4854 | instruction data. */ | |
4855 | static void | |
4856 | haifa_change_pattern (rtx insn, rtx new_pat) | |
4857 | { | |
4858 | sched_change_pattern (insn, new_pat); | |
4859 | ||
496d7bb0 MK |
4860 | /* Invalidate INSN_COST, so it'll be recalculated. */ |
4861 | INSN_COST (insn) = -1; | |
4862 | /* Invalidate INSN_TICK, so it'll be recalculated. */ | |
4863 | INSN_TICK (insn) = INVALID_TICK; | |
496d7bb0 MK |
4864 | } |
4865 | ||
496d7bb0 MK |
4866 | /* -1 - can't speculate, |
4867 | 0 - for speculation with REQUEST mode it is OK to use | |
4868 | current instruction pattern, | |
4869 | 1 - need to change pattern for *NEW_PAT to be speculative. */ | |
e855c69d AB |
4870 | int |
4871 | sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat) | |
496d7bb0 MK |
4872 | { |
4873 | gcc_assert (current_sched_info->flags & DO_SPECULATION | |
e2f6ff94 MK |
4874 | && (request & SPECULATIVE) |
4875 | && sched_insn_is_legitimate_for_speculation_p (insn, request)); | |
496d7bb0 | 4876 | |
e2f6ff94 | 4877 | if ((request & spec_info->mask) != request) |
496d7bb0 | 4878 | return -1; |
496d7bb0 | 4879 | |
e2f6ff94 MK |
4880 | if (request & BE_IN_SPEC |
4881 | && !(request & BEGIN_SPEC)) | |
4882 | return 0; | |
496d7bb0 | 4883 | |
e855c69d AB |
4884 | return targetm.sched.speculate_insn (insn, request, new_pat); |
4885 | } | |
4886 | ||
4887 | static int | |
4888 | haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat) | |
4889 | { | |
4890 | gcc_assert (sched_deps_info->generate_spec_deps | |
4891 | && !IS_SPECULATION_CHECK_P (insn)); | |
4892 | ||
4893 | if (HAS_INTERNAL_DEP (insn) | |
4894 | || SCHED_GROUP_P (insn)) | |
4895 | return -1; | |
4896 | ||
4897 | return sched_speculate_insn (insn, request, new_pat); | |
496d7bb0 MK |
4898 | } |
4899 | ||
4900 | /* Print some information about block BB, which starts with HEAD and | |
4901 | ends with TAIL, before scheduling it. | |
4902 | I is zero, if scheduler is about to start with the fresh ebb. */ | |
4903 | static void | |
4904 | dump_new_block_header (int i, basic_block bb, rtx head, rtx tail) | |
4905 | { | |
4906 | if (!i) | |
4907 | fprintf (sched_dump, | |
4908 | ";; ======================================================\n"); | |
4909 | else | |
4910 | fprintf (sched_dump, | |
4911 | ";; =====================ADVANCING TO=====================\n"); | |
4912 | fprintf (sched_dump, | |
4913 | ";; -- basic block %d from %d to %d -- %s reload\n", | |
4914 | bb->index, INSN_UID (head), INSN_UID (tail), | |
4915 | (reload_completed ? "after" : "before")); | |
4916 | fprintf (sched_dump, | |
4917 | ";; ======================================================\n"); | |
4918 | fprintf (sched_dump, "\n"); | |
4919 | } | |
4920 | ||
4921 | /* Unlink basic block notes and labels and saves them, so they | |
4922 | can be easily restored. We unlink basic block notes in EBB to | |
917f1b7e | 4923 | provide back-compatibility with the previous code, as target backends |
496d7bb0 MK |
4924 | assume, that there'll be only instructions between |
4925 | current_sched_info->{head and tail}. We restore these notes as soon | |
4926 | as we can. | |
4927 | FIRST (LAST) is the first (last) basic block in the ebb. | |
4928 | NB: In usual case (FIRST == LAST) nothing is really done. */ | |
4929 | void | |
4930 | unlink_bb_notes (basic_block first, basic_block last) | |
4931 | { | |
4932 | /* We DON'T unlink basic block notes of the first block in the ebb. */ | |
4933 | if (first == last) | |
4934 | return; | |
4935 | ||
d3bfe4de | 4936 | bb_header = XNEWVEC (rtx, last_basic_block); |
496d7bb0 MK |
4937 | |
4938 | /* Make a sentinel. */ | |
4939 | if (last->next_bb != EXIT_BLOCK_PTR) | |
4940 | bb_header[last->next_bb->index] = 0; | |
4941 | ||
4942 | first = first->next_bb; | |
4943 | do | |
4944 | { | |
4945 | rtx prev, label, note, next; | |
4946 | ||
4947 | label = BB_HEAD (last); | |
4948 | if (LABEL_P (label)) | |
4949 | note = NEXT_INSN (label); | |
4950 | else | |
b8698a0f | 4951 | note = label; |
496d7bb0 MK |
4952 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); |
4953 | ||
4954 | prev = PREV_INSN (label); | |
4955 | next = NEXT_INSN (note); | |
4956 | gcc_assert (prev && next); | |
4957 | ||
4958 | NEXT_INSN (prev) = next; | |
4959 | PREV_INSN (next) = prev; | |
4960 | ||
4961 | bb_header[last->index] = label; | |
4962 | ||
4963 | if (last == first) | |
4964 | break; | |
b8698a0f | 4965 | |
496d7bb0 MK |
4966 | last = last->prev_bb; |
4967 | } | |
4968 | while (1); | |
4969 | } | |
4970 | ||
4971 | /* Restore basic block notes. | |
4972 | FIRST is the first basic block in the ebb. */ | |
4973 | static void | |
4974 | restore_bb_notes (basic_block first) | |
4975 | { | |
4976 | if (!bb_header) | |
4977 | return; | |
4978 | ||
4979 | /* We DON'T unlink basic block notes of the first block in the ebb. */ | |
b8698a0f | 4980 | first = first->next_bb; |
496d7bb0 MK |
4981 | /* Remember: FIRST is actually a second basic block in the ebb. */ |
4982 | ||
4983 | while (first != EXIT_BLOCK_PTR | |
4984 | && bb_header[first->index]) | |
4985 | { | |
4986 | rtx prev, label, note, next; | |
b8698a0f | 4987 | |
496d7bb0 MK |
4988 | label = bb_header[first->index]; |
4989 | prev = PREV_INSN (label); | |
4990 | next = NEXT_INSN (prev); | |
4991 | ||
4992 | if (LABEL_P (label)) | |
4993 | note = NEXT_INSN (label); | |
4994 | else | |
b8698a0f | 4995 | note = label; |
496d7bb0 MK |
4996 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); |
4997 | ||
4998 | bb_header[first->index] = 0; | |
4999 | ||
5000 | NEXT_INSN (prev) = label; | |
5001 | NEXT_INSN (note) = next; | |
5002 | PREV_INSN (next) = note; | |
b8698a0f | 5003 | |
496d7bb0 MK |
5004 | first = first->next_bb; |
5005 | } | |
5006 | ||
5007 | free (bb_header); | |
5008 | bb_header = 0; | |
5009 | } | |
5010 | ||
496d7bb0 MK |
5011 | /* Helper function. |
5012 | Fix CFG after both in- and inter-block movement of | |
5013 | control_flow_insn_p JUMP. */ | |
5014 | static void | |
5015 | fix_jump_move (rtx jump) | |
5016 | { | |
5017 | basic_block bb, jump_bb, jump_bb_next; | |
5018 | ||
5019 | bb = BLOCK_FOR_INSN (PREV_INSN (jump)); | |
5020 | jump_bb = BLOCK_FOR_INSN (jump); | |
5021 | jump_bb_next = jump_bb->next_bb; | |
5022 | ||
e855c69d | 5023 | gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS |
d7bfd907 | 5024 | || IS_SPECULATION_BRANCHY_CHECK_P (jump)); |
b8698a0f | 5025 | |
496d7bb0 MK |
5026 | if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next))) |
5027 | /* if jump_bb_next is not empty. */ | |
5028 | BB_END (jump_bb) = BB_END (jump_bb_next); | |
5029 | ||
5030 | if (BB_END (bb) != PREV_INSN (jump)) | |
5031 | /* Then there are instruction after jump that should be placed | |
5032 | to jump_bb_next. */ | |
5033 | BB_END (jump_bb_next) = BB_END (bb); | |
5034 | else | |
5035 | /* Otherwise jump_bb_next is empty. */ | |
5036 | BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next)); | |
5037 | ||
5038 | /* To make assertion in move_insn happy. */ | |
5039 | BB_END (bb) = PREV_INSN (jump); | |
5040 | ||
5041 | update_bb_for_insn (jump_bb_next); | |
5042 | } | |
5043 | ||
5044 | /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */ | |
5045 | static void | |
5046 | move_block_after_check (rtx jump) | |
5047 | { | |
5048 | basic_block bb, jump_bb, jump_bb_next; | |
5049 | VEC(edge,gc) *t; | |
5050 | ||
5051 | bb = BLOCK_FOR_INSN (PREV_INSN (jump)); | |
5052 | jump_bb = BLOCK_FOR_INSN (jump); | |
5053 | jump_bb_next = jump_bb->next_bb; | |
b8698a0f | 5054 | |
496d7bb0 | 5055 | update_bb_for_insn (jump_bb); |
b8698a0f | 5056 | |
d7bfd907 MK |
5057 | gcc_assert (IS_SPECULATION_CHECK_P (jump) |
5058 | || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next))); | |
496d7bb0 MK |
5059 | |
5060 | unlink_block (jump_bb_next); | |
5061 | link_block (jump_bb_next, bb); | |
5062 | ||
5063 | t = bb->succs; | |
5064 | bb->succs = 0; | |
5065 | move_succs (&(jump_bb->succs), bb); | |
5066 | move_succs (&(jump_bb_next->succs), jump_bb); | |
5067 | move_succs (&t, jump_bb_next); | |
6fb5fa3c DB |
5068 | |
5069 | df_mark_solutions_dirty (); | |
b8698a0f | 5070 | |
e855c69d AB |
5071 | common_sched_info->fix_recovery_cfg |
5072 | (bb->index, jump_bb->index, jump_bb_next->index); | |
496d7bb0 MK |
5073 | } |
5074 | ||
5075 | /* Helper function for move_block_after_check. | |
5076 | This functions attaches edge vector pointed to by SUCCSP to | |
5077 | block TO. */ | |
5078 | static void | |
5079 | move_succs (VEC(edge,gc) **succsp, basic_block to) | |
5080 | { | |
5081 | edge e; | |
5082 | edge_iterator ei; | |
5083 | ||
5084 | gcc_assert (to->succs == 0); | |
5085 | ||
5086 | to->succs = *succsp; | |
5087 | ||
5088 | FOR_EACH_EDGE (e, ei, to->succs) | |
5089 | e->src = to; | |
5090 | ||
5091 | *succsp = 0; | |
5092 | } | |
5093 | ||
496d7bb0 MK |
5094 | /* Remove INSN from the instruction stream. |
5095 | INSN should have any dependencies. */ | |
5096 | static void | |
5097 | sched_remove_insn (rtx insn) | |
5098 | { | |
e2f6ff94 MK |
5099 | sd_finish_insn (insn); |
5100 | ||
496d7bb0 MK |
5101 | change_queue_index (insn, QUEUE_NOWHERE); |
5102 | current_sched_info->add_remove_insn (insn, 1); | |
5103 | remove_insn (insn); | |
5104 | } | |
5105 | ||
916fa4f0 MK |
5106 | /* Clear priorities of all instructions, that are forward dependent on INSN. |
5107 | Store in vector pointed to by ROOTS_PTR insns on which priority () should | |
5108 | be invoked to initialize all cleared priorities. */ | |
496d7bb0 | 5109 | static void |
916fa4f0 | 5110 | clear_priorities (rtx insn, rtx_vec_t *roots_ptr) |
496d7bb0 | 5111 | { |
e2f6ff94 MK |
5112 | sd_iterator_def sd_it; |
5113 | dep_t dep; | |
916fa4f0 MK |
5114 | bool insn_is_root_p = true; |
5115 | ||
5116 | gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED); | |
496d7bb0 | 5117 | |
e2f6ff94 | 5118 | FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep) |
496d7bb0 | 5119 | { |
916fa4f0 | 5120 | rtx pro = DEP_PRO (dep); |
496d7bb0 | 5121 | |
916fa4f0 MK |
5122 | if (INSN_PRIORITY_STATUS (pro) >= 0 |
5123 | && QUEUE_INDEX (insn) != QUEUE_SCHEDULED) | |
496d7bb0 | 5124 | { |
916fa4f0 MK |
5125 | /* If DEP doesn't contribute to priority then INSN itself should |
5126 | be added to priority roots. */ | |
5127 | if (contributes_to_priority_p (dep)) | |
5128 | insn_is_root_p = false; | |
5129 | ||
5130 | INSN_PRIORITY_STATUS (pro) = -1; | |
5131 | clear_priorities (pro, roots_ptr); | |
496d7bb0 MK |
5132 | } |
5133 | } | |
916fa4f0 MK |
5134 | |
5135 | if (insn_is_root_p) | |
5136 | VEC_safe_push (rtx, heap, *roots_ptr, insn); | |
496d7bb0 MK |
5137 | } |
5138 | ||
5139 | /* Recompute priorities of instructions, whose priorities might have been | |
916fa4f0 MK |
5140 | changed. ROOTS is a vector of instructions whose priority computation will |
5141 | trigger initialization of all cleared priorities. */ | |
496d7bb0 | 5142 | static void |
916fa4f0 | 5143 | calc_priorities (rtx_vec_t roots) |
496d7bb0 | 5144 | { |
916fa4f0 MK |
5145 | int i; |
5146 | rtx insn; | |
496d7bb0 | 5147 | |
916fa4f0 MK |
5148 | for (i = 0; VEC_iterate (rtx, roots, i, insn); i++) |
5149 | priority (insn); | |
496d7bb0 MK |
5150 | } |
5151 | ||
5152 | ||
5153 | /* Add dependences between JUMP and other instructions in the recovery | |
5154 | block. INSN is the first insn the recovery block. */ | |
5155 | static void | |
5156 | add_jump_dependencies (rtx insn, rtx jump) | |
5157 | { | |
5158 | do | |
5159 | { | |
5160 | insn = NEXT_INSN (insn); | |
5161 | if (insn == jump) | |
5162 | break; | |
b8698a0f | 5163 | |
b5b8b0ac | 5164 | if (dep_list_size (insn) == 0) |
e2f6ff94 MK |
5165 | { |
5166 | dep_def _new_dep, *new_dep = &_new_dep; | |
5167 | ||
5168 | init_dep (new_dep, insn, jump, REG_DEP_ANTI); | |
5169 | sd_add_dep (new_dep, false); | |
5170 | } | |
496d7bb0 MK |
5171 | } |
5172 | while (1); | |
b198261f | 5173 | |
e2f6ff94 | 5174 | gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK)); |
496d7bb0 MK |
5175 | } |
5176 | ||
5177 | /* Return the NOTE_INSN_BASIC_BLOCK of BB. */ | |
d3b30e42 | 5178 | rtx |
496d7bb0 MK |
5179 | bb_note (basic_block bb) |
5180 | { | |
5181 | rtx note; | |
5182 | ||
5183 | note = BB_HEAD (bb); | |
5184 | if (LABEL_P (note)) | |
5185 | note = NEXT_INSN (note); | |
5186 | ||
5187 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); | |
5188 | return note; | |
5189 | } | |
5190 | ||
5191 | #ifdef ENABLE_CHECKING | |
496d7bb0 | 5192 | /* Helper function for check_cfg. |
917f1b7e | 5193 | Return nonzero, if edge vector pointed to by EL has edge with TYPE in |
496d7bb0 MK |
5194 | its flags. */ |
5195 | static int | |
5196 | has_edge_p (VEC(edge,gc) *el, int type) | |
5197 | { | |
5198 | edge e; | |
5199 | edge_iterator ei; | |
5200 | ||
5201 | FOR_EACH_EDGE (e, ei, el) | |
5202 | if (e->flags & type) | |
5203 | return 1; | |
5204 | return 0; | |
5205 | } | |
5206 | ||
b5b8b0ac AO |
5207 | /* Search back, starting at INSN, for an insn that is not a |
5208 | NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if | |
5209 | no such insn can be found. */ | |
5210 | static inline rtx | |
5211 | prev_non_location_insn (rtx insn, rtx head) | |
5212 | { | |
5213 | while (insn != head && NOTE_P (insn) | |
5214 | && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION) | |
5215 | insn = PREV_INSN (insn); | |
5216 | ||
5217 | return insn; | |
5218 | } | |
5219 | ||
496d7bb0 MK |
5220 | /* Check few properties of CFG between HEAD and TAIL. |
5221 | If HEAD (TAIL) is NULL check from the beginning (till the end) of the | |
5222 | instruction stream. */ | |
5223 | static void | |
5224 | check_cfg (rtx head, rtx tail) | |
5225 | { | |
5226 | rtx next_tail; | |
5227 | basic_block bb = 0; | |
5228 | int not_first = 0, not_last; | |
5229 | ||
5230 | if (head == NULL) | |
5231 | head = get_insns (); | |
5232 | if (tail == NULL) | |
5233 | tail = get_last_insn (); | |
5234 | next_tail = NEXT_INSN (tail); | |
5235 | ||
5236 | do | |
b8698a0f L |
5237 | { |
5238 | not_last = head != tail; | |
496d7bb0 MK |
5239 | |
5240 | if (not_first) | |
5241 | gcc_assert (NEXT_INSN (PREV_INSN (head)) == head); | |
5242 | if (not_last) | |
5243 | gcc_assert (PREV_INSN (NEXT_INSN (head)) == head); | |
5244 | ||
b8698a0f | 5245 | if (LABEL_P (head) |
496d7bb0 MK |
5246 | || (NOTE_INSN_BASIC_BLOCK_P (head) |
5247 | && (!not_first | |
5248 | || (not_first && !LABEL_P (PREV_INSN (head)))))) | |
5249 | { | |
b8698a0f | 5250 | gcc_assert (bb == 0); |
496d7bb0 MK |
5251 | bb = BLOCK_FOR_INSN (head); |
5252 | if (bb != 0) | |
b8698a0f | 5253 | gcc_assert (BB_HEAD (bb) == head); |
496d7bb0 MK |
5254 | else |
5255 | /* This is the case of jump table. See inside_basic_block_p (). */ | |
5256 | gcc_assert (LABEL_P (head) && !inside_basic_block_p (head)); | |
5257 | } | |
5258 | ||
5259 | if (bb == 0) | |
5260 | { | |
5261 | gcc_assert (!inside_basic_block_p (head)); | |
5262 | head = NEXT_INSN (head); | |
5263 | } | |
5264 | else | |
5265 | { | |
5266 | gcc_assert (inside_basic_block_p (head) | |
5267 | || NOTE_P (head)); | |
5268 | gcc_assert (BLOCK_FOR_INSN (head) == bb); | |
b8698a0f | 5269 | |
496d7bb0 MK |
5270 | if (LABEL_P (head)) |
5271 | { | |
5272 | head = NEXT_INSN (head); | |
5273 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head)); | |
5274 | } | |
5275 | else | |
5276 | { | |
5277 | if (control_flow_insn_p (head)) | |
5278 | { | |
b5b8b0ac AO |
5279 | gcc_assert (prev_non_location_insn (BB_END (bb), head) |
5280 | == head); | |
5281 | ||
496d7bb0 MK |
5282 | if (any_uncondjump_p (head)) |
5283 | gcc_assert (EDGE_COUNT (bb->succs) == 1 | |
5284 | && BARRIER_P (NEXT_INSN (head))); | |
5285 | else if (any_condjump_p (head)) | |
cd8d4e24 MK |
5286 | gcc_assert (/* Usual case. */ |
5287 | (EDGE_COUNT (bb->succs) > 1 | |
5288 | && !BARRIER_P (NEXT_INSN (head))) | |
5289 | /* Or jump to the next instruction. */ | |
5290 | || (EDGE_COUNT (bb->succs) == 1 | |
5291 | && (BB_HEAD (EDGE_I (bb->succs, 0)->dest) | |
5292 | == JUMP_LABEL (head)))); | |
496d7bb0 MK |
5293 | } |
5294 | if (BB_END (bb) == head) | |
5295 | { | |
5296 | if (EDGE_COUNT (bb->succs) > 1) | |
b5b8b0ac AO |
5297 | gcc_assert (control_flow_insn_p (prev_non_location_insn |
5298 | (head, BB_HEAD (bb))) | |
496d7bb0 MK |
5299 | || has_edge_p (bb->succs, EDGE_COMPLEX)); |
5300 | bb = 0; | |
5301 | } | |
b5b8b0ac | 5302 | |
496d7bb0 MK |
5303 | head = NEXT_INSN (head); |
5304 | } | |
5305 | } | |
5306 | ||
5307 | not_first = 1; | |
5308 | } | |
5309 | while (head != next_tail); | |
5310 | ||
5311 | gcc_assert (bb == 0); | |
5312 | } | |
e855c69d | 5313 | |
496d7bb0 MK |
5314 | #endif /* ENABLE_CHECKING */ |
5315 | ||
e855c69d AB |
5316 | /* Extend per basic block data structures. */ |
5317 | static void | |
5318 | extend_bb (void) | |
5319 | { | |
5320 | if (sched_scan_info->extend_bb) | |
5321 | sched_scan_info->extend_bb (); | |
5322 | } | |
5323 | ||
5324 | /* Init data for BB. */ | |
5325 | static void | |
5326 | init_bb (basic_block bb) | |
5327 | { | |
5328 | if (sched_scan_info->init_bb) | |
5329 | sched_scan_info->init_bb (bb); | |
5330 | } | |
5331 | ||
5332 | /* Extend per insn data structures. */ | |
5333 | static void | |
5334 | extend_insn (void) | |
5335 | { | |
5336 | if (sched_scan_info->extend_insn) | |
5337 | sched_scan_info->extend_insn (); | |
5338 | } | |
5339 | ||
5340 | /* Init data structures for INSN. */ | |
5341 | static void | |
5342 | init_insn (rtx insn) | |
5343 | { | |
5344 | if (sched_scan_info->init_insn) | |
5345 | sched_scan_info->init_insn (insn); | |
5346 | } | |
5347 | ||
5348 | /* Init all insns in BB. */ | |
5349 | static void | |
5350 | init_insns_in_bb (basic_block bb) | |
5351 | { | |
5352 | rtx insn; | |
5353 | ||
5354 | FOR_BB_INSNS (bb, insn) | |
5355 | init_insn (insn); | |
5356 | } | |
5357 | ||
5358 | /* A driver function to add a set of basic blocks (BBS), | |
5359 | a single basic block (BB), a set of insns (INSNS) or a single insn (INSN) | |
5360 | to the scheduling region. */ | |
5361 | void | |
5362 | sched_scan (const struct sched_scan_info_def *ssi, | |
5363 | bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn) | |
5364 | { | |
5365 | sched_scan_info = ssi; | |
5366 | ||
5367 | if (bbs != NULL || bb != NULL) | |
5368 | { | |
5369 | extend_bb (); | |
5370 | ||
5371 | if (bbs != NULL) | |
5372 | { | |
5373 | unsigned i; | |
5374 | basic_block x; | |
5375 | ||
5376 | for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++) | |
5377 | init_bb (x); | |
5378 | } | |
5379 | ||
5380 | if (bb != NULL) | |
5381 | init_bb (bb); | |
5382 | } | |
5383 | ||
5384 | extend_insn (); | |
5385 | ||
5386 | if (bbs != NULL) | |
b8698a0f | 5387 | { |
e855c69d AB |
5388 | unsigned i; |
5389 | basic_block x; | |
5390 | ||
5391 | for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++) | |
5392 | init_insns_in_bb (x); | |
5393 | } | |
5394 | ||
5395 | if (bb != NULL) | |
5396 | init_insns_in_bb (bb); | |
5397 | ||
5398 | if (insns != NULL) | |
5399 | { | |
5400 | unsigned i; | |
5401 | rtx x; | |
5402 | ||
5403 | for (i = 0; VEC_iterate (rtx, insns, i, x); i++) | |
5404 | init_insn (x); | |
5405 | } | |
5406 | ||
5407 | if (insn != NULL) | |
5408 | init_insn (insn); | |
5409 | } | |
5410 | ||
5411 | ||
5412 | /* Extend data structures for logical insn UID. */ | |
5413 | static void | |
5414 | luids_extend_insn (void) | |
5415 | { | |
5416 | int new_luids_max_uid = get_max_uid () + 1; | |
5417 | ||
5418 | VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid); | |
5419 | } | |
5420 | ||
5421 | /* Initialize LUID for INSN. */ | |
5422 | static void | |
5423 | luids_init_insn (rtx insn) | |
5424 | { | |
5425 | int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn); | |
5426 | int luid; | |
5427 | ||
5428 | if (i >= 0) | |
5429 | { | |
5430 | luid = sched_max_luid; | |
5431 | sched_max_luid += i; | |
5432 | } | |
5433 | else | |
5434 | luid = -1; | |
5435 | ||
5436 | SET_INSN_LUID (insn, luid); | |
5437 | } | |
5438 | ||
5439 | /* Initialize luids for BBS, BB, INSNS and INSN. | |
5440 | The hook common_sched_info->luid_for_non_insn () is used to determine | |
5441 | if notes, labels, etc. need luids. */ | |
5442 | void | |
5443 | sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn) | |
5444 | { | |
5445 | const struct sched_scan_info_def ssi = | |
5446 | { | |
5447 | NULL, /* extend_bb */ | |
5448 | NULL, /* init_bb */ | |
5449 | luids_extend_insn, /* extend_insn */ | |
5450 | luids_init_insn /* init_insn */ | |
5451 | }; | |
5452 | ||
5453 | sched_scan (&ssi, bbs, bb, insns, insn); | |
5454 | } | |
5455 | ||
5456 | /* Free LUIDs. */ | |
5457 | void | |
5458 | sched_finish_luids (void) | |
5459 | { | |
5460 | VEC_free (int, heap, sched_luids); | |
5461 | sched_max_luid = 1; | |
5462 | } | |
5463 | ||
5464 | /* Return logical uid of INSN. Helpful while debugging. */ | |
5465 | int | |
5466 | insn_luid (rtx insn) | |
5467 | { | |
5468 | return INSN_LUID (insn); | |
5469 | } | |
5470 | ||
5471 | /* Extend per insn data in the target. */ | |
5472 | void | |
5473 | sched_extend_target (void) | |
5474 | { | |
5475 | if (targetm.sched.h_i_d_extended) | |
5476 | targetm.sched.h_i_d_extended (); | |
5477 | } | |
5478 | ||
5479 | /* Extend global scheduler structures (those, that live across calls to | |
5480 | schedule_block) to include information about just emitted INSN. */ | |
5481 | static void | |
5482 | extend_h_i_d (void) | |
5483 | { | |
b8698a0f | 5484 | int reserve = (get_max_uid () + 1 |
e855c69d | 5485 | - VEC_length (haifa_insn_data_def, h_i_d)); |
b8698a0f | 5486 | if (reserve > 0 |
e855c69d AB |
5487 | && ! VEC_space (haifa_insn_data_def, h_i_d, reserve)) |
5488 | { | |
b8698a0f | 5489 | VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d, |
e855c69d AB |
5490 | 3 * get_max_uid () / 2); |
5491 | sched_extend_target (); | |
5492 | } | |
5493 | } | |
5494 | ||
5495 | /* Initialize h_i_d entry of the INSN with default values. | |
5496 | Values, that are not explicitly initialized here, hold zero. */ | |
5497 | static void | |
5498 | init_h_i_d (rtx insn) | |
5499 | { | |
5500 | if (INSN_LUID (insn) > 0) | |
5501 | { | |
5502 | INSN_COST (insn) = -1; | |
e855c69d AB |
5503 | QUEUE_INDEX (insn) = QUEUE_NOWHERE; |
5504 | INSN_TICK (insn) = INVALID_TICK; | |
5505 | INTER_TICK (insn) = INVALID_TICK; | |
5506 | TODO_SPEC (insn) = HARD_DEP; | |
5507 | } | |
5508 | } | |
5509 | ||
5510 | /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */ | |
5511 | void | |
5512 | haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn) | |
5513 | { | |
5514 | const struct sched_scan_info_def ssi = | |
5515 | { | |
5516 | NULL, /* extend_bb */ | |
5517 | NULL, /* init_bb */ | |
5518 | extend_h_i_d, /* extend_insn */ | |
5519 | init_h_i_d /* init_insn */ | |
5520 | }; | |
5521 | ||
5522 | sched_scan (&ssi, bbs, bb, insns, insn); | |
5523 | } | |
5524 | ||
5525 | /* Finalize haifa_insn_data. */ | |
5526 | void | |
5527 | haifa_finish_h_i_d (void) | |
5528 | { | |
ce18efcb VM |
5529 | int i; |
5530 | haifa_insn_data_t data; | |
5531 | struct reg_use_data *use, *next; | |
5532 | ||
5533 | for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++) | |
5534 | { | |
5535 | if (data->reg_pressure != NULL) | |
5536 | free (data->reg_pressure); | |
5537 | for (use = data->reg_use_list; use != NULL; use = next) | |
5538 | { | |
5539 | next = use->next_insn_use; | |
5540 | free (use); | |
5541 | } | |
5542 | } | |
e855c69d AB |
5543 | VEC_free (haifa_insn_data_def, heap, h_i_d); |
5544 | } | |
5545 | ||
5546 | /* Init data for the new insn INSN. */ | |
5547 | static void | |
5548 | haifa_init_insn (rtx insn) | |
5549 | { | |
5550 | gcc_assert (insn != NULL); | |
5551 | ||
5552 | sched_init_luids (NULL, NULL, NULL, insn); | |
5553 | sched_extend_target (); | |
5554 | sched_deps_init (false); | |
5555 | haifa_init_h_i_d (NULL, NULL, NULL, insn); | |
5556 | ||
5557 | if (adding_bb_to_current_region_p) | |
5558 | { | |
5559 | sd_init_insn (insn); | |
5560 | ||
5561 | /* Extend dependency caches by one element. */ | |
5562 | extend_dependency_caches (1, false); | |
5563 | } | |
5564 | } | |
5565 | ||
5566 | /* Init data for the new basic block BB which comes after AFTER. */ | |
5567 | static void | |
5568 | haifa_init_only_bb (basic_block bb, basic_block after) | |
5569 | { | |
5570 | gcc_assert (bb != NULL); | |
5571 | ||
5572 | sched_init_bbs (); | |
5573 | ||
5574 | if (common_sched_info->add_block) | |
5575 | /* This changes only data structures of the front-end. */ | |
5576 | common_sched_info->add_block (bb, after); | |
5577 | } | |
5578 | ||
5579 | /* A generic version of sched_split_block (). */ | |
5580 | basic_block | |
5581 | sched_split_block_1 (basic_block first_bb, rtx after) | |
5582 | { | |
5583 | edge e; | |
5584 | ||
5585 | e = split_block (first_bb, after); | |
5586 | gcc_assert (e->src == first_bb); | |
5587 | ||
b8698a0f | 5588 | /* sched_split_block emits note if *check == BB_END. Probably it |
e855c69d AB |
5589 | is better to rip that note off. */ |
5590 | ||
5591 | return e->dest; | |
5592 | } | |
5593 | ||
5594 | /* A generic version of sched_create_empty_bb (). */ | |
5595 | basic_block | |
5596 | sched_create_empty_bb_1 (basic_block after) | |
5597 | { | |
5598 | return create_empty_bb (after); | |
5599 | } | |
5600 | ||
9dcc2e87 TS |
5601 | /* Insert PAT as an INSN into the schedule and update the necessary data |
5602 | structures to account for it. */ | |
5603 | rtx | |
5604 | sched_emit_insn (rtx pat) | |
5605 | { | |
5606 | rtx insn = emit_insn_after (pat, last_scheduled_insn); | |
5607 | last_scheduled_insn = insn; | |
5608 | haifa_init_insn (insn); | |
5609 | return insn; | |
5610 | } | |
5611 | ||
8c660648 | 5612 | #endif /* INSN_SCHEDULING */ |