]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/haifa-sched.c
Disable AVX-512VL insns for scalar mode operands on -march=knl.
[thirdparty/gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.c.
25
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes are all dealt with here.
34
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
39
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
54
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
57
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block upon interblock motion, ties broken by
63 4. prefer useful upon speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, or finally
68 7 choose the insn which has the most insns dependent on it.
69 8. choose insn with lowest UID.
70
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
77
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_dependences ().
81
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
86
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
95
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
100
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
105
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
109
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
114
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
118
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
124 \f
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "backend.h"
129 #include "cfghooks.h"
130 #include "rtl.h"
131 #include "df.h"
132 #include "diagnostic-core.h"
133 #include "tm_p.h"
134 #include "regs.h"
135 #include "flags.h"
136 #include "insn-config.h"
137 #include "insn-attr.h"
138 #include "except.h"
139 #include "recog.h"
140 #include "cfgrtl.h"
141 #include "cfgbuild.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "common/common-target.h"
145 #include "params.h"
146 #include "dbgcnt.h"
147 #include "cfgloop.h"
148 #include "ira.h"
149 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
150 #include "dumpfile.h"
151
152 #ifdef INSN_SCHEDULING
153
154 /* True if we do register pressure relief through live-range
155 shrinkage. */
156 static bool live_range_shrinkage_p;
157
158 /* Switch on live range shrinkage. */
159 void
160 initialize_live_range_shrinkage (void)
161 {
162 live_range_shrinkage_p = true;
163 }
164
165 /* Switch off live range shrinkage. */
166 void
167 finish_live_range_shrinkage (void)
168 {
169 live_range_shrinkage_p = false;
170 }
171
172 /* issue_rate is the number of insns that can be scheduled in the same
173 machine cycle. It can be defined in the config/mach/mach.h file,
174 otherwise we set it to 1. */
175
176 int issue_rate;
177
178 /* This can be set to true by a backend if the scheduler should not
179 enable a DCE pass. */
180 bool sched_no_dce;
181
182 /* The current initiation interval used when modulo scheduling. */
183 static int modulo_ii;
184
185 /* The maximum number of stages we are prepared to handle. */
186 static int modulo_max_stages;
187
188 /* The number of insns that exist in each iteration of the loop. We use this
189 to detect when we've scheduled all insns from the first iteration. */
190 static int modulo_n_insns;
191
192 /* The current count of insns in the first iteration of the loop that have
193 already been scheduled. */
194 static int modulo_insns_scheduled;
195
196 /* The maximum uid of insns from the first iteration of the loop. */
197 static int modulo_iter0_max_uid;
198
199 /* The number of times we should attempt to backtrack when modulo scheduling.
200 Decreased each time we have to backtrack. */
201 static int modulo_backtracks_left;
202
203 /* The stage in which the last insn from the original loop was
204 scheduled. */
205 static int modulo_last_stage;
206
207 /* sched-verbose controls the amount of debugging output the
208 scheduler prints. It is controlled by -fsched-verbose=N:
209 N>0 and no -DSR : the output is directed to stderr.
210 N>=10 will direct the printouts to stderr (regardless of -dSR).
211 N=1: same as -dSR.
212 N=2: bb's probabilities, detailed ready list info, unit/insn info.
213 N=3: rtl at abort point, control-flow, regions info.
214 N=5: dependences info. */
215
216 int sched_verbose = 0;
217
218 /* Debugging file. All printouts are sent to dump, which is always set,
219 either to stderr, or to the dump listing file (-dRS). */
220 FILE *sched_dump = 0;
221
222 /* This is a placeholder for the scheduler parameters common
223 to all schedulers. */
224 struct common_sched_info_def *common_sched_info;
225
226 #define INSN_TICK(INSN) (HID (INSN)->tick)
227 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
228 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
229 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
230 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
231 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
232 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
233 /* Cached cost of the instruction. Use insn_cost to get cost of the
234 insn. -1 here means that the field is not initialized. */
235 #define INSN_COST(INSN) (HID (INSN)->cost)
236
237 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
238 then it should be recalculated from scratch. */
239 #define INVALID_TICK (-(max_insn_queue_index + 1))
240 /* The minimal value of the INSN_TICK of an instruction. */
241 #define MIN_TICK (-max_insn_queue_index)
242
243 /* Original order of insns in the ready list.
244 Used to keep order of normal insns while separating DEBUG_INSNs. */
245 #define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)
246
247 /* The deciding reason for INSN's place in the ready list. */
248 #define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
249
250 /* List of important notes we must keep around. This is a pointer to the
251 last element in the list. */
252 rtx_insn *note_list;
253
254 static struct spec_info_def spec_info_var;
255 /* Description of the speculative part of the scheduling.
256 If NULL - no speculation. */
257 spec_info_t spec_info = NULL;
258
259 /* True, if recovery block was added during scheduling of current block.
260 Used to determine, if we need to fix INSN_TICKs. */
261 static bool haifa_recovery_bb_recently_added_p;
262
263 /* True, if recovery block was added during this scheduling pass.
264 Used to determine if we should have empty memory pools of dependencies
265 after finishing current region. */
266 bool haifa_recovery_bb_ever_added_p;
267
268 /* Counters of different types of speculative instructions. */
269 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
270
271 /* Array used in {unlink, restore}_bb_notes. */
272 static rtx_insn **bb_header = 0;
273
274 /* Basic block after which recovery blocks will be created. */
275 static basic_block before_recovery;
276
277 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
278 created it. */
279 basic_block after_recovery;
280
281 /* FALSE if we add bb to another region, so we don't need to initialize it. */
282 bool adding_bb_to_current_region_p = true;
283
284 /* Queues, etc. */
285
286 /* An instruction is ready to be scheduled when all insns preceding it
287 have already been scheduled. It is important to ensure that all
288 insns which use its result will not be executed until its result
289 has been computed. An insn is maintained in one of four structures:
290
291 (P) the "Pending" set of insns which cannot be scheduled until
292 their dependencies have been satisfied.
293 (Q) the "Queued" set of insns that can be scheduled when sufficient
294 time has passed.
295 (R) the "Ready" list of unscheduled, uncommitted insns.
296 (S) the "Scheduled" list of insns.
297
298 Initially, all insns are either "Pending" or "Ready" depending on
299 whether their dependencies are satisfied.
300
301 Insns move from the "Ready" list to the "Scheduled" list as they
302 are committed to the schedule. As this occurs, the insns in the
303 "Pending" list have their dependencies satisfied and move to either
304 the "Ready" list or the "Queued" set depending on whether
305 sufficient time has passed to make them ready. As time passes,
306 insns move from the "Queued" set to the "Ready" list.
307
308 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
309 unscheduled insns, i.e., those that are ready, queued, and pending.
310 The "Queued" set (Q) is implemented by the variable `insn_queue'.
311 The "Ready" list (R) is implemented by the variables `ready' and
312 `n_ready'.
313 The "Scheduled" list (S) is the new insn chain built by this pass.
314
315 The transition (R->S) is implemented in the scheduling loop in
316 `schedule_block' when the best insn to schedule is chosen.
317 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
318 insns move from the ready list to the scheduled list.
319 The transition (Q->R) is implemented in 'queue_to_insn' as time
320 passes or stalls are introduced. */
321
322 /* Implement a circular buffer to delay instructions until sufficient
323 time has passed. For the new pipeline description interface,
324 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
325 than maximal time of instruction execution computed by genattr.c on
326 the base maximal time of functional unit reservations and getting a
327 result. This is the longest time an insn may be queued. */
328
329 static rtx_insn_list **insn_queue;
330 static int q_ptr = 0;
331 static int q_size = 0;
332 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
333 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
334
335 #define QUEUE_SCHEDULED (-3)
336 #define QUEUE_NOWHERE (-2)
337 #define QUEUE_READY (-1)
338 /* QUEUE_SCHEDULED - INSN is scheduled.
339 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
340 queue or ready list.
341 QUEUE_READY - INSN is in ready list.
342 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
343
344 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
345
346 /* The following variable value refers for all current and future
347 reservations of the processor units. */
348 state_t curr_state;
349
350 /* The following variable value is size of memory representing all
351 current and future reservations of the processor units. */
352 size_t dfa_state_size;
353
354 /* The following array is used to find the best insn from ready when
355 the automaton pipeline interface is used. */
356 signed char *ready_try = NULL;
357
358 /* The ready list. */
359 struct ready_list ready = {NULL, 0, 0, 0, 0};
360
361 /* The pointer to the ready list (to be removed). */
362 static struct ready_list *readyp = &ready;
363
364 /* Scheduling clock. */
365 static int clock_var;
366
367 /* Clock at which the previous instruction was issued. */
368 static int last_clock_var;
369
370 /* Set to true if, when queuing a shadow insn, we discover that it would be
371 scheduled too late. */
372 static bool must_backtrack;
373
374 /* The following variable value is number of essential insns issued on
375 the current cycle. An insn is essential one if it changes the
376 processors state. */
377 int cycle_issued_insns;
378
379 /* This records the actual schedule. It is built up during the main phase
380 of schedule_block, and afterwards used to reorder the insns in the RTL. */
381 static vec<rtx_insn *> scheduled_insns;
382
383 static int may_trap_exp (const_rtx, int);
384
385 /* Nonzero iff the address is comprised from at most 1 register. */
386 #define CONST_BASED_ADDRESS_P(x) \
387 (REG_P (x) \
388 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
389 || (GET_CODE (x) == LO_SUM)) \
390 && (CONSTANT_P (XEXP (x, 0)) \
391 || CONSTANT_P (XEXP (x, 1)))))
392
393 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
394 as found by analyzing insn's expression. */
395
396 \f
397 static int haifa_luid_for_non_insn (rtx x);
398
399 /* Haifa version of sched_info hooks common to all headers. */
400 const struct common_sched_info_def haifa_common_sched_info =
401 {
402 NULL, /* fix_recovery_cfg */
403 NULL, /* add_block */
404 NULL, /* estimate_number_of_insns */
405 haifa_luid_for_non_insn, /* luid_for_non_insn */
406 SCHED_PASS_UNKNOWN /* sched_pass_id */
407 };
408
409 /* Mapping from instruction UID to its Logical UID. */
410 vec<int> sched_luids = vNULL;
411
412 /* Next LUID to assign to an instruction. */
413 int sched_max_luid = 1;
414
415 /* Haifa Instruction Data. */
416 vec<haifa_insn_data_def> h_i_d = vNULL;
417
418 void (* sched_init_only_bb) (basic_block, basic_block);
419
420 /* Split block function. Different schedulers might use different functions
421 to handle their internal data consistent. */
422 basic_block (* sched_split_block) (basic_block, rtx);
423
424 /* Create empty basic block after the specified block. */
425 basic_block (* sched_create_empty_bb) (basic_block);
426
427 /* Return the number of cycles until INSN is expected to be ready.
428 Return zero if it already is. */
429 static int
430 insn_delay (rtx_insn *insn)
431 {
432 return MAX (INSN_TICK (insn) - clock_var, 0);
433 }
434
435 static int
436 may_trap_exp (const_rtx x, int is_store)
437 {
438 enum rtx_code code;
439
440 if (x == 0)
441 return TRAP_FREE;
442 code = GET_CODE (x);
443 if (is_store)
444 {
445 if (code == MEM && may_trap_p (x))
446 return TRAP_RISKY;
447 else
448 return TRAP_FREE;
449 }
450 if (code == MEM)
451 {
452 /* The insn uses memory: a volatile load. */
453 if (MEM_VOLATILE_P (x))
454 return IRISKY;
455 /* An exception-free load. */
456 if (!may_trap_p (x))
457 return IFREE;
458 /* A load with 1 base register, to be further checked. */
459 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
460 return PFREE_CANDIDATE;
461 /* No info on the load, to be further checked. */
462 return PRISKY_CANDIDATE;
463 }
464 else
465 {
466 const char *fmt;
467 int i, insn_class = TRAP_FREE;
468
469 /* Neither store nor load, check if it may cause a trap. */
470 if (may_trap_p (x))
471 return TRAP_RISKY;
472 /* Recursive step: walk the insn... */
473 fmt = GET_RTX_FORMAT (code);
474 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
475 {
476 if (fmt[i] == 'e')
477 {
478 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
479 insn_class = WORST_CLASS (insn_class, tmp_class);
480 }
481 else if (fmt[i] == 'E')
482 {
483 int j;
484 for (j = 0; j < XVECLEN (x, i); j++)
485 {
486 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
487 insn_class = WORST_CLASS (insn_class, tmp_class);
488 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
489 break;
490 }
491 }
492 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
493 break;
494 }
495 return insn_class;
496 }
497 }
498
499 /* Classifies rtx X of an insn for the purpose of verifying that X can be
500 executed speculatively (and consequently the insn can be moved
501 speculatively), by examining X, returning:
502 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
503 TRAP_FREE: non-load insn.
504 IFREE: load from a globally safe location.
505 IRISKY: volatile load.
506 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
507 being either PFREE or PRISKY. */
508
509 static int
510 haifa_classify_rtx (const_rtx x)
511 {
512 int tmp_class = TRAP_FREE;
513 int insn_class = TRAP_FREE;
514 enum rtx_code code;
515
516 if (GET_CODE (x) == PARALLEL)
517 {
518 int i, len = XVECLEN (x, 0);
519
520 for (i = len - 1; i >= 0; i--)
521 {
522 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
523 insn_class = WORST_CLASS (insn_class, tmp_class);
524 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
525 break;
526 }
527 }
528 else
529 {
530 code = GET_CODE (x);
531 switch (code)
532 {
533 case CLOBBER:
534 /* Test if it is a 'store'. */
535 tmp_class = may_trap_exp (XEXP (x, 0), 1);
536 break;
537 case SET:
538 /* Test if it is a store. */
539 tmp_class = may_trap_exp (SET_DEST (x), 1);
540 if (tmp_class == TRAP_RISKY)
541 break;
542 /* Test if it is a load. */
543 tmp_class =
544 WORST_CLASS (tmp_class,
545 may_trap_exp (SET_SRC (x), 0));
546 break;
547 case COND_EXEC:
548 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
549 if (tmp_class == TRAP_RISKY)
550 break;
551 tmp_class = WORST_CLASS (tmp_class,
552 may_trap_exp (COND_EXEC_TEST (x), 0));
553 break;
554 case TRAP_IF:
555 tmp_class = TRAP_RISKY;
556 break;
557 default:;
558 }
559 insn_class = tmp_class;
560 }
561
562 return insn_class;
563 }
564
565 int
566 haifa_classify_insn (const_rtx insn)
567 {
568 return haifa_classify_rtx (PATTERN (insn));
569 }
570 \f
571 /* After the scheduler initialization function has been called, this function
572 can be called to enable modulo scheduling. II is the initiation interval
573 we should use, it affects the delays for delay_pairs that were recorded as
574 separated by a given number of stages.
575
576 MAX_STAGES provides us with a limit
577 after which we give up scheduling; the caller must have unrolled at least
578 as many copies of the loop body and recorded delay_pairs for them.
579
580 INSNS is the number of real (non-debug) insns in one iteration of
581 the loop. MAX_UID can be used to test whether an insn belongs to
582 the first iteration of the loop; all of them have a uid lower than
583 MAX_UID. */
584 void
585 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
586 {
587 modulo_ii = ii;
588 modulo_max_stages = max_stages;
589 modulo_n_insns = insns;
590 modulo_iter0_max_uid = max_uid;
591 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
592 }
593
594 /* A structure to record a pair of insns where the first one is a real
595 insn that has delay slots, and the second is its delayed shadow.
596 I1 is scheduled normally and will emit an assembly instruction,
597 while I2 describes the side effect that takes place at the
598 transition between cycles CYCLES and (CYCLES + 1) after I1. */
599 struct delay_pair
600 {
601 struct delay_pair *next_same_i1;
602 rtx_insn *i1, *i2;
603 int cycles;
604 /* When doing modulo scheduling, we a delay_pair can also be used to
605 show that I1 and I2 are the same insn in a different stage. If that
606 is the case, STAGES will be nonzero. */
607 int stages;
608 };
609
610 /* Helpers for delay hashing. */
611
612 struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
613 {
614 typedef void *compare_type;
615 static inline hashval_t hash (const delay_pair *);
616 static inline bool equal (const delay_pair *, const void *);
617 };
618
619 /* Returns a hash value for X, based on hashing just I1. */
620
621 inline hashval_t
622 delay_i1_hasher::hash (const delay_pair *x)
623 {
624 return htab_hash_pointer (x->i1);
625 }
626
627 /* Return true if I1 of pair X is the same as that of pair Y. */
628
629 inline bool
630 delay_i1_hasher::equal (const delay_pair *x, const void *y)
631 {
632 return x->i1 == y;
633 }
634
635 struct delay_i2_hasher : free_ptr_hash <delay_pair>
636 {
637 typedef void *compare_type;
638 static inline hashval_t hash (const delay_pair *);
639 static inline bool equal (const delay_pair *, const void *);
640 };
641
642 /* Returns a hash value for X, based on hashing just I2. */
643
644 inline hashval_t
645 delay_i2_hasher::hash (const delay_pair *x)
646 {
647 return htab_hash_pointer (x->i2);
648 }
649
650 /* Return true if I2 of pair X is the same as that of pair Y. */
651
652 inline bool
653 delay_i2_hasher::equal (const delay_pair *x, const void *y)
654 {
655 return x->i2 == y;
656 }
657
658 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
659 indexed by I2. */
660 static hash_table<delay_i1_hasher> *delay_htab;
661 static hash_table<delay_i2_hasher> *delay_htab_i2;
662
663 /* Called through htab_traverse. Walk the hashtable using I2 as
664 index, and delete all elements involving an UID higher than
665 that pointed to by *DATA. */
666 int
667 haifa_htab_i2_traverse (delay_pair **slot, int *data)
668 {
669 int maxuid = *data;
670 struct delay_pair *p = *slot;
671 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
672 {
673 delay_htab_i2->clear_slot (slot);
674 }
675 return 1;
676 }
677
678 /* Called through htab_traverse. Walk the hashtable using I2 as
679 index, and delete all elements involving an UID higher than
680 that pointed to by *DATA. */
681 int
682 haifa_htab_i1_traverse (delay_pair **pslot, int *data)
683 {
684 int maxuid = *data;
685 struct delay_pair *p, *first, **pprev;
686
687 if (INSN_UID ((*pslot)->i1) >= maxuid)
688 {
689 delay_htab->clear_slot (pslot);
690 return 1;
691 }
692 pprev = &first;
693 for (p = *pslot; p; p = p->next_same_i1)
694 {
695 if (INSN_UID (p->i2) < maxuid)
696 {
697 *pprev = p;
698 pprev = &p->next_same_i1;
699 }
700 }
701 *pprev = NULL;
702 if (first == NULL)
703 delay_htab->clear_slot (pslot);
704 else
705 *pslot = first;
706 return 1;
707 }
708
709 /* Discard all delay pairs which involve an insn with an UID higher
710 than MAX_UID. */
711 void
712 discard_delay_pairs_above (int max_uid)
713 {
714 delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
715 delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
716 }
717
718 /* This function can be called by a port just before it starts the final
719 scheduling pass. It records the fact that an instruction with delay
720 slots has been split into two insns, I1 and I2. The first one will be
721 scheduled normally and initiates the operation. The second one is a
722 shadow which must follow a specific number of cycles after I1; its only
723 purpose is to show the side effect that occurs at that cycle in the RTL.
724 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
725 while I2 retains the original insn type.
726
727 There are two ways in which the number of cycles can be specified,
728 involving the CYCLES and STAGES arguments to this function. If STAGES
729 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
730 which is multiplied by MODULO_II to give the number of cycles. This is
731 only useful if the caller also calls set_modulo_params to enable modulo
732 scheduling. */
733
734 void
735 record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
736 {
737 struct delay_pair *p = XNEW (struct delay_pair);
738 struct delay_pair **slot;
739
740 p->i1 = i1;
741 p->i2 = i2;
742 p->cycles = cycles;
743 p->stages = stages;
744
745 if (!delay_htab)
746 {
747 delay_htab = new hash_table<delay_i1_hasher> (10);
748 delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
749 }
750 slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
751 p->next_same_i1 = *slot;
752 *slot = p;
753 slot = delay_htab_i2->find_slot (p, INSERT);
754 *slot = p;
755 }
756
757 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
758 and return the other insn if so. Return NULL otherwise. */
759 rtx_insn *
760 real_insn_for_shadow (rtx_insn *insn)
761 {
762 struct delay_pair *pair;
763
764 if (!delay_htab)
765 return NULL;
766
767 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
768 if (!pair || pair->stages > 0)
769 return NULL;
770 return pair->i1;
771 }
772
773 /* For a pair P of insns, return the fixed distance in cycles from the first
774 insn after which the second must be scheduled. */
775 static int
776 pair_delay (struct delay_pair *p)
777 {
778 if (p->stages == 0)
779 return p->cycles;
780 else
781 return p->stages * modulo_ii;
782 }
783
784 /* Given an insn INSN, add a dependence on its delayed shadow if it
785 has one. Also try to find situations where shadows depend on each other
786 and add dependencies to the real insns to limit the amount of backtracking
787 needed. */
788 void
789 add_delay_dependencies (rtx_insn *insn)
790 {
791 struct delay_pair *pair;
792 sd_iterator_def sd_it;
793 dep_t dep;
794
795 if (!delay_htab)
796 return;
797
798 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
799 if (!pair)
800 return;
801 add_dependence (insn, pair->i1, REG_DEP_ANTI);
802 if (pair->stages)
803 return;
804
805 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
806 {
807 rtx_insn *pro = DEP_PRO (dep);
808 struct delay_pair *other_pair
809 = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
810 if (!other_pair || other_pair->stages)
811 continue;
812 if (pair_delay (other_pair) >= pair_delay (pair))
813 {
814 if (sched_verbose >= 4)
815 {
816 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
817 INSN_UID (other_pair->i1),
818 INSN_UID (pair->i1));
819 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
820 INSN_UID (pair->i1),
821 INSN_UID (pair->i2),
822 pair_delay (pair));
823 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
824 INSN_UID (other_pair->i1),
825 INSN_UID (other_pair->i2),
826 pair_delay (other_pair));
827 }
828 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
829 }
830 }
831 }
832 \f
833 /* Forward declarations. */
834
835 static int priority (rtx_insn *);
836 static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
837 static int rank_for_schedule (const void *, const void *);
838 static void swap_sort (rtx_insn **, int);
839 static void queue_insn (rtx_insn *, int, const char *);
840 static int schedule_insn (rtx_insn *);
841 static void adjust_priority (rtx_insn *);
842 static void advance_one_cycle (void);
843 static void extend_h_i_d (void);
844
845
846 /* Notes handling mechanism:
847 =========================
848 Generally, NOTES are saved before scheduling and restored after scheduling.
849 The scheduler distinguishes between two types of notes:
850
851 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
852 Before scheduling a region, a pointer to the note is added to the insn
853 that follows or precedes it. (This happens as part of the data dependence
854 computation). After scheduling an insn, the pointer contained in it is
855 used for regenerating the corresponding note (in reemit_notes).
856
857 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
858 these notes are put in a list (in rm_other_notes() and
859 unlink_other_notes ()). After scheduling the block, these notes are
860 inserted at the beginning of the block (in schedule_block()). */
861
862 static void ready_add (struct ready_list *, rtx_insn *, bool);
863 static rtx_insn *ready_remove_first (struct ready_list *);
864 static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
865
866 static void queue_to_ready (struct ready_list *);
867 static int early_queue_to_ready (state_t, struct ready_list *);
868
869 /* The following functions are used to implement multi-pass scheduling
870 on the first cycle. */
871 static rtx_insn *ready_remove (struct ready_list *, int);
872 static void ready_remove_insn (rtx_insn *);
873
874 static void fix_inter_tick (rtx_insn *, rtx_insn *);
875 static int fix_tick_ready (rtx_insn *);
876 static void change_queue_index (rtx_insn *, int);
877
878 /* The following functions are used to implement scheduling of data/control
879 speculative instructions. */
880
881 static void extend_h_i_d (void);
882 static void init_h_i_d (rtx_insn *);
883 static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
884 static void generate_recovery_code (rtx_insn *);
885 static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
886 static void begin_speculative_block (rtx_insn *);
887 static void add_to_speculative_block (rtx_insn *);
888 static void init_before_recovery (basic_block *);
889 static void create_check_block_twin (rtx_insn *, bool);
890 static void fix_recovery_deps (basic_block);
891 static bool haifa_change_pattern (rtx_insn *, rtx);
892 static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
893 static void restore_bb_notes (basic_block);
894 static void fix_jump_move (rtx_insn *);
895 static void move_block_after_check (rtx_insn *);
896 static void move_succs (vec<edge, va_gc> **, basic_block);
897 static void sched_remove_insn (rtx_insn *);
898 static void clear_priorities (rtx_insn *, rtx_vec_t *);
899 static void calc_priorities (rtx_vec_t);
900 static void add_jump_dependencies (rtx_insn *, rtx_insn *);
901
902 #endif /* INSN_SCHEDULING */
903 \f
904 /* Point to state used for the current scheduling pass. */
905 struct haifa_sched_info *current_sched_info;
906 \f
907 #ifndef INSN_SCHEDULING
908 void
909 schedule_insns (void)
910 {
911 }
912 #else
913
914 /* Do register pressure sensitive insn scheduling if the flag is set
915 up. */
916 enum sched_pressure_algorithm sched_pressure;
917
918 /* Map regno -> its pressure class. The map defined only when
919 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
920 enum reg_class *sched_regno_pressure_class;
921
922 /* The current register pressure. Only elements corresponding pressure
923 classes are defined. */
924 static int curr_reg_pressure[N_REG_CLASSES];
925
926 /* Saved value of the previous array. */
927 static int saved_reg_pressure[N_REG_CLASSES];
928
929 /* Register living at given scheduling point. */
930 static bitmap curr_reg_live;
931
932 /* Saved value of the previous array. */
933 static bitmap saved_reg_live;
934
935 /* Registers mentioned in the current region. */
936 static bitmap region_ref_regs;
937
938 /* Effective number of available registers of a given class (see comment
939 in sched_pressure_start_bb). */
940 static int sched_class_regs_num[N_REG_CLASSES];
941 /* Number of call_used_regs. This is a helper for calculating of
942 sched_class_regs_num. */
943 static int call_used_regs_num[N_REG_CLASSES];
944
945 /* Initiate register pressure relative info for scheduling the current
946 region. Currently it is only clearing register mentioned in the
947 current region. */
948 void
949 sched_init_region_reg_pressure_info (void)
950 {
951 bitmap_clear (region_ref_regs);
952 }
953
954 /* PRESSURE[CL] describes the pressure on register class CL. Update it
955 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
956 LIVE tracks the set of live registers; if it is null, assume that
957 every birth or death is genuine. */
958 static inline void
959 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
960 {
961 enum reg_class pressure_class;
962
963 pressure_class = sched_regno_pressure_class[regno];
964 if (regno >= FIRST_PSEUDO_REGISTER)
965 {
966 if (pressure_class != NO_REGS)
967 {
968 if (birth_p)
969 {
970 if (!live || bitmap_set_bit (live, regno))
971 pressure[pressure_class]
972 += (ira_reg_class_max_nregs
973 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
974 }
975 else
976 {
977 if (!live || bitmap_clear_bit (live, regno))
978 pressure[pressure_class]
979 -= (ira_reg_class_max_nregs
980 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
981 }
982 }
983 }
984 else if (pressure_class != NO_REGS
985 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
986 {
987 if (birth_p)
988 {
989 if (!live || bitmap_set_bit (live, regno))
990 pressure[pressure_class]++;
991 }
992 else
993 {
994 if (!live || bitmap_clear_bit (live, regno))
995 pressure[pressure_class]--;
996 }
997 }
998 }
999
1000 /* Initiate current register pressure related info from living
1001 registers given by LIVE. */
1002 static void
1003 initiate_reg_pressure_info (bitmap live)
1004 {
1005 int i;
1006 unsigned int j;
1007 bitmap_iterator bi;
1008
1009 for (i = 0; i < ira_pressure_classes_num; i++)
1010 curr_reg_pressure[ira_pressure_classes[i]] = 0;
1011 bitmap_clear (curr_reg_live);
1012 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
1013 if (sched_pressure == SCHED_PRESSURE_MODEL
1014 || current_nr_blocks == 1
1015 || bitmap_bit_p (region_ref_regs, j))
1016 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1017 }
1018
1019 /* Mark registers in X as mentioned in the current region. */
1020 static void
1021 setup_ref_regs (rtx x)
1022 {
1023 int i, j;
1024 const RTX_CODE code = GET_CODE (x);
1025 const char *fmt;
1026
1027 if (REG_P (x))
1028 {
1029 bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
1030 return;
1031 }
1032 fmt = GET_RTX_FORMAT (code);
1033 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1034 if (fmt[i] == 'e')
1035 setup_ref_regs (XEXP (x, i));
1036 else if (fmt[i] == 'E')
1037 {
1038 for (j = 0; j < XVECLEN (x, i); j++)
1039 setup_ref_regs (XVECEXP (x, i, j));
1040 }
1041 }
1042
1043 /* Initiate current register pressure related info at the start of
1044 basic block BB. */
1045 static void
1046 initiate_bb_reg_pressure_info (basic_block bb)
1047 {
1048 unsigned int i ATTRIBUTE_UNUSED;
1049 rtx_insn *insn;
1050
1051 if (current_nr_blocks > 1)
1052 FOR_BB_INSNS (bb, insn)
1053 if (NONDEBUG_INSN_P (insn))
1054 setup_ref_regs (PATTERN (insn));
1055 initiate_reg_pressure_info (df_get_live_in (bb));
1056 if (bb_has_eh_pred (bb))
1057 for (i = 0; ; ++i)
1058 {
1059 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1060
1061 if (regno == INVALID_REGNUM)
1062 break;
1063 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1064 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1065 regno, true);
1066 }
1067 }
1068
1069 /* Save current register pressure related info. */
1070 static void
1071 save_reg_pressure (void)
1072 {
1073 int i;
1074
1075 for (i = 0; i < ira_pressure_classes_num; i++)
1076 saved_reg_pressure[ira_pressure_classes[i]]
1077 = curr_reg_pressure[ira_pressure_classes[i]];
1078 bitmap_copy (saved_reg_live, curr_reg_live);
1079 }
1080
1081 /* Restore saved register pressure related info. */
1082 static void
1083 restore_reg_pressure (void)
1084 {
1085 int i;
1086
1087 for (i = 0; i < ira_pressure_classes_num; i++)
1088 curr_reg_pressure[ira_pressure_classes[i]]
1089 = saved_reg_pressure[ira_pressure_classes[i]];
1090 bitmap_copy (curr_reg_live, saved_reg_live);
1091 }
1092
1093 /* Return TRUE if the register is dying after its USE. */
1094 static bool
1095 dying_use_p (struct reg_use_data *use)
1096 {
1097 struct reg_use_data *next;
1098
1099 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1100 if (NONDEBUG_INSN_P (next->insn)
1101 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1102 return false;
1103 return true;
1104 }
1105
1106 /* Print info about the current register pressure and its excess for
1107 each pressure class. */
1108 static void
1109 print_curr_reg_pressure (void)
1110 {
1111 int i;
1112 enum reg_class cl;
1113
1114 fprintf (sched_dump, ";;\t");
1115 for (i = 0; i < ira_pressure_classes_num; i++)
1116 {
1117 cl = ira_pressure_classes[i];
1118 gcc_assert (curr_reg_pressure[cl] >= 0);
1119 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1120 curr_reg_pressure[cl],
1121 curr_reg_pressure[cl] - sched_class_regs_num[cl]);
1122 }
1123 fprintf (sched_dump, "\n");
1124 }
1125 \f
1126 /* Determine if INSN has a condition that is clobbered if a register
1127 in SET_REGS is modified. */
1128 static bool
1129 cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
1130 {
1131 rtx pat = PATTERN (insn);
1132 gcc_assert (GET_CODE (pat) == COND_EXEC);
1133 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1134 {
1135 sd_iterator_def sd_it;
1136 dep_t dep;
1137 haifa_change_pattern (insn, ORIG_PAT (insn));
1138 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1139 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1140 TODO_SPEC (insn) = HARD_DEP;
1141 if (sched_verbose >= 2)
1142 fprintf (sched_dump,
1143 ";;\t\tdequeue insn %s because of clobbered condition\n",
1144 (*current_sched_info->print_insn) (insn, 0));
1145 return true;
1146 }
1147
1148 return false;
1149 }
1150
1151 /* This function should be called after modifying the pattern of INSN,
1152 to update scheduler data structures as needed. */
1153 static void
1154 update_insn_after_change (rtx_insn *insn)
1155 {
1156 sd_iterator_def sd_it;
1157 dep_t dep;
1158
1159 dfa_clear_single_insn_cache (insn);
1160
1161 sd_it = sd_iterator_start (insn,
1162 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1163 while (sd_iterator_cond (&sd_it, &dep))
1164 {
1165 DEP_COST (dep) = UNKNOWN_DEP_COST;
1166 sd_iterator_next (&sd_it);
1167 }
1168
1169 /* Invalidate INSN_COST, so it'll be recalculated. */
1170 INSN_COST (insn) = -1;
1171 /* Invalidate INSN_TICK, so it'll be recalculated. */
1172 INSN_TICK (insn) = INVALID_TICK;
1173
1174 /* Invalidate autoprefetch data entry. */
1175 INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
1176 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
1177 INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
1178 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
1179 }
1180
1181
1182 /* Two VECs, one to hold dependencies for which pattern replacements
1183 need to be applied or restored at the start of the next cycle, and
1184 another to hold an integer that is either one, to apply the
1185 corresponding replacement, or zero to restore it. */
1186 static vec<dep_t> next_cycle_replace_deps;
1187 static vec<int> next_cycle_apply;
1188
1189 static void apply_replacement (dep_t, bool);
1190 static void restore_pattern (dep_t, bool);
1191
1192 /* Look at the remaining dependencies for insn NEXT, and compute and return
1193 the TODO_SPEC value we should use for it. This is called after one of
1194 NEXT's dependencies has been resolved.
1195 We also perform pattern replacements for predication, and for broken
1196 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1197 false. */
1198
1199 static ds_t
1200 recompute_todo_spec (rtx_insn *next, bool for_backtrack)
1201 {
1202 ds_t new_ds;
1203 sd_iterator_def sd_it;
1204 dep_t dep, modify_dep = NULL;
1205 int n_spec = 0;
1206 int n_control = 0;
1207 int n_replace = 0;
1208 bool first_p = true;
1209
1210 if (sd_lists_empty_p (next, SD_LIST_BACK))
1211 /* NEXT has all its dependencies resolved. */
1212 return 0;
1213
1214 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1215 return HARD_DEP;
1216
1217 /* If NEXT is intended to sit adjacent to this instruction, we don't
1218 want to try to break any dependencies. Treat it as a HARD_DEP. */
1219 if (SCHED_GROUP_P (next))
1220 return HARD_DEP;
1221
1222 /* Now we've got NEXT with speculative deps only.
1223 1. Look at the deps to see what we have to do.
1224 2. Check if we can do 'todo'. */
1225 new_ds = 0;
1226
1227 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1228 {
1229 rtx_insn *pro = DEP_PRO (dep);
1230 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1231
1232 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1233 continue;
1234
1235 if (ds)
1236 {
1237 n_spec++;
1238 if (first_p)
1239 {
1240 first_p = false;
1241
1242 new_ds = ds;
1243 }
1244 else
1245 new_ds = ds_merge (new_ds, ds);
1246 }
1247 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1248 {
1249 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1250 {
1251 n_control++;
1252 modify_dep = dep;
1253 }
1254 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1255 }
1256 else if (DEP_REPLACE (dep) != NULL)
1257 {
1258 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1259 {
1260 n_replace++;
1261 modify_dep = dep;
1262 }
1263 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1264 }
1265 }
1266
1267 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1268 {
1269 if (!dbg_cnt (sched_breakdep))
1270 return HARD_DEP;
1271 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1272 {
1273 struct dep_replacement *desc = DEP_REPLACE (dep);
1274 if (desc != NULL)
1275 {
1276 if (desc->insn == next && !for_backtrack)
1277 {
1278 gcc_assert (n_replace == 1);
1279 apply_replacement (dep, true);
1280 }
1281 DEP_STATUS (dep) |= DEP_CANCELLED;
1282 }
1283 }
1284 return 0;
1285 }
1286
1287 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1288 {
1289 rtx_insn *pro, *other;
1290 rtx new_pat;
1291 rtx cond = NULL_RTX;
1292 bool success;
1293 rtx_insn *prev = NULL;
1294 int i;
1295 unsigned regno;
1296
1297 if ((current_sched_info->flags & DO_PREDICATION) == 0
1298 || (ORIG_PAT (next) != NULL_RTX
1299 && PREDICATED_PAT (next) == NULL_RTX))
1300 return HARD_DEP;
1301
1302 pro = DEP_PRO (modify_dep);
1303 other = real_insn_for_shadow (pro);
1304 if (other != NULL_RTX)
1305 pro = other;
1306
1307 cond = sched_get_reverse_condition_uncached (pro);
1308 regno = REGNO (XEXP (cond, 0));
1309
1310 /* Find the last scheduled insn that modifies the condition register.
1311 We can stop looking once we find the insn we depend on through the
1312 REG_DEP_CONTROL; if the condition register isn't modified after it,
1313 we know that it still has the right value. */
1314 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1315 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1316 {
1317 HARD_REG_SET t;
1318
1319 find_all_hard_reg_sets (prev, &t, true);
1320 if (TEST_HARD_REG_BIT (t, regno))
1321 return HARD_DEP;
1322 if (prev == pro)
1323 break;
1324 }
1325 if (ORIG_PAT (next) == NULL_RTX)
1326 {
1327 ORIG_PAT (next) = PATTERN (next);
1328
1329 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1330 success = haifa_change_pattern (next, new_pat);
1331 if (!success)
1332 return HARD_DEP;
1333 PREDICATED_PAT (next) = new_pat;
1334 }
1335 else if (PATTERN (next) != PREDICATED_PAT (next))
1336 {
1337 bool success = haifa_change_pattern (next,
1338 PREDICATED_PAT (next));
1339 gcc_assert (success);
1340 }
1341 DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1342 return DEP_CONTROL;
1343 }
1344
1345 if (PREDICATED_PAT (next) != NULL_RTX)
1346 {
1347 int tick = INSN_TICK (next);
1348 bool success = haifa_change_pattern (next,
1349 ORIG_PAT (next));
1350 INSN_TICK (next) = tick;
1351 gcc_assert (success);
1352 }
1353
1354 /* We can't handle the case where there are both speculative and control
1355 dependencies, so we return HARD_DEP in such a case. Also fail if
1356 we have speculative dependencies with not enough points, or more than
1357 one control dependency. */
1358 if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1359 || (n_spec > 0
1360 /* Too few points? */
1361 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1362 || n_control > 0
1363 || n_replace > 0)
1364 return HARD_DEP;
1365
1366 return new_ds;
1367 }
1368 \f
1369 /* Pointer to the last instruction scheduled. */
1370 static rtx_insn *last_scheduled_insn;
1371
1372 /* Pointer to the last nondebug instruction scheduled within the
1373 block, or the prev_head of the scheduling block. Used by
1374 rank_for_schedule, so that insns independent of the last scheduled
1375 insn will be preferred over dependent instructions. */
1376 static rtx_insn *last_nondebug_scheduled_insn;
1377
1378 /* Pointer that iterates through the list of unscheduled insns if we
1379 have a dbg_cnt enabled. It always points at an insn prior to the
1380 first unscheduled one. */
1381 static rtx_insn *nonscheduled_insns_begin;
1382
1383 /* Compute cost of executing INSN.
1384 This is the number of cycles between instruction issue and
1385 instruction results. */
1386 int
1387 insn_cost (rtx_insn *insn)
1388 {
1389 int cost;
1390
1391 if (sched_fusion)
1392 return 0;
1393
1394 if (sel_sched_p ())
1395 {
1396 if (recog_memoized (insn) < 0)
1397 return 0;
1398
1399 cost = insn_default_latency (insn);
1400 if (cost < 0)
1401 cost = 0;
1402
1403 return cost;
1404 }
1405
1406 cost = INSN_COST (insn);
1407
1408 if (cost < 0)
1409 {
1410 /* A USE insn, or something else we don't need to
1411 understand. We can't pass these directly to
1412 result_ready_cost or insn_default_latency because it will
1413 trigger a fatal error for unrecognizable insns. */
1414 if (recog_memoized (insn) < 0)
1415 {
1416 INSN_COST (insn) = 0;
1417 return 0;
1418 }
1419 else
1420 {
1421 cost = insn_default_latency (insn);
1422 if (cost < 0)
1423 cost = 0;
1424
1425 INSN_COST (insn) = cost;
1426 }
1427 }
1428
1429 return cost;
1430 }
1431
1432 /* Compute cost of dependence LINK.
1433 This is the number of cycles between instruction issue and
1434 instruction results.
1435 ??? We also use this function to call recog_memoized on all insns. */
1436 int
1437 dep_cost_1 (dep_t link, dw_t dw)
1438 {
1439 rtx_insn *insn = DEP_PRO (link);
1440 rtx_insn *used = DEP_CON (link);
1441 int cost;
1442
1443 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1444 return DEP_COST (link);
1445
1446 if (delay_htab)
1447 {
1448 struct delay_pair *delay_entry;
1449 delay_entry
1450 = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
1451 if (delay_entry)
1452 {
1453 if (delay_entry->i1 == insn)
1454 {
1455 DEP_COST (link) = pair_delay (delay_entry);
1456 return DEP_COST (link);
1457 }
1458 }
1459 }
1460
1461 /* A USE insn should never require the value used to be computed.
1462 This allows the computation of a function's result and parameter
1463 values to overlap the return and call. We don't care about the
1464 dependence cost when only decreasing register pressure. */
1465 if (recog_memoized (used) < 0)
1466 {
1467 cost = 0;
1468 recog_memoized (insn);
1469 }
1470 else
1471 {
1472 enum reg_note dep_type = DEP_TYPE (link);
1473
1474 cost = insn_cost (insn);
1475
1476 if (INSN_CODE (insn) >= 0)
1477 {
1478 if (dep_type == REG_DEP_ANTI)
1479 cost = 0;
1480 else if (dep_type == REG_DEP_OUTPUT)
1481 {
1482 cost = (insn_default_latency (insn)
1483 - insn_default_latency (used));
1484 if (cost <= 0)
1485 cost = 1;
1486 }
1487 else if (bypass_p (insn))
1488 cost = insn_latency (insn, used);
1489 }
1490
1491
1492 if (targetm.sched.adjust_cost_2)
1493 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1494 dw);
1495 else if (targetm.sched.adjust_cost != NULL)
1496 {
1497 /* This variable is used for backward compatibility with the
1498 targets. */
1499 rtx_insn_list *dep_cost_rtx_link =
1500 alloc_INSN_LIST (NULL_RTX, NULL);
1501
1502 /* Make it self-cycled, so that if some tries to walk over this
1503 incomplete list he/she will be caught in an endless loop. */
1504 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1505
1506 /* Targets use only REG_NOTE_KIND of the link. */
1507 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1508
1509 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1510 insn, cost);
1511
1512 free_INSN_LIST_node (dep_cost_rtx_link);
1513 }
1514
1515 if (cost < 0)
1516 cost = 0;
1517 }
1518
1519 DEP_COST (link) = cost;
1520 return cost;
1521 }
1522
1523 /* Compute cost of dependence LINK.
1524 This is the number of cycles between instruction issue and
1525 instruction results. */
1526 int
1527 dep_cost (dep_t link)
1528 {
1529 return dep_cost_1 (link, 0);
1530 }
1531
1532 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1533 INSN_PRIORITY explicitly. */
1534 void
1535 increase_insn_priority (rtx_insn *insn, int amount)
1536 {
1537 if (!sel_sched_p ())
1538 {
1539 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1540 if (INSN_PRIORITY_KNOWN (insn))
1541 INSN_PRIORITY (insn) += amount;
1542 }
1543 else
1544 {
1545 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1546 Use EXPR_PRIORITY instead. */
1547 sel_add_to_insn_priority (insn, amount);
1548 }
1549 }
1550
1551 /* Return 'true' if DEP should be included in priority calculations. */
1552 static bool
1553 contributes_to_priority_p (dep_t dep)
1554 {
1555 if (DEBUG_INSN_P (DEP_CON (dep))
1556 || DEBUG_INSN_P (DEP_PRO (dep)))
1557 return false;
1558
1559 /* Critical path is meaningful in block boundaries only. */
1560 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1561 DEP_PRO (dep)))
1562 return false;
1563
1564 if (DEP_REPLACE (dep) != NULL)
1565 return false;
1566
1567 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1568 then speculative instructions will less likely be
1569 scheduled. That is because the priority of
1570 their producers will increase, and, thus, the
1571 producers will more likely be scheduled, thus,
1572 resolving the dependence. */
1573 if (sched_deps_info->generate_spec_deps
1574 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1575 && (DEP_STATUS (dep) & SPECULATIVE))
1576 return false;
1577
1578 return true;
1579 }
1580
1581 /* Compute the number of nondebug deps in list LIST for INSN. */
1582
1583 static int
1584 dep_list_size (rtx_insn *insn, sd_list_types_def list)
1585 {
1586 sd_iterator_def sd_it;
1587 dep_t dep;
1588 int dbgcount = 0, nodbgcount = 0;
1589
1590 if (!MAY_HAVE_DEBUG_INSNS)
1591 return sd_lists_size (insn, list);
1592
1593 FOR_EACH_DEP (insn, list, sd_it, dep)
1594 {
1595 if (DEBUG_INSN_P (DEP_CON (dep)))
1596 dbgcount++;
1597 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1598 nodbgcount++;
1599 }
1600
1601 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1602
1603 return nodbgcount;
1604 }
1605
1606 bool sched_fusion;
1607
1608 /* Compute the priority number for INSN. */
1609 static int
1610 priority (rtx_insn *insn)
1611 {
1612 if (! INSN_P (insn))
1613 return 0;
1614
1615 /* We should not be interested in priority of an already scheduled insn. */
1616 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1617
1618 if (!INSN_PRIORITY_KNOWN (insn))
1619 {
1620 int this_priority = -1;
1621
1622 if (sched_fusion)
1623 {
1624 int this_fusion_priority;
1625
1626 targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
1627 &this_fusion_priority, &this_priority);
1628 INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
1629 }
1630 else if (dep_list_size (insn, SD_LIST_FORW) == 0)
1631 /* ??? We should set INSN_PRIORITY to insn_cost when and insn has
1632 some forward deps but all of them are ignored by
1633 contributes_to_priority hook. At the moment we set priority of
1634 such insn to 0. */
1635 this_priority = insn_cost (insn);
1636 else
1637 {
1638 rtx_insn *prev_first, *twin;
1639 basic_block rec;
1640
1641 /* For recovery check instructions we calculate priority slightly
1642 different than that of normal instructions. Instead of walking
1643 through INSN_FORW_DEPS (check) list, we walk through
1644 INSN_FORW_DEPS list of each instruction in the corresponding
1645 recovery block. */
1646
1647 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1648 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1649 if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1650 {
1651 prev_first = PREV_INSN (insn);
1652 twin = insn;
1653 }
1654 else
1655 {
1656 prev_first = NEXT_INSN (BB_HEAD (rec));
1657 twin = PREV_INSN (BB_END (rec));
1658 }
1659
1660 do
1661 {
1662 sd_iterator_def sd_it;
1663 dep_t dep;
1664
1665 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1666 {
1667 rtx_insn *next;
1668 int next_priority;
1669
1670 next = DEP_CON (dep);
1671
1672 if (BLOCK_FOR_INSN (next) != rec)
1673 {
1674 int cost;
1675
1676 if (!contributes_to_priority_p (dep))
1677 continue;
1678
1679 if (twin == insn)
1680 cost = dep_cost (dep);
1681 else
1682 {
1683 struct _dep _dep1, *dep1 = &_dep1;
1684
1685 init_dep (dep1, insn, next, REG_DEP_ANTI);
1686
1687 cost = dep_cost (dep1);
1688 }
1689
1690 next_priority = cost + priority (next);
1691
1692 if (next_priority > this_priority)
1693 this_priority = next_priority;
1694 }
1695 }
1696
1697 twin = PREV_INSN (twin);
1698 }
1699 while (twin != prev_first);
1700 }
1701
1702 if (this_priority < 0)
1703 {
1704 gcc_assert (this_priority == -1);
1705
1706 this_priority = insn_cost (insn);
1707 }
1708
1709 INSN_PRIORITY (insn) = this_priority;
1710 INSN_PRIORITY_STATUS (insn) = 1;
1711 }
1712
1713 return INSN_PRIORITY (insn);
1714 }
1715 \f
1716 /* Macros and functions for keeping the priority queue sorted, and
1717 dealing with queuing and dequeuing of instructions. */
1718
1719 /* For each pressure class CL, set DEATH[CL] to the number of registers
1720 in that class that die in INSN. */
1721
1722 static void
1723 calculate_reg_deaths (rtx_insn *insn, int *death)
1724 {
1725 int i;
1726 struct reg_use_data *use;
1727
1728 for (i = 0; i < ira_pressure_classes_num; i++)
1729 death[ira_pressure_classes[i]] = 0;
1730 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1731 if (dying_use_p (use))
1732 mark_regno_birth_or_death (0, death, use->regno, true);
1733 }
1734
1735 /* Setup info about the current register pressure impact of scheduling
1736 INSN at the current scheduling point. */
1737 static void
1738 setup_insn_reg_pressure_info (rtx_insn *insn)
1739 {
1740 int i, change, before, after, hard_regno;
1741 int excess_cost_change;
1742 machine_mode mode;
1743 enum reg_class cl;
1744 struct reg_pressure_data *pressure_info;
1745 int *max_reg_pressure;
1746 static int death[N_REG_CLASSES];
1747
1748 gcc_checking_assert (!DEBUG_INSN_P (insn));
1749
1750 excess_cost_change = 0;
1751 calculate_reg_deaths (insn, death);
1752 pressure_info = INSN_REG_PRESSURE (insn);
1753 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1754 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1755 for (i = 0; i < ira_pressure_classes_num; i++)
1756 {
1757 cl = ira_pressure_classes[i];
1758 gcc_assert (curr_reg_pressure[cl] >= 0);
1759 change = (int) pressure_info[i].set_increase - death[cl];
1760 before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1761 after = MAX (0, max_reg_pressure[i] + change
1762 - sched_class_regs_num[cl]);
1763 hard_regno = ira_class_hard_regs[cl][0];
1764 gcc_assert (hard_regno >= 0);
1765 mode = reg_raw_mode[hard_regno];
1766 excess_cost_change += ((after - before)
1767 * (ira_memory_move_cost[mode][cl][0]
1768 + ira_memory_move_cost[mode][cl][1]));
1769 }
1770 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1771 }
1772 \f
1773 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1774 It tries to make the scheduler take register pressure into account
1775 without introducing too many unnecessary stalls. It hooks into the
1776 main scheduling algorithm at several points:
1777
1778 - Before scheduling starts, model_start_schedule constructs a
1779 "model schedule" for the current block. This model schedule is
1780 chosen solely to keep register pressure down. It does not take the
1781 target's pipeline or the original instruction order into account,
1782 except as a tie-breaker. It also doesn't work to a particular
1783 pressure limit.
1784
1785 This model schedule gives us an idea of what pressure can be
1786 achieved for the block and gives us an example of a schedule that
1787 keeps to that pressure. It also makes the final schedule less
1788 dependent on the original instruction order. This is important
1789 because the original order can either be "wide" (many values live
1790 at once, such as in user-scheduled code) or "narrow" (few values
1791 live at once, such as after loop unrolling, where several
1792 iterations are executed sequentially).
1793
1794 We do not apply this model schedule to the rtx stream. We simply
1795 record it in model_schedule. We also compute the maximum pressure,
1796 MP, that was seen during this schedule.
1797
1798 - Instructions are added to the ready queue even if they require
1799 a stall. The length of the stall is instead computed as:
1800
1801 MAX (INSN_TICK (INSN) - clock_var, 0)
1802
1803 (= insn_delay). This allows rank_for_schedule to choose between
1804 introducing a deliberate stall or increasing pressure.
1805
1806 - Before sorting the ready queue, model_set_excess_costs assigns
1807 a pressure-based cost to each ready instruction in the queue.
1808 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1809 (ECC for short) and is effectively measured in cycles.
1810
1811 - rank_for_schedule ranks instructions based on:
1812
1813 ECC (insn) + insn_delay (insn)
1814
1815 then as:
1816
1817 insn_delay (insn)
1818
1819 So, for example, an instruction X1 with an ECC of 1 that can issue
1820 now will win over an instruction X0 with an ECC of zero that would
1821 introduce a stall of one cycle. However, an instruction X2 with an
1822 ECC of 2 that can issue now will lose to both X0 and X1.
1823
1824 - When an instruction is scheduled, model_recompute updates the model
1825 schedule with the new pressures (some of which might now exceed the
1826 original maximum pressure MP). model_update_limit_points then searches
1827 for the new point of maximum pressure, if not already known. */
1828
1829 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1830 from surrounding debug information. */
1831 #define MODEL_BAR \
1832 ";;\t\t+------------------------------------------------------\n"
1833
1834 /* Information about the pressure on a particular register class at a
1835 particular point of the model schedule. */
1836 struct model_pressure_data {
1837 /* The pressure at this point of the model schedule, or -1 if the
1838 point is associated with an instruction that has already been
1839 scheduled. */
1840 int ref_pressure;
1841
1842 /* The maximum pressure during or after this point of the model schedule. */
1843 int max_pressure;
1844 };
1845
1846 /* Per-instruction information that is used while building the model
1847 schedule. Here, "schedule" refers to the model schedule rather
1848 than the main schedule. */
1849 struct model_insn_info {
1850 /* The instruction itself. */
1851 rtx_insn *insn;
1852
1853 /* If this instruction is in model_worklist, these fields link to the
1854 previous (higher-priority) and next (lower-priority) instructions
1855 in the list. */
1856 struct model_insn_info *prev;
1857 struct model_insn_info *next;
1858
1859 /* While constructing the schedule, QUEUE_INDEX describes whether an
1860 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1861 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1862 old_queue records the value that QUEUE_INDEX had before scheduling
1863 started, so that we can restore it once the schedule is complete. */
1864 int old_queue;
1865
1866 /* The relative importance of an unscheduled instruction. Higher
1867 values indicate greater importance. */
1868 unsigned int model_priority;
1869
1870 /* The length of the longest path of satisfied true dependencies
1871 that leads to this instruction. */
1872 unsigned int depth;
1873
1874 /* The length of the longest path of dependencies of any kind
1875 that leads from this instruction. */
1876 unsigned int alap;
1877
1878 /* The number of predecessor nodes that must still be scheduled. */
1879 int unscheduled_preds;
1880 };
1881
1882 /* Information about the pressure limit for a particular register class.
1883 This structure is used when applying a model schedule to the main
1884 schedule. */
1885 struct model_pressure_limit {
1886 /* The maximum register pressure seen in the original model schedule. */
1887 int orig_pressure;
1888
1889 /* The maximum register pressure seen in the current model schedule
1890 (which excludes instructions that have already been scheduled). */
1891 int pressure;
1892
1893 /* The point of the current model schedule at which PRESSURE is first
1894 reached. It is set to -1 if the value needs to be recomputed. */
1895 int point;
1896 };
1897
1898 /* Describes a particular way of measuring register pressure. */
1899 struct model_pressure_group {
1900 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1901 struct model_pressure_limit limits[N_REG_CLASSES];
1902
1903 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1904 on register class ira_pressure_classes[PCI] at point POINT of the
1905 current model schedule. A POINT of model_num_insns describes the
1906 pressure at the end of the schedule. */
1907 struct model_pressure_data *model;
1908 };
1909
1910 /* Index POINT gives the instruction at point POINT of the model schedule.
1911 This array doesn't change during main scheduling. */
1912 static vec<rtx_insn *> model_schedule;
1913
1914 /* The list of instructions in the model worklist, sorted in order of
1915 decreasing priority. */
1916 static struct model_insn_info *model_worklist;
1917
1918 /* Index I describes the instruction with INSN_LUID I. */
1919 static struct model_insn_info *model_insns;
1920
1921 /* The number of instructions in the model schedule. */
1922 static int model_num_insns;
1923
1924 /* The index of the first instruction in model_schedule that hasn't yet been
1925 added to the main schedule, or model_num_insns if all of them have. */
1926 static int model_curr_point;
1927
1928 /* Describes the pressure before each instruction in the model schedule. */
1929 static struct model_pressure_group model_before_pressure;
1930
1931 /* The first unused model_priority value (as used in model_insn_info). */
1932 static unsigned int model_next_priority;
1933
1934
1935 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1936 at point POINT of the model schedule. */
1937 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1938 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1939
1940 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1941 after point POINT of the model schedule. */
1942 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1943 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1944
1945 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1946 of the model schedule. */
1947 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1948 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1949
1950 /* Information about INSN that is used when creating the model schedule. */
1951 #define MODEL_INSN_INFO(INSN) \
1952 (&model_insns[INSN_LUID (INSN)])
1953
1954 /* The instruction at point POINT of the model schedule. */
1955 #define MODEL_INSN(POINT) \
1956 (model_schedule[POINT])
1957
1958
1959 /* Return INSN's index in the model schedule, or model_num_insns if it
1960 doesn't belong to that schedule. */
1961
1962 static int
1963 model_index (rtx_insn *insn)
1964 {
1965 if (INSN_MODEL_INDEX (insn) == 0)
1966 return model_num_insns;
1967 return INSN_MODEL_INDEX (insn) - 1;
1968 }
1969
1970 /* Make sure that GROUP->limits is up-to-date for the current point
1971 of the model schedule. */
1972
1973 static void
1974 model_update_limit_points_in_group (struct model_pressure_group *group)
1975 {
1976 int pci, max_pressure, point;
1977
1978 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1979 {
1980 /* We may have passed the final point at which the pressure in
1981 group->limits[pci].pressure was reached. Update the limit if so. */
1982 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1983 group->limits[pci].pressure = max_pressure;
1984
1985 /* Find the point at which MAX_PRESSURE is first reached. We need
1986 to search in three cases:
1987
1988 - We've already moved past the previous pressure point.
1989 In this case we search forward from model_curr_point.
1990
1991 - We scheduled the previous point of maximum pressure ahead of
1992 its position in the model schedule, but doing so didn't bring
1993 the pressure point earlier. In this case we search forward
1994 from that previous pressure point.
1995
1996 - Scheduling an instruction early caused the maximum pressure
1997 to decrease. In this case we will have set the pressure
1998 point to -1, and we search forward from model_curr_point. */
1999 point = MAX (group->limits[pci].point, model_curr_point);
2000 while (point < model_num_insns
2001 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
2002 point++;
2003 group->limits[pci].point = point;
2004
2005 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
2006 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
2007 }
2008 }
2009
2010 /* Make sure that all register-pressure limits are up-to-date for the
2011 current position in the model schedule. */
2012
2013 static void
2014 model_update_limit_points (void)
2015 {
2016 model_update_limit_points_in_group (&model_before_pressure);
2017 }
2018
2019 /* Return the model_index of the last unscheduled use in chain USE
2020 outside of USE's instruction. Return -1 if there are no other uses,
2021 or model_num_insns if the register is live at the end of the block. */
2022
2023 static int
2024 model_last_use_except (struct reg_use_data *use)
2025 {
2026 struct reg_use_data *next;
2027 int last, index;
2028
2029 last = -1;
2030 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2031 if (NONDEBUG_INSN_P (next->insn)
2032 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2033 {
2034 index = model_index (next->insn);
2035 if (index == model_num_insns)
2036 return model_num_insns;
2037 if (last < index)
2038 last = index;
2039 }
2040 return last;
2041 }
2042
2043 /* An instruction with model_index POINT has just been scheduled, and it
2044 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2045 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2046 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2047
2048 static void
2049 model_start_update_pressure (struct model_pressure_group *group,
2050 int point, int pci, int delta)
2051 {
2052 int next_max_pressure;
2053
2054 if (point == model_num_insns)
2055 {
2056 /* The instruction wasn't part of the model schedule; it was moved
2057 from a different block. Update the pressure for the end of
2058 the model schedule. */
2059 MODEL_REF_PRESSURE (group, point, pci) += delta;
2060 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2061 }
2062 else
2063 {
2064 /* Record that this instruction has been scheduled. Nothing now
2065 changes between POINT and POINT + 1, so get the maximum pressure
2066 from the latter. If the maximum pressure decreases, the new
2067 pressure point may be before POINT. */
2068 MODEL_REF_PRESSURE (group, point, pci) = -1;
2069 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2070 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2071 {
2072 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2073 if (group->limits[pci].point == point)
2074 group->limits[pci].point = -1;
2075 }
2076 }
2077 }
2078
2079 /* Record that scheduling a later instruction has changed the pressure
2080 at point POINT of the model schedule by DELTA (which might be 0).
2081 Update GROUP accordingly. Return nonzero if these changes might
2082 trigger changes to previous points as well. */
2083
2084 static int
2085 model_update_pressure (struct model_pressure_group *group,
2086 int point, int pci, int delta)
2087 {
2088 int ref_pressure, max_pressure, next_max_pressure;
2089
2090 /* If POINT hasn't yet been scheduled, update its pressure. */
2091 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2092 if (ref_pressure >= 0 && delta != 0)
2093 {
2094 ref_pressure += delta;
2095 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2096
2097 /* Check whether the maximum pressure in the overall schedule
2098 has increased. (This means that the MODEL_MAX_PRESSURE of
2099 every point <= POINT will need to increase too; see below.) */
2100 if (group->limits[pci].pressure < ref_pressure)
2101 group->limits[pci].pressure = ref_pressure;
2102
2103 /* If we are at maximum pressure, and the maximum pressure
2104 point was previously unknown or later than POINT,
2105 bring it forward. */
2106 if (group->limits[pci].pressure == ref_pressure
2107 && !IN_RANGE (group->limits[pci].point, 0, point))
2108 group->limits[pci].point = point;
2109
2110 /* If POINT used to be the point of maximum pressure, but isn't
2111 any longer, we need to recalculate it using a forward walk. */
2112 if (group->limits[pci].pressure > ref_pressure
2113 && group->limits[pci].point == point)
2114 group->limits[pci].point = -1;
2115 }
2116
2117 /* Update the maximum pressure at POINT. Changes here might also
2118 affect the maximum pressure at POINT - 1. */
2119 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2120 max_pressure = MAX (ref_pressure, next_max_pressure);
2121 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2122 {
2123 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2124 return 1;
2125 }
2126 return 0;
2127 }
2128
2129 /* INSN has just been scheduled. Update the model schedule accordingly. */
2130
2131 static void
2132 model_recompute (rtx_insn *insn)
2133 {
2134 struct {
2135 int last_use;
2136 int regno;
2137 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2138 struct reg_use_data *use;
2139 struct reg_pressure_data *reg_pressure;
2140 int delta[N_REG_CLASSES];
2141 int pci, point, mix, new_last, cl, ref_pressure, queue;
2142 unsigned int i, num_uses, num_pending_births;
2143 bool print_p;
2144
2145 /* The destinations of INSN were previously live from POINT onwards, but are
2146 now live from model_curr_point onwards. Set up DELTA accordingly. */
2147 point = model_index (insn);
2148 reg_pressure = INSN_REG_PRESSURE (insn);
2149 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2150 {
2151 cl = ira_pressure_classes[pci];
2152 delta[cl] = reg_pressure[pci].set_increase;
2153 }
2154
2155 /* Record which registers previously died at POINT, but which now die
2156 before POINT. Adjust DELTA so that it represents the effect of
2157 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2158 registers that will be born in the range [model_curr_point, POINT). */
2159 num_uses = 0;
2160 num_pending_births = 0;
2161 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2162 {
2163 new_last = model_last_use_except (use);
2164 if (new_last < point)
2165 {
2166 gcc_assert (num_uses < ARRAY_SIZE (uses));
2167 uses[num_uses].last_use = new_last;
2168 uses[num_uses].regno = use->regno;
2169 /* This register is no longer live after POINT - 1. */
2170 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2171 num_uses++;
2172 if (new_last >= 0)
2173 num_pending_births++;
2174 }
2175 }
2176
2177 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2178 Also set each group pressure limit for POINT. */
2179 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2180 {
2181 cl = ira_pressure_classes[pci];
2182 model_start_update_pressure (&model_before_pressure,
2183 point, pci, delta[cl]);
2184 }
2185
2186 /* Walk the model schedule backwards, starting immediately before POINT. */
2187 print_p = false;
2188 if (point != model_curr_point)
2189 do
2190 {
2191 point--;
2192 insn = MODEL_INSN (point);
2193 queue = QUEUE_INDEX (insn);
2194
2195 if (queue != QUEUE_SCHEDULED)
2196 {
2197 /* DELTA describes the effect of the move on the register pressure
2198 after POINT. Make it describe the effect on the pressure
2199 before POINT. */
2200 i = 0;
2201 while (i < num_uses)
2202 {
2203 if (uses[i].last_use == point)
2204 {
2205 /* This register is now live again. */
2206 mark_regno_birth_or_death (NULL, delta,
2207 uses[i].regno, true);
2208
2209 /* Remove this use from the array. */
2210 uses[i] = uses[num_uses - 1];
2211 num_uses--;
2212 num_pending_births--;
2213 }
2214 else
2215 i++;
2216 }
2217
2218 if (sched_verbose >= 5)
2219 {
2220 if (!print_p)
2221 {
2222 fprintf (sched_dump, MODEL_BAR);
2223 fprintf (sched_dump, ";;\t\t| New pressure for model"
2224 " schedule\n");
2225 fprintf (sched_dump, MODEL_BAR);
2226 print_p = true;
2227 }
2228
2229 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2230 point, INSN_UID (insn),
2231 str_pattern_slim (PATTERN (insn)));
2232 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2233 {
2234 cl = ira_pressure_classes[pci];
2235 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2236 point, pci);
2237 fprintf (sched_dump, " %s:[%d->%d]",
2238 reg_class_names[ira_pressure_classes[pci]],
2239 ref_pressure, ref_pressure + delta[cl]);
2240 }
2241 fprintf (sched_dump, "\n");
2242 }
2243 }
2244
2245 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2246 might have changed as well. */
2247 mix = num_pending_births;
2248 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2249 {
2250 cl = ira_pressure_classes[pci];
2251 mix |= delta[cl];
2252 mix |= model_update_pressure (&model_before_pressure,
2253 point, pci, delta[cl]);
2254 }
2255 }
2256 while (mix && point > model_curr_point);
2257
2258 if (print_p)
2259 fprintf (sched_dump, MODEL_BAR);
2260 }
2261
2262 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2263 check whether the insn's pattern needs restoring. */
2264 static bool
2265 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2266 {
2267 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2268 return false;
2269
2270 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2271 {
2272 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2273 gcc_assert (next == DEP_CON (dep));
2274 }
2275 else
2276 {
2277 struct dep_replacement *desc = DEP_REPLACE (dep);
2278 if (desc->insn != next)
2279 {
2280 gcc_assert (*desc->loc == desc->orig);
2281 return false;
2282 }
2283 }
2284 return true;
2285 }
2286 \f
2287 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2288 pressure on CL from P to P'. We use this to calculate a "base ECC",
2289 baseECC (CL, X), for each pressure class CL and each instruction X.
2290 Supposing X changes the pressure on CL from P to P', and that the
2291 maximum pressure on CL in the current model schedule is MP', then:
2292
2293 * if X occurs before or at the next point of maximum pressure in
2294 the model schedule and P' > MP', then:
2295
2296 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2297
2298 The idea is that the pressure after scheduling a fixed set of
2299 instructions -- in this case, the set up to and including the
2300 next maximum pressure point -- is going to be the same regardless
2301 of the order; we simply want to keep the intermediate pressure
2302 under control. Thus X has a cost of zero unless scheduling it
2303 now would exceed MP'.
2304
2305 If all increases in the set are by the same amount, no zero-cost
2306 instruction will ever cause the pressure to exceed MP'. However,
2307 if X is instead moved past an instruction X' with pressure in the
2308 range (MP' - (P' - P), MP'), the pressure at X' will increase
2309 beyond MP'. Since baseECC is very much a heuristic anyway,
2310 it doesn't seem worth the overhead of tracking cases like these.
2311
2312 The cost of exceeding MP' is always based on the original maximum
2313 pressure MP. This is so that going 2 registers over the original
2314 limit has the same cost regardless of whether it comes from two
2315 separate +1 deltas or from a single +2 delta.
2316
2317 * if X occurs after the next point of maximum pressure in the model
2318 schedule and P' > P, then:
2319
2320 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2321
2322 That is, if we move X forward across a point of maximum pressure,
2323 and if X increases the pressure by P' - P, then we conservatively
2324 assume that scheduling X next would increase the maximum pressure
2325 by P' - P. Again, the cost of doing this is based on the original
2326 maximum pressure MP, for the same reason as above.
2327
2328 * if P' < P, P > MP, and X occurs at or after the next point of
2329 maximum pressure, then:
2330
2331 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2332
2333 That is, if we have already exceeded the original maximum pressure MP,
2334 and if X might reduce the maximum pressure again -- or at least push
2335 it further back, and thus allow more scheduling freedom -- it is given
2336 a negative cost to reflect the improvement.
2337
2338 * otherwise,
2339
2340 baseECC (CL, X) = 0
2341
2342 In this case, X is not expected to affect the maximum pressure MP',
2343 so it has zero cost.
2344
2345 We then create a combined value baseECC (X) that is the sum of
2346 baseECC (CL, X) for each pressure class CL.
2347
2348 baseECC (X) could itself be used as the ECC value described above.
2349 However, this is often too conservative, in the sense that it
2350 tends to make high-priority instructions that increase pressure
2351 wait too long in cases where introducing a spill would be better.
2352 For this reason the final ECC is a priority-adjusted form of
2353 baseECC (X). Specifically, we calculate:
2354
2355 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2356 baseP = MAX { P (X) | baseECC (X) <= 0 }
2357
2358 Then:
2359
2360 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2361
2362 Thus an instruction's effect on pressure is ignored if it has a high
2363 enough priority relative to the ones that don't increase pressure.
2364 Negative values of baseECC (X) do not increase the priority of X
2365 itself, but they do make it harder for other instructions to
2366 increase the pressure further.
2367
2368 This pressure cost is deliberately timid. The intention has been
2369 to choose a heuristic that rarely interferes with the normal list
2370 scheduler in cases where that scheduler would produce good code.
2371 We simply want to curb some of its worst excesses. */
2372
2373 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2374
2375 Here we use the very simplistic cost model that every register above
2376 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2377 measures instead, such as one based on MEMORY_MOVE_COST. However:
2378
2379 (1) In order for an instruction to be scheduled, the higher cost
2380 would need to be justified in a single saving of that many stalls.
2381 This is overly pessimistic, because the benefit of spilling is
2382 often to avoid a sequence of several short stalls rather than
2383 a single long one.
2384
2385 (2) The cost is still arbitrary. Because we are not allocating
2386 registers during scheduling, we have no way of knowing for
2387 sure how many memory accesses will be required by each spill,
2388 where the spills will be placed within the block, or even
2389 which block(s) will contain the spills.
2390
2391 So a higher cost than 1 is often too conservative in practice,
2392 forcing blocks to contain unnecessary stalls instead of spill code.
2393 The simple cost below seems to be the best compromise. It reduces
2394 the interference with the normal list scheduler, which helps make
2395 it more suitable for a default-on option. */
2396
2397 static int
2398 model_spill_cost (int cl, int from, int to)
2399 {
2400 from = MAX (from, sched_class_regs_num[cl]);
2401 return MAX (to, from) - from;
2402 }
2403
2404 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2405 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2406 P' = P + DELTA. */
2407
2408 static int
2409 model_excess_group_cost (struct model_pressure_group *group,
2410 int point, int pci, int delta)
2411 {
2412 int pressure, cl;
2413
2414 cl = ira_pressure_classes[pci];
2415 if (delta < 0 && point >= group->limits[pci].point)
2416 {
2417 pressure = MAX (group->limits[pci].orig_pressure,
2418 curr_reg_pressure[cl] + delta);
2419 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2420 }
2421
2422 if (delta > 0)
2423 {
2424 if (point > group->limits[pci].point)
2425 pressure = group->limits[pci].pressure + delta;
2426 else
2427 pressure = curr_reg_pressure[cl] + delta;
2428
2429 if (pressure > group->limits[pci].pressure)
2430 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2431 pressure);
2432 }
2433
2434 return 0;
2435 }
2436
2437 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2438 if PRINT_P. */
2439
2440 static int
2441 model_excess_cost (rtx_insn *insn, bool print_p)
2442 {
2443 int point, pci, cl, cost, this_cost, delta;
2444 struct reg_pressure_data *insn_reg_pressure;
2445 int insn_death[N_REG_CLASSES];
2446
2447 calculate_reg_deaths (insn, insn_death);
2448 point = model_index (insn);
2449 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2450 cost = 0;
2451
2452 if (print_p)
2453 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2454 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2455
2456 /* Sum up the individual costs for each register class. */
2457 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2458 {
2459 cl = ira_pressure_classes[pci];
2460 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2461 this_cost = model_excess_group_cost (&model_before_pressure,
2462 point, pci, delta);
2463 cost += this_cost;
2464 if (print_p)
2465 fprintf (sched_dump, " %s:[%d base cost %d]",
2466 reg_class_names[cl], delta, this_cost);
2467 }
2468
2469 if (print_p)
2470 fprintf (sched_dump, "\n");
2471
2472 return cost;
2473 }
2474
2475 /* Dump the next points of maximum pressure for GROUP. */
2476
2477 static void
2478 model_dump_pressure_points (struct model_pressure_group *group)
2479 {
2480 int pci, cl;
2481
2482 fprintf (sched_dump, ";;\t\t| pressure points");
2483 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2484 {
2485 cl = ira_pressure_classes[pci];
2486 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2487 curr_reg_pressure[cl], group->limits[pci].pressure);
2488 if (group->limits[pci].point < model_num_insns)
2489 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2490 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2491 else
2492 fprintf (sched_dump, "end]");
2493 }
2494 fprintf (sched_dump, "\n");
2495 }
2496
2497 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2498
2499 static void
2500 model_set_excess_costs (rtx_insn **insns, int count)
2501 {
2502 int i, cost, priority_base, priority;
2503 bool print_p;
2504
2505 /* Record the baseECC value for each instruction in the model schedule,
2506 except that negative costs are converted to zero ones now rather than
2507 later. Do not assign a cost to debug instructions, since they must
2508 not change code-generation decisions. Experiments suggest we also
2509 get better results by not assigning a cost to instructions from
2510 a different block.
2511
2512 Set PRIORITY_BASE to baseP in the block comment above. This is the
2513 maximum priority of the "cheap" instructions, which should always
2514 include the next model instruction. */
2515 priority_base = 0;
2516 print_p = false;
2517 for (i = 0; i < count; i++)
2518 if (INSN_MODEL_INDEX (insns[i]))
2519 {
2520 if (sched_verbose >= 6 && !print_p)
2521 {
2522 fprintf (sched_dump, MODEL_BAR);
2523 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2524 model_dump_pressure_points (&model_before_pressure);
2525 fprintf (sched_dump, MODEL_BAR);
2526 print_p = true;
2527 }
2528 cost = model_excess_cost (insns[i], print_p);
2529 if (cost <= 0)
2530 {
2531 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2532 priority_base = MAX (priority_base, priority);
2533 cost = 0;
2534 }
2535 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2536 }
2537 if (print_p)
2538 fprintf (sched_dump, MODEL_BAR);
2539
2540 /* Use MAX (baseECC, 0) and baseP to calculcate ECC for each
2541 instruction. */
2542 for (i = 0; i < count; i++)
2543 {
2544 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2545 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2546 if (cost > 0 && priority > priority_base)
2547 {
2548 cost += priority_base - priority;
2549 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2550 }
2551 }
2552 }
2553 \f
2554
2555 /* Enum of rank_for_schedule heuristic decisions. */
2556 enum rfs_decision {
2557 RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2558 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2559 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2560 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2561 RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
2562
2563 /* Corresponding strings for print outs. */
2564 static const char *rfs_str[RFS_N] = {
2565 "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2566 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2567 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2568 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2569 "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
2570
2571 /* Statistical breakdown of rank_for_schedule decisions. */
2572 typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
2573 static rank_for_schedule_stats_t rank_for_schedule_stats;
2574
2575 /* Return the result of comparing insns TMP and TMP2 and update
2576 Rank_For_Schedule statistics. */
2577 static int
2578 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2579 {
2580 ++rank_for_schedule_stats.stats[decision];
2581 if (result < 0)
2582 INSN_LAST_RFS_WIN (tmp) = decision;
2583 else if (result > 0)
2584 INSN_LAST_RFS_WIN (tmp2) = decision;
2585 else
2586 gcc_unreachable ();
2587 return result;
2588 }
2589
2590 /* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
2591 keeping normal insns in original order. */
2592
2593 static int
2594 rank_for_schedule_debug (const void *x, const void *y)
2595 {
2596 rtx_insn *tmp = *(rtx_insn * const *) y;
2597 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2598
2599 /* Schedule debug insns as early as possible. */
2600 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2601 return -1;
2602 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2603 return 1;
2604 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2605 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2606 else
2607 return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2608 }
2609
2610 /* Returns a positive value if x is preferred; returns a negative value if
2611 y is preferred. Should never return 0, since that will make the sort
2612 unstable. */
2613
2614 static int
2615 rank_for_schedule (const void *x, const void *y)
2616 {
2617 rtx_insn *tmp = *(rtx_insn * const *) y;
2618 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2619 int tmp_class, tmp2_class;
2620 int val, priority_val, info_val, diff;
2621
2622 if (live_range_shrinkage_p)
2623 {
2624 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2625 code. */
2626 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2627 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2628 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2629 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2630 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2631 return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2632 /* Sort by INSN_LUID (original insn order), so that we make the
2633 sort stable. This minimizes instruction movement, thus
2634 minimizing sched's effect on debugging and cross-jumping. */
2635 return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2636 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2637 }
2638
2639 /* The insn in a schedule group should be issued the first. */
2640 if (flag_sched_group_heuristic &&
2641 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2642 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2643 tmp, tmp2);
2644
2645 /* Make sure that priority of TMP and TMP2 are initialized. */
2646 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2647
2648 if (sched_fusion)
2649 {
2650 /* The instruction that has the same fusion priority as the last
2651 instruction is the instruction we picked next. If that is not
2652 the case, we sort ready list firstly by fusion priority, then
2653 by priority, and at last by INSN_LUID. */
2654 int a = INSN_FUSION_PRIORITY (tmp);
2655 int b = INSN_FUSION_PRIORITY (tmp2);
2656 int last = -1;
2657
2658 if (last_nondebug_scheduled_insn
2659 && !NOTE_P (last_nondebug_scheduled_insn)
2660 && BLOCK_FOR_INSN (tmp)
2661 == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2662 last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2663
2664 if (a != last && b != last)
2665 {
2666 if (a == b)
2667 {
2668 a = INSN_PRIORITY (tmp);
2669 b = INSN_PRIORITY (tmp2);
2670 }
2671 if (a != b)
2672 return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2673 else
2674 return rfs_result (RFS_FUSION,
2675 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2676 }
2677 else if (a == b)
2678 {
2679 gcc_assert (last_nondebug_scheduled_insn
2680 && !NOTE_P (last_nondebug_scheduled_insn));
2681 last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2682
2683 a = abs (INSN_PRIORITY (tmp) - last);
2684 b = abs (INSN_PRIORITY (tmp2) - last);
2685 if (a != b)
2686 return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2687 else
2688 return rfs_result (RFS_FUSION,
2689 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2690 }
2691 else if (a == last)
2692 return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2693 else
2694 return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2695 }
2696
2697 if (sched_pressure != SCHED_PRESSURE_NONE)
2698 {
2699 /* Prefer insn whose scheduling results in the smallest register
2700 pressure excess. */
2701 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2702 + insn_delay (tmp)
2703 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2704 - insn_delay (tmp2))))
2705 return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2706 }
2707
2708 if (sched_pressure != SCHED_PRESSURE_NONE
2709 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2710 && INSN_TICK (tmp2) != INSN_TICK (tmp))
2711 {
2712 diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2713 return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2714 }
2715
2716 /* If we are doing backtracking in this schedule, prefer insns that
2717 have forward dependencies with negative cost against an insn that
2718 was already scheduled. */
2719 if (current_sched_info->flags & DO_BACKTRACKING)
2720 {
2721 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2722 if (priority_val)
2723 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2724 }
2725
2726 /* Prefer insn with higher priority. */
2727 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2728
2729 if (flag_sched_critical_path_heuristic && priority_val)
2730 return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2731
2732 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
2733 {
2734 int autopref = autopref_rank_for_schedule (tmp, tmp2);
2735 if (autopref != 0)
2736 return autopref;
2737 }
2738
2739 /* Prefer speculative insn with greater dependencies weakness. */
2740 if (flag_sched_spec_insn_heuristic && spec_info)
2741 {
2742 ds_t ds1, ds2;
2743 dw_t dw1, dw2;
2744 int dw;
2745
2746 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2747 if (ds1)
2748 dw1 = ds_weak (ds1);
2749 else
2750 dw1 = NO_DEP_WEAK;
2751
2752 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2753 if (ds2)
2754 dw2 = ds_weak (ds2);
2755 else
2756 dw2 = NO_DEP_WEAK;
2757
2758 dw = dw2 - dw1;
2759 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2760 return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2761 }
2762
2763 info_val = (*current_sched_info->rank) (tmp, tmp2);
2764 if (flag_sched_rank_heuristic && info_val)
2765 return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2766
2767 /* Compare insns based on their relation to the last scheduled
2768 non-debug insn. */
2769 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2770 {
2771 dep_t dep1;
2772 dep_t dep2;
2773 rtx_insn *last = last_nondebug_scheduled_insn;
2774
2775 /* Classify the instructions into three classes:
2776 1) Data dependent on last schedule insn.
2777 2) Anti/Output dependent on last scheduled insn.
2778 3) Independent of last scheduled insn, or has latency of one.
2779 Choose the insn from the highest numbered class if different. */
2780 dep1 = sd_find_dep_between (last, tmp, true);
2781
2782 if (dep1 == NULL || dep_cost (dep1) == 1)
2783 tmp_class = 3;
2784 else if (/* Data dependence. */
2785 DEP_TYPE (dep1) == REG_DEP_TRUE)
2786 tmp_class = 1;
2787 else
2788 tmp_class = 2;
2789
2790 dep2 = sd_find_dep_between (last, tmp2, true);
2791
2792 if (dep2 == NULL || dep_cost (dep2) == 1)
2793 tmp2_class = 3;
2794 else if (/* Data dependence. */
2795 DEP_TYPE (dep2) == REG_DEP_TRUE)
2796 tmp2_class = 1;
2797 else
2798 tmp2_class = 2;
2799
2800 if ((val = tmp2_class - tmp_class))
2801 return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2802 }
2803
2804 /* Prefer instructions that occur earlier in the model schedule. */
2805 if (sched_pressure == SCHED_PRESSURE_MODEL
2806 && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
2807 {
2808 diff = model_index (tmp) - model_index (tmp2);
2809 gcc_assert (diff != 0);
2810 return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2811 }
2812
2813 /* Prefer the insn which has more later insns that depend on it.
2814 This gives the scheduler more freedom when scheduling later
2815 instructions at the expense of added register pressure. */
2816
2817 val = (dep_list_size (tmp2, SD_LIST_FORW)
2818 - dep_list_size (tmp, SD_LIST_FORW));
2819
2820 if (flag_sched_dep_count_heuristic && val != 0)
2821 return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2822
2823 /* If insns are equally good, sort by INSN_LUID (original insn order),
2824 so that we make the sort stable. This minimizes instruction movement,
2825 thus minimizing sched's effect on debugging and cross-jumping. */
2826 return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2827 }
2828
2829 /* Resort the array A in which only element at index N may be out of order. */
2830
2831 HAIFA_INLINE static void
2832 swap_sort (rtx_insn **a, int n)
2833 {
2834 rtx_insn *insn = a[n - 1];
2835 int i = n - 2;
2836
2837 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2838 {
2839 a[i + 1] = a[i];
2840 i -= 1;
2841 }
2842 a[i + 1] = insn;
2843 }
2844
2845 /* Add INSN to the insn queue so that it can be executed at least
2846 N_CYCLES after the currently executing insn. Preserve insns
2847 chain for debugging purposes. REASON will be printed in debugging
2848 output. */
2849
2850 HAIFA_INLINE static void
2851 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2852 {
2853 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2854 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2855 int new_tick;
2856
2857 gcc_assert (n_cycles <= max_insn_queue_index);
2858 gcc_assert (!DEBUG_INSN_P (insn));
2859
2860 insn_queue[next_q] = link;
2861 q_size += 1;
2862
2863 if (sched_verbose >= 2)
2864 {
2865 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2866 (*current_sched_info->print_insn) (insn, 0));
2867
2868 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2869 }
2870
2871 QUEUE_INDEX (insn) = next_q;
2872
2873 if (current_sched_info->flags & DO_BACKTRACKING)
2874 {
2875 new_tick = clock_var + n_cycles;
2876 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2877 INSN_TICK (insn) = new_tick;
2878
2879 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2880 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2881 {
2882 must_backtrack = true;
2883 if (sched_verbose >= 2)
2884 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2885 }
2886 }
2887 }
2888
2889 /* Remove INSN from queue. */
2890 static void
2891 queue_remove (rtx_insn *insn)
2892 {
2893 gcc_assert (QUEUE_INDEX (insn) >= 0);
2894 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2895 q_size--;
2896 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2897 }
2898
2899 /* Return a pointer to the bottom of the ready list, i.e. the insn
2900 with the lowest priority. */
2901
2902 rtx_insn **
2903 ready_lastpos (struct ready_list *ready)
2904 {
2905 gcc_assert (ready->n_ready >= 1);
2906 return ready->vec + ready->first - ready->n_ready + 1;
2907 }
2908
2909 /* Add an element INSN to the ready list so that it ends up with the
2910 lowest/highest priority depending on FIRST_P. */
2911
2912 HAIFA_INLINE static void
2913 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2914 {
2915 if (!first_p)
2916 {
2917 if (ready->first == ready->n_ready)
2918 {
2919 memmove (ready->vec + ready->veclen - ready->n_ready,
2920 ready_lastpos (ready),
2921 ready->n_ready * sizeof (rtx));
2922 ready->first = ready->veclen - 1;
2923 }
2924 ready->vec[ready->first - ready->n_ready] = insn;
2925 }
2926 else
2927 {
2928 if (ready->first == ready->veclen - 1)
2929 {
2930 if (ready->n_ready)
2931 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2932 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2933 ready_lastpos (ready),
2934 ready->n_ready * sizeof (rtx));
2935 ready->first = ready->veclen - 2;
2936 }
2937 ready->vec[++(ready->first)] = insn;
2938 }
2939
2940 ready->n_ready++;
2941 if (DEBUG_INSN_P (insn))
2942 ready->n_debug++;
2943
2944 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2945 QUEUE_INDEX (insn) = QUEUE_READY;
2946
2947 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2948 && INSN_EXACT_TICK (insn) < clock_var)
2949 {
2950 must_backtrack = true;
2951 }
2952 }
2953
2954 /* Remove the element with the highest priority from the ready list and
2955 return it. */
2956
2957 HAIFA_INLINE static rtx_insn *
2958 ready_remove_first (struct ready_list *ready)
2959 {
2960 rtx_insn *t;
2961
2962 gcc_assert (ready->n_ready);
2963 t = ready->vec[ready->first--];
2964 ready->n_ready--;
2965 if (DEBUG_INSN_P (t))
2966 ready->n_debug--;
2967 /* If the queue becomes empty, reset it. */
2968 if (ready->n_ready == 0)
2969 ready->first = ready->veclen - 1;
2970
2971 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2972 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2973
2974 return t;
2975 }
2976
2977 /* The following code implements multi-pass scheduling for the first
2978 cycle. In other words, we will try to choose ready insn which
2979 permits to start maximum number of insns on the same cycle. */
2980
2981 /* Return a pointer to the element INDEX from the ready. INDEX for
2982 insn with the highest priority is 0, and the lowest priority has
2983 N_READY - 1. */
2984
2985 rtx_insn *
2986 ready_element (struct ready_list *ready, int index)
2987 {
2988 gcc_assert (ready->n_ready && index < ready->n_ready);
2989
2990 return ready->vec[ready->first - index];
2991 }
2992
2993 /* Remove the element INDEX from the ready list and return it. INDEX
2994 for insn with the highest priority is 0, and the lowest priority
2995 has N_READY - 1. */
2996
2997 HAIFA_INLINE static rtx_insn *
2998 ready_remove (struct ready_list *ready, int index)
2999 {
3000 rtx_insn *t;
3001 int i;
3002
3003 if (index == 0)
3004 return ready_remove_first (ready);
3005 gcc_assert (ready->n_ready && index < ready->n_ready);
3006 t = ready->vec[ready->first - index];
3007 ready->n_ready--;
3008 if (DEBUG_INSN_P (t))
3009 ready->n_debug--;
3010 for (i = index; i < ready->n_ready; i++)
3011 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3012 QUEUE_INDEX (t) = QUEUE_NOWHERE;
3013 return t;
3014 }
3015
3016 /* Remove INSN from the ready list. */
3017 static void
3018 ready_remove_insn (rtx_insn *insn)
3019 {
3020 int i;
3021
3022 for (i = 0; i < readyp->n_ready; i++)
3023 if (ready_element (readyp, i) == insn)
3024 {
3025 ready_remove (readyp, i);
3026 return;
3027 }
3028 gcc_unreachable ();
3029 }
3030
3031 /* Calculate difference of two statistics set WAS and NOW.
3032 Result returned in WAS. */
3033 static void
3034 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3035 const rank_for_schedule_stats_t *now)
3036 {
3037 for (int i = 0; i < RFS_N; ++i)
3038 was->stats[i] = now->stats[i] - was->stats[i];
3039 }
3040
3041 /* Print rank_for_schedule statistics. */
3042 static void
3043 print_rank_for_schedule_stats (const char *prefix,
3044 const rank_for_schedule_stats_t *stats,
3045 struct ready_list *ready)
3046 {
3047 for (int i = 0; i < RFS_N; ++i)
3048 if (stats->stats[i])
3049 {
3050 fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3051
3052 if (ready != NULL)
3053 /* Print out insns that won due to RFS_<I>. */
3054 {
3055 rtx_insn **p = ready_lastpos (ready);
3056
3057 fprintf (sched_dump, ":");
3058 /* Start with 1 since least-priority insn didn't have any wins. */
3059 for (int j = 1; j < ready->n_ready; ++j)
3060 if (INSN_LAST_RFS_WIN (p[j]) == i)
3061 fprintf (sched_dump, " %s",
3062 (*current_sched_info->print_insn) (p[j], 0));
3063 }
3064 fprintf (sched_dump, "\n");
3065 }
3066 }
3067
3068 /* Separate DEBUG_INSNS from normal insns. DEBUG_INSNs go to the end
3069 of array. */
3070 static void
3071 ready_sort_debug (struct ready_list *ready)
3072 {
3073 int i;
3074 rtx_insn **first = ready_lastpos (ready);
3075
3076 for (i = 0; i < ready->n_ready; ++i)
3077 if (!DEBUG_INSN_P (first[i]))
3078 INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3079
3080 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3081 }
3082
3083 /* Sort non-debug insns in the ready list READY by ascending priority.
3084 Assumes that all debug insns are separated from the real insns. */
3085 static void
3086 ready_sort_real (struct ready_list *ready)
3087 {
3088 int i;
3089 rtx_insn **first = ready_lastpos (ready);
3090 int n_ready_real = ready->n_ready - ready->n_debug;
3091
3092 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3093 for (i = 0; i < n_ready_real; ++i)
3094 setup_insn_reg_pressure_info (first[i]);
3095 else if (sched_pressure == SCHED_PRESSURE_MODEL
3096 && model_curr_point < model_num_insns)
3097 model_set_excess_costs (first, n_ready_real);
3098
3099 rank_for_schedule_stats_t stats1;
3100 if (sched_verbose >= 4)
3101 stats1 = rank_for_schedule_stats;
3102
3103 if (n_ready_real == 2)
3104 swap_sort (first, n_ready_real);
3105 else if (n_ready_real > 2)
3106 qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3107
3108 if (sched_verbose >= 4)
3109 {
3110 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3111 print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3112 }
3113 }
3114
3115 /* Sort the ready list READY by ascending priority. */
3116 static void
3117 ready_sort (struct ready_list *ready)
3118 {
3119 if (ready->n_debug > 0)
3120 ready_sort_debug (ready);
3121 else
3122 ready_sort_real (ready);
3123 }
3124
3125 /* PREV is an insn that is ready to execute. Adjust its priority if that
3126 will help shorten or lengthen register lifetimes as appropriate. Also
3127 provide a hook for the target to tweak itself. */
3128
3129 HAIFA_INLINE static void
3130 adjust_priority (rtx_insn *prev)
3131 {
3132 /* ??? There used to be code here to try and estimate how an insn
3133 affected register lifetimes, but it did it by looking at REG_DEAD
3134 notes, which we removed in schedule_region. Nor did it try to
3135 take into account register pressure or anything useful like that.
3136
3137 Revisit when we have a machine model to work with and not before. */
3138
3139 if (targetm.sched.adjust_priority)
3140 INSN_PRIORITY (prev) =
3141 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3142 }
3143
3144 /* Advance DFA state STATE on one cycle. */
3145 void
3146 advance_state (state_t state)
3147 {
3148 if (targetm.sched.dfa_pre_advance_cycle)
3149 targetm.sched.dfa_pre_advance_cycle ();
3150
3151 if (targetm.sched.dfa_pre_cycle_insn)
3152 state_transition (state,
3153 targetm.sched.dfa_pre_cycle_insn ());
3154
3155 state_transition (state, NULL);
3156
3157 if (targetm.sched.dfa_post_cycle_insn)
3158 state_transition (state,
3159 targetm.sched.dfa_post_cycle_insn ());
3160
3161 if (targetm.sched.dfa_post_advance_cycle)
3162 targetm.sched.dfa_post_advance_cycle ();
3163 }
3164
3165 /* Advance time on one cycle. */
3166 HAIFA_INLINE static void
3167 advance_one_cycle (void)
3168 {
3169 advance_state (curr_state);
3170 if (sched_verbose >= 4)
3171 fprintf (sched_dump, ";;\tAdvance the current state.\n");
3172 }
3173
3174 /* Update register pressure after scheduling INSN. */
3175 static void
3176 update_register_pressure (rtx_insn *insn)
3177 {
3178 struct reg_use_data *use;
3179 struct reg_set_data *set;
3180
3181 gcc_checking_assert (!DEBUG_INSN_P (insn));
3182
3183 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3184 if (dying_use_p (use))
3185 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3186 use->regno, false);
3187 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3188 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3189 set->regno, true);
3190 }
3191
3192 /* Set up or update (if UPDATE_P) max register pressure (see its
3193 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3194 after insn AFTER. */
3195 static void
3196 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3197 {
3198 int i, p;
3199 bool eq_p;
3200 rtx_insn *insn;
3201 static int max_reg_pressure[N_REG_CLASSES];
3202
3203 save_reg_pressure ();
3204 for (i = 0; i < ira_pressure_classes_num; i++)
3205 max_reg_pressure[ira_pressure_classes[i]]
3206 = curr_reg_pressure[ira_pressure_classes[i]];
3207 for (insn = NEXT_INSN (after);
3208 insn != NULL_RTX && ! BARRIER_P (insn)
3209 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3210 insn = NEXT_INSN (insn))
3211 if (NONDEBUG_INSN_P (insn))
3212 {
3213 eq_p = true;
3214 for (i = 0; i < ira_pressure_classes_num; i++)
3215 {
3216 p = max_reg_pressure[ira_pressure_classes[i]];
3217 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3218 {
3219 eq_p = false;
3220 INSN_MAX_REG_PRESSURE (insn)[i]
3221 = max_reg_pressure[ira_pressure_classes[i]];
3222 }
3223 }
3224 if (update_p && eq_p)
3225 break;
3226 update_register_pressure (insn);
3227 for (i = 0; i < ira_pressure_classes_num; i++)
3228 if (max_reg_pressure[ira_pressure_classes[i]]
3229 < curr_reg_pressure[ira_pressure_classes[i]])
3230 max_reg_pressure[ira_pressure_classes[i]]
3231 = curr_reg_pressure[ira_pressure_classes[i]];
3232 }
3233 restore_reg_pressure ();
3234 }
3235
3236 /* Update the current register pressure after scheduling INSN. Update
3237 also max register pressure for unscheduled insns of the current
3238 BB. */
3239 static void
3240 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3241 {
3242 int i;
3243 int before[N_REG_CLASSES];
3244
3245 for (i = 0; i < ira_pressure_classes_num; i++)
3246 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3247 update_register_pressure (insn);
3248 for (i = 0; i < ira_pressure_classes_num; i++)
3249 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3250 break;
3251 if (i < ira_pressure_classes_num)
3252 setup_insn_max_reg_pressure (insn, true);
3253 }
3254
3255 /* Set up register pressure at the beginning of basic block BB whose
3256 insns starting after insn AFTER. Set up also max register pressure
3257 for all insns of the basic block. */
3258 void
3259 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3260 {
3261 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3262 initiate_bb_reg_pressure_info (bb);
3263 setup_insn_max_reg_pressure (after, false);
3264 }
3265 \f
3266 /* If doing predication while scheduling, verify whether INSN, which
3267 has just been scheduled, clobbers the conditions of any
3268 instructions that must be predicated in order to break their
3269 dependencies. If so, remove them from the queues so that they will
3270 only be scheduled once their control dependency is resolved. */
3271
3272 static void
3273 check_clobbered_conditions (rtx_insn *insn)
3274 {
3275 HARD_REG_SET t;
3276 int i;
3277
3278 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3279 return;
3280
3281 find_all_hard_reg_sets (insn, &t, true);
3282
3283 restart:
3284 for (i = 0; i < ready.n_ready; i++)
3285 {
3286 rtx_insn *x = ready_element (&ready, i);
3287 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3288 {
3289 ready_remove_insn (x);
3290 goto restart;
3291 }
3292 }
3293 for (i = 0; i <= max_insn_queue_index; i++)
3294 {
3295 rtx_insn_list *link;
3296 int q = NEXT_Q_AFTER (q_ptr, i);
3297
3298 restart_queue:
3299 for (link = insn_queue[q]; link; link = link->next ())
3300 {
3301 rtx_insn *x = link->insn ();
3302 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3303 {
3304 queue_remove (x);
3305 goto restart_queue;
3306 }
3307 }
3308 }
3309 }
3310 \f
3311 /* Return (in order):
3312
3313 - positive if INSN adversely affects the pressure on one
3314 register class
3315
3316 - negative if INSN reduces the pressure on one register class
3317
3318 - 0 if INSN doesn't affect the pressure on any register class. */
3319
3320 static int
3321 model_classify_pressure (struct model_insn_info *insn)
3322 {
3323 struct reg_pressure_data *reg_pressure;
3324 int death[N_REG_CLASSES];
3325 int pci, cl, sum;
3326
3327 calculate_reg_deaths (insn->insn, death);
3328 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3329 sum = 0;
3330 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3331 {
3332 cl = ira_pressure_classes[pci];
3333 if (death[cl] < reg_pressure[pci].set_increase)
3334 return 1;
3335 sum += reg_pressure[pci].set_increase - death[cl];
3336 }
3337 return sum;
3338 }
3339
3340 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3341
3342 static int
3343 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3344 {
3345 unsigned int height1, height2;
3346 unsigned int priority1, priority2;
3347
3348 /* Prefer instructions with a higher model priority. */
3349 if (insn1->model_priority != insn2->model_priority)
3350 return insn1->model_priority > insn2->model_priority;
3351
3352 /* Combine the length of the longest path of satisfied true dependencies
3353 that leads to each instruction (depth) with the length of the longest
3354 path of any dependencies that leads from the instruction (alap).
3355 Prefer instructions with the greatest combined length. If the combined
3356 lengths are equal, prefer instructions with the greatest depth.
3357
3358 The idea is that, if we have a set S of "equal" instructions that each
3359 have ALAP value X, and we pick one such instruction I, any true-dependent
3360 successors of I that have ALAP value X - 1 should be preferred over S.
3361 This encourages the schedule to be "narrow" rather than "wide".
3362 However, if I is a low-priority instruction that we decided to
3363 schedule because of its model_classify_pressure, and if there
3364 is a set of higher-priority instructions T, the aforementioned
3365 successors of I should not have the edge over T. */
3366 height1 = insn1->depth + insn1->alap;
3367 height2 = insn2->depth + insn2->alap;
3368 if (height1 != height2)
3369 return height1 > height2;
3370 if (insn1->depth != insn2->depth)
3371 return insn1->depth > insn2->depth;
3372
3373 /* We have no real preference between INSN1 an INSN2 as far as attempts
3374 to reduce pressure go. Prefer instructions with higher priorities. */
3375 priority1 = INSN_PRIORITY (insn1->insn);
3376 priority2 = INSN_PRIORITY (insn2->insn);
3377 if (priority1 != priority2)
3378 return priority1 > priority2;
3379
3380 /* Use the original rtl sequence as a tie-breaker. */
3381 return insn1 < insn2;
3382 }
3383
3384 /* Add INSN to the model worklist immediately after PREV. Add it to the
3385 beginning of the list if PREV is null. */
3386
3387 static void
3388 model_add_to_worklist_at (struct model_insn_info *insn,
3389 struct model_insn_info *prev)
3390 {
3391 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3392 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3393
3394 insn->prev = prev;
3395 if (prev)
3396 {
3397 insn->next = prev->next;
3398 prev->next = insn;
3399 }
3400 else
3401 {
3402 insn->next = model_worklist;
3403 model_worklist = insn;
3404 }
3405 if (insn->next)
3406 insn->next->prev = insn;
3407 }
3408
3409 /* Remove INSN from the model worklist. */
3410
3411 static void
3412 model_remove_from_worklist (struct model_insn_info *insn)
3413 {
3414 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3415 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3416
3417 if (insn->prev)
3418 insn->prev->next = insn->next;
3419 else
3420 model_worklist = insn->next;
3421 if (insn->next)
3422 insn->next->prev = insn->prev;
3423 }
3424
3425 /* Add INSN to the model worklist. Start looking for a suitable position
3426 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3427 insns either side. A null PREV indicates the beginning of the list and
3428 a null NEXT indicates the end. */
3429
3430 static void
3431 model_add_to_worklist (struct model_insn_info *insn,
3432 struct model_insn_info *prev,
3433 struct model_insn_info *next)
3434 {
3435 int count;
3436
3437 count = MAX_SCHED_READY_INSNS;
3438 if (count > 0 && prev && model_order_p (insn, prev))
3439 do
3440 {
3441 count--;
3442 prev = prev->prev;
3443 }
3444 while (count > 0 && prev && model_order_p (insn, prev));
3445 else
3446 while (count > 0 && next && model_order_p (next, insn))
3447 {
3448 count--;
3449 prev = next;
3450 next = next->next;
3451 }
3452 model_add_to_worklist_at (insn, prev);
3453 }
3454
3455 /* INSN may now have a higher priority (in the model_order_p sense)
3456 than before. Move it up the worklist if necessary. */
3457
3458 static void
3459 model_promote_insn (struct model_insn_info *insn)
3460 {
3461 struct model_insn_info *prev;
3462 int count;
3463
3464 prev = insn->prev;
3465 count = MAX_SCHED_READY_INSNS;
3466 while (count > 0 && prev && model_order_p (insn, prev))
3467 {
3468 count--;
3469 prev = prev->prev;
3470 }
3471 if (prev != insn->prev)
3472 {
3473 model_remove_from_worklist (insn);
3474 model_add_to_worklist_at (insn, prev);
3475 }
3476 }
3477
3478 /* Add INSN to the end of the model schedule. */
3479
3480 static void
3481 model_add_to_schedule (rtx_insn *insn)
3482 {
3483 unsigned int point;
3484
3485 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3486 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3487
3488 point = model_schedule.length ();
3489 model_schedule.quick_push (insn);
3490 INSN_MODEL_INDEX (insn) = point + 1;
3491 }
3492
3493 /* Analyze the instructions that are to be scheduled, setting up
3494 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3495 instructions to model_worklist. */
3496
3497 static void
3498 model_analyze_insns (void)
3499 {
3500 rtx_insn *start, *end, *iter;
3501 sd_iterator_def sd_it;
3502 dep_t dep;
3503 struct model_insn_info *insn, *con;
3504
3505 model_num_insns = 0;
3506 start = PREV_INSN (current_sched_info->next_tail);
3507 end = current_sched_info->prev_head;
3508 for (iter = start; iter != end; iter = PREV_INSN (iter))
3509 if (NONDEBUG_INSN_P (iter))
3510 {
3511 insn = MODEL_INSN_INFO (iter);
3512 insn->insn = iter;
3513 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3514 {
3515 con = MODEL_INSN_INFO (DEP_CON (dep));
3516 if (con->insn && insn->alap < con->alap + 1)
3517 insn->alap = con->alap + 1;
3518 }
3519
3520 insn->old_queue = QUEUE_INDEX (iter);
3521 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3522
3523 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3524 if (insn->unscheduled_preds == 0)
3525 model_add_to_worklist (insn, NULL, model_worklist);
3526
3527 model_num_insns++;
3528 }
3529 }
3530
3531 /* The global state describes the register pressure at the start of the
3532 model schedule. Initialize GROUP accordingly. */
3533
3534 static void
3535 model_init_pressure_group (struct model_pressure_group *group)
3536 {
3537 int pci, cl;
3538
3539 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3540 {
3541 cl = ira_pressure_classes[pci];
3542 group->limits[pci].pressure = curr_reg_pressure[cl];
3543 group->limits[pci].point = 0;
3544 }
3545 /* Use index model_num_insns to record the state after the last
3546 instruction in the model schedule. */
3547 group->model = XNEWVEC (struct model_pressure_data,
3548 (model_num_insns + 1) * ira_pressure_classes_num);
3549 }
3550
3551 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3552 Update the maximum pressure for the whole schedule. */
3553
3554 static void
3555 model_record_pressure (struct model_pressure_group *group,
3556 int point, int pci, int pressure)
3557 {
3558 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3559 if (group->limits[pci].pressure < pressure)
3560 {
3561 group->limits[pci].pressure = pressure;
3562 group->limits[pci].point = point;
3563 }
3564 }
3565
3566 /* INSN has just been added to the end of the model schedule. Record its
3567 register-pressure information. */
3568
3569 static void
3570 model_record_pressures (struct model_insn_info *insn)
3571 {
3572 struct reg_pressure_data *reg_pressure;
3573 int point, pci, cl, delta;
3574 int death[N_REG_CLASSES];
3575
3576 point = model_index (insn->insn);
3577 if (sched_verbose >= 2)
3578 {
3579 if (point == 0)
3580 {
3581 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3582 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3583 }
3584 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3585 point, INSN_UID (insn->insn), insn->model_priority,
3586 insn->depth + insn->alap, insn->depth,
3587 INSN_PRIORITY (insn->insn),
3588 str_pattern_slim (PATTERN (insn->insn)));
3589 }
3590 calculate_reg_deaths (insn->insn, death);
3591 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3592 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3593 {
3594 cl = ira_pressure_classes[pci];
3595 delta = reg_pressure[pci].set_increase - death[cl];
3596 if (sched_verbose >= 2)
3597 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3598 curr_reg_pressure[cl], delta);
3599 model_record_pressure (&model_before_pressure, point, pci,
3600 curr_reg_pressure[cl]);
3601 }
3602 if (sched_verbose >= 2)
3603 fprintf (sched_dump, "\n");
3604 }
3605
3606 /* All instructions have been added to the model schedule. Record the
3607 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3608
3609 static void
3610 model_record_final_pressures (struct model_pressure_group *group)
3611 {
3612 int point, pci, max_pressure, ref_pressure, cl;
3613
3614 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3615 {
3616 /* Record the final pressure for this class. */
3617 cl = ira_pressure_classes[pci];
3618 point = model_num_insns;
3619 ref_pressure = curr_reg_pressure[cl];
3620 model_record_pressure (group, point, pci, ref_pressure);
3621
3622 /* Record the original maximum pressure. */
3623 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3624
3625 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3626 max_pressure = ref_pressure;
3627 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3628 while (point > 0)
3629 {
3630 point--;
3631 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3632 max_pressure = MAX (max_pressure, ref_pressure);
3633 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3634 }
3635 }
3636 }
3637
3638 /* Update all successors of INSN, given that INSN has just been scheduled. */
3639
3640 static void
3641 model_add_successors_to_worklist (struct model_insn_info *insn)
3642 {
3643 sd_iterator_def sd_it;
3644 struct model_insn_info *con;
3645 dep_t dep;
3646
3647 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3648 {
3649 con = MODEL_INSN_INFO (DEP_CON (dep));
3650 /* Ignore debug instructions, and instructions from other blocks. */
3651 if (con->insn)
3652 {
3653 con->unscheduled_preds--;
3654
3655 /* Update the depth field of each true-dependent successor.
3656 Increasing the depth gives them a higher priority than
3657 before. */
3658 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3659 {
3660 con->depth = insn->depth + 1;
3661 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3662 model_promote_insn (con);
3663 }
3664
3665 /* If this is a true dependency, or if there are no remaining
3666 dependencies for CON (meaning that CON only had non-true
3667 dependencies), make sure that CON is on the worklist.
3668 We don't bother otherwise because it would tend to fill the
3669 worklist with a lot of low-priority instructions that are not
3670 yet ready to issue. */
3671 if ((con->depth > 0 || con->unscheduled_preds == 0)
3672 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3673 model_add_to_worklist (con, insn, insn->next);
3674 }
3675 }
3676 }
3677
3678 /* Give INSN a higher priority than any current instruction, then give
3679 unscheduled predecessors of INSN a higher priority still. If any of
3680 those predecessors are not on the model worklist, do the same for its
3681 predecessors, and so on. */
3682
3683 static void
3684 model_promote_predecessors (struct model_insn_info *insn)
3685 {
3686 struct model_insn_info *pro, *first;
3687 sd_iterator_def sd_it;
3688 dep_t dep;
3689
3690 if (sched_verbose >= 7)
3691 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3692 INSN_UID (insn->insn), model_next_priority);
3693 insn->model_priority = model_next_priority++;
3694 model_remove_from_worklist (insn);
3695 model_add_to_worklist_at (insn, NULL);
3696
3697 first = NULL;
3698 for (;;)
3699 {
3700 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3701 {
3702 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3703 /* The first test is to ignore debug instructions, and instructions
3704 from other blocks. */
3705 if (pro->insn
3706 && pro->model_priority != model_next_priority
3707 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3708 {
3709 pro->model_priority = model_next_priority;
3710 if (sched_verbose >= 7)
3711 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3712 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3713 {
3714 /* PRO is already in the worklist, but it now has
3715 a higher priority than before. Move it at the
3716 appropriate place. */
3717 model_remove_from_worklist (pro);
3718 model_add_to_worklist (pro, NULL, model_worklist);
3719 }
3720 else
3721 {
3722 /* PRO isn't in the worklist. Recursively process
3723 its predecessors until we find one that is. */
3724 pro->next = first;
3725 first = pro;
3726 }
3727 }
3728 }
3729 if (!first)
3730 break;
3731 insn = first;
3732 first = insn->next;
3733 }
3734 if (sched_verbose >= 7)
3735 fprintf (sched_dump, " = %d\n", model_next_priority);
3736 model_next_priority++;
3737 }
3738
3739 /* Pick one instruction from model_worklist and process it. */
3740
3741 static void
3742 model_choose_insn (void)
3743 {
3744 struct model_insn_info *insn, *fallback;
3745 int count;
3746
3747 if (sched_verbose >= 7)
3748 {
3749 fprintf (sched_dump, ";;\t+--- worklist:\n");
3750 insn = model_worklist;
3751 count = MAX_SCHED_READY_INSNS;
3752 while (count > 0 && insn)
3753 {
3754 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3755 INSN_UID (insn->insn), insn->model_priority,
3756 insn->depth + insn->alap, insn->depth,
3757 INSN_PRIORITY (insn->insn));
3758 count--;
3759 insn = insn->next;
3760 }
3761 }
3762
3763 /* Look for a ready instruction whose model_classify_priority is zero
3764 or negative, picking the highest-priority one. Adding such an
3765 instruction to the schedule now should do no harm, and may actually
3766 do some good.
3767
3768 Failing that, see whether there is an instruction with the highest
3769 extant model_priority that is not yet ready, but which would reduce
3770 pressure if it became ready. This is designed to catch cases like:
3771
3772 (set (mem (reg R1)) (reg R2))
3773
3774 where the instruction is the last remaining use of R1 and where the
3775 value of R2 is not yet available (or vice versa). The death of R1
3776 means that this instruction already reduces pressure. It is of
3777 course possible that the computation of R2 involves other registers
3778 that are hard to kill, but such cases are rare enough for this
3779 heuristic to be a win in general.
3780
3781 Failing that, just pick the highest-priority instruction in the
3782 worklist. */
3783 count = MAX_SCHED_READY_INSNS;
3784 insn = model_worklist;
3785 fallback = 0;
3786 for (;;)
3787 {
3788 if (count == 0 || !insn)
3789 {
3790 insn = fallback ? fallback : model_worklist;
3791 break;
3792 }
3793 if (insn->unscheduled_preds)
3794 {
3795 if (model_worklist->model_priority == insn->model_priority
3796 && !fallback
3797 && model_classify_pressure (insn) < 0)
3798 fallback = insn;
3799 }
3800 else
3801 {
3802 if (model_classify_pressure (insn) <= 0)
3803 break;
3804 }
3805 count--;
3806 insn = insn->next;
3807 }
3808
3809 if (sched_verbose >= 7 && insn != model_worklist)
3810 {
3811 if (insn->unscheduled_preds)
3812 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3813 INSN_UID (insn->insn));
3814 else
3815 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3816 INSN_UID (insn->insn));
3817 }
3818 if (insn->unscheduled_preds)
3819 /* INSN isn't yet ready to issue. Give all its predecessors the
3820 highest priority. */
3821 model_promote_predecessors (insn);
3822 else
3823 {
3824 /* INSN is ready. Add it to the end of model_schedule and
3825 process its successors. */
3826 model_add_successors_to_worklist (insn);
3827 model_remove_from_worklist (insn);
3828 model_add_to_schedule (insn->insn);
3829 model_record_pressures (insn);
3830 update_register_pressure (insn->insn);
3831 }
3832 }
3833
3834 /* Restore all QUEUE_INDEXs to the values that they had before
3835 model_start_schedule was called. */
3836
3837 static void
3838 model_reset_queue_indices (void)
3839 {
3840 unsigned int i;
3841 rtx_insn *insn;
3842
3843 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3844 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3845 }
3846
3847 /* We have calculated the model schedule and spill costs. Print a summary
3848 to sched_dump. */
3849
3850 static void
3851 model_dump_pressure_summary (void)
3852 {
3853 int pci, cl;
3854
3855 fprintf (sched_dump, ";; Pressure summary:");
3856 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3857 {
3858 cl = ira_pressure_classes[pci];
3859 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3860 model_before_pressure.limits[pci].pressure);
3861 }
3862 fprintf (sched_dump, "\n\n");
3863 }
3864
3865 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3866 scheduling region. */
3867
3868 static void
3869 model_start_schedule (basic_block bb)
3870 {
3871 model_next_priority = 1;
3872 model_schedule.create (sched_max_luid);
3873 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3874
3875 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3876 initiate_reg_pressure_info (df_get_live_in (bb));
3877
3878 model_analyze_insns ();
3879 model_init_pressure_group (&model_before_pressure);
3880 while (model_worklist)
3881 model_choose_insn ();
3882 gcc_assert (model_num_insns == (int) model_schedule.length ());
3883 if (sched_verbose >= 2)
3884 fprintf (sched_dump, "\n");
3885
3886 model_record_final_pressures (&model_before_pressure);
3887 model_reset_queue_indices ();
3888
3889 XDELETEVEC (model_insns);
3890
3891 model_curr_point = 0;
3892 initiate_reg_pressure_info (df_get_live_in (bb));
3893 if (sched_verbose >= 1)
3894 model_dump_pressure_summary ();
3895 }
3896
3897 /* Free the information associated with GROUP. */
3898
3899 static void
3900 model_finalize_pressure_group (struct model_pressure_group *group)
3901 {
3902 XDELETEVEC (group->model);
3903 }
3904
3905 /* Free the information created by model_start_schedule. */
3906
3907 static void
3908 model_end_schedule (void)
3909 {
3910 model_finalize_pressure_group (&model_before_pressure);
3911 model_schedule.release ();
3912 }
3913
3914 /* Prepare reg pressure scheduling for basic block BB. */
3915 static void
3916 sched_pressure_start_bb (basic_block bb)
3917 {
3918 /* Set the number of available registers for each class taking into account
3919 relative probability of current basic block versus function prologue and
3920 epilogue.
3921 * If the basic block executes much more often than the prologue/epilogue
3922 (e.g., inside a hot loop), then cost of spill in the prologue is close to
3923 nil, so the effective number of available registers is
3924 (ira_class_hard_regs_num[cl] - 0).
3925 * If the basic block executes as often as the prologue/epilogue,
3926 then spill in the block is as costly as in the prologue, so the effective
3927 number of available registers is
3928 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
3929 Note that all-else-equal, we prefer to spill in the prologue, since that
3930 allows "extra" registers for other basic blocks of the function.
3931 * If the basic block is on the cold path of the function and executes
3932 rarely, then we should always prefer to spill in the block, rather than
3933 in the prologue/epilogue. The effective number of available register is
3934 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]). */
3935 {
3936 int i;
3937 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
3938 int bb_freq = bb->frequency;
3939
3940 if (bb_freq == 0)
3941 {
3942 if (entry_freq == 0)
3943 entry_freq = bb_freq = 1;
3944 }
3945 if (bb_freq < entry_freq)
3946 bb_freq = entry_freq;
3947
3948 for (i = 0; i < ira_pressure_classes_num; ++i)
3949 {
3950 enum reg_class cl = ira_pressure_classes[i];
3951 sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
3952 sched_class_regs_num[cl]
3953 -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
3954 }
3955 }
3956
3957 if (sched_pressure == SCHED_PRESSURE_MODEL)
3958 model_start_schedule (bb);
3959 }
3960 \f
3961 /* A structure that holds local state for the loop in schedule_block. */
3962 struct sched_block_state
3963 {
3964 /* True if no real insns have been scheduled in the current cycle. */
3965 bool first_cycle_insn_p;
3966 /* True if a shadow insn has been scheduled in the current cycle, which
3967 means that no more normal insns can be issued. */
3968 bool shadows_only_p;
3969 /* True if we're winding down a modulo schedule, which means that we only
3970 issue insns with INSN_EXACT_TICK set. */
3971 bool modulo_epilogue;
3972 /* Initialized with the machine's issue rate every cycle, and updated
3973 by calls to the variable_issue hook. */
3974 int can_issue_more;
3975 };
3976
3977 /* INSN is the "currently executing insn". Launch each insn which was
3978 waiting on INSN. READY is the ready list which contains the insns
3979 that are ready to fire. CLOCK is the current cycle. The function
3980 returns necessary cycle advance after issuing the insn (it is not
3981 zero for insns in a schedule group). */
3982
3983 static int
3984 schedule_insn (rtx_insn *insn)
3985 {
3986 sd_iterator_def sd_it;
3987 dep_t dep;
3988 int i;
3989 int advance = 0;
3990
3991 if (sched_verbose >= 1)
3992 {
3993 struct reg_pressure_data *pressure_info;
3994 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3995 clock_var, (*current_sched_info->print_insn) (insn, 1),
3996 str_pattern_slim (PATTERN (insn)));
3997
3998 if (recog_memoized (insn) < 0)
3999 fprintf (sched_dump, "nothing");
4000 else
4001 print_reservation (sched_dump, insn);
4002 pressure_info = INSN_REG_PRESSURE (insn);
4003 if (pressure_info != NULL)
4004 {
4005 fputc (':', sched_dump);
4006 for (i = 0; i < ira_pressure_classes_num; i++)
4007 fprintf (sched_dump, "%s%s%+d(%d)",
4008 scheduled_insns.length () > 1
4009 && INSN_LUID (insn)
4010 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4011 reg_class_names[ira_pressure_classes[i]],
4012 pressure_info[i].set_increase, pressure_info[i].change);
4013 }
4014 if (sched_pressure == SCHED_PRESSURE_MODEL
4015 && model_curr_point < model_num_insns
4016 && model_index (insn) == model_curr_point)
4017 fprintf (sched_dump, ":model %d", model_curr_point);
4018 fputc ('\n', sched_dump);
4019 }
4020
4021 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4022 update_reg_and_insn_max_reg_pressure (insn);
4023
4024 /* Scheduling instruction should have all its dependencies resolved and
4025 should have been removed from the ready list. */
4026 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4027
4028 /* Reset debug insns invalidated by moving this insn. */
4029 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
4030 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4031 sd_iterator_cond (&sd_it, &dep);)
4032 {
4033 rtx_insn *dbg = DEP_PRO (dep);
4034 struct reg_use_data *use, *next;
4035
4036 if (DEP_STATUS (dep) & DEP_CANCELLED)
4037 {
4038 sd_iterator_next (&sd_it);
4039 continue;
4040 }
4041
4042 gcc_assert (DEBUG_INSN_P (dbg));
4043
4044 if (sched_verbose >= 6)
4045 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4046 INSN_UID (dbg));
4047
4048 /* ??? Rather than resetting the debug insn, we might be able
4049 to emit a debug temp before the just-scheduled insn, but
4050 this would involve checking that the expression at the
4051 point of the debug insn is equivalent to the expression
4052 before the just-scheduled insn. They might not be: the
4053 expression in the debug insn may depend on other insns not
4054 yet scheduled that set MEMs, REGs or even other debug
4055 insns. It's not clear that attempting to preserve debug
4056 information in these cases is worth the effort, given how
4057 uncommon these resets are and the likelihood that the debug
4058 temps introduced won't survive the schedule change. */
4059 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4060 df_insn_rescan (dbg);
4061
4062 /* Unknown location doesn't use any registers. */
4063 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4064 {
4065 struct reg_use_data *prev = use;
4066
4067 /* Remove use from the cyclic next_regno_use chain first. */
4068 while (prev->next_regno_use != use)
4069 prev = prev->next_regno_use;
4070 prev->next_regno_use = use->next_regno_use;
4071 next = use->next_insn_use;
4072 free (use);
4073 }
4074 INSN_REG_USE_LIST (dbg) = NULL;
4075
4076 /* We delete rather than resolve these deps, otherwise we
4077 crash in sched_free_deps(), because forward deps are
4078 expected to be released before backward deps. */
4079 sd_delete_dep (sd_it);
4080 }
4081
4082 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4083 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4084
4085 if (sched_pressure == SCHED_PRESSURE_MODEL
4086 && model_curr_point < model_num_insns
4087 && NONDEBUG_INSN_P (insn))
4088 {
4089 if (model_index (insn) == model_curr_point)
4090 do
4091 model_curr_point++;
4092 while (model_curr_point < model_num_insns
4093 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4094 == QUEUE_SCHEDULED));
4095 else
4096 model_recompute (insn);
4097 model_update_limit_points ();
4098 update_register_pressure (insn);
4099 if (sched_verbose >= 2)
4100 print_curr_reg_pressure ();
4101 }
4102
4103 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4104 if (INSN_TICK (insn) > clock_var)
4105 /* INSN has been prematurely moved from the queue to the ready list.
4106 This is possible only if following flags are set. */
4107 gcc_assert (flag_sched_stalled_insns || sched_fusion);
4108
4109 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4110 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4111 INSN_TICK (insn) = clock_var;
4112
4113 check_clobbered_conditions (insn);
4114
4115 /* Update dependent instructions. First, see if by scheduling this insn
4116 now we broke a dependence in a way that requires us to change another
4117 insn. */
4118 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4119 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4120 {
4121 struct dep_replacement *desc = DEP_REPLACE (dep);
4122 rtx_insn *pro = DEP_PRO (dep);
4123 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4124 && desc != NULL && desc->insn == pro)
4125 apply_replacement (dep, false);
4126 }
4127
4128 /* Go through and resolve forward dependencies. */
4129 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4130 sd_iterator_cond (&sd_it, &dep);)
4131 {
4132 rtx_insn *next = DEP_CON (dep);
4133 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4134
4135 /* Resolve the dependence between INSN and NEXT.
4136 sd_resolve_dep () moves current dep to another list thus
4137 advancing the iterator. */
4138 sd_resolve_dep (sd_it);
4139
4140 if (cancelled)
4141 {
4142 if (must_restore_pattern_p (next, dep))
4143 restore_pattern (dep, false);
4144 continue;
4145 }
4146
4147 /* Don't bother trying to mark next as ready if insn is a debug
4148 insn. If insn is the last hard dependency, it will have
4149 already been discounted. */
4150 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4151 continue;
4152
4153 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4154 {
4155 int effective_cost;
4156
4157 effective_cost = try_ready (next);
4158
4159 if (effective_cost >= 0
4160 && SCHED_GROUP_P (next)
4161 && advance < effective_cost)
4162 advance = effective_cost;
4163 }
4164 else
4165 /* Check always has only one forward dependence (to the first insn in
4166 the recovery block), therefore, this will be executed only once. */
4167 {
4168 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4169 fix_recovery_deps (RECOVERY_BLOCK (insn));
4170 }
4171 }
4172
4173 /* Annotate the instruction with issue information -- TImode
4174 indicates that the instruction is expected not to be able
4175 to issue on the same cycle as the previous insn. A machine
4176 may use this information to decide how the instruction should
4177 be aligned. */
4178 if (issue_rate > 1
4179 && GET_CODE (PATTERN (insn)) != USE
4180 && GET_CODE (PATTERN (insn)) != CLOBBER
4181 && !DEBUG_INSN_P (insn))
4182 {
4183 if (reload_completed)
4184 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4185 last_clock_var = clock_var;
4186 }
4187
4188 if (nonscheduled_insns_begin != NULL_RTX)
4189 /* Indicate to debug counters that INSN is scheduled. */
4190 nonscheduled_insns_begin = insn;
4191
4192 return advance;
4193 }
4194
4195 /* Functions for handling of notes. */
4196
4197 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
4198 void
4199 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4200 {
4201 rtx_insn *from_start;
4202
4203 /* It's easy when have nothing to concat. */
4204 if (from_end == NULL)
4205 return;
4206
4207 /* It's also easy when destination is empty. */
4208 if (*to_endp == NULL)
4209 {
4210 *to_endp = from_end;
4211 return;
4212 }
4213
4214 from_start = from_end;
4215 while (PREV_INSN (from_start) != NULL)
4216 from_start = PREV_INSN (from_start);
4217
4218 SET_PREV_INSN (from_start) = *to_endp;
4219 SET_NEXT_INSN (*to_endp) = from_start;
4220 *to_endp = from_end;
4221 }
4222
4223 /* Delete notes between HEAD and TAIL and put them in the chain
4224 of notes ended by NOTE_LIST. */
4225 void
4226 remove_notes (rtx_insn *head, rtx_insn *tail)
4227 {
4228 rtx_insn *next_tail, *insn, *next;
4229
4230 note_list = 0;
4231 if (head == tail && !INSN_P (head))
4232 return;
4233
4234 next_tail = NEXT_INSN (tail);
4235 for (insn = head; insn != next_tail; insn = next)
4236 {
4237 next = NEXT_INSN (insn);
4238 if (!NOTE_P (insn))
4239 continue;
4240
4241 switch (NOTE_KIND (insn))
4242 {
4243 case NOTE_INSN_BASIC_BLOCK:
4244 continue;
4245
4246 case NOTE_INSN_EPILOGUE_BEG:
4247 if (insn != tail)
4248 {
4249 remove_insn (insn);
4250 add_reg_note (next, REG_SAVE_NOTE,
4251 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4252 break;
4253 }
4254 /* FALLTHRU */
4255
4256 default:
4257 remove_insn (insn);
4258
4259 /* Add the note to list that ends at NOTE_LIST. */
4260 SET_PREV_INSN (insn) = note_list;
4261 SET_NEXT_INSN (insn) = NULL_RTX;
4262 if (note_list)
4263 SET_NEXT_INSN (note_list) = insn;
4264 note_list = insn;
4265 break;
4266 }
4267
4268 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4269 }
4270 }
4271
4272 /* A structure to record enough data to allow us to backtrack the scheduler to
4273 a previous state. */
4274 struct haifa_saved_data
4275 {
4276 /* Next entry on the list. */
4277 struct haifa_saved_data *next;
4278
4279 /* Backtracking is associated with scheduling insns that have delay slots.
4280 DELAY_PAIR points to the structure that contains the insns involved, and
4281 the number of cycles between them. */
4282 struct delay_pair *delay_pair;
4283
4284 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4285 void *fe_saved_data;
4286 /* Data used by the backend. */
4287 void *be_saved_data;
4288
4289 /* Copies of global state. */
4290 int clock_var, last_clock_var;
4291 struct ready_list ready;
4292 state_t curr_state;
4293
4294 rtx_insn *last_scheduled_insn;
4295 rtx_insn *last_nondebug_scheduled_insn;
4296 rtx_insn *nonscheduled_insns_begin;
4297 int cycle_issued_insns;
4298
4299 /* Copies of state used in the inner loop of schedule_block. */
4300 struct sched_block_state sched_block;
4301
4302 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4303 to 0 when restoring. */
4304 int q_size;
4305 rtx_insn_list **insn_queue;
4306
4307 /* Describe pattern replacements that occurred since this backtrack point
4308 was queued. */
4309 vec<dep_t> replacement_deps;
4310 vec<int> replace_apply;
4311
4312 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4313 point. */
4314 vec<dep_t> next_cycle_deps;
4315 vec<int> next_cycle_apply;
4316 };
4317
4318 /* A record, in reverse order, of all scheduled insns which have delay slots
4319 and may require backtracking. */
4320 static struct haifa_saved_data *backtrack_queue;
4321
4322 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4323 to SET_P. */
4324 static void
4325 mark_backtrack_feeds (rtx_insn *insn, int set_p)
4326 {
4327 sd_iterator_def sd_it;
4328 dep_t dep;
4329 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4330 {
4331 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4332 }
4333 }
4334
4335 /* Save the current scheduler state so that we can backtrack to it
4336 later if necessary. PAIR gives the insns that make it necessary to
4337 save this point. SCHED_BLOCK is the local state of schedule_block
4338 that need to be saved. */
4339 static void
4340 save_backtrack_point (struct delay_pair *pair,
4341 struct sched_block_state sched_block)
4342 {
4343 int i;
4344 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4345
4346 save->curr_state = xmalloc (dfa_state_size);
4347 memcpy (save->curr_state, curr_state, dfa_state_size);
4348
4349 save->ready.first = ready.first;
4350 save->ready.n_ready = ready.n_ready;
4351 save->ready.n_debug = ready.n_debug;
4352 save->ready.veclen = ready.veclen;
4353 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4354 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4355
4356 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4357 save->q_size = q_size;
4358 for (i = 0; i <= max_insn_queue_index; i++)
4359 {
4360 int q = NEXT_Q_AFTER (q_ptr, i);
4361 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4362 }
4363
4364 save->clock_var = clock_var;
4365 save->last_clock_var = last_clock_var;
4366 save->cycle_issued_insns = cycle_issued_insns;
4367 save->last_scheduled_insn = last_scheduled_insn;
4368 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4369 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4370
4371 save->sched_block = sched_block;
4372
4373 save->replacement_deps.create (0);
4374 save->replace_apply.create (0);
4375 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4376 save->next_cycle_apply = next_cycle_apply.copy ();
4377
4378 if (current_sched_info->save_state)
4379 save->fe_saved_data = (*current_sched_info->save_state) ();
4380
4381 if (targetm.sched.alloc_sched_context)
4382 {
4383 save->be_saved_data = targetm.sched.alloc_sched_context ();
4384 targetm.sched.init_sched_context (save->be_saved_data, false);
4385 }
4386 else
4387 save->be_saved_data = NULL;
4388
4389 save->delay_pair = pair;
4390
4391 save->next = backtrack_queue;
4392 backtrack_queue = save;
4393
4394 while (pair)
4395 {
4396 mark_backtrack_feeds (pair->i2, 1);
4397 INSN_TICK (pair->i2) = INVALID_TICK;
4398 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4399 SHADOW_P (pair->i2) = pair->stages == 0;
4400 pair = pair->next_same_i1;
4401 }
4402 }
4403
4404 /* Walk the ready list and all queues. If any insns have unresolved backwards
4405 dependencies, these must be cancelled deps, broken by predication. Set or
4406 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4407
4408 static void
4409 toggle_cancelled_flags (bool set)
4410 {
4411 int i;
4412 sd_iterator_def sd_it;
4413 dep_t dep;
4414
4415 if (ready.n_ready > 0)
4416 {
4417 rtx_insn **first = ready_lastpos (&ready);
4418 for (i = 0; i < ready.n_ready; i++)
4419 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4420 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4421 {
4422 if (set)
4423 DEP_STATUS (dep) |= DEP_CANCELLED;
4424 else
4425 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4426 }
4427 }
4428 for (i = 0; i <= max_insn_queue_index; i++)
4429 {
4430 int q = NEXT_Q_AFTER (q_ptr, i);
4431 rtx_insn_list *link;
4432 for (link = insn_queue[q]; link; link = link->next ())
4433 {
4434 rtx_insn *insn = link->insn ();
4435 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4436 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4437 {
4438 if (set)
4439 DEP_STATUS (dep) |= DEP_CANCELLED;
4440 else
4441 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4442 }
4443 }
4444 }
4445 }
4446
4447 /* Undo the replacements that have occurred after backtrack point SAVE
4448 was placed. */
4449 static void
4450 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4451 {
4452 while (!save->replacement_deps.is_empty ())
4453 {
4454 dep_t dep = save->replacement_deps.pop ();
4455 int apply_p = save->replace_apply.pop ();
4456
4457 if (apply_p)
4458 restore_pattern (dep, true);
4459 else
4460 apply_replacement (dep, true);
4461 }
4462 save->replacement_deps.release ();
4463 save->replace_apply.release ();
4464 }
4465
4466 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4467 Restore their dependencies to an unresolved state, and mark them as
4468 queued nowhere. */
4469
4470 static void
4471 unschedule_insns_until (rtx_insn *insn)
4472 {
4473 auto_vec<rtx_insn *> recompute_vec;
4474
4475 /* Make two passes over the insns to be unscheduled. First, we clear out
4476 dependencies and other trivial bookkeeping. */
4477 for (;;)
4478 {
4479 rtx_insn *last;
4480 sd_iterator_def sd_it;
4481 dep_t dep;
4482
4483 last = scheduled_insns.pop ();
4484
4485 /* This will be changed by restore_backtrack_point if the insn is in
4486 any queue. */
4487 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4488 if (last != insn)
4489 INSN_TICK (last) = INVALID_TICK;
4490
4491 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4492 modulo_insns_scheduled--;
4493
4494 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4495 sd_iterator_cond (&sd_it, &dep);)
4496 {
4497 rtx_insn *con = DEP_CON (dep);
4498 sd_unresolve_dep (sd_it);
4499 if (!MUST_RECOMPUTE_SPEC_P (con))
4500 {
4501 MUST_RECOMPUTE_SPEC_P (con) = 1;
4502 recompute_vec.safe_push (con);
4503 }
4504 }
4505
4506 if (last == insn)
4507 break;
4508 }
4509
4510 /* A second pass, to update ready and speculation status for insns
4511 depending on the unscheduled ones. The first pass must have
4512 popped the scheduled_insns vector up to the point where we
4513 restart scheduling, as recompute_todo_spec requires it to be
4514 up-to-date. */
4515 while (!recompute_vec.is_empty ())
4516 {
4517 rtx_insn *con;
4518
4519 con = recompute_vec.pop ();
4520 MUST_RECOMPUTE_SPEC_P (con) = 0;
4521 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4522 {
4523 TODO_SPEC (con) = HARD_DEP;
4524 INSN_TICK (con) = INVALID_TICK;
4525 if (PREDICATED_PAT (con) != NULL_RTX)
4526 haifa_change_pattern (con, ORIG_PAT (con));
4527 }
4528 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4529 TODO_SPEC (con) = recompute_todo_spec (con, true);
4530 }
4531 }
4532
4533 /* Restore scheduler state from the topmost entry on the backtracking queue.
4534 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4535 overwrite with the saved data.
4536 The caller must already have called unschedule_insns_until. */
4537
4538 static void
4539 restore_last_backtrack_point (struct sched_block_state *psched_block)
4540 {
4541 int i;
4542 struct haifa_saved_data *save = backtrack_queue;
4543
4544 backtrack_queue = save->next;
4545
4546 if (current_sched_info->restore_state)
4547 (*current_sched_info->restore_state) (save->fe_saved_data);
4548
4549 if (targetm.sched.alloc_sched_context)
4550 {
4551 targetm.sched.set_sched_context (save->be_saved_data);
4552 targetm.sched.free_sched_context (save->be_saved_data);
4553 }
4554
4555 /* Do this first since it clobbers INSN_TICK of the involved
4556 instructions. */
4557 undo_replacements_for_backtrack (save);
4558
4559 /* Clear the QUEUE_INDEX of everything in the ready list or one
4560 of the queues. */
4561 if (ready.n_ready > 0)
4562 {
4563 rtx_insn **first = ready_lastpos (&ready);
4564 for (i = 0; i < ready.n_ready; i++)
4565 {
4566 rtx_insn *insn = first[i];
4567 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4568 INSN_TICK (insn) = INVALID_TICK;
4569 }
4570 }
4571 for (i = 0; i <= max_insn_queue_index; i++)
4572 {
4573 int q = NEXT_Q_AFTER (q_ptr, i);
4574
4575 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4576 {
4577 rtx_insn *x = link->insn ();
4578 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4579 INSN_TICK (x) = INVALID_TICK;
4580 }
4581 free_INSN_LIST_list (&insn_queue[q]);
4582 }
4583
4584 free (ready.vec);
4585 ready = save->ready;
4586
4587 if (ready.n_ready > 0)
4588 {
4589 rtx_insn **first = ready_lastpos (&ready);
4590 for (i = 0; i < ready.n_ready; i++)
4591 {
4592 rtx_insn *insn = first[i];
4593 QUEUE_INDEX (insn) = QUEUE_READY;
4594 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4595 INSN_TICK (insn) = save->clock_var;
4596 }
4597 }
4598
4599 q_ptr = 0;
4600 q_size = save->q_size;
4601 for (i = 0; i <= max_insn_queue_index; i++)
4602 {
4603 int q = NEXT_Q_AFTER (q_ptr, i);
4604
4605 insn_queue[q] = save->insn_queue[q];
4606
4607 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4608 {
4609 rtx_insn *x = link->insn ();
4610 QUEUE_INDEX (x) = i;
4611 TODO_SPEC (x) = recompute_todo_spec (x, true);
4612 INSN_TICK (x) = save->clock_var + i;
4613 }
4614 }
4615 free (save->insn_queue);
4616
4617 toggle_cancelled_flags (true);
4618
4619 clock_var = save->clock_var;
4620 last_clock_var = save->last_clock_var;
4621 cycle_issued_insns = save->cycle_issued_insns;
4622 last_scheduled_insn = save->last_scheduled_insn;
4623 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4624 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4625
4626 *psched_block = save->sched_block;
4627
4628 memcpy (curr_state, save->curr_state, dfa_state_size);
4629 free (save->curr_state);
4630
4631 mark_backtrack_feeds (save->delay_pair->i2, 0);
4632
4633 gcc_assert (next_cycle_replace_deps.is_empty ());
4634 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4635 next_cycle_apply = save->next_cycle_apply.copy ();
4636
4637 free (save);
4638
4639 for (save = backtrack_queue; save; save = save->next)
4640 {
4641 mark_backtrack_feeds (save->delay_pair->i2, 1);
4642 }
4643 }
4644
4645 /* Discard all data associated with the topmost entry in the backtrack
4646 queue. If RESET_TICK is false, we just want to free the data. If true,
4647 we are doing this because we discovered a reason to backtrack. In the
4648 latter case, also reset the INSN_TICK for the shadow insn. */
4649 static void
4650 free_topmost_backtrack_point (bool reset_tick)
4651 {
4652 struct haifa_saved_data *save = backtrack_queue;
4653 int i;
4654
4655 backtrack_queue = save->next;
4656
4657 if (reset_tick)
4658 {
4659 struct delay_pair *pair = save->delay_pair;
4660 while (pair)
4661 {
4662 INSN_TICK (pair->i2) = INVALID_TICK;
4663 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4664 pair = pair->next_same_i1;
4665 }
4666 undo_replacements_for_backtrack (save);
4667 }
4668 else
4669 {
4670 save->replacement_deps.release ();
4671 save->replace_apply.release ();
4672 }
4673
4674 if (targetm.sched.free_sched_context)
4675 targetm.sched.free_sched_context (save->be_saved_data);
4676 if (current_sched_info->restore_state)
4677 free (save->fe_saved_data);
4678 for (i = 0; i <= max_insn_queue_index; i++)
4679 free_INSN_LIST_list (&save->insn_queue[i]);
4680 free (save->insn_queue);
4681 free (save->curr_state);
4682 free (save->ready.vec);
4683 free (save);
4684 }
4685
4686 /* Free the entire backtrack queue. */
4687 static void
4688 free_backtrack_queue (void)
4689 {
4690 while (backtrack_queue)
4691 free_topmost_backtrack_point (false);
4692 }
4693
4694 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4695 may have to postpone the replacement until the start of the next cycle,
4696 at which point we will be called again with IMMEDIATELY true. This is
4697 only done for machines which have instruction packets with explicit
4698 parallelism however. */
4699 static void
4700 apply_replacement (dep_t dep, bool immediately)
4701 {
4702 struct dep_replacement *desc = DEP_REPLACE (dep);
4703 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4704 {
4705 next_cycle_replace_deps.safe_push (dep);
4706 next_cycle_apply.safe_push (1);
4707 }
4708 else
4709 {
4710 bool success;
4711
4712 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4713 return;
4714
4715 if (sched_verbose >= 5)
4716 fprintf (sched_dump, "applying replacement for insn %d\n",
4717 INSN_UID (desc->insn));
4718
4719 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4720 gcc_assert (success);
4721
4722 update_insn_after_change (desc->insn);
4723 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4724 fix_tick_ready (desc->insn);
4725
4726 if (backtrack_queue != NULL)
4727 {
4728 backtrack_queue->replacement_deps.safe_push (dep);
4729 backtrack_queue->replace_apply.safe_push (1);
4730 }
4731 }
4732 }
4733
4734 /* We have determined that a pattern involved in DEP must be restored.
4735 If IMMEDIATELY is false, we may have to postpone the replacement
4736 until the start of the next cycle, at which point we will be called
4737 again with IMMEDIATELY true. */
4738 static void
4739 restore_pattern (dep_t dep, bool immediately)
4740 {
4741 rtx_insn *next = DEP_CON (dep);
4742 int tick = INSN_TICK (next);
4743
4744 /* If we already scheduled the insn, the modified version is
4745 correct. */
4746 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4747 return;
4748
4749 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4750 {
4751 next_cycle_replace_deps.safe_push (dep);
4752 next_cycle_apply.safe_push (0);
4753 return;
4754 }
4755
4756
4757 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4758 {
4759 if (sched_verbose >= 5)
4760 fprintf (sched_dump, "restoring pattern for insn %d\n",
4761 INSN_UID (next));
4762 haifa_change_pattern (next, ORIG_PAT (next));
4763 }
4764 else
4765 {
4766 struct dep_replacement *desc = DEP_REPLACE (dep);
4767 bool success;
4768
4769 if (sched_verbose >= 5)
4770 fprintf (sched_dump, "restoring pattern for insn %d\n",
4771 INSN_UID (desc->insn));
4772 tick = INSN_TICK (desc->insn);
4773
4774 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4775 gcc_assert (success);
4776 update_insn_after_change (desc->insn);
4777 if (backtrack_queue != NULL)
4778 {
4779 backtrack_queue->replacement_deps.safe_push (dep);
4780 backtrack_queue->replace_apply.safe_push (0);
4781 }
4782 }
4783 INSN_TICK (next) = tick;
4784 if (TODO_SPEC (next) == DEP_POSTPONED)
4785 return;
4786
4787 if (sd_lists_empty_p (next, SD_LIST_BACK))
4788 TODO_SPEC (next) = 0;
4789 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4790 TODO_SPEC (next) = HARD_DEP;
4791 }
4792
4793 /* Perform pattern replacements that were queued up until the next
4794 cycle. */
4795 static void
4796 perform_replacements_new_cycle (void)
4797 {
4798 int i;
4799 dep_t dep;
4800 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4801 {
4802 int apply_p = next_cycle_apply[i];
4803 if (apply_p)
4804 apply_replacement (dep, true);
4805 else
4806 restore_pattern (dep, true);
4807 }
4808 next_cycle_replace_deps.truncate (0);
4809 next_cycle_apply.truncate (0);
4810 }
4811
4812 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4813 instructions we've previously encountered, a set bit prevents
4814 recursion. BUDGET is a limit on how far ahead we look, it is
4815 reduced on recursive calls. Return true if we produced a good
4816 estimate, or false if we exceeded the budget. */
4817 static bool
4818 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4819 {
4820 sd_iterator_def sd_it;
4821 dep_t dep;
4822 int earliest = INSN_TICK (insn);
4823
4824 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4825 {
4826 rtx_insn *pro = DEP_PRO (dep);
4827 int t;
4828
4829 if (DEP_STATUS (dep) & DEP_CANCELLED)
4830 continue;
4831
4832 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4833 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4834 else
4835 {
4836 int cost = dep_cost (dep);
4837 if (cost >= budget)
4838 return false;
4839 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4840 {
4841 if (!estimate_insn_tick (processed, pro, budget - cost))
4842 return false;
4843 }
4844 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4845 t = INSN_TICK_ESTIMATE (pro) + cost;
4846 if (earliest == INVALID_TICK || t > earliest)
4847 earliest = t;
4848 }
4849 }
4850 bitmap_set_bit (processed, INSN_LUID (insn));
4851 INSN_TICK_ESTIMATE (insn) = earliest;
4852 return true;
4853 }
4854
4855 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4856 infinite resources) the cycle in which the delayed shadow can be issued.
4857 Return the number of cycles that must pass before the real insn can be
4858 issued in order to meet this constraint. */
4859 static int
4860 estimate_shadow_tick (struct delay_pair *p)
4861 {
4862 bitmap_head processed;
4863 int t;
4864 bool cutoff;
4865 bitmap_initialize (&processed, 0);
4866
4867 cutoff = !estimate_insn_tick (&processed, p->i2,
4868 max_insn_queue_index + pair_delay (p));
4869 bitmap_clear (&processed);
4870 if (cutoff)
4871 return max_insn_queue_index;
4872 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4873 if (t > 0)
4874 return t;
4875 return 0;
4876 }
4877
4878 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4879 recursively resolve all its forward dependencies. */
4880 static void
4881 resolve_dependencies (rtx_insn *insn)
4882 {
4883 sd_iterator_def sd_it;
4884 dep_t dep;
4885
4886 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4887 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4888 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4889 return;
4890
4891 if (sched_verbose >= 4)
4892 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4893
4894 if (QUEUE_INDEX (insn) >= 0)
4895 queue_remove (insn);
4896
4897 scheduled_insns.safe_push (insn);
4898
4899 /* Update dependent instructions. */
4900 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4901 sd_iterator_cond (&sd_it, &dep);)
4902 {
4903 rtx_insn *next = DEP_CON (dep);
4904
4905 if (sched_verbose >= 4)
4906 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4907 INSN_UID (next));
4908
4909 /* Resolve the dependence between INSN and NEXT.
4910 sd_resolve_dep () moves current dep to another list thus
4911 advancing the iterator. */
4912 sd_resolve_dep (sd_it);
4913
4914 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4915 {
4916 resolve_dependencies (next);
4917 }
4918 else
4919 /* Check always has only one forward dependence (to the first insn in
4920 the recovery block), therefore, this will be executed only once. */
4921 {
4922 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4923 }
4924 }
4925 }
4926
4927
4928 /* Return the head and tail pointers of ebb starting at BEG and ending
4929 at END. */
4930 void
4931 get_ebb_head_tail (basic_block beg, basic_block end,
4932 rtx_insn **headp, rtx_insn **tailp)
4933 {
4934 rtx_insn *beg_head = BB_HEAD (beg);
4935 rtx_insn * beg_tail = BB_END (beg);
4936 rtx_insn * end_head = BB_HEAD (end);
4937 rtx_insn * end_tail = BB_END (end);
4938
4939 /* Don't include any notes or labels at the beginning of the BEG
4940 basic block, or notes at the end of the END basic blocks. */
4941
4942 if (LABEL_P (beg_head))
4943 beg_head = NEXT_INSN (beg_head);
4944
4945 while (beg_head != beg_tail)
4946 if (NOTE_P (beg_head))
4947 beg_head = NEXT_INSN (beg_head);
4948 else if (DEBUG_INSN_P (beg_head))
4949 {
4950 rtx_insn * note, *next;
4951
4952 for (note = NEXT_INSN (beg_head);
4953 note != beg_tail;
4954 note = next)
4955 {
4956 next = NEXT_INSN (note);
4957 if (NOTE_P (note))
4958 {
4959 if (sched_verbose >= 9)
4960 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4961
4962 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4963
4964 if (BLOCK_FOR_INSN (note) != beg)
4965 df_insn_change_bb (note, beg);
4966 }
4967 else if (!DEBUG_INSN_P (note))
4968 break;
4969 }
4970
4971 break;
4972 }
4973 else
4974 break;
4975
4976 *headp = beg_head;
4977
4978 if (beg == end)
4979 end_head = beg_head;
4980 else if (LABEL_P (end_head))
4981 end_head = NEXT_INSN (end_head);
4982
4983 while (end_head != end_tail)
4984 if (NOTE_P (end_tail))
4985 end_tail = PREV_INSN (end_tail);
4986 else if (DEBUG_INSN_P (end_tail))
4987 {
4988 rtx_insn * note, *prev;
4989
4990 for (note = PREV_INSN (end_tail);
4991 note != end_head;
4992 note = prev)
4993 {
4994 prev = PREV_INSN (note);
4995 if (NOTE_P (note))
4996 {
4997 if (sched_verbose >= 9)
4998 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4999
5000 reorder_insns_nobb (note, note, end_tail);
5001
5002 if (end_tail == BB_END (end))
5003 BB_END (end) = note;
5004
5005 if (BLOCK_FOR_INSN (note) != end)
5006 df_insn_change_bb (note, end);
5007 }
5008 else if (!DEBUG_INSN_P (note))
5009 break;
5010 }
5011
5012 break;
5013 }
5014 else
5015 break;
5016
5017 *tailp = end_tail;
5018 }
5019
5020 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
5021
5022 int
5023 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5024 {
5025 while (head != NEXT_INSN (tail))
5026 {
5027 if (!NOTE_P (head) && !LABEL_P (head))
5028 return 0;
5029 head = NEXT_INSN (head);
5030 }
5031 return 1;
5032 }
5033
5034 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5035 previously found among the insns. Insert them just before HEAD. */
5036 rtx_insn *
5037 restore_other_notes (rtx_insn *head, basic_block head_bb)
5038 {
5039 if (note_list != 0)
5040 {
5041 rtx_insn *note_head = note_list;
5042
5043 if (head)
5044 head_bb = BLOCK_FOR_INSN (head);
5045 else
5046 head = NEXT_INSN (bb_note (head_bb));
5047
5048 while (PREV_INSN (note_head))
5049 {
5050 set_block_for_insn (note_head, head_bb);
5051 note_head = PREV_INSN (note_head);
5052 }
5053 /* In the above cycle we've missed this note. */
5054 set_block_for_insn (note_head, head_bb);
5055
5056 SET_PREV_INSN (note_head) = PREV_INSN (head);
5057 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5058 SET_PREV_INSN (head) = note_list;
5059 SET_NEXT_INSN (note_list) = head;
5060
5061 if (BLOCK_FOR_INSN (head) != head_bb)
5062 BB_END (head_bb) = note_list;
5063
5064 head = note_head;
5065 }
5066
5067 return head;
5068 }
5069
5070 /* When we know we are going to discard the schedule due to a failed attempt
5071 at modulo scheduling, undo all replacements. */
5072 static void
5073 undo_all_replacements (void)
5074 {
5075 rtx_insn *insn;
5076 int i;
5077
5078 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5079 {
5080 sd_iterator_def sd_it;
5081 dep_t dep;
5082
5083 /* See if we must undo a replacement. */
5084 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5085 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5086 {
5087 struct dep_replacement *desc = DEP_REPLACE (dep);
5088 if (desc != NULL)
5089 validate_change (desc->insn, desc->loc, desc->orig, 0);
5090 }
5091 }
5092 }
5093
5094 /* Return first non-scheduled insn in the current scheduling block.
5095 This is mostly used for debug-counter purposes. */
5096 static rtx_insn *
5097 first_nonscheduled_insn (void)
5098 {
5099 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5100 ? nonscheduled_insns_begin
5101 : current_sched_info->prev_head);
5102
5103 do
5104 {
5105 insn = next_nonnote_nondebug_insn (insn);
5106 }
5107 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5108
5109 return insn;
5110 }
5111
5112 /* Move insns that became ready to fire from queue to ready list. */
5113
5114 static void
5115 queue_to_ready (struct ready_list *ready)
5116 {
5117 rtx_insn *insn;
5118 rtx_insn_list *link;
5119 rtx_insn *skip_insn;
5120
5121 q_ptr = NEXT_Q (q_ptr);
5122
5123 if (dbg_cnt (sched_insn) == false)
5124 /* If debug counter is activated do not requeue the first
5125 nonscheduled insn. */
5126 skip_insn = first_nonscheduled_insn ();
5127 else
5128 skip_insn = NULL;
5129
5130 /* Add all pending insns that can be scheduled without stalls to the
5131 ready list. */
5132 for (link = insn_queue[q_ptr]; link; link = link->next ())
5133 {
5134 insn = link->insn ();
5135 q_size -= 1;
5136
5137 if (sched_verbose >= 2)
5138 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5139 (*current_sched_info->print_insn) (insn, 0));
5140
5141 /* If the ready list is full, delay the insn for 1 cycle.
5142 See the comment in schedule_block for the rationale. */
5143 if (!reload_completed
5144 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5145 || (sched_pressure == SCHED_PRESSURE_MODEL
5146 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5147 instructions too. */
5148 && model_index (insn) > (model_curr_point
5149 + MAX_SCHED_READY_INSNS)))
5150 && !(sched_pressure == SCHED_PRESSURE_MODEL
5151 && model_curr_point < model_num_insns
5152 /* Always allow the next model instruction to issue. */
5153 && model_index (insn) == model_curr_point)
5154 && !SCHED_GROUP_P (insn)
5155 && insn != skip_insn)
5156 {
5157 if (sched_verbose >= 2)
5158 fprintf (sched_dump, "keeping in queue, ready full\n");
5159 queue_insn (insn, 1, "ready full");
5160 }
5161 else
5162 {
5163 ready_add (ready, insn, false);
5164 if (sched_verbose >= 2)
5165 fprintf (sched_dump, "moving to ready without stalls\n");
5166 }
5167 }
5168 free_INSN_LIST_list (&insn_queue[q_ptr]);
5169
5170 /* If there are no ready insns, stall until one is ready and add all
5171 of the pending insns at that point to the ready list. */
5172 if (ready->n_ready == 0)
5173 {
5174 int stalls;
5175
5176 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5177 {
5178 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5179 {
5180 for (; link; link = link->next ())
5181 {
5182 insn = link->insn ();
5183 q_size -= 1;
5184
5185 if (sched_verbose >= 2)
5186 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5187 (*current_sched_info->print_insn) (insn, 0));
5188
5189 ready_add (ready, insn, false);
5190 if (sched_verbose >= 2)
5191 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5192 }
5193 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5194
5195 advance_one_cycle ();
5196
5197 break;
5198 }
5199
5200 advance_one_cycle ();
5201 }
5202
5203 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5204 clock_var += stalls;
5205 if (sched_verbose >= 2)
5206 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5207 stalls, clock_var);
5208 }
5209 }
5210
5211 /* Used by early_queue_to_ready. Determines whether it is "ok" to
5212 prematurely move INSN from the queue to the ready list. Currently,
5213 if a target defines the hook 'is_costly_dependence', this function
5214 uses the hook to check whether there exist any dependences which are
5215 considered costly by the target, between INSN and other insns that
5216 have already been scheduled. Dependences are checked up to Y cycles
5217 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
5218 controlling this value.
5219 (Other considerations could be taken into account instead (or in
5220 addition) depending on user flags and target hooks. */
5221
5222 static bool
5223 ok_for_early_queue_removal (rtx_insn *insn)
5224 {
5225 if (targetm.sched.is_costly_dependence)
5226 {
5227 int n_cycles;
5228 int i = scheduled_insns.length ();
5229 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5230 {
5231 while (i-- > 0)
5232 {
5233 int cost;
5234
5235 rtx_insn *prev_insn = scheduled_insns[i];
5236
5237 if (!NOTE_P (prev_insn))
5238 {
5239 dep_t dep;
5240
5241 dep = sd_find_dep_between (prev_insn, insn, true);
5242
5243 if (dep != NULL)
5244 {
5245 cost = dep_cost (dep);
5246
5247 if (targetm.sched.is_costly_dependence (dep, cost,
5248 flag_sched_stalled_insns_dep - n_cycles))
5249 return false;
5250 }
5251 }
5252
5253 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5254 break;
5255 }
5256
5257 if (i == 0)
5258 break;
5259 }
5260 }
5261
5262 return true;
5263 }
5264
5265
5266 /* Remove insns from the queue, before they become "ready" with respect
5267 to FU latency considerations. */
5268
5269 static int
5270 early_queue_to_ready (state_t state, struct ready_list *ready)
5271 {
5272 rtx_insn *insn;
5273 rtx_insn_list *link;
5274 rtx_insn_list *next_link;
5275 rtx_insn_list *prev_link;
5276 bool move_to_ready;
5277 int cost;
5278 state_t temp_state = alloca (dfa_state_size);
5279 int stalls;
5280 int insns_removed = 0;
5281
5282 /*
5283 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5284 function:
5285
5286 X == 0: There is no limit on how many queued insns can be removed
5287 prematurely. (flag_sched_stalled_insns = -1).
5288
5289 X >= 1: Only X queued insns can be removed prematurely in each
5290 invocation. (flag_sched_stalled_insns = X).
5291
5292 Otherwise: Early queue removal is disabled.
5293 (flag_sched_stalled_insns = 0)
5294 */
5295
5296 if (! flag_sched_stalled_insns)
5297 return 0;
5298
5299 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5300 {
5301 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5302 {
5303 if (sched_verbose > 6)
5304 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5305
5306 prev_link = 0;
5307 while (link)
5308 {
5309 next_link = link->next ();
5310 insn = link->insn ();
5311 if (insn && sched_verbose > 6)
5312 print_rtl_single (sched_dump, insn);
5313
5314 memcpy (temp_state, state, dfa_state_size);
5315 if (recog_memoized (insn) < 0)
5316 /* non-negative to indicate that it's not ready
5317 to avoid infinite Q->R->Q->R... */
5318 cost = 0;
5319 else
5320 cost = state_transition (temp_state, insn);
5321
5322 if (sched_verbose >= 6)
5323 fprintf (sched_dump, "transition cost = %d\n", cost);
5324
5325 move_to_ready = false;
5326 if (cost < 0)
5327 {
5328 move_to_ready = ok_for_early_queue_removal (insn);
5329 if (move_to_ready == true)
5330 {
5331 /* move from Q to R */
5332 q_size -= 1;
5333 ready_add (ready, insn, false);
5334
5335 if (prev_link)
5336 XEXP (prev_link, 1) = next_link;
5337 else
5338 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5339
5340 free_INSN_LIST_node (link);
5341
5342 if (sched_verbose >= 2)
5343 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5344 (*current_sched_info->print_insn) (insn, 0));
5345
5346 insns_removed++;
5347 if (insns_removed == flag_sched_stalled_insns)
5348 /* Remove no more than flag_sched_stalled_insns insns
5349 from Q at a time. */
5350 return insns_removed;
5351 }
5352 }
5353
5354 if (move_to_ready == false)
5355 prev_link = link;
5356
5357 link = next_link;
5358 } /* while link */
5359 } /* if link */
5360
5361 } /* for stalls.. */
5362
5363 return insns_removed;
5364 }
5365
5366
5367 /* Print the ready list for debugging purposes.
5368 If READY_TRY is non-zero then only print insns that max_issue
5369 will consider. */
5370 static void
5371 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5372 {
5373 rtx_insn **p;
5374 int i;
5375
5376 if (ready->n_ready == 0)
5377 {
5378 fprintf (sched_dump, "\n");
5379 return;
5380 }
5381
5382 p = ready_lastpos (ready);
5383 for (i = 0; i < ready->n_ready; i++)
5384 {
5385 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5386 continue;
5387
5388 fprintf (sched_dump, " %s:%d",
5389 (*current_sched_info->print_insn) (p[i], 0),
5390 INSN_LUID (p[i]));
5391 if (sched_pressure != SCHED_PRESSURE_NONE)
5392 fprintf (sched_dump, "(cost=%d",
5393 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5394 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5395 if (INSN_TICK (p[i]) > clock_var)
5396 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5397 if (sched_pressure == SCHED_PRESSURE_MODEL)
5398 fprintf (sched_dump, ":idx=%d",
5399 model_index (p[i]));
5400 if (sched_pressure != SCHED_PRESSURE_NONE)
5401 fprintf (sched_dump, ")");
5402 }
5403 fprintf (sched_dump, "\n");
5404 }
5405
5406 /* Print the ready list. Callable from debugger. */
5407 static void
5408 debug_ready_list (struct ready_list *ready)
5409 {
5410 debug_ready_list_1 (ready, NULL);
5411 }
5412
5413 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5414 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5415 replaces the epilogue note in the correct basic block. */
5416 void
5417 reemit_notes (rtx_insn *insn)
5418 {
5419 rtx note;
5420 rtx_insn *last = insn;
5421
5422 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5423 {
5424 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5425 {
5426 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5427
5428 last = emit_note_before (note_type, last);
5429 remove_note (insn, note);
5430 }
5431 }
5432 }
5433
5434 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5435 static void
5436 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5437 {
5438 if (PREV_INSN (insn) != last)
5439 {
5440 basic_block bb;
5441 rtx_insn *note;
5442 int jump_p = 0;
5443
5444 bb = BLOCK_FOR_INSN (insn);
5445
5446 /* BB_HEAD is either LABEL or NOTE. */
5447 gcc_assert (BB_HEAD (bb) != insn);
5448
5449 if (BB_END (bb) == insn)
5450 /* If this is last instruction in BB, move end marker one
5451 instruction up. */
5452 {
5453 /* Jumps are always placed at the end of basic block. */
5454 jump_p = control_flow_insn_p (insn);
5455
5456 gcc_assert (!jump_p
5457 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5458 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5459 || (common_sched_info->sched_pass_id
5460 == SCHED_EBB_PASS));
5461
5462 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5463
5464 BB_END (bb) = PREV_INSN (insn);
5465 }
5466
5467 gcc_assert (BB_END (bb) != last);
5468
5469 if (jump_p)
5470 /* We move the block note along with jump. */
5471 {
5472 gcc_assert (nt);
5473
5474 note = NEXT_INSN (insn);
5475 while (NOTE_NOT_BB_P (note) && note != nt)
5476 note = NEXT_INSN (note);
5477
5478 if (note != nt
5479 && (LABEL_P (note)
5480 || BARRIER_P (note)))
5481 note = NEXT_INSN (note);
5482
5483 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5484 }
5485 else
5486 note = insn;
5487
5488 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5489 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5490
5491 SET_NEXT_INSN (note) = NEXT_INSN (last);
5492 SET_PREV_INSN (NEXT_INSN (last)) = note;
5493
5494 SET_NEXT_INSN (last) = insn;
5495 SET_PREV_INSN (insn) = last;
5496
5497 bb = BLOCK_FOR_INSN (last);
5498
5499 if (jump_p)
5500 {
5501 fix_jump_move (insn);
5502
5503 if (BLOCK_FOR_INSN (insn) != bb)
5504 move_block_after_check (insn);
5505
5506 gcc_assert (BB_END (bb) == last);
5507 }
5508
5509 df_insn_change_bb (insn, bb);
5510
5511 /* Update BB_END, if needed. */
5512 if (BB_END (bb) == last)
5513 BB_END (bb) = insn;
5514 }
5515
5516 SCHED_GROUP_P (insn) = 0;
5517 }
5518
5519 /* Return true if scheduling INSN will finish current clock cycle. */
5520 static bool
5521 insn_finishes_cycle_p (rtx_insn *insn)
5522 {
5523 if (SCHED_GROUP_P (insn))
5524 /* After issuing INSN, rest of the sched_group will be forced to issue
5525 in order. Don't make any plans for the rest of cycle. */
5526 return true;
5527
5528 /* Finishing the block will, apparently, finish the cycle. */
5529 if (current_sched_info->insn_finishes_block_p
5530 && current_sched_info->insn_finishes_block_p (insn))
5531 return true;
5532
5533 return false;
5534 }
5535
5536 /* Functions to model cache auto-prefetcher.
5537
5538 Some of the CPUs have cache auto-prefetcher, which /seems/ to initiate
5539 memory prefetches if it sees instructions with consequitive memory accesses
5540 in the instruction stream. Details of such hardware units are not published,
5541 so we can only guess what exactly is going on there.
5542 In the scheduler, we model abstract auto-prefetcher. If there are memory
5543 insns in the ready list (or the queue) that have same memory base, but
5544 different offsets, then we delay the insns with larger offsets until insns
5545 with smaller offsets get scheduled. If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
5546 is "1", then we look at the ready list; if it is N>1, then we also look
5547 through N-1 queue entries.
5548 If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5549 among its heuristics.
5550 Param value of "-1" disables modelling of the auto-prefetcher. */
5551
5552 /* Initialize autoprefetcher model data for INSN. */
5553 static void
5554 autopref_multipass_init (const rtx_insn *insn, int write)
5555 {
5556 autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5557
5558 gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5559 data->base = NULL_RTX;
5560 data->offset = 0;
5561 /* Set insn entry initialized, but not relevant for auto-prefetcher. */
5562 data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5563
5564 rtx set = single_set (insn);
5565 if (set == NULL_RTX)
5566 return;
5567
5568 rtx mem = write ? SET_DEST (set) : SET_SRC (set);
5569 if (!MEM_P (mem))
5570 return;
5571
5572 struct address_info info;
5573 decompose_mem_address (&info, mem);
5574
5575 /* TODO: Currently only (base+const) addressing is supported. */
5576 if (info.base == NULL || !REG_P (*info.base)
5577 || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5578 return;
5579
5580 /* This insn is relevant for auto-prefetcher. */
5581 data->base = *info.base;
5582 data->offset = info.disp ? INTVAL (*info.disp) : 0;
5583 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5584 }
5585
5586 /* Helper function for rank_for_schedule sorting. */
5587 static int
5588 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5589 {
5590 for (int write = 0; write < 2; ++write)
5591 {
5592 autopref_multipass_data_t data1
5593 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5594 autopref_multipass_data_t data2
5595 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5596
5597 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5598 autopref_multipass_init (insn1, write);
5599 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5600 continue;
5601
5602 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5603 autopref_multipass_init (insn2, write);
5604 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5605 continue;
5606
5607 if (!rtx_equal_p (data1->base, data2->base))
5608 continue;
5609
5610 return data1->offset - data2->offset;
5611 }
5612
5613 return 0;
5614 }
5615
5616 /* True if header of debug dump was printed. */
5617 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5618
5619 /* Helper for autopref_multipass_dfa_lookahead_guard.
5620 Return "1" if INSN1 should be delayed in favor of INSN2. */
5621 static int
5622 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5623 const rtx_insn *insn2, int write)
5624 {
5625 autopref_multipass_data_t data1
5626 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5627 autopref_multipass_data_t data2
5628 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5629
5630 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5631 autopref_multipass_init (insn2, write);
5632 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5633 return 0;
5634
5635 if (rtx_equal_p (data1->base, data2->base)
5636 && data1->offset > data2->offset)
5637 {
5638 if (sched_verbose >= 2)
5639 {
5640 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5641 {
5642 fprintf (sched_dump,
5643 ";;\t\tnot trying in max_issue due to autoprefetch "
5644 "model: ");
5645 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5646 }
5647
5648 fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5649 }
5650
5651 return 1;
5652 }
5653
5654 return 0;
5655 }
5656
5657 /* General note:
5658
5659 We could have also hooked autoprefetcher model into
5660 first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5661 to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5662 (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5663 unblocked). We don't bother about this yet because target of interest
5664 (ARM Cortex-A15) can issue only 1 memory operation per cycle. */
5665
5666 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5667 Return "1" if INSN1 should not be considered in max_issue due to
5668 auto-prefetcher considerations. */
5669 int
5670 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5671 {
5672 int r = 0;
5673
5674 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
5675 return 0;
5676
5677 if (sched_verbose >= 2 && ready_index == 0)
5678 autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5679
5680 for (int write = 0; write < 2; ++write)
5681 {
5682 autopref_multipass_data_t data1
5683 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5684
5685 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5686 autopref_multipass_init (insn1, write);
5687 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5688 continue;
5689
5690 if (ready_index == 0
5691 && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5692 /* We allow only a single delay on priviledged instructions.
5693 Doing otherwise would cause infinite loop. */
5694 {
5695 if (sched_verbose >= 2)
5696 {
5697 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5698 {
5699 fprintf (sched_dump,
5700 ";;\t\tnot trying in max_issue due to autoprefetch "
5701 "model: ");
5702 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5703 }
5704
5705 fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5706 }
5707 continue;
5708 }
5709
5710 for (int i2 = 0; i2 < ready.n_ready; ++i2)
5711 {
5712 rtx_insn *insn2 = get_ready_element (i2);
5713 if (insn1 == insn2)
5714 continue;
5715 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5716 if (r)
5717 {
5718 if (ready_index == 0)
5719 {
5720 r = -1;
5721 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5722 }
5723 goto finish;
5724 }
5725 }
5726
5727 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
5728 continue;
5729
5730 /* Everything from the current queue slot should have been moved to
5731 the ready list. */
5732 gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5733
5734 int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
5735 if (n_stalls > max_insn_queue_index)
5736 n_stalls = max_insn_queue_index;
5737
5738 for (int stalls = 1; stalls <= n_stalls; ++stalls)
5739 {
5740 for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5741 link != NULL_RTX;
5742 link = link->next ())
5743 {
5744 rtx_insn *insn2 = link->insn ();
5745 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5746 write);
5747 if (r)
5748 {
5749 /* Queue INSN1 until INSN2 can issue. */
5750 r = -stalls;
5751 if (ready_index == 0)
5752 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5753 goto finish;
5754 }
5755 }
5756 }
5757 }
5758
5759 finish:
5760 if (sched_verbose >= 2
5761 && autopref_multipass_dfa_lookahead_guard_started_dump_p
5762 && (ready_index == ready.n_ready - 1 || r < 0))
5763 /* This does not /always/ trigger. We don't output EOL if the last
5764 insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5765 called. We can live with this. */
5766 fprintf (sched_dump, "\n");
5767
5768 return r;
5769 }
5770
5771 /* Define type for target data used in multipass scheduling. */
5772 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5773 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5774 #endif
5775 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5776
5777 /* The following structure describe an entry of the stack of choices. */
5778 struct choice_entry
5779 {
5780 /* Ordinal number of the issued insn in the ready queue. */
5781 int index;
5782 /* The number of the rest insns whose issues we should try. */
5783 int rest;
5784 /* The number of issued essential insns. */
5785 int n;
5786 /* State after issuing the insn. */
5787 state_t state;
5788 /* Target-specific data. */
5789 first_cycle_multipass_data_t target_data;
5790 };
5791
5792 /* The following array is used to implement a stack of choices used in
5793 function max_issue. */
5794 static struct choice_entry *choice_stack;
5795
5796 /* This holds the value of the target dfa_lookahead hook. */
5797 int dfa_lookahead;
5798
5799 /* The following variable value is maximal number of tries of issuing
5800 insns for the first cycle multipass insn scheduling. We define
5801 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
5802 need this constraint if all real insns (with non-negative codes)
5803 had reservations because in this case the algorithm complexity is
5804 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
5805 might be incomplete and such insn might occur. For such
5806 descriptions, the complexity of algorithm (without the constraint)
5807 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
5808 static int max_lookahead_tries;
5809
5810 /* The following function returns maximal (or close to maximal) number
5811 of insns which can be issued on the same cycle and one of which
5812 insns is insns with the best rank (the first insn in READY). To
5813 make this function tries different samples of ready insns. READY
5814 is current queue `ready'. Global array READY_TRY reflects what
5815 insns are already issued in this try. The function stops immediately,
5816 if it reached the such a solution, that all instruction can be issued.
5817 INDEX will contain index of the best insn in READY. The following
5818 function is used only for first cycle multipass scheduling.
5819
5820 PRIVILEGED_N >= 0
5821
5822 This function expects recognized insns only. All USEs,
5823 CLOBBERs, etc must be filtered elsewhere. */
5824 int
5825 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5826 bool first_cycle_insn_p, int *index)
5827 {
5828 int n, i, all, n_ready, best, delay, tries_num;
5829 int more_issue;
5830 struct choice_entry *top;
5831 rtx_insn *insn;
5832
5833 if (sched_fusion)
5834 return 0;
5835
5836 n_ready = ready->n_ready;
5837 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5838 && privileged_n <= n_ready);
5839
5840 /* Init MAX_LOOKAHEAD_TRIES. */
5841 if (max_lookahead_tries == 0)
5842 {
5843 max_lookahead_tries = 100;
5844 for (i = 0; i < issue_rate; i++)
5845 max_lookahead_tries *= dfa_lookahead;
5846 }
5847
5848 /* Init max_points. */
5849 more_issue = issue_rate - cycle_issued_insns;
5850 gcc_assert (more_issue >= 0);
5851
5852 /* The number of the issued insns in the best solution. */
5853 best = 0;
5854
5855 top = choice_stack;
5856
5857 /* Set initial state of the search. */
5858 memcpy (top->state, state, dfa_state_size);
5859 top->rest = dfa_lookahead;
5860 top->n = 0;
5861 if (targetm.sched.first_cycle_multipass_begin)
5862 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5863 ready_try, n_ready,
5864 first_cycle_insn_p);
5865
5866 /* Count the number of the insns to search among. */
5867 for (all = i = 0; i < n_ready; i++)
5868 if (!ready_try [i])
5869 all++;
5870
5871 if (sched_verbose >= 2)
5872 {
5873 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5874 debug_ready_list_1 (ready, ready_try);
5875 }
5876
5877 /* I is the index of the insn to try next. */
5878 i = 0;
5879 tries_num = 0;
5880 for (;;)
5881 {
5882 if (/* If we've reached a dead end or searched enough of what we have
5883 been asked... */
5884 top->rest == 0
5885 /* or have nothing else to try... */
5886 || i >= n_ready
5887 /* or should not issue more. */
5888 || top->n >= more_issue)
5889 {
5890 /* ??? (... || i == n_ready). */
5891 gcc_assert (i <= n_ready);
5892
5893 /* We should not issue more than issue_rate instructions. */
5894 gcc_assert (top->n <= more_issue);
5895
5896 if (top == choice_stack)
5897 break;
5898
5899 if (best < top - choice_stack)
5900 {
5901 if (privileged_n)
5902 {
5903 n = privileged_n;
5904 /* Try to find issued privileged insn. */
5905 while (n && !ready_try[--n])
5906 ;
5907 }
5908
5909 if (/* If all insns are equally good... */
5910 privileged_n == 0
5911 /* Or a privileged insn will be issued. */
5912 || ready_try[n])
5913 /* Then we have a solution. */
5914 {
5915 best = top - choice_stack;
5916 /* This is the index of the insn issued first in this
5917 solution. */
5918 *index = choice_stack [1].index;
5919 if (top->n == more_issue || best == all)
5920 break;
5921 }
5922 }
5923
5924 /* Set ready-list index to point to the last insn
5925 ('i++' below will advance it to the next insn). */
5926 i = top->index;
5927
5928 /* Backtrack. */
5929 ready_try [i] = 0;
5930
5931 if (targetm.sched.first_cycle_multipass_backtrack)
5932 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5933 ready_try, n_ready);
5934
5935 top--;
5936 memcpy (state, top->state, dfa_state_size);
5937 }
5938 else if (!ready_try [i])
5939 {
5940 tries_num++;
5941 if (tries_num > max_lookahead_tries)
5942 break;
5943 insn = ready_element (ready, i);
5944 delay = state_transition (state, insn);
5945 if (delay < 0)
5946 {
5947 if (state_dead_lock_p (state)
5948 || insn_finishes_cycle_p (insn))
5949 /* We won't issue any more instructions in the next
5950 choice_state. */
5951 top->rest = 0;
5952 else
5953 top->rest--;
5954
5955 n = top->n;
5956 if (memcmp (top->state, state, dfa_state_size) != 0)
5957 n++;
5958
5959 /* Advance to the next choice_entry. */
5960 top++;
5961 /* Initialize it. */
5962 top->rest = dfa_lookahead;
5963 top->index = i;
5964 top->n = n;
5965 memcpy (top->state, state, dfa_state_size);
5966 ready_try [i] = 1;
5967
5968 if (targetm.sched.first_cycle_multipass_issue)
5969 targetm.sched.first_cycle_multipass_issue (&top->target_data,
5970 ready_try, n_ready,
5971 insn,
5972 &((top - 1)
5973 ->target_data));
5974
5975 i = -1;
5976 }
5977 }
5978
5979 /* Increase ready-list index. */
5980 i++;
5981 }
5982
5983 if (targetm.sched.first_cycle_multipass_end)
5984 targetm.sched.first_cycle_multipass_end (best != 0
5985 ? &choice_stack[1].target_data
5986 : NULL);
5987
5988 /* Restore the original state of the DFA. */
5989 memcpy (state, choice_stack->state, dfa_state_size);
5990
5991 return best;
5992 }
5993
5994 /* The following function chooses insn from READY and modifies
5995 READY. The following function is used only for first
5996 cycle multipass scheduling.
5997 Return:
5998 -1 if cycle should be advanced,
5999 0 if INSN_PTR is set to point to the desirable insn,
6000 1 if choose_ready () should be restarted without advancing the cycle. */
6001 static int
6002 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6003 rtx_insn **insn_ptr)
6004 {
6005 if (dbg_cnt (sched_insn) == false)
6006 {
6007 if (nonscheduled_insns_begin == NULL_RTX)
6008 nonscheduled_insns_begin = current_sched_info->prev_head;
6009
6010 rtx_insn *insn = first_nonscheduled_insn ();
6011
6012 if (QUEUE_INDEX (insn) == QUEUE_READY)
6013 /* INSN is in the ready_list. */
6014 {
6015 ready_remove_insn (insn);
6016 *insn_ptr = insn;
6017 return 0;
6018 }
6019
6020 /* INSN is in the queue. Advance cycle to move it to the ready list. */
6021 gcc_assert (QUEUE_INDEX (insn) >= 0);
6022 return -1;
6023 }
6024
6025 if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6026 || DEBUG_INSN_P (ready_element (ready, 0)))
6027 {
6028 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6029 *insn_ptr = ready_remove_first_dispatch (ready);
6030 else
6031 *insn_ptr = ready_remove_first (ready);
6032
6033 return 0;
6034 }
6035 else
6036 {
6037 /* Try to choose the best insn. */
6038 int index = 0, i;
6039 rtx_insn *insn;
6040
6041 insn = ready_element (ready, 0);
6042 if (INSN_CODE (insn) < 0)
6043 {
6044 *insn_ptr = ready_remove_first (ready);
6045 return 0;
6046 }
6047
6048 /* Filter the search space. */
6049 for (i = 0; i < ready->n_ready; i++)
6050 {
6051 ready_try[i] = 0;
6052
6053 insn = ready_element (ready, i);
6054
6055 /* If this insn is recognizable we should have already
6056 recognized it earlier.
6057 ??? Not very clear where this is supposed to be done.
6058 See dep_cost_1. */
6059 gcc_checking_assert (INSN_CODE (insn) >= 0
6060 || recog_memoized (insn) < 0);
6061 if (INSN_CODE (insn) < 0)
6062 {
6063 /* Non-recognized insns at position 0 are handled above. */
6064 gcc_assert (i > 0);
6065 ready_try[i] = 1;
6066 continue;
6067 }
6068
6069 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6070 {
6071 ready_try[i]
6072 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6073 (insn, i));
6074
6075 if (ready_try[i] < 0)
6076 /* Queue instruction for several cycles.
6077 We need to restart choose_ready as we have changed
6078 the ready list. */
6079 {
6080 change_queue_index (insn, -ready_try[i]);
6081 return 1;
6082 }
6083
6084 /* Make sure that we didn't end up with 0'th insn filtered out.
6085 Don't be tempted to make life easier for backends and just
6086 requeue 0'th insn if (ready_try[0] == 0) and restart
6087 choose_ready. Backends should be very considerate about
6088 requeueing instructions -- especially the highest priority
6089 one at position 0. */
6090 gcc_assert (ready_try[i] == 0 || i > 0);
6091 if (ready_try[i])
6092 continue;
6093 }
6094
6095 gcc_assert (ready_try[i] == 0);
6096 /* INSN made it through the scrutiny of filters! */
6097 }
6098
6099 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6100 {
6101 *insn_ptr = ready_remove_first (ready);
6102 if (sched_verbose >= 4)
6103 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6104 (*current_sched_info->print_insn) (*insn_ptr, 0));
6105 return 0;
6106 }
6107 else
6108 {
6109 if (sched_verbose >= 4)
6110 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6111 (*current_sched_info->print_insn)
6112 (ready_element (ready, index), 0));
6113
6114 *insn_ptr = ready_remove (ready, index);
6115 return 0;
6116 }
6117 }
6118 }
6119
6120 /* This function is called when we have successfully scheduled a
6121 block. It uses the schedule stored in the scheduled_insns vector
6122 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
6123 append the scheduled insns; TAIL is the insn after the scheduled
6124 block. TARGET_BB is the argument passed to schedule_block. */
6125
6126 static void
6127 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6128 {
6129 unsigned int i;
6130 rtx_insn *insn;
6131
6132 last_scheduled_insn = prev_head;
6133 for (i = 0;
6134 scheduled_insns.iterate (i, &insn);
6135 i++)
6136 {
6137 if (control_flow_insn_p (last_scheduled_insn)
6138 || current_sched_info->advance_target_bb (*target_bb, insn))
6139 {
6140 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6141
6142 if (sched_verbose)
6143 {
6144 rtx_insn *x;
6145
6146 x = next_real_insn (last_scheduled_insn);
6147 gcc_assert (x);
6148 dump_new_block_header (1, *target_bb, x, tail);
6149 }
6150
6151 last_scheduled_insn = bb_note (*target_bb);
6152 }
6153
6154 if (current_sched_info->begin_move_insn)
6155 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6156 move_insn (insn, last_scheduled_insn,
6157 current_sched_info->next_tail);
6158 if (!DEBUG_INSN_P (insn))
6159 reemit_notes (insn);
6160 last_scheduled_insn = insn;
6161 }
6162
6163 scheduled_insns.truncate (0);
6164 }
6165
6166 /* Examine all insns on the ready list and queue those which can't be
6167 issued in this cycle. TEMP_STATE is temporary scheduler state we
6168 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
6169 have been issued for the current cycle, which means it is valid to
6170 issue an asm statement.
6171
6172 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6173 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
6174 we only leave insns which have an INSN_EXACT_TICK. */
6175
6176 static void
6177 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6178 bool shadows_only_p, bool modulo_epilogue_p)
6179 {
6180 int i, pass;
6181 bool sched_group_found = false;
6182 int min_cost_group = 1;
6183
6184 if (sched_fusion)
6185 return;
6186
6187 for (i = 0; i < ready.n_ready; i++)
6188 {
6189 rtx_insn *insn = ready_element (&ready, i);
6190 if (SCHED_GROUP_P (insn))
6191 {
6192 sched_group_found = true;
6193 break;
6194 }
6195 }
6196
6197 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6198 such an insn first and note its cost, then schedule all other insns
6199 for one cycle later. */
6200 for (pass = sched_group_found ? 0 : 1; pass < 2; )
6201 {
6202 int n = ready.n_ready;
6203 for (i = 0; i < n; i++)
6204 {
6205 rtx_insn *insn = ready_element (&ready, i);
6206 int cost = 0;
6207 const char *reason = "resource conflict";
6208
6209 if (DEBUG_INSN_P (insn))
6210 continue;
6211
6212 if (sched_group_found && !SCHED_GROUP_P (insn))
6213 {
6214 if (pass == 0)
6215 continue;
6216 cost = min_cost_group;
6217 reason = "not in sched group";
6218 }
6219 else if (modulo_epilogue_p
6220 && INSN_EXACT_TICK (insn) == INVALID_TICK)
6221 {
6222 cost = max_insn_queue_index;
6223 reason = "not an epilogue insn";
6224 }
6225 else if (shadows_only_p && !SHADOW_P (insn))
6226 {
6227 cost = 1;
6228 reason = "not a shadow";
6229 }
6230 else if (recog_memoized (insn) < 0)
6231 {
6232 if (!first_cycle_insn_p
6233 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6234 || asm_noperands (PATTERN (insn)) >= 0))
6235 cost = 1;
6236 reason = "asm";
6237 }
6238 else if (sched_pressure != SCHED_PRESSURE_NONE)
6239 {
6240 if (sched_pressure == SCHED_PRESSURE_MODEL
6241 && INSN_TICK (insn) <= clock_var)
6242 {
6243 memcpy (temp_state, curr_state, dfa_state_size);
6244 if (state_transition (temp_state, insn) >= 0)
6245 INSN_TICK (insn) = clock_var + 1;
6246 }
6247 cost = 0;
6248 }
6249 else
6250 {
6251 int delay_cost = 0;
6252
6253 if (delay_htab)
6254 {
6255 struct delay_pair *delay_entry;
6256 delay_entry
6257 = delay_htab->find_with_hash (insn,
6258 htab_hash_pointer (insn));
6259 while (delay_entry && delay_cost == 0)
6260 {
6261 delay_cost = estimate_shadow_tick (delay_entry);
6262 if (delay_cost > max_insn_queue_index)
6263 delay_cost = max_insn_queue_index;
6264 delay_entry = delay_entry->next_same_i1;
6265 }
6266 }
6267
6268 memcpy (temp_state, curr_state, dfa_state_size);
6269 cost = state_transition (temp_state, insn);
6270 if (cost < 0)
6271 cost = 0;
6272 else if (cost == 0)
6273 cost = 1;
6274 if (cost < delay_cost)
6275 {
6276 cost = delay_cost;
6277 reason = "shadow tick";
6278 }
6279 }
6280 if (cost >= 1)
6281 {
6282 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6283 min_cost_group = cost;
6284 ready_remove (&ready, i);
6285 /* Normally we'd want to queue INSN for COST cycles. However,
6286 if SCHED_GROUP_P is set, then we must ensure that nothing
6287 else comes between INSN and its predecessor. If there is
6288 some other insn ready to fire on the next cycle, then that
6289 invariant would be broken.
6290
6291 So when SCHED_GROUP_P is set, just queue this insn for a
6292 single cycle. */
6293 queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
6294 if (i + 1 < n)
6295 break;
6296 }
6297 }
6298 if (i == n)
6299 pass++;
6300 }
6301 }
6302
6303 /* Called when we detect that the schedule is impossible. We examine the
6304 backtrack queue to find the earliest insn that caused this condition. */
6305
6306 static struct haifa_saved_data *
6307 verify_shadows (void)
6308 {
6309 struct haifa_saved_data *save, *earliest_fail = NULL;
6310 for (save = backtrack_queue; save; save = save->next)
6311 {
6312 int t;
6313 struct delay_pair *pair = save->delay_pair;
6314 rtx_insn *i1 = pair->i1;
6315
6316 for (; pair; pair = pair->next_same_i1)
6317 {
6318 rtx_insn *i2 = pair->i2;
6319
6320 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6321 continue;
6322
6323 t = INSN_TICK (i1) + pair_delay (pair);
6324 if (t < clock_var)
6325 {
6326 if (sched_verbose >= 2)
6327 fprintf (sched_dump,
6328 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6329 ", not ready\n",
6330 INSN_UID (pair->i1), INSN_UID (pair->i2),
6331 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6332 earliest_fail = save;
6333 break;
6334 }
6335 if (QUEUE_INDEX (i2) >= 0)
6336 {
6337 int queued_for = INSN_TICK (i2);
6338
6339 if (t < queued_for)
6340 {
6341 if (sched_verbose >= 2)
6342 fprintf (sched_dump,
6343 ";;\t\tfailed delay requirements for %d/%d"
6344 " (%d->%d), queued too late\n",
6345 INSN_UID (pair->i1), INSN_UID (pair->i2),
6346 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6347 earliest_fail = save;
6348 break;
6349 }
6350 }
6351 }
6352 }
6353
6354 return earliest_fail;
6355 }
6356
6357 /* Print instructions together with useful scheduling information between
6358 HEAD and TAIL (inclusive). */
6359 static void
6360 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6361 {
6362 fprintf (sched_dump, ";;\t| insn | prio |\n");
6363
6364 rtx_insn *next_tail = NEXT_INSN (tail);
6365 for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6366 {
6367 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6368 const char *pattern = (NOTE_P (insn)
6369 ? "note"
6370 : str_pattern_slim (PATTERN (insn)));
6371
6372 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6373 INSN_UID (insn), priority, pattern);
6374
6375 if (sched_verbose >= 4)
6376 {
6377 if (NOTE_P (insn) || recog_memoized (insn) < 0)
6378 fprintf (sched_dump, "nothing");
6379 else
6380 print_reservation (sched_dump, insn);
6381 }
6382 fprintf (sched_dump, "\n");
6383 }
6384 }
6385
6386 /* Use forward list scheduling to rearrange insns of block pointed to by
6387 TARGET_BB, possibly bringing insns from subsequent blocks in the same
6388 region. */
6389
6390 bool
6391 schedule_block (basic_block *target_bb, state_t init_state)
6392 {
6393 int i;
6394 bool success = modulo_ii == 0;
6395 struct sched_block_state ls;
6396 state_t temp_state = NULL; /* It is used for multipass scheduling. */
6397 int sort_p, advance, start_clock_var;
6398
6399 /* Head/tail info for this block. */
6400 rtx_insn *prev_head = current_sched_info->prev_head;
6401 rtx_insn *next_tail = current_sched_info->next_tail;
6402 rtx_insn *head = NEXT_INSN (prev_head);
6403 rtx_insn *tail = PREV_INSN (next_tail);
6404
6405 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6406 && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6407 find_modifiable_mems (head, tail);
6408
6409 /* We used to have code to avoid getting parameters moved from hard
6410 argument registers into pseudos.
6411
6412 However, it was removed when it proved to be of marginal benefit
6413 and caused problems because schedule_block and compute_forward_dependences
6414 had different notions of what the "head" insn was. */
6415
6416 gcc_assert (head != tail || INSN_P (head));
6417
6418 haifa_recovery_bb_recently_added_p = false;
6419
6420 backtrack_queue = NULL;
6421
6422 /* Debug info. */
6423 if (sched_verbose)
6424 {
6425 dump_new_block_header (0, *target_bb, head, tail);
6426
6427 if (sched_verbose >= 2)
6428 {
6429 dump_insn_stream (head, tail);
6430 memset (&rank_for_schedule_stats, 0,
6431 sizeof (rank_for_schedule_stats));
6432 }
6433 }
6434
6435 if (init_state == NULL)
6436 state_reset (curr_state);
6437 else
6438 memcpy (curr_state, init_state, dfa_state_size);
6439
6440 /* Clear the ready list. */
6441 ready.first = ready.veclen - 1;
6442 ready.n_ready = 0;
6443 ready.n_debug = 0;
6444
6445 /* It is used for first cycle multipass scheduling. */
6446 temp_state = alloca (dfa_state_size);
6447
6448 if (targetm.sched.init)
6449 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6450
6451 /* We start inserting insns after PREV_HEAD. */
6452 last_scheduled_insn = prev_head;
6453 last_nondebug_scheduled_insn = NULL;
6454 nonscheduled_insns_begin = NULL;
6455
6456 gcc_assert ((NOTE_P (last_scheduled_insn)
6457 || DEBUG_INSN_P (last_scheduled_insn))
6458 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6459
6460 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6461 queue. */
6462 q_ptr = 0;
6463 q_size = 0;
6464
6465 insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6466 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6467
6468 /* Start just before the beginning of time. */
6469 clock_var = -1;
6470
6471 /* We need queue and ready lists and clock_var be initialized
6472 in try_ready () (which is called through init_ready_list ()). */
6473 (*current_sched_info->init_ready_list) ();
6474
6475 if (sched_pressure)
6476 sched_pressure_start_bb (*target_bb);
6477
6478 /* The algorithm is O(n^2) in the number of ready insns at any given
6479 time in the worst case. Before reload we are more likely to have
6480 big lists so truncate them to a reasonable size. */
6481 if (!reload_completed
6482 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6483 {
6484 ready_sort_debug (&ready);
6485 ready_sort_real (&ready);
6486
6487 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6488 If there are debug insns, we know they're first. */
6489 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6490 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6491 break;
6492
6493 if (sched_verbose >= 2)
6494 {
6495 fprintf (sched_dump,
6496 ";;\t\tReady list on entry: %d insns: ", ready.n_ready);
6497 debug_ready_list (&ready);
6498 fprintf (sched_dump,
6499 ";;\t\t before reload => truncated to %d insns\n", i);
6500 }
6501
6502 /* Delay all insns past it for 1 cycle. If debug counter is
6503 activated make an exception for the insn right after
6504 nonscheduled_insns_begin. */
6505 {
6506 rtx_insn *skip_insn;
6507
6508 if (dbg_cnt (sched_insn) == false)
6509 skip_insn = first_nonscheduled_insn ();
6510 else
6511 skip_insn = NULL;
6512
6513 while (i < ready.n_ready)
6514 {
6515 rtx_insn *insn;
6516
6517 insn = ready_remove (&ready, i);
6518
6519 if (insn != skip_insn)
6520 queue_insn (insn, 1, "list truncated");
6521 }
6522 if (skip_insn)
6523 ready_add (&ready, skip_insn, true);
6524 }
6525 }
6526
6527 /* Now we can restore basic block notes and maintain precise cfg. */
6528 restore_bb_notes (*target_bb);
6529
6530 last_clock_var = -1;
6531
6532 advance = 0;
6533
6534 gcc_assert (scheduled_insns.length () == 0);
6535 sort_p = TRUE;
6536 must_backtrack = false;
6537 modulo_insns_scheduled = 0;
6538
6539 ls.modulo_epilogue = false;
6540 ls.first_cycle_insn_p = true;
6541
6542 /* Loop until all the insns in BB are scheduled. */
6543 while ((*current_sched_info->schedule_more_p) ())
6544 {
6545 perform_replacements_new_cycle ();
6546 do
6547 {
6548 start_clock_var = clock_var;
6549
6550 clock_var++;
6551
6552 advance_one_cycle ();
6553
6554 /* Add to the ready list all pending insns that can be issued now.
6555 If there are no ready insns, increment clock until one
6556 is ready and add all pending insns at that point to the ready
6557 list. */
6558 queue_to_ready (&ready);
6559
6560 gcc_assert (ready.n_ready);
6561
6562 if (sched_verbose >= 2)
6563 {
6564 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6565 debug_ready_list (&ready);
6566 }
6567 advance -= clock_var - start_clock_var;
6568 }
6569 while (advance > 0);
6570
6571 if (ls.modulo_epilogue)
6572 {
6573 int stage = clock_var / modulo_ii;
6574 if (stage > modulo_last_stage * 2 + 2)
6575 {
6576 if (sched_verbose >= 2)
6577 fprintf (sched_dump,
6578 ";;\t\tmodulo scheduled succeeded at II %d\n",
6579 modulo_ii);
6580 success = true;
6581 goto end_schedule;
6582 }
6583 }
6584 else if (modulo_ii > 0)
6585 {
6586 int stage = clock_var / modulo_ii;
6587 if (stage > modulo_max_stages)
6588 {
6589 if (sched_verbose >= 2)
6590 fprintf (sched_dump,
6591 ";;\t\tfailing schedule due to excessive stages\n");
6592 goto end_schedule;
6593 }
6594 if (modulo_n_insns == modulo_insns_scheduled
6595 && stage > modulo_last_stage)
6596 {
6597 if (sched_verbose >= 2)
6598 fprintf (sched_dump,
6599 ";;\t\tfound kernel after %d stages, II %d\n",
6600 stage, modulo_ii);
6601 ls.modulo_epilogue = true;
6602 }
6603 }
6604
6605 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6606 if (ready.n_ready == 0)
6607 continue;
6608 if (must_backtrack)
6609 goto do_backtrack;
6610
6611 ls.shadows_only_p = false;
6612 cycle_issued_insns = 0;
6613 ls.can_issue_more = issue_rate;
6614 for (;;)
6615 {
6616 rtx_insn *insn;
6617 int cost;
6618 bool asm_p;
6619
6620 if (sort_p && ready.n_ready > 0)
6621 {
6622 /* Sort the ready list based on priority. This must be
6623 done every iteration through the loop, as schedule_insn
6624 may have readied additional insns that will not be
6625 sorted correctly. */
6626 ready_sort (&ready);
6627
6628 if (sched_verbose >= 2)
6629 {
6630 fprintf (sched_dump,
6631 ";;\t\tReady list after ready_sort: ");
6632 debug_ready_list (&ready);
6633 }
6634 }
6635
6636 /* We don't want md sched reorder to even see debug isns, so put
6637 them out right away. */
6638 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6639 && (*current_sched_info->schedule_more_p) ())
6640 {
6641 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6642 {
6643 rtx_insn *insn = ready_remove_first (&ready);
6644 gcc_assert (DEBUG_INSN_P (insn));
6645 (*current_sched_info->begin_schedule_ready) (insn);
6646 scheduled_insns.safe_push (insn);
6647 last_scheduled_insn = insn;
6648 advance = schedule_insn (insn);
6649 gcc_assert (advance == 0);
6650 if (ready.n_ready > 0)
6651 ready_sort (&ready);
6652 }
6653 }
6654
6655 if (ls.first_cycle_insn_p && !ready.n_ready)
6656 break;
6657
6658 resume_after_backtrack:
6659 /* Allow the target to reorder the list, typically for
6660 better instruction bundling. */
6661 if (sort_p
6662 && (ready.n_ready == 0
6663 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6664 {
6665 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6666 ls.can_issue_more
6667 = targetm.sched.reorder (sched_dump, sched_verbose,
6668 ready_lastpos (&ready),
6669 &ready.n_ready, clock_var);
6670 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6671 ls.can_issue_more
6672 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6673 ready.n_ready
6674 ? ready_lastpos (&ready) : NULL,
6675 &ready.n_ready, clock_var);
6676 }
6677
6678 restart_choose_ready:
6679 if (sched_verbose >= 2)
6680 {
6681 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6682 clock_var);
6683 debug_ready_list (&ready);
6684 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6685 print_curr_reg_pressure ();
6686 }
6687
6688 if (ready.n_ready == 0
6689 && ls.can_issue_more
6690 && reload_completed)
6691 {
6692 /* Allow scheduling insns directly from the queue in case
6693 there's nothing better to do (ready list is empty) but
6694 there are still vacant dispatch slots in the current cycle. */
6695 if (sched_verbose >= 6)
6696 fprintf (sched_dump,";;\t\tSecond chance\n");
6697 memcpy (temp_state, curr_state, dfa_state_size);
6698 if (early_queue_to_ready (temp_state, &ready))
6699 ready_sort (&ready);
6700 }
6701
6702 if (ready.n_ready == 0
6703 || !ls.can_issue_more
6704 || state_dead_lock_p (curr_state)
6705 || !(*current_sched_info->schedule_more_p) ())
6706 break;
6707
6708 /* Select and remove the insn from the ready list. */
6709 if (sort_p)
6710 {
6711 int res;
6712
6713 insn = NULL;
6714 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6715
6716 if (res < 0)
6717 /* Finish cycle. */
6718 break;
6719 if (res > 0)
6720 goto restart_choose_ready;
6721
6722 gcc_assert (insn != NULL_RTX);
6723 }
6724 else
6725 insn = ready_remove_first (&ready);
6726
6727 if (sched_pressure != SCHED_PRESSURE_NONE
6728 && INSN_TICK (insn) > clock_var)
6729 {
6730 ready_add (&ready, insn, true);
6731 advance = 1;
6732 break;
6733 }
6734
6735 if (targetm.sched.dfa_new_cycle
6736 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6737 insn, last_clock_var,
6738 clock_var, &sort_p))
6739 /* SORT_P is used by the target to override sorting
6740 of the ready list. This is needed when the target
6741 has modified its internal structures expecting that
6742 the insn will be issued next. As we need the insn
6743 to have the highest priority (so it will be returned by
6744 the ready_remove_first call above), we invoke
6745 ready_add (&ready, insn, true).
6746 But, still, there is one issue: INSN can be later
6747 discarded by scheduler's front end through
6748 current_sched_info->can_schedule_ready_p, hence, won't
6749 be issued next. */
6750 {
6751 ready_add (&ready, insn, true);
6752 break;
6753 }
6754
6755 sort_p = TRUE;
6756
6757 if (current_sched_info->can_schedule_ready_p
6758 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6759 /* We normally get here only if we don't want to move
6760 insn from the split block. */
6761 {
6762 TODO_SPEC (insn) = DEP_POSTPONED;
6763 goto restart_choose_ready;
6764 }
6765
6766 if (delay_htab)
6767 {
6768 /* If this insn is the first part of a delay-slot pair, record a
6769 backtrack point. */
6770 struct delay_pair *delay_entry;
6771 delay_entry
6772 = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6773 if (delay_entry)
6774 {
6775 save_backtrack_point (delay_entry, ls);
6776 if (sched_verbose >= 2)
6777 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6778 }
6779 }
6780
6781 /* DECISION is made. */
6782
6783 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6784 {
6785 modulo_insns_scheduled++;
6786 modulo_last_stage = clock_var / modulo_ii;
6787 }
6788 if (TODO_SPEC (insn) & SPECULATIVE)
6789 generate_recovery_code (insn);
6790
6791 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6792 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6793
6794 /* Update counters, etc in the scheduler's front end. */
6795 (*current_sched_info->begin_schedule_ready) (insn);
6796 scheduled_insns.safe_push (insn);
6797 gcc_assert (NONDEBUG_INSN_P (insn));
6798 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6799
6800 if (recog_memoized (insn) >= 0)
6801 {
6802 memcpy (temp_state, curr_state, dfa_state_size);
6803 cost = state_transition (curr_state, insn);
6804 if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6805 gcc_assert (cost < 0);
6806 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6807 cycle_issued_insns++;
6808 asm_p = false;
6809 }
6810 else
6811 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6812 || asm_noperands (PATTERN (insn)) >= 0);
6813
6814 if (targetm.sched.variable_issue)
6815 ls.can_issue_more =
6816 targetm.sched.variable_issue (sched_dump, sched_verbose,
6817 insn, ls.can_issue_more);
6818 /* A naked CLOBBER or USE generates no instruction, so do
6819 not count them against the issue rate. */
6820 else if (GET_CODE (PATTERN (insn)) != USE
6821 && GET_CODE (PATTERN (insn)) != CLOBBER)
6822 ls.can_issue_more--;
6823 advance = schedule_insn (insn);
6824
6825 if (SHADOW_P (insn))
6826 ls.shadows_only_p = true;
6827
6828 /* After issuing an asm insn we should start a new cycle. */
6829 if (advance == 0 && asm_p)
6830 advance = 1;
6831
6832 if (must_backtrack)
6833 break;
6834
6835 if (advance != 0)
6836 break;
6837
6838 ls.first_cycle_insn_p = false;
6839 if (ready.n_ready > 0)
6840 prune_ready_list (temp_state, false, ls.shadows_only_p,
6841 ls.modulo_epilogue);
6842 }
6843
6844 do_backtrack:
6845 if (!must_backtrack)
6846 for (i = 0; i < ready.n_ready; i++)
6847 {
6848 rtx_insn *insn = ready_element (&ready, i);
6849 if (INSN_EXACT_TICK (insn) == clock_var)
6850 {
6851 must_backtrack = true;
6852 clock_var++;
6853 break;
6854 }
6855 }
6856 if (must_backtrack && modulo_ii > 0)
6857 {
6858 if (modulo_backtracks_left == 0)
6859 goto end_schedule;
6860 modulo_backtracks_left--;
6861 }
6862 while (must_backtrack)
6863 {
6864 struct haifa_saved_data *failed;
6865 rtx_insn *failed_insn;
6866
6867 must_backtrack = false;
6868 failed = verify_shadows ();
6869 gcc_assert (failed);
6870
6871 failed_insn = failed->delay_pair->i1;
6872 /* Clear these queues. */
6873 perform_replacements_new_cycle ();
6874 toggle_cancelled_flags (false);
6875 unschedule_insns_until (failed_insn);
6876 while (failed != backtrack_queue)
6877 free_topmost_backtrack_point (true);
6878 restore_last_backtrack_point (&ls);
6879 if (sched_verbose >= 2)
6880 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6881 /* Delay by at least a cycle. This could cause additional
6882 backtracking. */
6883 queue_insn (failed_insn, 1, "backtracked");
6884 advance = 0;
6885 if (must_backtrack)
6886 continue;
6887 if (ready.n_ready > 0)
6888 goto resume_after_backtrack;
6889 else
6890 {
6891 if (clock_var == 0 && ls.first_cycle_insn_p)
6892 goto end_schedule;
6893 advance = 1;
6894 break;
6895 }
6896 }
6897 ls.first_cycle_insn_p = true;
6898 }
6899 if (ls.modulo_epilogue)
6900 success = true;
6901 end_schedule:
6902 if (!ls.first_cycle_insn_p || advance)
6903 advance_one_cycle ();
6904 perform_replacements_new_cycle ();
6905 if (modulo_ii > 0)
6906 {
6907 /* Once again, debug insn suckiness: they can be on the ready list
6908 even if they have unresolved dependencies. To make our view
6909 of the world consistent, remove such "ready" insns. */
6910 restart_debug_insn_loop:
6911 for (i = ready.n_ready - 1; i >= 0; i--)
6912 {
6913 rtx_insn *x;
6914
6915 x = ready_element (&ready, i);
6916 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6917 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6918 {
6919 ready_remove (&ready, i);
6920 goto restart_debug_insn_loop;
6921 }
6922 }
6923 for (i = ready.n_ready - 1; i >= 0; i--)
6924 {
6925 rtx_insn *x;
6926
6927 x = ready_element (&ready, i);
6928 resolve_dependencies (x);
6929 }
6930 for (i = 0; i <= max_insn_queue_index; i++)
6931 {
6932 rtx_insn_list *link;
6933 while ((link = insn_queue[i]) != NULL)
6934 {
6935 rtx_insn *x = link->insn ();
6936 insn_queue[i] = link->next ();
6937 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6938 free_INSN_LIST_node (link);
6939 resolve_dependencies (x);
6940 }
6941 }
6942 }
6943
6944 if (!success)
6945 undo_all_replacements ();
6946
6947 /* Debug info. */
6948 if (sched_verbose)
6949 {
6950 fprintf (sched_dump, ";;\tReady list (final): ");
6951 debug_ready_list (&ready);
6952 }
6953
6954 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6955 /* Sanity check -- queue must be empty now. Meaningless if region has
6956 multiple bbs. */
6957 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6958 else if (modulo_ii == 0)
6959 {
6960 /* We must maintain QUEUE_INDEX between blocks in region. */
6961 for (i = ready.n_ready - 1; i >= 0; i--)
6962 {
6963 rtx_insn *x;
6964
6965 x = ready_element (&ready, i);
6966 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6967 TODO_SPEC (x) = HARD_DEP;
6968 }
6969
6970 if (q_size)
6971 for (i = 0; i <= max_insn_queue_index; i++)
6972 {
6973 rtx_insn_list *link;
6974 for (link = insn_queue[i]; link; link = link->next ())
6975 {
6976 rtx_insn *x;
6977
6978 x = link->insn ();
6979 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6980 TODO_SPEC (x) = HARD_DEP;
6981 }
6982 free_INSN_LIST_list (&insn_queue[i]);
6983 }
6984 }
6985
6986 if (sched_pressure == SCHED_PRESSURE_MODEL)
6987 model_end_schedule ();
6988
6989 if (success)
6990 {
6991 commit_schedule (prev_head, tail, target_bb);
6992 if (sched_verbose)
6993 fprintf (sched_dump, ";; total time = %d\n", clock_var);
6994 }
6995 else
6996 last_scheduled_insn = tail;
6997
6998 scheduled_insns.truncate (0);
6999
7000 if (!current_sched_info->queue_must_finish_empty
7001 || haifa_recovery_bb_recently_added_p)
7002 {
7003 /* INSN_TICK (minimum clock tick at which the insn becomes
7004 ready) may be not correct for the insn in the subsequent
7005 blocks of the region. We should use a correct value of
7006 `clock_var' or modify INSN_TICK. It is better to keep
7007 clock_var value equal to 0 at the start of a basic block.
7008 Therefore we modify INSN_TICK here. */
7009 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7010 }
7011
7012 if (targetm.sched.finish)
7013 {
7014 targetm.sched.finish (sched_dump, sched_verbose);
7015 /* Target might have added some instructions to the scheduled block
7016 in its md_finish () hook. These new insns don't have any data
7017 initialized and to identify them we extend h_i_d so that they'll
7018 get zero luids. */
7019 sched_extend_luids ();
7020 }
7021
7022 /* Update head/tail boundaries. */
7023 head = NEXT_INSN (prev_head);
7024 tail = last_scheduled_insn;
7025
7026 if (sched_verbose)
7027 {
7028 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
7029 INSN_UID (head), INSN_UID (tail));
7030
7031 if (sched_verbose >= 2)
7032 {
7033 dump_insn_stream (head, tail);
7034 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7035 NULL);
7036 }
7037
7038 fprintf (sched_dump, "\n");
7039 }
7040
7041 head = restore_other_notes (head, NULL);
7042
7043 current_sched_info->head = head;
7044 current_sched_info->tail = tail;
7045
7046 free_backtrack_queue ();
7047
7048 return success;
7049 }
7050 \f
7051 /* Set_priorities: compute priority of each insn in the block. */
7052
7053 int
7054 set_priorities (rtx_insn *head, rtx_insn *tail)
7055 {
7056 rtx_insn *insn;
7057 int n_insn;
7058 int sched_max_insns_priority =
7059 current_sched_info->sched_max_insns_priority;
7060 rtx_insn *prev_head;
7061
7062 if (head == tail && ! INSN_P (head))
7063 gcc_unreachable ();
7064
7065 n_insn = 0;
7066
7067 prev_head = PREV_INSN (head);
7068 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7069 {
7070 if (!INSN_P (insn))
7071 continue;
7072
7073 n_insn++;
7074 (void) priority (insn);
7075
7076 gcc_assert (INSN_PRIORITY_KNOWN (insn));
7077
7078 sched_max_insns_priority = MAX (sched_max_insns_priority,
7079 INSN_PRIORITY (insn));
7080 }
7081
7082 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7083
7084 return n_insn;
7085 }
7086
7087 /* Set dump and sched_verbose for the desired debugging output. If no
7088 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
7089 For -fsched-verbose=N, N>=10, print everything to stderr. */
7090 void
7091 setup_sched_dump (void)
7092 {
7093 sched_verbose = sched_verbose_param;
7094 if (sched_verbose_param == 0 && dump_file)
7095 sched_verbose = 1;
7096 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
7097 ? stderr : dump_file);
7098 }
7099
7100 /* Allocate data for register pressure sensitive scheduling. */
7101 static void
7102 alloc_global_sched_pressure_data (void)
7103 {
7104 if (sched_pressure != SCHED_PRESSURE_NONE)
7105 {
7106 int i, max_regno = max_reg_num ();
7107
7108 if (sched_dump != NULL)
7109 /* We need info about pseudos for rtl dumps about pseudo
7110 classes and costs. */
7111 regstat_init_n_sets_and_refs ();
7112 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7113 sched_regno_pressure_class
7114 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7115 for (i = 0; i < max_regno; i++)
7116 sched_regno_pressure_class[i]
7117 = (i < FIRST_PSEUDO_REGISTER
7118 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7119 : ira_pressure_class_translate[reg_allocno_class (i)]);
7120 curr_reg_live = BITMAP_ALLOC (NULL);
7121 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7122 {
7123 saved_reg_live = BITMAP_ALLOC (NULL);
7124 region_ref_regs = BITMAP_ALLOC (NULL);
7125 }
7126
7127 /* Calculate number of CALL_USED_REGS in register classes that
7128 we calculate register pressure for. */
7129 for (int c = 0; c < ira_pressure_classes_num; ++c)
7130 {
7131 enum reg_class cl = ira_pressure_classes[c];
7132
7133 call_used_regs_num[cl] = 0;
7134
7135 for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7136 if (call_used_regs[ira_class_hard_regs[cl][i]])
7137 ++call_used_regs_num[cl];
7138 }
7139 }
7140 }
7141
7142 /* Free data for register pressure sensitive scheduling. Also called
7143 from schedule_region when stopping sched-pressure early. */
7144 void
7145 free_global_sched_pressure_data (void)
7146 {
7147 if (sched_pressure != SCHED_PRESSURE_NONE)
7148 {
7149 if (regstat_n_sets_and_refs != NULL)
7150 regstat_free_n_sets_and_refs ();
7151 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7152 {
7153 BITMAP_FREE (region_ref_regs);
7154 BITMAP_FREE (saved_reg_live);
7155 }
7156 BITMAP_FREE (curr_reg_live);
7157 free (sched_regno_pressure_class);
7158 }
7159 }
7160
7161 /* Initialize some global state for the scheduler. This function works
7162 with the common data shared between all the schedulers. It is called
7163 from the scheduler specific initialization routine. */
7164
7165 void
7166 sched_init (void)
7167 {
7168 /* Disable speculative loads in their presence if cc0 defined. */
7169 if (HAVE_cc0)
7170 flag_schedule_speculative_load = 0;
7171
7172 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7173 targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7174
7175 if (live_range_shrinkage_p)
7176 sched_pressure = SCHED_PRESSURE_WEIGHTED;
7177 else if (flag_sched_pressure
7178 && !reload_completed
7179 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7180 sched_pressure = ((enum sched_pressure_algorithm)
7181 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
7182 else
7183 sched_pressure = SCHED_PRESSURE_NONE;
7184
7185 if (sched_pressure != SCHED_PRESSURE_NONE)
7186 ira_setup_eliminable_regset ();
7187
7188 /* Initialize SPEC_INFO. */
7189 if (targetm.sched.set_sched_flags)
7190 {
7191 spec_info = &spec_info_var;
7192 targetm.sched.set_sched_flags (spec_info);
7193
7194 if (spec_info->mask != 0)
7195 {
7196 spec_info->data_weakness_cutoff =
7197 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
7198 spec_info->control_weakness_cutoff =
7199 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
7200 * REG_BR_PROB_BASE) / 100;
7201 }
7202 else
7203 /* So we won't read anything accidentally. */
7204 spec_info = NULL;
7205
7206 }
7207 else
7208 /* So we won't read anything accidentally. */
7209 spec_info = 0;
7210
7211 /* Initialize issue_rate. */
7212 if (targetm.sched.issue_rate)
7213 issue_rate = targetm.sched.issue_rate ();
7214 else
7215 issue_rate = 1;
7216
7217 if (targetm.sched.first_cycle_multipass_dfa_lookahead
7218 /* Don't use max_issue with reg_pressure scheduling. Multipass
7219 scheduling and reg_pressure scheduling undo each other's decisions. */
7220 && sched_pressure == SCHED_PRESSURE_NONE)
7221 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7222 else
7223 dfa_lookahead = 0;
7224
7225 /* Set to "0" so that we recalculate. */
7226 max_lookahead_tries = 0;
7227
7228 if (targetm.sched.init_dfa_pre_cycle_insn)
7229 targetm.sched.init_dfa_pre_cycle_insn ();
7230
7231 if (targetm.sched.init_dfa_post_cycle_insn)
7232 targetm.sched.init_dfa_post_cycle_insn ();
7233
7234 dfa_start ();
7235 dfa_state_size = state_size ();
7236
7237 init_alias_analysis ();
7238
7239 if (!sched_no_dce)
7240 df_set_flags (DF_LR_RUN_DCE);
7241 df_note_add_problem ();
7242
7243 /* More problems needed for interloop dep calculation in SMS. */
7244 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7245 {
7246 df_rd_add_problem ();
7247 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7248 }
7249
7250 df_analyze ();
7251
7252 /* Do not run DCE after reload, as this can kill nops inserted
7253 by bundling. */
7254 if (reload_completed)
7255 df_clear_flags (DF_LR_RUN_DCE);
7256
7257 regstat_compute_calls_crossed ();
7258
7259 if (targetm.sched.init_global)
7260 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7261
7262 alloc_global_sched_pressure_data ();
7263
7264 curr_state = xmalloc (dfa_state_size);
7265 }
7266
7267 static void haifa_init_only_bb (basic_block, basic_block);
7268
7269 /* Initialize data structures specific to the Haifa scheduler. */
7270 void
7271 haifa_sched_init (void)
7272 {
7273 setup_sched_dump ();
7274 sched_init ();
7275
7276 scheduled_insns.create (0);
7277
7278 if (spec_info != NULL)
7279 {
7280 sched_deps_info->use_deps_list = 1;
7281 sched_deps_info->generate_spec_deps = 1;
7282 }
7283
7284 /* Initialize luids, dependency caches, target and h_i_d for the
7285 whole function. */
7286 {
7287 bb_vec_t bbs;
7288 bbs.create (n_basic_blocks_for_fn (cfun));
7289 basic_block bb;
7290
7291 sched_init_bbs ();
7292
7293 FOR_EACH_BB_FN (bb, cfun)
7294 bbs.quick_push (bb);
7295 sched_init_luids (bbs);
7296 sched_deps_init (true);
7297 sched_extend_target ();
7298 haifa_init_h_i_d (bbs);
7299
7300 bbs.release ();
7301 }
7302
7303 sched_init_only_bb = haifa_init_only_bb;
7304 sched_split_block = sched_split_block_1;
7305 sched_create_empty_bb = sched_create_empty_bb_1;
7306 haifa_recovery_bb_ever_added_p = false;
7307
7308 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7309 before_recovery = 0;
7310 after_recovery = 0;
7311
7312 modulo_ii = 0;
7313 }
7314
7315 /* Finish work with the data specific to the Haifa scheduler. */
7316 void
7317 haifa_sched_finish (void)
7318 {
7319 sched_create_empty_bb = NULL;
7320 sched_split_block = NULL;
7321 sched_init_only_bb = NULL;
7322
7323 if (spec_info && spec_info->dump)
7324 {
7325 char c = reload_completed ? 'a' : 'b';
7326
7327 fprintf (spec_info->dump,
7328 ";; %s:\n", current_function_name ());
7329
7330 fprintf (spec_info->dump,
7331 ";; Procedure %cr-begin-data-spec motions == %d\n",
7332 c, nr_begin_data);
7333 fprintf (spec_info->dump,
7334 ";; Procedure %cr-be-in-data-spec motions == %d\n",
7335 c, nr_be_in_data);
7336 fprintf (spec_info->dump,
7337 ";; Procedure %cr-begin-control-spec motions == %d\n",
7338 c, nr_begin_control);
7339 fprintf (spec_info->dump,
7340 ";; Procedure %cr-be-in-control-spec motions == %d\n",
7341 c, nr_be_in_control);
7342 }
7343
7344 scheduled_insns.release ();
7345
7346 /* Finalize h_i_d, dependency caches, and luids for the whole
7347 function. Target will be finalized in md_global_finish (). */
7348 sched_deps_finish ();
7349 sched_finish_luids ();
7350 current_sched_info = NULL;
7351 sched_finish ();
7352 }
7353
7354 /* Free global data used during insn scheduling. This function works with
7355 the common data shared between the schedulers. */
7356
7357 void
7358 sched_finish (void)
7359 {
7360 haifa_finish_h_i_d ();
7361 free_global_sched_pressure_data ();
7362 free (curr_state);
7363
7364 if (targetm.sched.finish_global)
7365 targetm.sched.finish_global (sched_dump, sched_verbose);
7366
7367 end_alias_analysis ();
7368
7369 regstat_free_calls_crossed ();
7370
7371 dfa_finish ();
7372 }
7373
7374 /* Free all delay_pair structures that were recorded. */
7375 void
7376 free_delay_pairs (void)
7377 {
7378 if (delay_htab)
7379 {
7380 delay_htab->empty ();
7381 delay_htab_i2->empty ();
7382 }
7383 }
7384
7385 /* Fix INSN_TICKs of the instructions in the current block as well as
7386 INSN_TICKs of their dependents.
7387 HEAD and TAIL are the begin and the end of the current scheduled block. */
7388 static void
7389 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7390 {
7391 /* Set of instructions with corrected INSN_TICK. */
7392 bitmap_head processed;
7393 /* ??? It is doubtful if we should assume that cycle advance happens on
7394 basic block boundaries. Basically insns that are unconditionally ready
7395 on the start of the block are more preferable then those which have
7396 a one cycle dependency over insn from the previous block. */
7397 int next_clock = clock_var + 1;
7398
7399 bitmap_initialize (&processed, 0);
7400
7401 /* Iterates over scheduled instructions and fix their INSN_TICKs and
7402 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7403 across different blocks. */
7404 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7405 {
7406 if (INSN_P (head))
7407 {
7408 int tick;
7409 sd_iterator_def sd_it;
7410 dep_t dep;
7411
7412 tick = INSN_TICK (head);
7413 gcc_assert (tick >= MIN_TICK);
7414
7415 /* Fix INSN_TICK of instruction from just scheduled block. */
7416 if (bitmap_set_bit (&processed, INSN_LUID (head)))
7417 {
7418 tick -= next_clock;
7419
7420 if (tick < MIN_TICK)
7421 tick = MIN_TICK;
7422
7423 INSN_TICK (head) = tick;
7424 }
7425
7426 if (DEBUG_INSN_P (head))
7427 continue;
7428
7429 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7430 {
7431 rtx_insn *next;
7432
7433 next = DEP_CON (dep);
7434 tick = INSN_TICK (next);
7435
7436 if (tick != INVALID_TICK
7437 /* If NEXT has its INSN_TICK calculated, fix it.
7438 If not - it will be properly calculated from
7439 scratch later in fix_tick_ready. */
7440 && bitmap_set_bit (&processed, INSN_LUID (next)))
7441 {
7442 tick -= next_clock;
7443
7444 if (tick < MIN_TICK)
7445 tick = MIN_TICK;
7446
7447 if (tick > INTER_TICK (next))
7448 INTER_TICK (next) = tick;
7449 else
7450 tick = INTER_TICK (next);
7451
7452 INSN_TICK (next) = tick;
7453 }
7454 }
7455 }
7456 }
7457 bitmap_clear (&processed);
7458 }
7459
7460 /* Check if NEXT is ready to be added to the ready or queue list.
7461 If "yes", add it to the proper list.
7462 Returns:
7463 -1 - is not ready yet,
7464 0 - added to the ready list,
7465 0 < N - queued for N cycles. */
7466 int
7467 try_ready (rtx_insn *next)
7468 {
7469 ds_t old_ts, new_ts;
7470
7471 old_ts = TODO_SPEC (next);
7472
7473 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7474 && (old_ts == HARD_DEP
7475 || old_ts == DEP_POSTPONED
7476 || (old_ts & SPECULATIVE)
7477 || old_ts == DEP_CONTROL));
7478
7479 new_ts = recompute_todo_spec (next, false);
7480
7481 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7482 gcc_assert (new_ts == old_ts
7483 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
7484 else if (current_sched_info->new_ready)
7485 new_ts = current_sched_info->new_ready (next, new_ts);
7486
7487 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
7488 have its original pattern or changed (speculative) one. This is due
7489 to changing ebb in region scheduling.
7490 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
7491 has speculative pattern.
7492
7493 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7494 control-speculative NEXT could have been discarded by sched-rgn.c
7495 (the same case as when discarded by can_schedule_ready_p ()). */
7496
7497 if ((new_ts & SPECULATIVE)
7498 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7499 need to change anything. */
7500 && new_ts != old_ts)
7501 {
7502 int res;
7503 rtx new_pat;
7504
7505 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7506
7507 res = haifa_speculate_insn (next, new_ts, &new_pat);
7508
7509 switch (res)
7510 {
7511 case -1:
7512 /* It would be nice to change DEP_STATUS of all dependences,
7513 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7514 so we won't reanalyze anything. */
7515 new_ts = HARD_DEP;
7516 break;
7517
7518 case 0:
7519 /* We follow the rule, that every speculative insn
7520 has non-null ORIG_PAT. */
7521 if (!ORIG_PAT (next))
7522 ORIG_PAT (next) = PATTERN (next);
7523 break;
7524
7525 case 1:
7526 if (!ORIG_PAT (next))
7527 /* If we gonna to overwrite the original pattern of insn,
7528 save it. */
7529 ORIG_PAT (next) = PATTERN (next);
7530
7531 res = haifa_change_pattern (next, new_pat);
7532 gcc_assert (res);
7533 break;
7534
7535 default:
7536 gcc_unreachable ();
7537 }
7538 }
7539
7540 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7541 either correct (new_ts & SPECULATIVE),
7542 or we simply don't care (new_ts & HARD_DEP). */
7543
7544 gcc_assert (!ORIG_PAT (next)
7545 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7546
7547 TODO_SPEC (next) = new_ts;
7548
7549 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7550 {
7551 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7552 control-speculative NEXT could have been discarded by sched-rgn.c
7553 (the same case as when discarded by can_schedule_ready_p ()). */
7554 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7555
7556 change_queue_index (next, QUEUE_NOWHERE);
7557
7558 return -1;
7559 }
7560 else if (!(new_ts & BEGIN_SPEC)
7561 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7562 && !IS_SPECULATION_CHECK_P (next))
7563 /* We should change pattern of every previously speculative
7564 instruction - and we determine if NEXT was speculative by using
7565 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
7566 pat too, so skip them. */
7567 {
7568 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7569 gcc_assert (success);
7570 ORIG_PAT (next) = 0;
7571 }
7572
7573 if (sched_verbose >= 2)
7574 {
7575 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7576 (*current_sched_info->print_insn) (next, 0));
7577
7578 if (spec_info && spec_info->dump)
7579 {
7580 if (new_ts & BEGIN_DATA)
7581 fprintf (spec_info->dump, "; data-spec;");
7582 if (new_ts & BEGIN_CONTROL)
7583 fprintf (spec_info->dump, "; control-spec;");
7584 if (new_ts & BE_IN_CONTROL)
7585 fprintf (spec_info->dump, "; in-control-spec;");
7586 }
7587 if (TODO_SPEC (next) & DEP_CONTROL)
7588 fprintf (sched_dump, " predicated");
7589 fprintf (sched_dump, "\n");
7590 }
7591
7592 adjust_priority (next);
7593
7594 return fix_tick_ready (next);
7595 }
7596
7597 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7598 static int
7599 fix_tick_ready (rtx_insn *next)
7600 {
7601 int tick, delay;
7602
7603 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7604 {
7605 int full_p;
7606 sd_iterator_def sd_it;
7607 dep_t dep;
7608
7609 tick = INSN_TICK (next);
7610 /* if tick is not equal to INVALID_TICK, then update
7611 INSN_TICK of NEXT with the most recent resolved dependence
7612 cost. Otherwise, recalculate from scratch. */
7613 full_p = (tick == INVALID_TICK);
7614
7615 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7616 {
7617 rtx_insn *pro = DEP_PRO (dep);
7618 int tick1;
7619
7620 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7621
7622 tick1 = INSN_TICK (pro) + dep_cost (dep);
7623 if (tick1 > tick)
7624 tick = tick1;
7625
7626 if (!full_p)
7627 break;
7628 }
7629 }
7630 else
7631 tick = -1;
7632
7633 INSN_TICK (next) = tick;
7634
7635 delay = tick - clock_var;
7636 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7637 delay = QUEUE_READY;
7638
7639 change_queue_index (next, delay);
7640
7641 return delay;
7642 }
7643
7644 /* Move NEXT to the proper queue list with (DELAY >= 1),
7645 or add it to the ready list (DELAY == QUEUE_READY),
7646 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
7647 static void
7648 change_queue_index (rtx_insn *next, int delay)
7649 {
7650 int i = QUEUE_INDEX (next);
7651
7652 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7653 && delay != 0);
7654 gcc_assert (i != QUEUE_SCHEDULED);
7655
7656 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7657 || (delay < 0 && delay == i))
7658 /* We have nothing to do. */
7659 return;
7660
7661 /* Remove NEXT from wherever it is now. */
7662 if (i == QUEUE_READY)
7663 ready_remove_insn (next);
7664 else if (i >= 0)
7665 queue_remove (next);
7666
7667 /* Add it to the proper place. */
7668 if (delay == QUEUE_READY)
7669 ready_add (readyp, next, false);
7670 else if (delay >= 1)
7671 queue_insn (next, delay, "change queue index");
7672
7673 if (sched_verbose >= 2)
7674 {
7675 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7676 (*current_sched_info->print_insn) (next, 0));
7677
7678 if (delay == QUEUE_READY)
7679 fprintf (sched_dump, " into ready\n");
7680 else if (delay >= 1)
7681 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7682 else
7683 fprintf (sched_dump, " removed from ready or queue lists\n");
7684 }
7685 }
7686
7687 static int sched_ready_n_insns = -1;
7688
7689 /* Initialize per region data structures. */
7690 void
7691 sched_extend_ready_list (int new_sched_ready_n_insns)
7692 {
7693 int i;
7694
7695 if (sched_ready_n_insns == -1)
7696 /* At the first call we need to initialize one more choice_stack
7697 entry. */
7698 {
7699 i = 0;
7700 sched_ready_n_insns = 0;
7701 scheduled_insns.reserve (new_sched_ready_n_insns);
7702 }
7703 else
7704 i = sched_ready_n_insns + 1;
7705
7706 ready.veclen = new_sched_ready_n_insns + issue_rate;
7707 ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7708
7709 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7710
7711 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7712 sched_ready_n_insns,
7713 sizeof (*ready_try));
7714
7715 /* We allocate +1 element to save initial state in the choice_stack[0]
7716 entry. */
7717 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7718 new_sched_ready_n_insns + 1);
7719
7720 for (; i <= new_sched_ready_n_insns; i++)
7721 {
7722 choice_stack[i].state = xmalloc (dfa_state_size);
7723
7724 if (targetm.sched.first_cycle_multipass_init)
7725 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7726 .target_data));
7727 }
7728
7729 sched_ready_n_insns = new_sched_ready_n_insns;
7730 }
7731
7732 /* Free per region data structures. */
7733 void
7734 sched_finish_ready_list (void)
7735 {
7736 int i;
7737
7738 free (ready.vec);
7739 ready.vec = NULL;
7740 ready.veclen = 0;
7741
7742 free (ready_try);
7743 ready_try = NULL;
7744
7745 for (i = 0; i <= sched_ready_n_insns; i++)
7746 {
7747 if (targetm.sched.first_cycle_multipass_fini)
7748 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7749 .target_data));
7750
7751 free (choice_stack [i].state);
7752 }
7753 free (choice_stack);
7754 choice_stack = NULL;
7755
7756 sched_ready_n_insns = -1;
7757 }
7758
7759 static int
7760 haifa_luid_for_non_insn (rtx x)
7761 {
7762 gcc_assert (NOTE_P (x) || LABEL_P (x));
7763
7764 return 0;
7765 }
7766
7767 /* Generates recovery code for INSN. */
7768 static void
7769 generate_recovery_code (rtx_insn *insn)
7770 {
7771 if (TODO_SPEC (insn) & BEGIN_SPEC)
7772 begin_speculative_block (insn);
7773
7774 /* Here we have insn with no dependencies to
7775 instructions other then CHECK_SPEC ones. */
7776
7777 if (TODO_SPEC (insn) & BE_IN_SPEC)
7778 add_to_speculative_block (insn);
7779 }
7780
7781 /* Helper function.
7782 Tries to add speculative dependencies of type FS between instructions
7783 in deps_list L and TWIN. */
7784 static void
7785 process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
7786 {
7787 sd_iterator_def sd_it;
7788 dep_t dep;
7789
7790 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7791 {
7792 ds_t ds;
7793 rtx_insn *consumer;
7794
7795 consumer = DEP_CON (dep);
7796
7797 ds = DEP_STATUS (dep);
7798
7799 if (/* If we want to create speculative dep. */
7800 fs
7801 /* And we can do that because this is a true dep. */
7802 && (ds & DEP_TYPES) == DEP_TRUE)
7803 {
7804 gcc_assert (!(ds & BE_IN_SPEC));
7805
7806 if (/* If this dep can be overcome with 'begin speculation'. */
7807 ds & BEGIN_SPEC)
7808 /* Then we have a choice: keep the dep 'begin speculative'
7809 or transform it into 'be in speculative'. */
7810 {
7811 if (/* In try_ready we assert that if insn once became ready
7812 it can be removed from the ready (or queue) list only
7813 due to backend decision. Hence we can't let the
7814 probability of the speculative dep to decrease. */
7815 ds_weak (ds) <= ds_weak (fs))
7816 {
7817 ds_t new_ds;
7818
7819 new_ds = (ds & ~BEGIN_SPEC) | fs;
7820
7821 if (/* consumer can 'be in speculative'. */
7822 sched_insn_is_legitimate_for_speculation_p (consumer,
7823 new_ds))
7824 /* Transform it to be in speculative. */
7825 ds = new_ds;
7826 }
7827 }
7828 else
7829 /* Mark the dep as 'be in speculative'. */
7830 ds |= fs;
7831 }
7832
7833 {
7834 dep_def _new_dep, *new_dep = &_new_dep;
7835
7836 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7837 sd_add_dep (new_dep, false);
7838 }
7839 }
7840 }
7841
7842 /* Generates recovery code for BEGIN speculative INSN. */
7843 static void
7844 begin_speculative_block (rtx_insn *insn)
7845 {
7846 if (TODO_SPEC (insn) & BEGIN_DATA)
7847 nr_begin_data++;
7848 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7849 nr_begin_control++;
7850
7851 create_check_block_twin (insn, false);
7852
7853 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7854 }
7855
7856 static void haifa_init_insn (rtx_insn *);
7857
7858 /* Generates recovery code for BE_IN speculative INSN. */
7859 static void
7860 add_to_speculative_block (rtx_insn *insn)
7861 {
7862 ds_t ts;
7863 sd_iterator_def sd_it;
7864 dep_t dep;
7865 rtx_insn_list *twins = NULL;
7866 rtx_vec_t priorities_roots;
7867
7868 ts = TODO_SPEC (insn);
7869 gcc_assert (!(ts & ~BE_IN_SPEC));
7870
7871 if (ts & BE_IN_DATA)
7872 nr_be_in_data++;
7873 if (ts & BE_IN_CONTROL)
7874 nr_be_in_control++;
7875
7876 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7877 gcc_assert (!TODO_SPEC (insn));
7878
7879 DONE_SPEC (insn) |= ts;
7880
7881 /* First we convert all simple checks to branchy. */
7882 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7883 sd_iterator_cond (&sd_it, &dep);)
7884 {
7885 rtx_insn *check = DEP_PRO (dep);
7886
7887 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7888 {
7889 create_check_block_twin (check, true);
7890
7891 /* Restart search. */
7892 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7893 }
7894 else
7895 /* Continue search. */
7896 sd_iterator_next (&sd_it);
7897 }
7898
7899 priorities_roots.create (0);
7900 clear_priorities (insn, &priorities_roots);
7901
7902 while (1)
7903 {
7904 rtx_insn *check, *twin;
7905 basic_block rec;
7906
7907 /* Get the first backward dependency of INSN. */
7908 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7909 if (!sd_iterator_cond (&sd_it, &dep))
7910 /* INSN has no backward dependencies left. */
7911 break;
7912
7913 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7914 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7915 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7916
7917 check = DEP_PRO (dep);
7918
7919 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7920 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7921
7922 rec = BLOCK_FOR_INSN (check);
7923
7924 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7925 haifa_init_insn (twin);
7926
7927 sd_copy_back_deps (twin, insn, true);
7928
7929 if (sched_verbose && spec_info->dump)
7930 /* INSN_BB (insn) isn't determined for twin insns yet.
7931 So we can't use current_sched_info->print_insn. */
7932 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7933 INSN_UID (twin), rec->index);
7934
7935 twins = alloc_INSN_LIST (twin, twins);
7936
7937 /* Add dependences between TWIN and all appropriate
7938 instructions from REC. */
7939 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7940 {
7941 rtx_insn *pro = DEP_PRO (dep);
7942
7943 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7944
7945 /* INSN might have dependencies from the instructions from
7946 several recovery blocks. At this iteration we process those
7947 producers that reside in REC. */
7948 if (BLOCK_FOR_INSN (pro) == rec)
7949 {
7950 dep_def _new_dep, *new_dep = &_new_dep;
7951
7952 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7953 sd_add_dep (new_dep, false);
7954 }
7955 }
7956
7957 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7958
7959 /* Remove all dependencies between INSN and insns in REC. */
7960 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7961 sd_iterator_cond (&sd_it, &dep);)
7962 {
7963 rtx_insn *pro = DEP_PRO (dep);
7964
7965 if (BLOCK_FOR_INSN (pro) == rec)
7966 sd_delete_dep (sd_it);
7967 else
7968 sd_iterator_next (&sd_it);
7969 }
7970 }
7971
7972 /* We couldn't have added the dependencies between INSN and TWINS earlier
7973 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7974 while (twins)
7975 {
7976 rtx_insn *twin;
7977 rtx_insn_list *next_node;
7978
7979 twin = twins->insn ();
7980
7981 {
7982 dep_def _new_dep, *new_dep = &_new_dep;
7983
7984 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7985 sd_add_dep (new_dep, false);
7986 }
7987
7988 next_node = twins->next ();
7989 free_INSN_LIST_node (twins);
7990 twins = next_node;
7991 }
7992
7993 calc_priorities (priorities_roots);
7994 priorities_roots.release ();
7995 }
7996
7997 /* Extends and fills with zeros (only the new part) array pointed to by P. */
7998 void *
7999 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8000 {
8001 gcc_assert (new_nmemb >= old_nmemb);
8002 p = XRESIZEVAR (void, p, new_nmemb * size);
8003 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
8004 return p;
8005 }
8006
8007 /* Helper function.
8008 Find fallthru edge from PRED. */
8009 edge
8010 find_fallthru_edge_from (basic_block pred)
8011 {
8012 edge e;
8013 basic_block succ;
8014
8015 succ = pred->next_bb;
8016 gcc_assert (succ->prev_bb == pred);
8017
8018 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
8019 {
8020 e = find_fallthru_edge (pred->succs);
8021
8022 if (e)
8023 {
8024 gcc_assert (e->dest == succ);
8025 return e;
8026 }
8027 }
8028 else
8029 {
8030 e = find_fallthru_edge (succ->preds);
8031
8032 if (e)
8033 {
8034 gcc_assert (e->src == pred);
8035 return e;
8036 }
8037 }
8038
8039 return NULL;
8040 }
8041
8042 /* Extend per basic block data structures. */
8043 static void
8044 sched_extend_bb (void)
8045 {
8046 /* The following is done to keep current_sched_info->next_tail non null. */
8047 rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8048 rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
8049 if (NEXT_INSN (end) == 0
8050 || (!NOTE_P (insn)
8051 && !LABEL_P (insn)
8052 /* Don't emit a NOTE if it would end up before a BARRIER. */
8053 && !BARRIER_P (NEXT_INSN (end))))
8054 {
8055 rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
8056 /* Make note appear outside BB. */
8057 set_block_for_insn (note, NULL);
8058 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
8059 }
8060 }
8061
8062 /* Init per basic block data structures. */
8063 void
8064 sched_init_bbs (void)
8065 {
8066 sched_extend_bb ();
8067 }
8068
8069 /* Initialize BEFORE_RECOVERY variable. */
8070 static void
8071 init_before_recovery (basic_block *before_recovery_ptr)
8072 {
8073 basic_block last;
8074 edge e;
8075
8076 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8077 e = find_fallthru_edge_from (last);
8078
8079 if (e)
8080 {
8081 /* We create two basic blocks:
8082 1. Single instruction block is inserted right after E->SRC
8083 and has jump to
8084 2. Empty block right before EXIT_BLOCK.
8085 Between these two blocks recovery blocks will be emitted. */
8086
8087 basic_block single, empty;
8088
8089 /* If the fallthrough edge to exit we've found is from the block we've
8090 created before, don't do anything more. */
8091 if (last == after_recovery)
8092 return;
8093
8094 adding_bb_to_current_region_p = false;
8095
8096 single = sched_create_empty_bb (last);
8097 empty = sched_create_empty_bb (single);
8098
8099 /* Add new blocks to the root loop. */
8100 if (current_loops != NULL)
8101 {
8102 add_bb_to_loop (single, (*current_loops->larray)[0]);
8103 add_bb_to_loop (empty, (*current_loops->larray)[0]);
8104 }
8105
8106 single->count = last->count;
8107 empty->count = last->count;
8108 single->frequency = last->frequency;
8109 empty->frequency = last->frequency;
8110 BB_COPY_PARTITION (single, last);
8111 BB_COPY_PARTITION (empty, last);
8112
8113 redirect_edge_succ (e, single);
8114 make_single_succ_edge (single, empty, 0);
8115 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
8116 EDGE_FALLTHRU);
8117
8118 rtx_code_label *label = block_label (empty);
8119 rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
8120 BB_END (single));
8121 JUMP_LABEL (x) = label;
8122 LABEL_NUSES (label)++;
8123 haifa_init_insn (x);
8124
8125 emit_barrier_after (x);
8126
8127 sched_init_only_bb (empty, NULL);
8128 sched_init_only_bb (single, NULL);
8129 sched_extend_bb ();
8130
8131 adding_bb_to_current_region_p = true;
8132 before_recovery = single;
8133 after_recovery = empty;
8134
8135 if (before_recovery_ptr)
8136 *before_recovery_ptr = before_recovery;
8137
8138 if (sched_verbose >= 2 && spec_info->dump)
8139 fprintf (spec_info->dump,
8140 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8141 last->index, single->index, empty->index);
8142 }
8143 else
8144 before_recovery = last;
8145 }
8146
8147 /* Returns new recovery block. */
8148 basic_block
8149 sched_create_recovery_block (basic_block *before_recovery_ptr)
8150 {
8151 rtx_insn *barrier;
8152 basic_block rec;
8153
8154 haifa_recovery_bb_recently_added_p = true;
8155 haifa_recovery_bb_ever_added_p = true;
8156
8157 init_before_recovery (before_recovery_ptr);
8158
8159 barrier = get_last_bb_insn (before_recovery);
8160 gcc_assert (BARRIER_P (barrier));
8161
8162 rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);
8163
8164 rec = create_basic_block (label, label, before_recovery);
8165
8166 /* A recovery block always ends with an unconditional jump. */
8167 emit_barrier_after (BB_END (rec));
8168
8169 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
8170 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
8171
8172 if (sched_verbose && spec_info->dump)
8173 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
8174 rec->index);
8175
8176 return rec;
8177 }
8178
8179 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
8180 and emit necessary jumps. */
8181 void
8182 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
8183 basic_block second_bb)
8184 {
8185 int edge_flags;
8186
8187 /* This is fixing of incoming edge. */
8188 /* ??? Which other flags should be specified? */
8189 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
8190 /* Partition type is the same, if it is "unpartitioned". */
8191 edge_flags = EDGE_CROSSING;
8192 else
8193 edge_flags = 0;
8194
8195 make_edge (first_bb, rec, edge_flags);
8196 rtx_code_label *label = block_label (second_bb);
8197 rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
8198 BB_END (rec));
8199 JUMP_LABEL (jump) = label;
8200 LABEL_NUSES (label)++;
8201
8202 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
8203 /* Partition type is the same, if it is "unpartitioned". */
8204 {
8205 /* Rewritten from cfgrtl.c. */
8206 if (flag_reorder_blocks_and_partition
8207 && targetm_common.have_named_sections)
8208 {
8209 /* We don't need the same note for the check because
8210 any_condjump_p (check) == true. */
8211 CROSSING_JUMP_P (jump) = 1;
8212 }
8213 edge_flags = EDGE_CROSSING;
8214 }
8215 else
8216 edge_flags = 0;
8217
8218 make_single_succ_edge (rec, second_bb, edge_flags);
8219 if (dom_info_available_p (CDI_DOMINATORS))
8220 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
8221 }
8222
8223 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
8224 INSN is a simple check, that should be converted to branchy one. */
8225 static void
8226 create_check_block_twin (rtx_insn *insn, bool mutate_p)
8227 {
8228 basic_block rec;
8229 rtx_insn *label, *check, *twin;
8230 rtx check_pat;
8231 ds_t fs;
8232 sd_iterator_def sd_it;
8233 dep_t dep;
8234 dep_def _new_dep, *new_dep = &_new_dep;
8235 ds_t todo_spec;
8236
8237 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
8238
8239 if (!mutate_p)
8240 todo_spec = TODO_SPEC (insn);
8241 else
8242 {
8243 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
8244 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
8245
8246 todo_spec = CHECK_SPEC (insn);
8247 }
8248
8249 todo_spec &= SPECULATIVE;
8250
8251 /* Create recovery block. */
8252 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
8253 {
8254 rec = sched_create_recovery_block (NULL);
8255 label = BB_HEAD (rec);
8256 }
8257 else
8258 {
8259 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
8260 label = NULL;
8261 }
8262
8263 /* Emit CHECK. */
8264 check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
8265
8266 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8267 {
8268 /* To have mem_reg alive at the beginning of second_bb,
8269 we emit check BEFORE insn, so insn after splitting
8270 insn will be at the beginning of second_bb, which will
8271 provide us with the correct life information. */
8272 check = emit_jump_insn_before (check_pat, insn);
8273 JUMP_LABEL (check) = label;
8274 LABEL_NUSES (label)++;
8275 }
8276 else
8277 check = emit_insn_before (check_pat, insn);
8278
8279 /* Extend data structures. */
8280 haifa_init_insn (check);
8281
8282 /* CHECK is being added to current region. Extend ready list. */
8283 gcc_assert (sched_ready_n_insns != -1);
8284 sched_extend_ready_list (sched_ready_n_insns + 1);
8285
8286 if (current_sched_info->add_remove_insn)
8287 current_sched_info->add_remove_insn (insn, 0);
8288
8289 RECOVERY_BLOCK (check) = rec;
8290
8291 if (sched_verbose && spec_info->dump)
8292 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
8293 (*current_sched_info->print_insn) (check, 0));
8294
8295 gcc_assert (ORIG_PAT (insn));
8296
8297 /* Initialize TWIN (twin is a duplicate of original instruction
8298 in the recovery block). */
8299 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8300 {
8301 sd_iterator_def sd_it;
8302 dep_t dep;
8303
8304 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
8305 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
8306 {
8307 struct _dep _dep2, *dep2 = &_dep2;
8308
8309 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
8310
8311 sd_add_dep (dep2, true);
8312 }
8313
8314 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
8315 haifa_init_insn (twin);
8316
8317 if (sched_verbose && spec_info->dump)
8318 /* INSN_BB (insn) isn't determined for twin insns yet.
8319 So we can't use current_sched_info->print_insn. */
8320 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8321 INSN_UID (twin), rec->index);
8322 }
8323 else
8324 {
8325 ORIG_PAT (check) = ORIG_PAT (insn);
8326 HAS_INTERNAL_DEP (check) = 1;
8327 twin = check;
8328 /* ??? We probably should change all OUTPUT dependencies to
8329 (TRUE | OUTPUT). */
8330 }
8331
8332 /* Copy all resolved back dependencies of INSN to TWIN. This will
8333 provide correct value for INSN_TICK (TWIN). */
8334 sd_copy_back_deps (twin, insn, true);
8335
8336 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8337 /* In case of branchy check, fix CFG. */
8338 {
8339 basic_block first_bb, second_bb;
8340 rtx_insn *jump;
8341
8342 first_bb = BLOCK_FOR_INSN (check);
8343 second_bb = sched_split_block (first_bb, check);
8344
8345 sched_create_recovery_edges (first_bb, rec, second_bb);
8346
8347 sched_init_only_bb (second_bb, first_bb);
8348 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8349
8350 jump = BB_END (rec);
8351 haifa_init_insn (jump);
8352 }
8353
8354 /* Move backward dependences from INSN to CHECK and
8355 move forward dependences from INSN to TWIN. */
8356
8357 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
8358 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8359 {
8360 rtx_insn *pro = DEP_PRO (dep);
8361 ds_t ds;
8362
8363 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8364 check --TRUE--> producer ??? or ANTI ???
8365 twin --TRUE--> producer
8366 twin --ANTI--> check
8367
8368 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8369 check --ANTI--> producer
8370 twin --ANTI--> producer
8371 twin --ANTI--> check
8372
8373 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8374 check ~~TRUE~~> producer
8375 twin ~~TRUE~~> producer
8376 twin --ANTI--> check */
8377
8378 ds = DEP_STATUS (dep);
8379
8380 if (ds & BEGIN_SPEC)
8381 {
8382 gcc_assert (!mutate_p);
8383 ds &= ~BEGIN_SPEC;
8384 }
8385
8386 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8387 sd_add_dep (new_dep, false);
8388
8389 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8390 {
8391 DEP_CON (new_dep) = twin;
8392 sd_add_dep (new_dep, false);
8393 }
8394 }
8395
8396 /* Second, remove backward dependencies of INSN. */
8397 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8398 sd_iterator_cond (&sd_it, &dep);)
8399 {
8400 if ((DEP_STATUS (dep) & BEGIN_SPEC)
8401 || mutate_p)
8402 /* We can delete this dep because we overcome it with
8403 BEGIN_SPECULATION. */
8404 sd_delete_dep (sd_it);
8405 else
8406 sd_iterator_next (&sd_it);
8407 }
8408
8409 /* Future Speculations. Determine what BE_IN speculations will be like. */
8410 fs = 0;
8411
8412 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8413 here. */
8414
8415 gcc_assert (!DONE_SPEC (insn));
8416
8417 if (!mutate_p)
8418 {
8419 ds_t ts = TODO_SPEC (insn);
8420
8421 DONE_SPEC (insn) = ts & BEGIN_SPEC;
8422 CHECK_SPEC (check) = ts & BEGIN_SPEC;
8423
8424 /* Luckiness of future speculations solely depends upon initial
8425 BEGIN speculation. */
8426 if (ts & BEGIN_DATA)
8427 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8428 if (ts & BEGIN_CONTROL)
8429 fs = set_dep_weak (fs, BE_IN_CONTROL,
8430 get_dep_weak (ts, BEGIN_CONTROL));
8431 }
8432 else
8433 CHECK_SPEC (check) = CHECK_SPEC (insn);
8434
8435 /* Future speculations: call the helper. */
8436 process_insn_forw_deps_be_in_spec (insn, twin, fs);
8437
8438 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8439 {
8440 /* Which types of dependencies should we use here is,
8441 generally, machine-dependent question... But, for now,
8442 it is not. */
8443
8444 if (!mutate_p)
8445 {
8446 init_dep (new_dep, insn, check, REG_DEP_TRUE);
8447 sd_add_dep (new_dep, false);
8448
8449 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8450 sd_add_dep (new_dep, false);
8451 }
8452 else
8453 {
8454 if (spec_info->dump)
8455 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8456 (*current_sched_info->print_insn) (insn, 0));
8457
8458 /* Remove all dependencies of the INSN. */
8459 {
8460 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8461 | SD_LIST_BACK
8462 | SD_LIST_RES_BACK));
8463 while (sd_iterator_cond (&sd_it, &dep))
8464 sd_delete_dep (sd_it);
8465 }
8466
8467 /* If former check (INSN) already was moved to the ready (or queue)
8468 list, add new check (CHECK) there too. */
8469 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8470 try_ready (check);
8471
8472 /* Remove old check from instruction stream and free its
8473 data. */
8474 sched_remove_insn (insn);
8475 }
8476
8477 init_dep (new_dep, check, twin, REG_DEP_ANTI);
8478 sd_add_dep (new_dep, false);
8479 }
8480 else
8481 {
8482 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
8483 sd_add_dep (new_dep, false);
8484 }
8485
8486 if (!mutate_p)
8487 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
8488 because it'll be done later in add_to_speculative_block. */
8489 {
8490 rtx_vec_t priorities_roots = rtx_vec_t ();
8491
8492 clear_priorities (twin, &priorities_roots);
8493 calc_priorities (priorities_roots);
8494 priorities_roots.release ();
8495 }
8496 }
8497
8498 /* Removes dependency between instructions in the recovery block REC
8499 and usual region instructions. It keeps inner dependences so it
8500 won't be necessary to recompute them. */
8501 static void
8502 fix_recovery_deps (basic_block rec)
8503 {
8504 rtx_insn *note, *insn, *jump;
8505 rtx_insn_list *ready_list = 0;
8506 bitmap_head in_ready;
8507 rtx_insn_list *link;
8508
8509 bitmap_initialize (&in_ready, 0);
8510
8511 /* NOTE - a basic block note. */
8512 note = NEXT_INSN (BB_HEAD (rec));
8513 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8514 insn = BB_END (rec);
8515 gcc_assert (JUMP_P (insn));
8516 insn = PREV_INSN (insn);
8517
8518 do
8519 {
8520 sd_iterator_def sd_it;
8521 dep_t dep;
8522
8523 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8524 sd_iterator_cond (&sd_it, &dep);)
8525 {
8526 rtx_insn *consumer = DEP_CON (dep);
8527
8528 if (BLOCK_FOR_INSN (consumer) != rec)
8529 {
8530 sd_delete_dep (sd_it);
8531
8532 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
8533 ready_list = alloc_INSN_LIST (consumer, ready_list);
8534 }
8535 else
8536 {
8537 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8538
8539 sd_iterator_next (&sd_it);
8540 }
8541 }
8542
8543 insn = PREV_INSN (insn);
8544 }
8545 while (insn != note);
8546
8547 bitmap_clear (&in_ready);
8548
8549 /* Try to add instructions to the ready or queue list. */
8550 for (link = ready_list; link; link = link->next ())
8551 try_ready (link->insn ());
8552 free_INSN_LIST_list (&ready_list);
8553
8554 /* Fixing jump's dependences. */
8555 insn = BB_HEAD (rec);
8556 jump = BB_END (rec);
8557
8558 gcc_assert (LABEL_P (insn));
8559 insn = NEXT_INSN (insn);
8560
8561 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8562 add_jump_dependencies (insn, jump);
8563 }
8564
8565 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
8566 instruction data. */
8567 static bool
8568 haifa_change_pattern (rtx_insn *insn, rtx new_pat)
8569 {
8570 int t;
8571
8572 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8573 if (!t)
8574 return false;
8575
8576 update_insn_after_change (insn);
8577 return true;
8578 }
8579
8580 /* -1 - can't speculate,
8581 0 - for speculation with REQUEST mode it is OK to use
8582 current instruction pattern,
8583 1 - need to change pattern for *NEW_PAT to be speculative. */
8584 int
8585 sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8586 {
8587 gcc_assert (current_sched_info->flags & DO_SPECULATION
8588 && (request & SPECULATIVE)
8589 && sched_insn_is_legitimate_for_speculation_p (insn, request));
8590
8591 if ((request & spec_info->mask) != request)
8592 return -1;
8593
8594 if (request & BE_IN_SPEC
8595 && !(request & BEGIN_SPEC))
8596 return 0;
8597
8598 return targetm.sched.speculate_insn (insn, request, new_pat);
8599 }
8600
8601 static int
8602 haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8603 {
8604 gcc_assert (sched_deps_info->generate_spec_deps
8605 && !IS_SPECULATION_CHECK_P (insn));
8606
8607 if (HAS_INTERNAL_DEP (insn)
8608 || SCHED_GROUP_P (insn))
8609 return -1;
8610
8611 return sched_speculate_insn (insn, request, new_pat);
8612 }
8613
8614 /* Print some information about block BB, which starts with HEAD and
8615 ends with TAIL, before scheduling it.
8616 I is zero, if scheduler is about to start with the fresh ebb. */
8617 static void
8618 dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
8619 {
8620 if (!i)
8621 fprintf (sched_dump,
8622 ";; ======================================================\n");
8623 else
8624 fprintf (sched_dump,
8625 ";; =====================ADVANCING TO=====================\n");
8626 fprintf (sched_dump,
8627 ";; -- basic block %d from %d to %d -- %s reload\n",
8628 bb->index, INSN_UID (head), INSN_UID (tail),
8629 (reload_completed ? "after" : "before"));
8630 fprintf (sched_dump,
8631 ";; ======================================================\n");
8632 fprintf (sched_dump, "\n");
8633 }
8634
8635 /* Unlink basic block notes and labels and saves them, so they
8636 can be easily restored. We unlink basic block notes in EBB to
8637 provide back-compatibility with the previous code, as target backends
8638 assume, that there'll be only instructions between
8639 current_sched_info->{head and tail}. We restore these notes as soon
8640 as we can.
8641 FIRST (LAST) is the first (last) basic block in the ebb.
8642 NB: In usual case (FIRST == LAST) nothing is really done. */
8643 void
8644 unlink_bb_notes (basic_block first, basic_block last)
8645 {
8646 /* We DON'T unlink basic block notes of the first block in the ebb. */
8647 if (first == last)
8648 return;
8649
8650 bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
8651
8652 /* Make a sentinel. */
8653 if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8654 bb_header[last->next_bb->index] = 0;
8655
8656 first = first->next_bb;
8657 do
8658 {
8659 rtx_insn *prev, *label, *note, *next;
8660
8661 label = BB_HEAD (last);
8662 if (LABEL_P (label))
8663 note = NEXT_INSN (label);
8664 else
8665 note = label;
8666 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8667
8668 prev = PREV_INSN (label);
8669 next = NEXT_INSN (note);
8670 gcc_assert (prev && next);
8671
8672 SET_NEXT_INSN (prev) = next;
8673 SET_PREV_INSN (next) = prev;
8674
8675 bb_header[last->index] = label;
8676
8677 if (last == first)
8678 break;
8679
8680 last = last->prev_bb;
8681 }
8682 while (1);
8683 }
8684
8685 /* Restore basic block notes.
8686 FIRST is the first basic block in the ebb. */
8687 static void
8688 restore_bb_notes (basic_block first)
8689 {
8690 if (!bb_header)
8691 return;
8692
8693 /* We DON'T unlink basic block notes of the first block in the ebb. */
8694 first = first->next_bb;
8695 /* Remember: FIRST is actually a second basic block in the ebb. */
8696
8697 while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8698 && bb_header[first->index])
8699 {
8700 rtx_insn *prev, *label, *note, *next;
8701
8702 label = bb_header[first->index];
8703 prev = PREV_INSN (label);
8704 next = NEXT_INSN (prev);
8705
8706 if (LABEL_P (label))
8707 note = NEXT_INSN (label);
8708 else
8709 note = label;
8710 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8711
8712 bb_header[first->index] = 0;
8713
8714 SET_NEXT_INSN (prev) = label;
8715 SET_NEXT_INSN (note) = next;
8716 SET_PREV_INSN (next) = note;
8717
8718 first = first->next_bb;
8719 }
8720
8721 free (bb_header);
8722 bb_header = 0;
8723 }
8724
8725 /* Helper function.
8726 Fix CFG after both in- and inter-block movement of
8727 control_flow_insn_p JUMP. */
8728 static void
8729 fix_jump_move (rtx_insn *jump)
8730 {
8731 basic_block bb, jump_bb, jump_bb_next;
8732
8733 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8734 jump_bb = BLOCK_FOR_INSN (jump);
8735 jump_bb_next = jump_bb->next_bb;
8736
8737 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8738 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8739
8740 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8741 /* if jump_bb_next is not empty. */
8742 BB_END (jump_bb) = BB_END (jump_bb_next);
8743
8744 if (BB_END (bb) != PREV_INSN (jump))
8745 /* Then there are instruction after jump that should be placed
8746 to jump_bb_next. */
8747 BB_END (jump_bb_next) = BB_END (bb);
8748 else
8749 /* Otherwise jump_bb_next is empty. */
8750 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8751
8752 /* To make assertion in move_insn happy. */
8753 BB_END (bb) = PREV_INSN (jump);
8754
8755 update_bb_for_insn (jump_bb_next);
8756 }
8757
8758 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8759 static void
8760 move_block_after_check (rtx_insn *jump)
8761 {
8762 basic_block bb, jump_bb, jump_bb_next;
8763 vec<edge, va_gc> *t;
8764
8765 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8766 jump_bb = BLOCK_FOR_INSN (jump);
8767 jump_bb_next = jump_bb->next_bb;
8768
8769 update_bb_for_insn (jump_bb);
8770
8771 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8772 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8773
8774 unlink_block (jump_bb_next);
8775 link_block (jump_bb_next, bb);
8776
8777 t = bb->succs;
8778 bb->succs = 0;
8779 move_succs (&(jump_bb->succs), bb);
8780 move_succs (&(jump_bb_next->succs), jump_bb);
8781 move_succs (&t, jump_bb_next);
8782
8783 df_mark_solutions_dirty ();
8784
8785 common_sched_info->fix_recovery_cfg
8786 (bb->index, jump_bb->index, jump_bb_next->index);
8787 }
8788
8789 /* Helper function for move_block_after_check.
8790 This functions attaches edge vector pointed to by SUCCSP to
8791 block TO. */
8792 static void
8793 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8794 {
8795 edge e;
8796 edge_iterator ei;
8797
8798 gcc_assert (to->succs == 0);
8799
8800 to->succs = *succsp;
8801
8802 FOR_EACH_EDGE (e, ei, to->succs)
8803 e->src = to;
8804
8805 *succsp = 0;
8806 }
8807
8808 /* Remove INSN from the instruction stream.
8809 INSN should have any dependencies. */
8810 static void
8811 sched_remove_insn (rtx_insn *insn)
8812 {
8813 sd_finish_insn (insn);
8814
8815 change_queue_index (insn, QUEUE_NOWHERE);
8816 current_sched_info->add_remove_insn (insn, 1);
8817 delete_insn (insn);
8818 }
8819
8820 /* Clear priorities of all instructions, that are forward dependent on INSN.
8821 Store in vector pointed to by ROOTS_PTR insns on which priority () should
8822 be invoked to initialize all cleared priorities. */
8823 static void
8824 clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
8825 {
8826 sd_iterator_def sd_it;
8827 dep_t dep;
8828 bool insn_is_root_p = true;
8829
8830 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8831
8832 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8833 {
8834 rtx_insn *pro = DEP_PRO (dep);
8835
8836 if (INSN_PRIORITY_STATUS (pro) >= 0
8837 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8838 {
8839 /* If DEP doesn't contribute to priority then INSN itself should
8840 be added to priority roots. */
8841 if (contributes_to_priority_p (dep))
8842 insn_is_root_p = false;
8843
8844 INSN_PRIORITY_STATUS (pro) = -1;
8845 clear_priorities (pro, roots_ptr);
8846 }
8847 }
8848
8849 if (insn_is_root_p)
8850 roots_ptr->safe_push (insn);
8851 }
8852
8853 /* Recompute priorities of instructions, whose priorities might have been
8854 changed. ROOTS is a vector of instructions whose priority computation will
8855 trigger initialization of all cleared priorities. */
8856 static void
8857 calc_priorities (rtx_vec_t roots)
8858 {
8859 int i;
8860 rtx_insn *insn;
8861
8862 FOR_EACH_VEC_ELT (roots, i, insn)
8863 priority (insn);
8864 }
8865
8866
8867 /* Add dependences between JUMP and other instructions in the recovery
8868 block. INSN is the first insn the recovery block. */
8869 static void
8870 add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
8871 {
8872 do
8873 {
8874 insn = NEXT_INSN (insn);
8875 if (insn == jump)
8876 break;
8877
8878 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8879 {
8880 dep_def _new_dep, *new_dep = &_new_dep;
8881
8882 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8883 sd_add_dep (new_dep, false);
8884 }
8885 }
8886 while (1);
8887
8888 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8889 }
8890
8891 /* Extend data structures for logical insn UID. */
8892 void
8893 sched_extend_luids (void)
8894 {
8895 int new_luids_max_uid = get_max_uid () + 1;
8896
8897 sched_luids.safe_grow_cleared (new_luids_max_uid);
8898 }
8899
8900 /* Initialize LUID for INSN. */
8901 void
8902 sched_init_insn_luid (rtx_insn *insn)
8903 {
8904 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8905 int luid;
8906
8907 if (i >= 0)
8908 {
8909 luid = sched_max_luid;
8910 sched_max_luid += i;
8911 }
8912 else
8913 luid = -1;
8914
8915 SET_INSN_LUID (insn, luid);
8916 }
8917
8918 /* Initialize luids for BBS.
8919 The hook common_sched_info->luid_for_non_insn () is used to determine
8920 if notes, labels, etc. need luids. */
8921 void
8922 sched_init_luids (bb_vec_t bbs)
8923 {
8924 int i;
8925 basic_block bb;
8926
8927 sched_extend_luids ();
8928 FOR_EACH_VEC_ELT (bbs, i, bb)
8929 {
8930 rtx_insn *insn;
8931
8932 FOR_BB_INSNS (bb, insn)
8933 sched_init_insn_luid (insn);
8934 }
8935 }
8936
8937 /* Free LUIDs. */
8938 void
8939 sched_finish_luids (void)
8940 {
8941 sched_luids.release ();
8942 sched_max_luid = 1;
8943 }
8944
8945 /* Return logical uid of INSN. Helpful while debugging. */
8946 int
8947 insn_luid (rtx_insn *insn)
8948 {
8949 return INSN_LUID (insn);
8950 }
8951
8952 /* Extend per insn data in the target. */
8953 void
8954 sched_extend_target (void)
8955 {
8956 if (targetm.sched.h_i_d_extended)
8957 targetm.sched.h_i_d_extended ();
8958 }
8959
8960 /* Extend global scheduler structures (those, that live across calls to
8961 schedule_block) to include information about just emitted INSN. */
8962 static void
8963 extend_h_i_d (void)
8964 {
8965 int reserve = (get_max_uid () + 1 - h_i_d.length ());
8966 if (reserve > 0
8967 && ! h_i_d.space (reserve))
8968 {
8969 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
8970 sched_extend_target ();
8971 }
8972 }
8973
8974 /* Initialize h_i_d entry of the INSN with default values.
8975 Values, that are not explicitly initialized here, hold zero. */
8976 static void
8977 init_h_i_d (rtx_insn *insn)
8978 {
8979 if (INSN_LUID (insn) > 0)
8980 {
8981 INSN_COST (insn) = -1;
8982 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
8983 INSN_TICK (insn) = INVALID_TICK;
8984 INSN_EXACT_TICK (insn) = INVALID_TICK;
8985 INTER_TICK (insn) = INVALID_TICK;
8986 TODO_SPEC (insn) = HARD_DEP;
8987 INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
8988 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
8989 INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
8990 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
8991 }
8992 }
8993
8994 /* Initialize haifa_insn_data for BBS. */
8995 void
8996 haifa_init_h_i_d (bb_vec_t bbs)
8997 {
8998 int i;
8999 basic_block bb;
9000
9001 extend_h_i_d ();
9002 FOR_EACH_VEC_ELT (bbs, i, bb)
9003 {
9004 rtx_insn *insn;
9005
9006 FOR_BB_INSNS (bb, insn)
9007 init_h_i_d (insn);
9008 }
9009 }
9010
9011 /* Finalize haifa_insn_data. */
9012 void
9013 haifa_finish_h_i_d (void)
9014 {
9015 int i;
9016 haifa_insn_data_t data;
9017 struct reg_use_data *use, *next;
9018
9019 FOR_EACH_VEC_ELT (h_i_d, i, data)
9020 {
9021 free (data->max_reg_pressure);
9022 free (data->reg_pressure);
9023 for (use = data->reg_use_list; use != NULL; use = next)
9024 {
9025 next = use->next_insn_use;
9026 free (use);
9027 }
9028 }
9029 h_i_d.release ();
9030 }
9031
9032 /* Init data for the new insn INSN. */
9033 static void
9034 haifa_init_insn (rtx_insn *insn)
9035 {
9036 gcc_assert (insn != NULL);
9037
9038 sched_extend_luids ();
9039 sched_init_insn_luid (insn);
9040 sched_extend_target ();
9041 sched_deps_init (false);
9042 extend_h_i_d ();
9043 init_h_i_d (insn);
9044
9045 if (adding_bb_to_current_region_p)
9046 {
9047 sd_init_insn (insn);
9048
9049 /* Extend dependency caches by one element. */
9050 extend_dependency_caches (1, false);
9051 }
9052 if (sched_pressure != SCHED_PRESSURE_NONE)
9053 init_insn_reg_pressure_info (insn);
9054 }
9055
9056 /* Init data for the new basic block BB which comes after AFTER. */
9057 static void
9058 haifa_init_only_bb (basic_block bb, basic_block after)
9059 {
9060 gcc_assert (bb != NULL);
9061
9062 sched_init_bbs ();
9063
9064 if (common_sched_info->add_block)
9065 /* This changes only data structures of the front-end. */
9066 common_sched_info->add_block (bb, after);
9067 }
9068
9069 /* A generic version of sched_split_block (). */
9070 basic_block
9071 sched_split_block_1 (basic_block first_bb, rtx after)
9072 {
9073 edge e;
9074
9075 e = split_block (first_bb, after);
9076 gcc_assert (e->src == first_bb);
9077
9078 /* sched_split_block emits note if *check == BB_END. Probably it
9079 is better to rip that note off. */
9080
9081 return e->dest;
9082 }
9083
9084 /* A generic version of sched_create_empty_bb (). */
9085 basic_block
9086 sched_create_empty_bb_1 (basic_block after)
9087 {
9088 return create_empty_bb (after);
9089 }
9090
9091 /* Insert PAT as an INSN into the schedule and update the necessary data
9092 structures to account for it. */
9093 rtx_insn *
9094 sched_emit_insn (rtx pat)
9095 {
9096 rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
9097 haifa_init_insn (insn);
9098
9099 if (current_sched_info->add_remove_insn)
9100 current_sched_info->add_remove_insn (insn, 0);
9101
9102 (*current_sched_info->begin_schedule_ready) (insn);
9103 scheduled_insns.safe_push (insn);
9104
9105 last_scheduled_insn = insn;
9106 return insn;
9107 }
9108
9109 /* This function returns a candidate satisfying dispatch constraints from
9110 the ready list. */
9111
9112 static rtx_insn *
9113 ready_remove_first_dispatch (struct ready_list *ready)
9114 {
9115 int i;
9116 rtx_insn *insn = ready_element (ready, 0);
9117
9118 if (ready->n_ready == 1
9119 || !INSN_P (insn)
9120 || INSN_CODE (insn) < 0
9121 || !active_insn_p (insn)
9122 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
9123 return ready_remove_first (ready);
9124
9125 for (i = 1; i < ready->n_ready; i++)
9126 {
9127 insn = ready_element (ready, i);
9128
9129 if (!INSN_P (insn)
9130 || INSN_CODE (insn) < 0
9131 || !active_insn_p (insn))
9132 continue;
9133
9134 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
9135 {
9136 /* Return ith element of ready. */
9137 insn = ready_remove (ready, i);
9138 return insn;
9139 }
9140 }
9141
9142 if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
9143 return ready_remove_first (ready);
9144
9145 for (i = 1; i < ready->n_ready; i++)
9146 {
9147 insn = ready_element (ready, i);
9148
9149 if (!INSN_P (insn)
9150 || INSN_CODE (insn) < 0
9151 || !active_insn_p (insn))
9152 continue;
9153
9154 /* Return i-th element of ready. */
9155 if (targetm.sched.dispatch (insn, IS_CMP))
9156 return ready_remove (ready, i);
9157 }
9158
9159 return ready_remove_first (ready);
9160 }
9161
9162 /* Get number of ready insn in the ready list. */
9163
9164 int
9165 number_in_ready (void)
9166 {
9167 return ready.n_ready;
9168 }
9169
9170 /* Get number of ready's in the ready list. */
9171
9172 rtx_insn *
9173 get_ready_element (int i)
9174 {
9175 return ready_element (&ready, i);
9176 }
9177
9178 #endif /* INSN_SCHEDULING */