/* Instruction scheduling pass.
   Copyright (C) 1992, 93-96, 1997 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with lowest conflict cost, ties broken by
   2.  choose insn with the longest path to end of bb, ties broken by
   3.  choose insn that kills the most registers, ties broken by
   4.  choose insn that conflicts with the most ready insns, or finally
   5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is ok, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to be
   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks that have been scheduled.  */
\f
#include "config.h"
#include <stdio.h>
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

#ifdef INSN_SCHEDULING
/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;
static regset reg_pending_sets;
static int reg_pending_sets_all;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R)                            \
  ((((U) << UNIT_BITS) << BLOCKAGE_BITS                 \
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS           \
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | (B) & BLOCKAGE_MASK)

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
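
/* A sketch of the layout produced by ENCODE_BLOCKAGE (field widths are
   illustrative; BLOCKAGE_BITS comes from insn-attr.h):

      high bits .................................... low bits
      [ unit U ][ MIN_BLOCKAGE_COST (R) ][ MAX_BLOCKAGE_COST (R) ]
                  BLOCKAGE_BITS wide       BLOCKAGE_BITS wide

   The U stored here is the unit number plus one, so that zero can mean
   "not cached" (see blockage_range below).  UNIT_BLOCKED shifts off
   both cost fields to recover U, and BLOCKAGE_RANGE re-packs the two
   costs into the `<name>_unit_blockage_range' encoding (minimum in the
   upper half of the int, maximum in the lower half) that
   MIN_BLOCKAGE_COST and MAX_BLOCKAGE_COST decode.  */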

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post scheduled code as there were in the
   prescheduled code then we trigger an abort because this indicates a bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
       their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
       time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */
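
/* Diagrammatically, the transitions described above are:

     P --(dependencies satisfied)--> R or Q   (`schedule_insn')
     Q --(time passes)-------------> R        (top of `schedule_block' loop)
     R --(function unit conflict)--> Q        (`schedule_select')
     R --(best insn chosen)--------> S        (`schedule_block')  */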

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
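
/* For example, because INSN_QUEUE_SIZE is a power of two, masking with
   INSN_QUEUE_SIZE-1 makes the index wrap around: with an illustrative
   INSN_QUEUE_SIZE of 16, NEXT_Q (15) == 0 and NEXT_Q_AFTER (14, 5) == 3.
   An insn that must wait C cycles is placed in the slot
   NEXT_Q_AFTER (q_ptr, C) (see queue_insn) and becomes ready when
   q_ptr has advanced to that slot.  */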

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Data structure for keeping track of register information
   during that register's life.  */

struct sometimes
{
  int regno;
  int live_length;
  int calls_crossed;
};

/* Forward declarations.  */
static void add_dependence PROTO((rtx, rtx, enum reg_note));
static void remove_dependence PROTO((rtx, rtx));
static rtx find_insn_list PROTO((rtx, rtx));
static int insn_unit PROTO((rtx));
static unsigned int blockage_range PROTO((int, rtx));
static void clear_units PROTO((void));
static void prepare_unit PROTO((int));
static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
static void schedule_unit PROTO((int, rtx, int));
static int actual_hazard PROTO((int, rtx, int, int));
static int potential_hazard PROTO((int, rtx, int));
static int insn_cost PROTO((rtx, rtx, rtx));
static int priority PROTO((rtx));
static void free_pending_lists PROTO((void));
static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
static void flush_pending_lists PROTO((rtx, int));
static void sched_analyze_1 PROTO((rtx, rtx));
static void sched_analyze_2 PROTO((rtx, rtx));
static void sched_analyze_insn PROTO((rtx, rtx, rtx));
static int sched_analyze PROTO((rtx, rtx));
static void sched_note_set PROTO((int, rtx, int));
static int rank_for_schedule PROTO((rtx *, rtx *));
static void swap_sort PROTO((rtx *, int));
static void queue_insn PROTO((rtx, int));
static int birthing_insn_p PROTO((rtx));
static void adjust_priority PROTO((rtx));
static int schedule_insn PROTO((rtx, rtx *, int, int));
static int schedule_select PROTO((rtx *, int, int, FILE *));
static void create_reg_dead_note PROTO((rtx, rtx));
static void attach_deaths PROTO((rtx, rtx, int));
static void attach_deaths_insn PROTO((rtx));
static rtx unlink_notes PROTO((rtx, rtx));
static int new_sometimes_live PROTO((struct sometimes *, int, int));
static void finish_sometimes_live PROTO((struct sometimes *, int));
static rtx reemit_notes PROTO((rtx, rtx));
static void schedule_block PROTO((int, FILE *));
static rtx regno_use_in PROTO((int, rtx));
static void split_hard_reg_notes PROTO((rtx, rtx, rtx, rtx));
static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
static void update_n_sets PROTO((rtx, int));
static void update_flow_info PROTO((rtx, rtx, rtx, rtx));

/* Main entry point of this file.  */
void schedule_insns PROTO((FILE *));

#endif /* INSN_SCHEDULING */
\f
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
             && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}
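
/* Note the direction convention: LOG_LINKS (insn) lists the insns that
   INSN depends on, so a call such as

      add_dependence (i2, i1, REG_DEP_ANTI);

   (I1 and I2 being arbitrary insns, I1 the earlier one) records that
   I2 must not be scheduled ahead of I1.  */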

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
    }

  if (! found)
    abort ();
  return;
}
\f
#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file;
{
}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a call
   after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
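
/* As a concrete sketch (`a' names some memory location):

      i1: load  r1 <- a    i2: load  r2 <- a    -- (0) read dependence
      i1: store a  <- r1   i2: load  r2 <- a    -- (1) true dependence
      i1: load  r1 <- a    i2: store a  <- r2   -- (2) anti dependence
      i1: store a  <- r1   i2: store a  <- r2   -- (3) output dependence

   Cases (1), (2) and (3) always force i2 to stay after i1.  In this
   file a read-after-read link (0) is only created for volatile
   references (see the use of read_dependence in sched_analyze_2).  */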
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0) unit++;
        }
      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
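
/* An example of the encoding above (unit numbers are illustrative): an
   insn that runs only on unit 3 has function_units_used value 3, is
   cached as 4 (so that 0 can mean "not yet computed"), and insn_unit
   returns 3.  An insn that can run on units 0 and 2 yields from
   function_units_used the negative value ~5, the complement of the
   mask with bits 0 and 2 set, which is cached and returned
   unchanged.  */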

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
         one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
        {
          if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
            break;
          instance += FUNCTION_UNITS_SIZE;
        }
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
                                                   clock, cost);
      int this_cost;

#if MAX_MULTIPLICITY > 1
      if (best_cost > cost)
        {
          for (i = function_units[unit].multiplicity - 1; i > 0; i--)
            {
              instance += FUNCTION_UNITS_SIZE;
              this_cost = actual_hazard_this_instance (unit, instance, insn,
                                                       clock, cost);
              if (this_cost < best_cost)
                {
                  best_cost = this_cost;
                  if (this_cost <= cost)
                    break;
                }
            }
        }
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
        {
          if (function_units[unit].blockage_range_function)
            {
              maxb = minb = blockage_range (unit, insn);
              maxb = MAX_BLOCKAGE_COST (maxb);
              minb = MIN_BLOCKAGE_COST (minb);
            }

          if (maxb > 1)
            {
              /* Make the number of instructions left dominate.  Make the
                 minimum delay dominate the maximum delay.  If all these
                 are the same, use the unit number to add an arbitrary
                 ordering.  Other terms can be added.  */
              ncost = minb * 0x40 + maxb;
              ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
              if (ncost > cost)
                cost = ncost;
            }
        }
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        cost = potential_hazard (i, insn, cost);

  return cost;
}

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to result_ready_cost because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        {
          INSN_COST (insn) = 1;
          return 1;
        }
      else
        {
          cost = result_ready_cost (insn);

          if (cost < 1)
            cost = 1;

          INSN_COST (insn) = cost;
        }
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* A dependence pointing to a note or deleted insn is always
             obsolete, because sched_analyze_insn will have created any
             necessary new dependences which replace it.  Notes and deleted
             insns can be created when instructions are deleted by insn
             splitting, or by register allocation.  */
          if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.
             This makes the sched algorithm more stable, which results
             in better code, because there is less register pressure,
             cross jumping is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in such
             cases all instructions will end up with a priority of one, and
             hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */

          prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;

          if (prev_priority > max_priority)
            max_priority = prev_priority;
          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
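
/* A worked example of the calculation above (latencies are made up):
   for a chain i1 <- i2 <- i3, where i3 depends on i2 and i2 on i1, and
   every insn has cost 1, each step computes 1 + 1 - 1 = 1, so all
   three insns get priority 1 and nothing is reordered.  If instead
   insn_cost for i1 is 3, then priority (i2) == 1 + 3 - 1 == 3 and
   priority (i3) == 3 + 1 - 1 == 3, reflecting the longer latency path
   through i1.  */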
\f
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}
\f
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (insn, only_write)
     rtx insn;
     int only_write;
{
  rtx link;

  while (pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, regno + i);
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          SET_REGNO_REG_SET (reg_pending_sets, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (! reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn, 0);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */
        prev = prev_nonnote_insn (insn);

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  Remove an explicit dependence on this insn
           as SCHED_GROUP_P now represents it.  */

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx (INSN_LIST, VOIDmode,
                             insn, reg_last_uses[regno + i]);
                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);
                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (! reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */

        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                   x, rtx_varies_p))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();
            for (i = 0; i < max_reg; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;
                if (reg_last_sets[i])
                  add_dependence (insn, reg_last_sets[i], 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (insn, 0);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
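      /* For example, for (post_inc:SI (reg:SI N)), with N an
         illustrative pseudo, the sched_analyze_2 call below records the
         read of reg N, and sched_analyze_1 then records the write,
         since SET_DEST of the PRE/POST rtx is the register itself.  */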
      sched_analyze_2 (XEXP (x, 0), insn);
      sched_analyze_1 (x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn, loop_notes)
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (XVECEXP (x, 0, i), insn);
          else
            sched_analyze_2 (XVECEXP (x, 0, i), insn);
        }
    }
  else
    sched_analyze_2 (x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (XEXP (link, 0), insn);
        else
          sched_analyze_2 (XEXP (link, 0), insn);
      }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      rtx link;

      for (i = 0; i < max_reg; i++)
        {
          rtx u;
          for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[i] = 0;
          if (reg_last_sets[i])
            add_dependence (insn, reg_last_sets[i], 0);
        }
      reg_pending_sets_all = 1;

      flush_pending_lists (insn, 0);

      link = loop_notes;
      while (XEXP (link, 1))
        link = XEXP (link, 1);
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* After reload, it is possible for an instruction to have a REG_DEAD note
     for a register that actually dies a few instructions earlier.  For
     example, this can happen with SECONDARY_MEMORY_NEEDED reloads.
     In this case, we must consider the insn to use the register mentioned
     in the REG_DEAD note.  Otherwise, we may accidentally move this insn
     after another insn that sets the register, thus getting obviously invalid
     rtl.  This confuses reorg which believes that REG_DEAD notes are still
     meaningful.

     ??? We would get better code if we fixed reload to put the REG_DEAD
     notes in the right places, but that may not be worth the effort.  */

  if (reload_completed)
    {
      rtx note;

      for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD)
          sched_analyze_2 (XEXP (note, 0), insn);
    }

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
                             {
                               reg_last_sets[i] = insn;
                             });
  CLEAR_REG_SET (reg_pending_sets);

  if (reg_pending_sets_all)
    {
      for (i = 0; i < maxreg; i++)
        reg_last_sets[i] = insn;
      reg_pending_sets_all = 0;
    }

  /* Handle function calls and function returns created by the epilogue
     threading code.  */
  if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
         accompanying USE insns by depending them one on another in order.

         Also, we must do the same thing for returns created by the epilogue
         threading code.  Note this code works only in this special case,
         because other passes make no guarantee that they will never emit
         an instruction between a USE and a RETURN.  There is such a guarantee
         for USE instructions immediately before a call.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
             && GET_CODE (PATTERN (dep_insn)) == USE
             && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
        {
          SCHED_GROUP_P (prev_dep_insn) = 1;

          /* Make a copy of all dependencies on dep_insn, and add to insn.
             This is so that all of the dependencies will apply to the
             group.  */

          for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
            add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

          prev_dep_insn = dep_insn;
          dep_insn = PREV_INSN (dep_insn);
        }
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;
  rtx loop_notes = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          sched_analyze_insn (PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
          n_insns += 1;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          rtx x;
          register int i;

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
            {
              int max_reg = max_reg_num ();
              for (i = 0; i < max_reg; i++)
                {
                  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  reg_last_uses[i] = 0;
                  if (reg_last_sets[i])
                    add_dependence (insn, reg_last_sets[i], 0);
                }
              reg_pending_sets_all = 1;

              /* Add a pair of fake REG_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */

              REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD,
                                          GEN_INT (0),
                                          REG_NOTES (insn));
              REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD,
                                          GEN_INT (NOTE_INSN_SETJMP),
                                          REG_NOTES (insn));
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                  {
                    for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                    reg_last_uses[i] = 0;
                    if (reg_last_sets[i])
                      add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                  }
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (sched_before_next_call);
          while (x)
            {
              add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
              x = XEXP (x, 1);
            }
          LOG_LINKS (sched_before_next_call) = 0;

          sched_analyze_insn (PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */
          last_function_call = insn;
          n_insns += 1;
        }

      /* See comments on reemit_notes as to why we do this.  */
      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
        {
          loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
                                GEN_INT (NOTE_BLOCK_NUMBER (insn)), loop_notes);
          loop_notes = gen_rtx (EXPR_LIST, REG_DEAD,
                                GEN_INT (NOTE_LINE_NUMBER (insn)), loop_notes);
          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
        }

      if (insn == tail)
        return n_insns;
    }

  abort ();
}
\f
/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then we
   are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (b, x, death)
     int b;
     rtx x;
     int death;
{
  register int regno;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
         value or just a byte field of a register exactly the same way that
         mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
         does not kill the entire register.  */
      if (GET_CODE (reg) != SUBREG
          || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
        subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1790 {
1791 if (death)
1792 {
1793 /* If we only set part of the register, then this set does not
1794 kill it. */
1795 if (subreg_p)
1796 return;
1797
1798 /* Try killing this register. */
1799 if (regno < FIRST_PSEUDO_REGISTER)
1800 {
1801 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1802 while (--j >= 0)
1803 {
1804 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
1805 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
1806 }
1807 }
1808 else
1809 {
1810 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
1811 SET_REGNO_REG_SET (bb_dead_regs, regno);
1812 }
1813 }
1814 else
1815 {
1816 /* Make the register live again. */
1817 if (regno < FIRST_PSEUDO_REGISTER)
1818 {
1819 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1820 while (--j >= 0)
1821 {
1822 SET_REGNO_REG_SET (bb_live_regs, regno + j);
1823 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
1824 }
1825 }
1826 else
1827 {
1828 SET_REGNO_REG_SET (bb_live_regs, regno);
1829 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
1830 }
1831 }
1832 }
1833 }
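
/* Illustrative examples: with DEATH nonzero, a store such as
   (set (subreg:SI (reg:DI 100) 0) ...) modifies only part of pseudo
   100, so the pseudo is not marked dead; a paradoxical subreg such as
   (subreg:DI (reg:SI 101) 0), being wider than its inner register,
   does kill the inner register.  */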
1834 \f
1835 /* Macros and functions for keeping the priority queue sorted, and
1836 dealing with queueing and dequeueing of instructions. */
1837
1838 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1839 do { if ((NEW_READY) - (OLD_READY) == 1) \
1840 swap_sort (READY, NEW_READY); \
1841 else if ((NEW_READY) - (OLD_READY) > 1) \
1842 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
1843 while (0)
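
/* For example (illustrative): if the first OLD_READY entries of READY
   are already sorted by rank_for_schedule and exactly one insn was
   appended, a single insertion pass (swap_sort) restores the order; if
   more than one was appended, the whole array is re-sorted with qsort.  */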
1844
1845 /* Returns a positive value if y is preferred; returns a negative value if
1846 x is preferred. Should never return 0, since that will make the sort
1847 unstable. */
1848
1849 static int
1850 rank_for_schedule (x, y)
1851 rtx *x, *y;
1852 {
1853 rtx tmp = *y;
1854 rtx tmp2 = *x;
1855 rtx link;
1856 int tmp_class, tmp2_class;
1857 int value;
1858
1859 /* Choose the instruction with the highest priority, if different. */
1860 if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
1861 return value;
1862
1863 if (last_scheduled_insn)
1864 {
1865 /* Classify the instructions into three classes:
1866 1) Data dependent on last scheduled insn.
1867 2) Anti/Output dependent on last scheduled insn.
1868 3) Independent of last scheduled insn, or has latency of one.
1869 Choose the insn from the highest numbered class if different. */
1870 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1871 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
1872 tmp_class = 3;
1873 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1874 tmp_class = 1;
1875 else
1876 tmp_class = 2;
1877
1878 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1879 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
1880 tmp2_class = 3;
1881 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1882 tmp2_class = 1;
1883 else
1884 tmp2_class = 2;
1885
1886 if (value = tmp_class - tmp2_class)
1887 return value;
1888 }
1889
1890 /* If insns are equally good, sort by INSN_LUID (original insn order),
1891 so that we make the sort stable. This minimizes instruction movement,
1892 thus minimizing sched's effect on debugging and cross-jumping. */
1893 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1894 }
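
/* Illustrative example: of two ready insns with equal priority, one
   that feeds the just-scheduled insn through a multi-cycle latency
   (class 1) and one independent of it (class 3), the independent insn
   sorts ahead, avoiding placing a producer directly before its
   consumer.  Any remaining tie falls back to INSN_LUID, which keeps
   the qsort result stable.  */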
1895
1896 /* Resort the array A in which only the element at index N may be out of order. */
1897
1898 __inline static void
1899 swap_sort (a, n)
1900 rtx *a;
1901 int n;
1902 {
1903 rtx insn = a[n-1];
1904 int i = n-2;
1905
1906 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1907 {
1908 a[i+1] = a[i];
1909 i -= 1;
1910 }
1911 a[i+1] = insn;
1912 }
1913
1914 static int max_priority;
1915
1916 /* Add INSN to the insn queue so that it fires at least N_CYCLES
1917 before the currently executing insn. */
1918
1919 __inline static void
1920 queue_insn (insn, n_cycles)
1921 rtx insn;
1922 int n_cycles;
1923 {
1924 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1925 NEXT_INSN (insn) = insn_queue[next_q];
1926 insn_queue[next_q] = insn;
1927 q_size += 1;
1928 }
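
/* The insn queue is used as a circular buffer indexed by cycle: an
   insn queued with N_CYCLES == 3 lands in slot NEXT_Q_AFTER (q_ptr, 3)
   and is dumped back onto the ready list after q_ptr has advanced
   three times (see the main loop of schedule_block).  NEXT_INSN serves
   as the link field while an insn sits in the queue, so the real insn
   chain is only rebuilt when the insns are spliced back in.  */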
1929
1930 /* Return nonzero if PAT is the pattern of an insn which makes a
1931 register live. */
1932
1933 __inline static int
1934 birthing_insn_p (pat)
1935 rtx pat;
1936 {
1937 int j;
1938
1939 if (reload_completed == 1)
1940 return 0;
1941
1942 if (GET_CODE (pat) == SET
1943 && GET_CODE (SET_DEST (pat)) == REG)
1944 {
1945 rtx dest = SET_DEST (pat);
1946 int i = REGNO (dest);
1947
1948 /* It would be more accurate to use refers_to_regno_p or
1949 reg_mentioned_p to determine when the dest is not live before this
1950 insn. */
1951
1952 if (REGNO_REG_SET_P (bb_live_regs, i))
1953 return (REG_N_SETS (i) == 1);
1954
1955 return 0;
1956 }
1957 if (GET_CODE (pat) == PARALLEL)
1958 {
1959 for (j = 0; j < XVECLEN (pat, 0); j++)
1960 if (birthing_insn_p (XVECEXP (pat, 0, j)))
1961 return 1;
1962 }
1963 return 0;
1964 }
1965
1966 /* PREV is an insn that is ready to execute. Adjust its priority if that
1967 will help shorten register lifetimes. */
1968
1969 __inline static void
1970 adjust_priority (prev)
1971 rtx prev;
1972 {
1973 /* Trying to shorten register lives after reload has completed
1974 is useless and wrong. It gives inaccurate schedules. */
1975 if (reload_completed == 0)
1976 {
1977 rtx note;
1978 int n_deaths = 0;
1979
1980 /* ??? This code has no effect, because REG_DEAD notes are removed
1981 before we ever get here. */
1982 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1983 if (REG_NOTE_KIND (note) == REG_DEAD)
1984 n_deaths += 1;
1985
1986 /* Defer scheduling insns which kill registers, since that
1987 shortens register lives. Prefer scheduling insns which
1988 make registers live for the same reason. */
1989 switch (n_deaths)
1990 {
1991 default:
1992 INSN_PRIORITY (prev) >>= 3;
1993 break;
1994 case 3:
1995 INSN_PRIORITY (prev) >>= 2;
1996 break;
1997 case 2:
1998 case 1:
1999 INSN_PRIORITY (prev) >>= 1;
2000 break;
2001 case 0:
2002 if (birthing_insn_p (PATTERN (prev)))
2003 {
2004 int max = max_priority;
2005
2006 if (max > INSN_PRIORITY (prev))
2007 INSN_PRIORITY (prev) = max;
2008 }
2009 break;
2010 }
2011 #ifdef ADJUST_PRIORITY
2012 ADJUST_PRIORITY (prev);
2013 #endif
2014 }
2015 }
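
/* Worked example (illustrative): an insn of priority 16 that kills one
   or two registers drops to priority 8, one that kills three drops to
   4, and one that kills four or more drops to 2.  An insn that kills
   nothing but gives birth to a value is promoted to max_priority, so
   this backward pass picks it early and it lands late in the final
   insn order, starting the new value's lifetime as late as possible.  */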
2016
2017 /* INSN is the "currently executing insn". Launch each insn which was
2018 waiting on INSN (in the backwards dataflow sense). READY is a
2019 vector of insns which are ready to fire. N_READY is the number of
2020 elements in READY. CLOCK is the current virtual cycle. */
2021
2022 static int
2023 schedule_insn (insn, ready, n_ready, clock)
2024 rtx insn;
2025 rtx *ready;
2026 int n_ready;
2027 int clock;
2028 {
2029 rtx link;
2030 int new_ready = n_ready;
2031
2032 if (MAX_BLOCKAGE > 1)
2033 schedule_unit (insn_unit (insn), insn, clock);
2034
2035 if (LOG_LINKS (insn) == 0)
2036 return n_ready;
2037
2038 /* This is used by the function adjust_priority above. */
2039 if (n_ready > 0)
2040 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2041 else
2042 max_priority = INSN_PRIORITY (insn);
2043
2044 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2045 {
2046 rtx prev = XEXP (link, 0);
2047 int cost = insn_cost (prev, link, insn);
2048
2049 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2050 {
2051 /* We satisfied one requirement to fire PREV. Record the earliest
2052 time when PREV can fire. No need to do this if the cost is 1,
2053 because PREV can fire no sooner than the next cycle. */
2054 if (cost > 1)
2055 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2056 }
2057 else
2058 {
2059 /* We satisfied the last requirement to fire PREV. Ensure that all
2060 timing requirements are satisfied. */
2061 if (INSN_TICK (prev) - clock > cost)
2062 cost = INSN_TICK (prev) - clock;
2063
2064 /* Adjust the priority of PREV and either put it on the ready
2065 list or queue it. */
2066 adjust_priority (prev);
2067 if (cost <= 1)
2068 ready[new_ready++] = prev;
2069 else
2070 queue_insn (prev, cost);
2071 }
2072 }
2073
2074 return new_ready;
2075 }
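
/* Illustrative trace: let PREV feed two consumers.  The first issues
   at clock 5 with a latency (insn_cost) of 3, so PREV's reference
   count drops to 1 and INSN_TICK (PREV) is recorded as 8.  The second
   issues at clock 7 with a latency of 1; the count reaches 0, and
   since INSN_TICK (PREV) - clock is also 1, PREV goes straight onto
   the ready list and can issue on the very next cycle.  */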
2076
2077 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2078 those that are blocked due to function unit hazards and rearrange
2079 the remaining ones to minimize subsequent function unit hazards. */
2080
2081 static int
2082 schedule_select (ready, n_ready, clock, file)
2083 rtx *ready;
2084 int n_ready, clock;
2085 FILE *file;
2086 {
2087 int pri = INSN_PRIORITY (ready[0]);
2088 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2089 rtx insn;
2090
2091 /* Work down the ready list in groups of instructions with the same
2092 priority value. Queue insns in the group that are blocked and
2093 select among those that remain for the one with the largest
2094 potential hazard. */
2095 for (i = 0; i < n_ready; i = j)
2096 {
2097 int opri = pri;
2098 for (j = i + 1; j < n_ready; j++)
2099 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2100 break;
2101
2102 /* Queue insns in the group that are blocked. */
2103 for (k = i, q = 0; k < j; k++)
2104 {
2105 insn = ready[k];
2106 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2107 {
2108 q++;
2109 ready[k] = 0;
2110 queue_insn (insn, cost);
2111 if (file)
2112 fprintf (file, "\n;; blocking insn %d for %d cycles",
2113 INSN_UID (insn), cost);
2114 }
2115 }
2116 new_ready -= q;
2117
2118 /* Check the next group if all insns were queued. */
2119 if (j - i - q == 0)
2120 continue;
2121
2122 /* If more than one remains, select the first one with the largest
2123 potential hazard. */
2124 else if (j - i - q > 1)
2125 {
2126 best_cost = -1;
2127 for (k = i; k < j; k++)
2128 {
2129 if ((insn = ready[k]) == 0)
2130 continue;
2131 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2132 > best_cost)
2133 {
2134 best_cost = cost;
2135 best_insn = k;
2136 }
2137 }
2138 }
2139 /* We have found a suitable insn to schedule. */
2140 break;
2141 }
2142
2143 /* Move the best insn to the front of the ready list. */
2144 if (best_insn != 0)
2145 {
2146 if (file)
2147 {
2148 fprintf (file, ", now");
2149 for (i = 0; i < n_ready; i++)
2150 if (ready[i])
2151 fprintf (file, " %d", INSN_UID (ready[i]));
2152 fprintf (file, "\n;; insn %d has a greater potential hazard",
2153 INSN_UID (ready[best_insn]));
2154 }
2155 for (i = best_insn; i > 0; i--)
2156 {
2157 insn = ready[i-1];
2158 ready[i-1] = ready[i];
2159 ready[i] = insn;
2160 }
2161 }
2162
2163 /* Compact the ready list. */
2164 if (new_ready < n_ready)
2165 for (i = j = 0; i < n_ready; i++)
2166 if (ready[i])
2167 ready[j++] = ready[i];
2168
2169 return new_ready;
2170 }
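
/* Illustrative example: with three equal-priority insns heading the
   ready list, one whose function unit stays busy for two more cycles
   is queued, and of the two that remain, the one with the larger
   potential_hazard value is swapped to the front; issuing the insn
   most likely to cause a later blockage first leaves the most
   scheduling freedom afterwards.  */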
2171
2172 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2173 dead_notes list. */
2174
2175 static void
2176 create_reg_dead_note (reg, insn)
2177 rtx reg, insn;
2178 {
2179 rtx link;
2180
2181 /* The number of registers killed after scheduling must be the same as the
2182 number of registers killed before scheduling. The number of REG_DEAD
2183 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2184 might become one DImode hard register REG_DEAD note, but the number of
2185 registers killed will be conserved.
2186
2187 We carefully remove REG_DEAD notes from the dead_notes list, so that
2188 there will be none left at the end. If we run out early, then there
2189 is a bug somewhere in flow, combine and/or sched. */
2190
2191 if (dead_notes == 0)
2192 {
2193 #if 1
2194 abort ();
2195 #else
2196 link = rtx_alloc (EXPR_LIST);
2197 PUT_REG_NOTE_KIND (link, REG_DEAD);
2198 #endif
2199 }
2200 else
2201 {
2202 /* Number of regs killed by REG. */
2203 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2204 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2205 /* Number of regs killed by REG_DEAD notes taken off the list. */
2206 int reg_note_regs;
2207
2208 link = dead_notes;
2209 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2210 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2211 GET_MODE (XEXP (link, 0))));
2212 while (reg_note_regs < regs_killed)
2213 {
2214 link = XEXP (link, 1);
2215 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2216 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2217 GET_MODE (XEXP (link, 0))));
2218 }
2219 dead_notes = XEXP (link, 1);
2220
2221 /* If we took too many reg kills off, put the extra ones back. */
2222 while (reg_note_regs > regs_killed)
2223 {
2224 rtx temp_reg, temp_link;
2225
2226 temp_reg = gen_rtx (REG, word_mode, 0);
2227 temp_link = rtx_alloc (EXPR_LIST);
2228 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2229 XEXP (temp_link, 0) = temp_reg;
2230 XEXP (temp_link, 1) = dead_notes;
2231 dead_notes = temp_link;
2232 reg_note_regs--;
2233 }
2234 }
2235
2236 XEXP (link, 0) = reg;
2237 XEXP (link, 1) = REG_NOTES (insn);
2238 REG_NOTES (insn) = link;
2239 }
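
/* Illustrative example: a REG_DEAD note for a DImode hard register
   pair accounts for two registers killed, so two single-register
   notes may be taken off dead_notes to cover it.  Conversely, if the
   last note removed covered more registers than REG kills, dummy
   word_mode notes are pushed back so the total count of killed
   registers stays balanced.  */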
2240
2241 /* Subroutine of attach_deaths_insn--handles the recursive search
2242 through X. If SET_P is true, then X is being modified by the insn. */
2243
2244 static void
2245 attach_deaths (x, insn, set_p)
2246 rtx x;
2247 rtx insn;
2248 int set_p;
2249 {
2250 register int i;
2251 register int j;
2252 register enum rtx_code code;
2253 register char *fmt;
2254
2255 if (x == 0)
2256 return;
2257
2258 code = GET_CODE (x);
2259
2260 switch (code)
2261 {
2262 case CONST_INT:
2263 case CONST_DOUBLE:
2264 case LABEL_REF:
2265 case SYMBOL_REF:
2266 case CONST:
2267 case CODE_LABEL:
2268 case PC:
2269 case CC0:
2270 /* Get rid of the easy cases first. */
2271 return;
2272
2273 case REG:
2274 {
2275 /* If the register dies in this insn, queue that note, and mark
2276 this register as needing to die. */
2277 /* This code is very similar to mark_used_1 (if set_p is false)
2278 and mark_set_1 (if set_p is true) in flow.c. */
2279
2280 register int regno;
2281 int some_needed;
2282 int all_needed;
2283
2284 if (set_p)
2285 return;
2286
2287 regno = REGNO (x);
2288 all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
2289 if (regno < FIRST_PSEUDO_REGISTER)
2290 {
2291 int n;
2292
2293 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2294 while (--n > 0)
2295 {
2296 int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
2297 some_needed |= needed;
2298 all_needed &= needed;
2299 }
2300 }
2301
2302 /* If it wasn't live before we started, then add a REG_DEAD note.
2303 We must check the previous lifetime info not the current info,
2304 because we may have to execute this code several times, e.g.
2305 once for a clobber (which doesn't add a note) and later
2306 for a use (which does add a note).
2307
2308 Always make the register live. We must do this even if it was
2309 live before, because this may be an insn which sets and uses
2310 the same register, in which case the register has already been
2311 killed, so we must make it live again.
2312
2313 Global registers are always live, and should never have a REG_DEAD
2314 note added for them, so none of the code below applies to them. */
2315
2316 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2317 {
2318 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2319 STACK_POINTER_REGNUM, since these are always considered to be
2320 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2321 if (regno != FRAME_POINTER_REGNUM
2322 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2323 && ! (regno == HARD_FRAME_POINTER_REGNUM)
2324 #endif
2325 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2326 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2327 #endif
2328 && regno != STACK_POINTER_REGNUM)
2329 {
2330 /* ??? It is perhaps a dead_or_set_p bug that it does
2331 not check for REG_UNUSED notes itself. This is necessary
2332 for the case where the SET_DEST is a subreg of regno, as
2333 dead_or_set_p handles subregs specially. */
2334 if (! all_needed && ! dead_or_set_p (insn, x)
2335 && ! find_reg_note (insn, REG_UNUSED, x))
2336 {
2337 /* Check for the case where the register dying partially
2338 overlaps the register set by this insn. */
2339 if (regno < FIRST_PSEUDO_REGISTER
2340 && HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
2341 {
2342 int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2343 while (--n >= 0)
2344 some_needed |= dead_or_set_regno_p (insn, regno + n);
2345 }
2346
2347 /* If none of the words in X is needed, make a REG_DEAD
2348 note. Otherwise, we must make partial REG_DEAD
2349 notes. */
2350 if (! some_needed)
2351 create_reg_dead_note (x, insn);
2352 else
2353 {
2354 int i;
2355
2356 /* Don't make a REG_DEAD note for a part of a
2357 register that is set in the insn. */
2358 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2359 i >= 0; i--)
2360 if (! REGNO_REG_SET_P (old_live_regs, regno + i)
2361 && ! dead_or_set_regno_p (insn, regno + i))
2362 create_reg_dead_note (gen_rtx (REG,
2363 reg_raw_mode[regno + i],
2364 regno + i),
2365 insn);
2366 }
2367 }
2368 }
2369
2370 if (regno < FIRST_PSEUDO_REGISTER)
2371 {
2372 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2373 while (--j >= 0)
2374 {
2375 CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
2376 SET_REGNO_REG_SET (bb_live_regs, regno + j);
2377 }
2378 }
2379 else
2380 {
2381 CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
2382 SET_REGNO_REG_SET (bb_live_regs, regno);
2383 }
2384 }
2385 return;
2386 }
2387
2388 case MEM:
2389 /* Handle tail-recursive case. */
2390 attach_deaths (XEXP (x, 0), insn, 0);
2391 return;
2392
2393 case SUBREG:
2394 case STRICT_LOW_PART:
2395 /* These two cases preserve the value of SET_P, so handle them
2396 separately. */
2397 attach_deaths (XEXP (x, 0), insn, set_p);
2398 return;
2399
2400 case ZERO_EXTRACT:
2401 case SIGN_EXTRACT:
2402 /* This case preserves the value of SET_P for the first operand, but
2403 clears it for the other two. */
2404 attach_deaths (XEXP (x, 0), insn, set_p);
2405 attach_deaths (XEXP (x, 1), insn, 0);
2406 attach_deaths (XEXP (x, 2), insn, 0);
2407 return;
2408
2409 default:
2410 /* Other cases: walk the insn. */
2411 fmt = GET_RTX_FORMAT (code);
2412 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2413 {
2414 if (fmt[i] == 'e')
2415 attach_deaths (XEXP (x, i), insn, 0);
2416 else if (fmt[i] == 'E')
2417 for (j = 0; j < XVECLEN (x, i); j++)
2418 attach_deaths (XVECEXP (x, i, j), insn, 0);
2419 }
2420 }
2421 }
2422
2423 /* After INSN has executed, add register death notes for each register
2424 that is dead after INSN. */
2425
2426 static void
2427 attach_deaths_insn (insn)
2428 rtx insn;
2429 {
2430 rtx x = PATTERN (insn);
2431 register RTX_CODE code = GET_CODE (x);
2432 rtx link;
2433
2434 if (code == SET)
2435 {
2436 attach_deaths (SET_SRC (x), insn, 0);
2437
2438 /* A register might die here even if it is the destination, e.g.
2439 it is the target of a volatile read and is otherwise unused.
2440 Hence we must always call attach_deaths for the SET_DEST. */
2441 attach_deaths (SET_DEST (x), insn, 1);
2442 }
2443 else if (code == PARALLEL)
2444 {
2445 register int i;
2446 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2447 {
2448 code = GET_CODE (XVECEXP (x, 0, i));
2449 if (code == SET)
2450 {
2451 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2452
2453 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2454 }
2455 /* Flow does not add REG_DEAD notes to registers that die in
2456 clobbers, so we can't either. */
2457 else if (code != CLOBBER)
2458 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2459 }
2460 }
2461 /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
2462 MEM being clobbered, just like flow. */
2463 else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
2464 attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
2465 /* Otherwise don't add a death note to things being clobbered. */
2466 else if (code != CLOBBER)
2467 attach_deaths (x, insn, 0);
2468
2469 /* Make death notes for things used in the called function. */
2470 if (GET_CODE (insn) == CALL_INSN)
2471 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2472 attach_deaths (XEXP (XEXP (link, 0), 0), insn,
2473 GET_CODE (XEXP (link, 0)) == CLOBBER);
2474 }
2475
2476 /* Delete notes beginning with INSN and maybe put them in the chain
2477 of notes ended by NOTE_LIST.
2478 Returns the insn following the notes. */
2479
2480 static rtx
2481 unlink_notes (insn, tail)
2482 rtx insn, tail;
2483 {
2484 rtx prev = PREV_INSN (insn);
2485
2486 while (insn != tail && GET_CODE (insn) == NOTE)
2487 {
2488 rtx next = NEXT_INSN (insn);
2489 /* Delete the note from its current position. */
2490 if (prev)
2491 NEXT_INSN (prev) = next;
2492 if (next)
2493 PREV_INSN (next) = prev;
2494
2495 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2496 /* Record line-number notes so they can be reused. */
2497 LINE_NOTE (insn) = insn;
2498
2499 /* Don't save away NOTE_INSN_SETJMPs, because they must remain
2500 immediately after the call they follow. We use a fake
2501 (REG_DEAD (const_int -1)) note to remember them.
2502 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG,END}. */
2503 else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
2504 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
2505 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
2506 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
2507 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
2508 {
2509 /* Insert the note at the end of the notes list. */
2510 PREV_INSN (insn) = note_list;
2511 if (note_list)
2512 NEXT_INSN (note_list) = insn;
2513 note_list = insn;
2514 }
2515
2516 insn = next;
2517 }
2518 return insn;
2519 }
2520
2521 /* Constructor for `sometimes' data structure. */
2522
2523 static int
2524 new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
2525 struct sometimes *regs_sometimes_live;
2526 int regno;
2527 int sometimes_max;
2528 {
2529 register struct sometimes *p;
2530
2531 /* There should never be a register greater than max_regno here. If there
2532 is, it means that a define_split has created a new pseudo reg. This
2533 is not allowed, since there will not be flow info available for any
2534 new register, so catch the error here. */
2535 if (regno >= max_regno)
2536 abort ();
2537
2538 p = &regs_sometimes_live[sometimes_max];
2539 p->regno = regno;
2540 p->live_length = 0;
2541 p->calls_crossed = 0;
2542 sometimes_max++;
2543 return sometimes_max;
2544 }
2545
2546 /* Accumulate the lifetime lengths and call-crossing counts of all
2547 regs we are still tracking at the end of a block. */
2548
2549 static void
2550 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2551 struct sometimes *regs_sometimes_live;
2552 int sometimes_max;
2553 {
2554 int i;
2555
2556 for (i = 0; i < sometimes_max; i++)
2557 {
2558 register struct sometimes *p = &regs_sometimes_live[i];
2559 int regno = p->regno;
2560
2561 sched_reg_live_length[regno] += p->live_length;
2562 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2563 }
2564 }
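
/* Between them, new_sometimes_live and finish_sometimes_live track,
   for each register live at some point in the block, how many insns
   its live ranges span and how many calls they cross; the totals are
   accumulated into sched_reg_live_length and
   sched_reg_n_calls_crossed for later use by this pass.  */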
2565
2566 /* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
2567 NOTE_INSN_{LOOP,EHREGION}_{BEG,END}, and convert them back into
2568 NOTEs. The REG_DEAD note following the first one contains the saved
2569 value of NOTE_BLOCK_NUMBER, which is useful for
2570 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
2571 output by the instruction scheduler. Return the new value of LAST. */
2572
2573 static rtx
2574 reemit_notes (insn, last)
2575 rtx insn;
2576 rtx last;
2577 {
2578 rtx note;
2579
2580 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2581 {
2582 if (REG_NOTE_KIND (note) == REG_DEAD
2583 && GET_CODE (XEXP (note, 0)) == CONST_INT)
2584 {
2585 if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
2586 {
2587 CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
2588 = CONST_CALL_P (note);
2589 remove_note (insn, note);
2590 note = XEXP (note, 1);
2591 }
2592 else
2593 {
2594 last = emit_note_before (INTVAL (XEXP (note, 0)), last);
2595 remove_note (insn, note);
2596 note = XEXP (note, 1);
2597 NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
2598 }
2599 remove_note (insn, note);
2600 }
2601 }
2602 return last;
2603 }
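
/* Illustrative example of the encoding: a NOTE_INSN_LOOP_BEG note
   whose NOTE_BLOCK_NUMBER is 4 travels through scheduling as the pair

       (expr_list:REG_DEAD (const_int NOTE_INSN_LOOP_BEG)
	  (expr_list:REG_DEAD (const_int 4) ...))

   in an insn's REG_NOTES; the code above turns the first constant
   back into a note and the second back into its block number.  */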
2604
2605 /* Use modified list scheduling to rearrange insns in basic block
2606 B. FILE, if nonzero, is where we dump interesting output about
2607 this pass. */
2608
2609 static void
2610 schedule_block (b, file)
2611 int b;
2612 FILE *file;
2613 {
2614 rtx insn, last;
2615 rtx *ready, link;
2616 int i, j, n_ready = 0, new_ready, n_insns;
2617 int sched_n_insns = 0;
2618 int clock;
2619 #define NEED_NOTHING 0
2620 #define NEED_HEAD 1
2621 #define NEED_TAIL 2
2622 int new_needs;
2623
2624 /* HEAD and TAIL delimit the region being scheduled. */
2625 rtx head = basic_block_head[b];
2626 rtx tail = basic_block_end[b];
2627 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2628 being scheduled. When the insns have been ordered,
2629 these insns delimit where the new insns are to be
2630 spliced back into the insn chain. */
2631 rtx next_tail;
2632 rtx prev_head;
2633
2634 /* Keep life information accurate. */
2635 register struct sometimes *regs_sometimes_live;
2636 int sometimes_max;
2637
2638 if (file)
2639 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2640 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2641
2642 i = max_reg_num ();
2643 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2644 bzero ((char *) reg_last_uses, i * sizeof (rtx));
2645 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2646 bzero ((char *) reg_last_sets, i * sizeof (rtx));
2647 reg_pending_sets = ALLOCA_REG_SET ();
2648 CLEAR_REG_SET (reg_pending_sets);
2649 reg_pending_sets_all = 0;
2650 clear_units ();
2651
2652 /* Remove certain insns at the beginning from scheduling,
2653 by advancing HEAD. */
2654
2655 /* At the start of a function, before reload has run, don't delay getting
2656 parameters from hard registers into pseudo registers. */
2657 if (reload_completed == 0 && b == 0)
2658 {
2659 while (head != tail
2660 && GET_CODE (head) == NOTE
2661 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2662 head = NEXT_INSN (head);
2663 while (head != tail
2664 && GET_CODE (head) == INSN
2665 && GET_CODE (PATTERN (head)) == SET)
2666 {
2667 rtx src = SET_SRC (PATTERN (head));
2668 while (GET_CODE (src) == SUBREG
2669 || GET_CODE (src) == SIGN_EXTEND
2670 || GET_CODE (src) == ZERO_EXTEND
2671 || GET_CODE (src) == SIGN_EXTRACT
2672 || GET_CODE (src) == ZERO_EXTRACT)
2673 src = XEXP (src, 0);
2674 if (GET_CODE (src) != REG
2675 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2676 break;
2677 /* Keep this insn from ever being scheduled. */
2678 INSN_REF_COUNT (head) = 1;
2679 head = NEXT_INSN (head);
2680 }
2681 }
2682
2683 /* Don't include any notes or labels at the beginning of the
2684 basic block, or notes at the ends of basic blocks. */
2685 while (head != tail)
2686 {
2687 if (GET_CODE (head) == NOTE)
2688 head = NEXT_INSN (head);
2689 else if (GET_CODE (tail) == NOTE)
2690 tail = PREV_INSN (tail);
2691 else if (GET_CODE (head) == CODE_LABEL)
2692 head = NEXT_INSN (head);
2693 else break;
2694 }
2695 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2696 to schedule this block. */
2697 if (head == tail
2698 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2699 goto ret;
2700
2701 #if 0
2702 /* This short-cut doesn't work. It does not count call insns crossed by
2703 registers in reg_sometimes_live. It does not mark these registers as
2704 dead if they die in this block. It does not mark these registers live
2705 (or create new reg_sometimes_live entries if necessary) if they are born
2706 in this block.
2707
2708 The easy solution is to just always schedule a block. This block only
2709 has one insn, so this won't slow down this pass by much. */
2710
2711 if (head == tail)
2712 goto ret;
2713 #endif
2714
2715 /* Now HEAD through TAIL are the insns actually to be rearranged;
2716 Let PREV_HEAD and NEXT_TAIL enclose them. */
2717 prev_head = PREV_INSN (head);
2718 next_tail = NEXT_INSN (tail);
2719
2720 /* Initialize basic block data structures. */
2721 dead_notes = 0;
2722 pending_read_insns = 0;
2723 pending_read_mems = 0;
2724 pending_write_insns = 0;
2725 pending_write_mems = 0;
2726 pending_lists_length = 0;
2727 last_pending_memory_flush = 0;
2728 last_function_call = 0;
2729 last_scheduled_insn = 0;
2730
2731 LOG_LINKS (sched_before_next_call) = 0;
2732
2733 n_insns = sched_analyze (head, tail);
2734 if (n_insns == 0)
2735 {
2736 free_pending_lists ();
2737 goto ret;
2738 }
2739
2740 /* Allocate vector to hold insns to be rearranged (except those
2741 insns which are controlled by an insn with SCHED_GROUP_P set).
2742 All these insns are included between ORIG_HEAD and ORIG_TAIL,
2743 as those variables are ultimately set up. */
2744 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2745
2746 /* TAIL is now the last of the insns to be rearranged.
2747 Put those insns into the READY vector. */
2748 insn = tail;
2749
2750 /* For all branches, calls, uses, and cc0 setters, force them to remain
2751 in order at the end of the block by adding dependencies and giving
2752 the last a high priority. There may be notes present, and prev_head
2753 may also be a note.
2754
2755 Branches must obviously remain at the end. Calls should remain at the
2756 end since moving them results in worse register allocation. Uses remain
2757 at the end to ensure proper register allocation. cc0 setters remain
2758 at the end because they can't be moved away from their cc0 user. */
2759 last = 0;
2760 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2761 || (GET_CODE (insn) == INSN
2762 && (GET_CODE (PATTERN (insn)) == USE
2763 #ifdef HAVE_cc0
2764 || sets_cc0_p (PATTERN (insn))
2765 #endif
2766 ))
2767 || GET_CODE (insn) == NOTE)
2768 {
2769 if (GET_CODE (insn) != NOTE)
2770 {
2771 priority (insn);
2772 if (last == 0)
2773 {
2774 ready[n_ready++] = insn;
2775 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2776 INSN_REF_COUNT (insn) = 0;
2777 }
2778 else if (! find_insn_list (insn, LOG_LINKS (last)))
2779 {
2780 add_dependence (last, insn, REG_DEP_ANTI);
2781 INSN_REF_COUNT (insn)++;
2782 }
2783 last = insn;
2784
2785 /* Skip over insns that are part of a group. */
2786 while (SCHED_GROUP_P (insn))
2787 {
2788 insn = prev_nonnote_insn (insn);
2789 priority (insn);
2790 }
2791 }
2792
2793 insn = PREV_INSN (insn);
2794 /* Don't overrun the bounds of the basic block. */
2795 if (insn == prev_head)
2796 break;
2797 }
2798
2799 /* Assign priorities to instructions. Also check whether they
2800 are in priority order already. If so then I will be nonnegative.
2801 We use this shortcut only before reloading. */
2802 #if 0
2803 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2804 #endif
2805
2806 for (; insn != prev_head; insn = PREV_INSN (insn))
2807 {
2808 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2809 {
2810 priority (insn);
2811 if (INSN_REF_COUNT (insn) == 0)
2812 {
2813 if (last == 0)
2814 ready[n_ready++] = insn;
2815 else
2816 {
2817 /* Make this dependent on the last of the instructions
2818 that must remain in order at the end of the block. */
2819 add_dependence (last, insn, REG_DEP_ANTI);
2820 INSN_REF_COUNT (insn) = 1;
2821 }
2822 }
2823 if (SCHED_GROUP_P (insn))
2824 {
2825 while (SCHED_GROUP_P (insn))
2826 {
2827 insn = prev_nonnote_insn (insn);
2828 priority (insn);
2829 }
2830 continue;
2831 }
2832 #if 0
2833 if (i < 0)
2834 continue;
2835 if (INSN_PRIORITY (insn) < i)
2836 i = INSN_PRIORITY (insn);
2837 else if (INSN_PRIORITY (insn) > i)
2838 i = DONE_PRIORITY;
2839 #endif
2840 }
2841 }
2842
2843 #if 0
2844 /* This short-cut doesn't work. It does not count call insns crossed by
2845 registers in reg_sometimes_live. It does not mark these registers as
2846 dead if they die in this block. It does not mark these registers live
2847 (or create new reg_sometimes_live entries if necessary) if they are born
2848 in this block.
2849
2850 The easy solution is to just always schedule a block. These blocks tend
2851 to be very short, so this doesn't slow down this pass by much. */
2852
2853 /* If existing order is good, don't bother to reorder. */
2854 if (i != DONE_PRIORITY)
2855 {
2856 if (file)
2857 fprintf (file, ";; already scheduled\n");
2858
2859 if (reload_completed == 0)
2860 {
2861 for (i = 0; i < sometimes_max; i++)
2862 regs_sometimes_live[i].live_length += n_insns;
2863
2864 finish_sometimes_live (regs_sometimes_live, sometimes_max);
2865 }
2866 free_pending_lists ();
2867 goto ret;
2868 }
2869 #endif
2870
2871 /* Scan all the insns to be scheduled, removing NOTE insns
2872 and register death notes.
2873 Line number NOTE insns end up in NOTE_LIST.
2874 Register death notes end up in DEAD_NOTES.
2875
2876 Recreate the register life information for the end of this basic
2877 block. */
2878
2879 if (reload_completed == 0)
2880 {
2881 COPY_REG_SET (bb_live_regs, basic_block_live_at_start[b]);
2882 CLEAR_REG_SET (bb_dead_regs);
2883
2884 if (b == 0)
2885 {
2886 /* This is the first block in the function. There may be insns
2887 before head that we can't schedule. We still need to examine
2888 them though for accurate register lifetime analysis. */
2889
2890 /* We don't want to remove any REG_DEAD notes as the code below
2891 does. */
2892
2893 for (insn = basic_block_head[b]; insn != head;
2894 insn = NEXT_INSN (insn))
2895 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2896 {
2897 /* See if the register gets born here. */
2898 /* We must check for registers being born before we check for
2899 registers dying. It is possible for a register to be born
2900 and die in the same insn, e.g. reading from a volatile
2901 memory location into an otherwise unused register. Such
2902 a register must be marked as dead after this insn. */
2903 if (GET_CODE (PATTERN (insn)) == SET
2904 || GET_CODE (PATTERN (insn)) == CLOBBER)
2905 sched_note_set (b, PATTERN (insn), 0);
2906 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2907 {
2908 int j;
2909 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2910 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2911 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2912 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2913
2914 /* ??? This code is obsolete and should be deleted. It
2915 is harmless though, so we will leave it in for now. */
2916 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2917 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2918 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
2919 }
2920
2921 /* Each call clobbers (makes live) all call-clobbered regs
2922 that are not global or fixed. Note that the function-value
2923 reg is a call_clobbered reg. */
2924
2925 if (GET_CODE (insn) == CALL_INSN)
2926 {
2927 int j;
2928 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2929 if (call_used_regs[j] && ! global_regs[j]
2930 && ! fixed_regs[j])
2931 {
2932 SET_REGNO_REG_SET (bb_live_regs, j);
2933 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
2934 }
2935 }
2936
2937 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2938 {
2939 if ((REG_NOTE_KIND (link) == REG_DEAD
2940 || REG_NOTE_KIND (link) == REG_UNUSED)
2941 /* Verify that the REG_NOTE has a valid value. */
2942 && GET_CODE (XEXP (link, 0)) == REG)
2943 {
2944 register int regno = REGNO (XEXP (link, 0));
2945
2946 if (regno < FIRST_PSEUDO_REGISTER)
2947 {
2948 int j = HARD_REGNO_NREGS (regno,
2949 GET_MODE (XEXP (link, 0)));
2950 while (--j >= 0)
2951 {
2952 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
2953 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
2954 }
2955 }
2956 else
2957 {
2958 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
2959 SET_REGNO_REG_SET (bb_dead_regs, regno);
2960 }
2961 }
2962 }
2963 }
2964 }
2965 }
2966
2967 /* If debugging information is being produced, keep track of the line
2968 number notes for each insn. */
2969 if (write_symbols != NO_DEBUG)
2970 {
2971 /* We must use the true line number for the first insn in the block
2972 that was computed and saved at the start of this pass. We can't
2973 use the current line number, because scheduling of the previous
2974 block may have changed the current line number. */
2975 rtx line = line_note_head[b];
2976
2977 for (insn = basic_block_head[b];
2978 insn != next_tail;
2979 insn = NEXT_INSN (insn))
2980 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2981 line = insn;
2982 else
2983 LINE_NOTE (insn) = line;
2984 }
2985
2986 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2987 {
2988 rtx prev, next, link;
2989
2990 /* Farm out notes. This is needed to keep the debugger from
2991 getting completely deranged. */
2992 if (GET_CODE (insn) == NOTE)
2993 {
2994 prev = insn;
2995 insn = unlink_notes (insn, next_tail);
2996 if (prev == tail)
2997 abort ();
2998 if (prev == head)
2999 abort ();
3000 if (insn == next_tail)
3001 abort ();
3002 }
3003
3004 if (reload_completed == 0
3005 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3006 {
3007 /* See if the register gets born here. */
3008 /* We must check for registers being born before we check for
3009 registers dying. It is possible for a register to be born and
3010 die in the same insn, e.g. reading from a volatile memory
3011 location into an otherwise unused register. Such a register
3012 must be marked as dead after this insn. */
3013 if (GET_CODE (PATTERN (insn)) == SET
3014 || GET_CODE (PATTERN (insn)) == CLOBBER)
3015 sched_note_set (b, PATTERN (insn), 0);
3016 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3017 {
3018 int j;
3019 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3020 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3021 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3022 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3023
3024 /* ??? This code is obsolete and should be deleted. It
3025 is harmless though, so we will leave it in for now. */
3026 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3027 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3028 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3029 }
3030
3031 /* Each call clobbers (makes live) all call-clobbered regs that are
3032 not global or fixed. Note that the function-value reg is a
3033 call_clobbered reg. */
3034
3035 if (GET_CODE (insn) == CALL_INSN)
3036 {
3037 int j;
3038 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
3039 if (call_used_regs[j] && ! global_regs[j]
3040 && ! fixed_regs[j])
3041 {
3042 SET_REGNO_REG_SET (bb_live_regs, j);
3043 CLEAR_REGNO_REG_SET (bb_dead_regs, j);
3044 }
3045 }
3046
3047 /* Need to know what registers this insn kills. */
3048 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3049 {
3050 next = XEXP (link, 1);
3051 if ((REG_NOTE_KIND (link) == REG_DEAD
3052 || REG_NOTE_KIND (link) == REG_UNUSED)
3053 /* Verify that the REG_NOTE has a valid value. */
3054 && GET_CODE (XEXP (link, 0)) == REG)
3055 {
3056 register int regno = REGNO (XEXP (link, 0));
3057
3058 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3059 alone. */
3060 if (REG_NOTE_KIND (link) == REG_DEAD)
3061 {
3062 if (prev)
3063 XEXP (prev, 1) = next;
3064 else
3065 REG_NOTES (insn) = next;
3066 XEXP (link, 1) = dead_notes;
3067 dead_notes = link;
3068 }
3069 else
3070 prev = link;
3071
3072 if (regno < FIRST_PSEUDO_REGISTER)
3073 {
3074 int j = HARD_REGNO_NREGS (regno,
3075 GET_MODE (XEXP (link, 0)));
3076 while (--j >= 0)
3077 {
3078 CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
3079 SET_REGNO_REG_SET (bb_dead_regs, regno + j);
3080 }
3081 }
3082 else
3083 {
3084 CLEAR_REGNO_REG_SET (bb_live_regs, regno);
3085 SET_REGNO_REG_SET (bb_dead_regs, regno);
3086 }
3087 }
3088 else
3089 prev = link;
3090 }
3091 }
3092 }
3093
3094 if (reload_completed == 0)
3095 {
3096 /* Keep track of register lives. */
3097 old_live_regs = ALLOCA_REG_SET ();
3098 regs_sometimes_live
3099 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3100 sometimes_max = 0;
3101
3102 /* Start with registers live at end. */
3103 COPY_REG_SET (old_live_regs, bb_live_regs);
3104 EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
3105 {
3106 sometimes_max
3107 = new_sometimes_live (regs_sometimes_live,
3108 j, sometimes_max);
3109 });
3110 }
3111
3112 SCHED_SORT (ready, n_ready, 1);
3113
3114 if (file)
3115 {
3116 fprintf (file, ";; ready list initially:\n;; ");
3117 for (i = 0; i < n_ready; i++)
3118 fprintf (file, "%d ", INSN_UID (ready[i]));
3119 fprintf (file, "\n\n");
3120
3121 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3122 if (INSN_PRIORITY (insn) > 0)
3123 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3124 INSN_UID (insn), INSN_PRIORITY (insn),
3125 INSN_REF_COUNT (insn));
3126 }
3127
3128 /* Now HEAD and TAIL are going to become disconnected
3129 entirely from the insn chain. */
3130 tail = 0;
3131
3132 /* Q_SIZE will always be zero here. */
3133 q_ptr = 0; clock = 0;
3134 bzero ((char *) insn_queue, sizeof (insn_queue));
3135
3136 /* Now, perform list scheduling. */
3137
3138 /* Where we start inserting insns is after TAIL. */
3139 last = next_tail;
3140
3141 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3142 ? NEED_HEAD : NEED_NOTHING);
3143 if (PREV_INSN (next_tail) == basic_block_end[b])
3144 new_needs |= NEED_TAIL;
3145
3146 new_ready = n_ready;
3147 while (sched_n_insns < n_insns)
3148 {
3149 q_ptr = NEXT_Q (q_ptr); clock++;
3150
3151 /* Add all pending insns that can be scheduled without stalls to the
3152 ready list. */
3153 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3154 {
3155 if (file)
3156 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3157 INSN_UID (insn), INSN_UID (last), clock);
3158 ready[new_ready++] = insn;
3159 q_size -= 1;
3160 }
3161 insn_queue[q_ptr] = 0;
3162
3163 /* If there are no ready insns, stall until one is ready and add all
3164 of the pending insns at that point to the ready list. */
3165 if (new_ready == 0)
3166 {
3167 register int stalls;
3168
3169 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3170 if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
3171 {
3172 for (; insn; insn = NEXT_INSN (insn))
3173 {
3174 if (file)
3175 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3176 INSN_UID (insn), INSN_UID (last), stalls, clock);
3177 ready[new_ready++] = insn;
3178 q_size -= 1;
3179 }
3180 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3181 break;
3182 }
3183
3184 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3185 }
3186
3187 /* There should be some instructions waiting to fire. */
3188 if (new_ready == 0)
3189 abort ();
3190
3191 if (file)
3192 {
3193 fprintf (file, ";; ready list at T-%d:", clock);
3194 for (i = 0; i < new_ready; i++)
3195 fprintf (file, " %d (%x)",
3196 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3197 }
3198
3199 /* Sort the ready list and choose the best insn to schedule. Select
3200 which insn should issue in this cycle and queue those that are
3201 blocked by function unit hazards.
3202
3203 N_READY holds the number of items that were scheduled the last time,
3204 minus the one instruction scheduled on the last loop iteration; it
3205 is not modified for any other reason in this loop. */
3206
3207 SCHED_SORT (ready, new_ready, n_ready);
3208 if (MAX_BLOCKAGE > 1)
3209 {
3210 new_ready = schedule_select (ready, new_ready, clock, file);
3211 if (new_ready == 0)
3212 {
3213 if (file)
3214 fprintf (file, "\n");
3215 /* We must set n_ready here, to ensure that sorting always
3216 occurs when we come back to the SCHED_SORT line above. */
3217 n_ready = 0;
3218 continue;
3219 }
3220 }
3221 n_ready = new_ready;
3222 last_scheduled_insn = insn = ready[0];
3223
3224 /* The first insn scheduled becomes the new tail. */
3225 if (tail == 0)
3226 tail = insn;
3227
3228 if (file)
3229 {
3230 fprintf (file, ", now");
3231 for (i = 0; i < n_ready; i++)
3232 fprintf (file, " %d", INSN_UID (ready[i]));
3233 fprintf (file, "\n");
3234 }
3235
3236 if (DONE_PRIORITY_P (insn))
3237 abort ();
3238
3239 if (reload_completed == 0)
3240 {
3241 /* Process this insn, and each insn linked to this one which must
3242 be immediately output after this insn. */
3243 do
3244 {
3245 /* First we kill registers set by this insn, and then we
3246 make registers used by this insn live. This is the opposite of
3247 the order used above, because we are traversing the instructions
3248 backwards. */
3249
3250 /* Strictly speaking, we should scan REG_UNUSED notes and make
3251 every register mentioned there live; however, we would just
3252 kill them again immediately below, so there seems to be no
3253 reason to bother. */
3254
3255 /* See if this is the last notice we must take of a register. */
3256 if (GET_CODE (PATTERN (insn)) == SET
3257 || GET_CODE (PATTERN (insn)) == CLOBBER)
3258 sched_note_set (b, PATTERN (insn), 1);
3259 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3260 {
3261 int j;
3262 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3263 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3264 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3265 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3266 }
3267
3268 /* This code keeps life analysis information up to date. */
3269 if (GET_CODE (insn) == CALL_INSN)
3270 {
3271 register struct sometimes *p;
3272
3273 /* A call kills all call used registers that are not
3274 global or fixed, except for those mentioned in the call
3275 pattern which will be made live again later. */
3276 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3277 if (call_used_regs[i] && ! global_regs[i]
3278 && ! fixed_regs[i])
3279 {
3280 CLEAR_REGNO_REG_SET (bb_live_regs, i);
3281 SET_REGNO_REG_SET (bb_dead_regs, i);
3282 }
3283
3284 /* Regs live at the time of a call instruction must not
3285 go in a register clobbered by calls. Record this for
3286 all regs now live. Note that regs which are born or
3287 die in a call do not cross a call, so this must be done
3288 after the killings (above) and before the births
3289 (below). */
3290 p = regs_sometimes_live;
3291 for (i = 0; i < sometimes_max; i++, p++)
3292 if (REGNO_REG_SET_P (bb_live_regs, p->regno))
3293 p->calls_crossed += 1;
3294 }
3295
3296 /* Make every register used live, and add REG_DEAD notes for
3297 registers which were not live before we started. */
3298 attach_deaths_insn (insn);
3299
3300 /* Find registers now made live by that instruction. */
3301 EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
3302 {
3303 sometimes_max
3304 = new_sometimes_live (regs_sometimes_live,
3305 i, sometimes_max);
3306 });
3307 IOR_REG_SET (old_live_regs, bb_live_regs);
3308
3309 /* Count lengths of all regs we are worrying about now,
3310 and handle registers no longer live. */
3311
3312 for (i = 0; i < sometimes_max; i++)
3313 {
3314 register struct sometimes *p = &regs_sometimes_live[i];
3315 int regno = p->regno;
3316
3317 p->live_length += 1;
3318
3319 if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
3320 {
3321 /* This is the end of one of this register's lifetime
3322 segments. Save the lifetime info collected so far,
3323 and clear its bit in the old_live_regs entry. */
3324 sched_reg_live_length[regno] += p->live_length;
3325 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3326 CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
3327
3328 /* Delete the reg_sometimes_live entry for this reg by
3329 copying the last entry over top of it. */
3330 *p = regs_sometimes_live[--sometimes_max];
3331 /* ...and decrement i so that this newly copied entry
3332 will be processed. */
3333 i--;
3334 }
3335 }
3336
3337 link = insn;
3338 insn = PREV_INSN (insn);
3339 }
3340 while (SCHED_GROUP_P (link));
3341
3342 /* Set INSN back to the insn we are scheduling now. */
3343 insn = ready[0];
3344 }
3345
3346 /* Schedule INSN. Remove it from the ready list. */
3347 ready += 1;
3348 n_ready -= 1;
3349
3350 sched_n_insns += 1;
3351 NEXT_INSN (insn) = last;
3352 PREV_INSN (last) = insn;
3353
3354 /* Everything that precedes INSN now either becomes "ready", if
3355 it can execute immediately before INSN, or "pending", if
3356 there must be a delay. Give INSN high enough priority that
3357 at least one (maybe more) reg-killing insns can be launched
3358 ahead of all others. Mark INSN as scheduled by changing its
3359 priority to -1. */
3360 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3361 new_ready = schedule_insn (insn, ready, n_ready, clock);
3362 INSN_PRIORITY (insn) = DONE_PRIORITY;
3363
3364 /* Schedule all prior insns that must not be moved. */
3365 if (SCHED_GROUP_P (insn))
3366 {
3367 /* Disable these insns from being launched, in case one of the
3368 insns in the group has a dependency on an earlier one. */
3369 link = insn;
3370 while (SCHED_GROUP_P (link))
3371 {
3372 /* Disable these insns from being launched by anybody. */
3373 link = PREV_INSN (link);
3374 INSN_REF_COUNT (link) = 0;
3375 }
3376
3377 /* Now handle each group insn like the main insn was handled
3378 above. */
3379 link = insn;
3380 while (SCHED_GROUP_P (link))
3381 {
3382 link = PREV_INSN (link);
3383
3384 sched_n_insns += 1;
3385
3386 /* ??? Why don't we set LAUNCH_PRIORITY here? */
3387 new_ready = schedule_insn (link, ready, new_ready, clock);
3388 INSN_PRIORITY (link) = DONE_PRIORITY;
3389 }
3390 }
3391
3392 /* Put back NOTE_INSN_SETJMP,
3393 NOTE_INSN_{LOOP,EHREGION}_{BEG,END} notes. */
3394
3395 /* To prime the loop, we need to handle INSN and all the insns in the
3396 sched group. */
3397 last = NEXT_INSN (insn);
3398 do
3399 {
3400 insn = PREV_INSN (last);
3401
3402 /* Maintain a valid chain so emit_note_before works.
3403 This is necessary because PREV_INSN (insn) isn't valid
3404 (if ! SCHED_GROUP_P) and if it points to an insn already
3405 scheduled, a circularity will result. */
3406 if (! SCHED_GROUP_P (insn))
3407 {
3408 NEXT_INSN (prev_head) = insn;
3409 PREV_INSN (insn) = prev_head;
3410 }
3411
3412 last = reemit_notes (insn, insn);
3413 }
3414 while (SCHED_GROUP_P (insn));
3415 }
3416 if (q_size != 0)
3417 abort ();
3418
3419 if (reload_completed == 0)
3420 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3421
3422 /* HEAD is now the first insn in the chain of insns that have
3423 been scheduled by the loop above.
3424 TAIL is the last of those insns. */
3425 head = last;
3426
3427 /* NOTE_LIST is the end of a chain of notes previously found
3428 among the insns. Insert them at the beginning of the insns. */
3429 if (note_list != 0)
3430 {
3431 rtx note_head = note_list;
3432 while (PREV_INSN (note_head))
3433 note_head = PREV_INSN (note_head);
3434
3435 PREV_INSN (head) = note_list;
3436 NEXT_INSN (note_list) = head;
3437 head = note_head;
3438 }
3439
3440 /* There should be no REG_DEAD notes leftover at the end.
3441 In practice, this can occur as the result of bugs in flow, combine.c,
3442 and/or sched.c. The values of the REG_DEAD notes remaining are
3443 meaningless, because dead_notes is just used as a free list. */
3444 #if 1
3445 if (dead_notes != 0)
3446 abort ();
3447 #endif
3448
3449 if (new_needs & NEED_HEAD)
3450 basic_block_head[b] = head;
3451 PREV_INSN (head) = prev_head;
3452 NEXT_INSN (prev_head) = head;
3453
3454 if (new_needs & NEED_TAIL)
3455 basic_block_end[b] = tail;
3456 NEXT_INSN (tail) = next_tail;
3457 PREV_INSN (next_tail) = tail;
3458
3459 /* Restore the line-number notes of each insn. */
3460 if (write_symbols != NO_DEBUG)
3461 {
3462 rtx line, note, prev, new;
3463 int notes = 0;
3464
3465 head = basic_block_head[b];
3466 next_tail = NEXT_INSN (basic_block_end[b]);
3467
3468 /* Determine the current line-number. We want to know the current
3469 line number of the first insn of the block here, in case it is
3470 different from the true line number that was saved earlier. If
3471 different, then we need a line number note before the first insn
3472 of this block. If it happens to be the same, then we don't want to
3473 emit another line number note here. */
3474 for (line = head; line; line = PREV_INSN (line))
3475 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3476 break;
3477
3478 /* Walk the insns keeping track of the current line-number and inserting
3479 the line-number notes as needed. */
3480 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3481 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3482 line = insn;
3483 /* This used to emit line number notes before every non-deleted note.
3484 However, this confuses a debugger, because line notes not separated
3485 by real instructions all end up at the same address. I can find no
3486 use for line number notes before other notes, so none are emitted. */
3487 else if (GET_CODE (insn) != NOTE
3488 && (note = LINE_NOTE (insn)) != 0
3489 && note != line
3490 && (line == 0
3491 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3492 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3493 {
3494 line = note;
3495 prev = PREV_INSN (insn);
3496 if (LINE_NOTE (note))
3497 {
3498 /* Re-use the original line-number note. */
3499 LINE_NOTE (note) = 0;
3500 PREV_INSN (note) = prev;
3501 NEXT_INSN (prev) = note;
3502 PREV_INSN (insn) = note;
3503 NEXT_INSN (note) = insn;
3504 }
3505 else
3506 {
3507 notes++;
3508 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3509 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3510 RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
3511 }
3512 }
3513 if (file && notes)
3514 fprintf (file, ";; added %d line-number notes\n", notes);
3515 }
3516
3517 if (file)
3518 {
3519 fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3520 clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3521 }
3522
3523 /* Yow! We're done! */
3524 free_pending_lists ();
3525
3526 ret:
3527 FREE_REG_SET (reg_pending_sets);
3528 FREE_REG_SET (old_live_regs);
3529
3530 return;
3531 }
3532 \f
3533 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3534 REGNO, returning the rtx of the reference found if any. Otherwise,
3535 returns 0. */
3536
3537 static rtx
3538 regno_use_in (regno, x)
3539 int regno;
3540 rtx x;
3541 {
3542 register char *fmt;
3543 int i, j;
3544 rtx tem;
3545
3546 if (GET_CODE (x) == REG && REGNO (x) == regno)
3547 return x;
3548
3549 fmt = GET_RTX_FORMAT (GET_CODE (x));
3550 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3551 {
3552 if (fmt[i] == 'e')
3553 {
3554 if ((tem = regno_use_in (regno, XEXP (x, i))) != 0)
3555 return tem;
3556 }
3557 else if (fmt[i] == 'E')
3558 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3559 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))) != 0)
3560 return tem;
3561 }
3562
3563 return 0;
3564 }
3565
3566 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3567 needed for the hard register mentioned in the note. This can happen
3568 if the reference to the hard register in the original insn was split into
3569 several smaller hard register references in the split insns. */
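
/* Illustrative example: on a 32-bit target, (reg:DI 2) occupies hard
regs 2 and 3. If the splitter rewrote a use of it as separate
(reg:SI 2) and (reg:SI 3) references, the original REG_DEAD note
for (reg:DI 2) must become a REG_DEAD note for each piece, placed
on the last split insn that references that piece. */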
3570
3571 static void
3572 split_hard_reg_notes (note, first, last, orig_insn)
3573 rtx note, first, last, orig_insn;
3574 {
3575 rtx reg, temp, link;
3576 int n_regs, i, new_reg;
3577 rtx insn;
3578
3579 /* Assume that this is a REG_DEAD note. */
3580 if (REG_NOTE_KIND (note) != REG_DEAD)
3581 abort ();
3582
3583 reg = XEXP (note, 0);
3584
3585 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3586
3587 for (i = 0; i < n_regs; i++)
3588 {
3589 new_reg = REGNO (reg) + i;
3590
3591 /* Check for references to new_reg in the split insns. */
3592 for (insn = last; ; insn = PREV_INSN (insn))
3593 {
3594 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3595 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3596 {
3597 /* Create a new reg dead note here. */
3598 link = rtx_alloc (EXPR_LIST);
3599 PUT_REG_NOTE_KIND (link, REG_DEAD);
3600 XEXP (link, 0) = temp;
3601 XEXP (link, 1) = REG_NOTES (insn);
3602 REG_NOTES (insn) = link;
3603
3604 /* If TEMP kills multiple hard registers, advance I past the extra ones. */
3605 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3606
3607 break;
3608 }
3609 /* It isn't mentioned anywhere, so no new reg note is needed for
3610 this register. */
3611 if (insn == first)
3612 break;
3613 }
3614 }
3615 }
3616
3617 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3618 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
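
/* Illustrative example (register numbers invented): if splitting
turned one insn into

(set (reg:SI 100) (mult:SI (reg:SI 101) (reg:SI 102)))
(set (reg:SI 103) (plus:SI (reg:SI 100) (reg:SI 104)))

then (reg:SI 100), which formerly was born and died inside the
original insn, now dies in the second insn and needs a REG_DEAD
note there. */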
3619
3620 static void
3621 new_insn_dead_notes (pat, insn, last, orig_insn)
3622 rtx pat, insn, last, orig_insn;
3623 {
3624 rtx dest, tem, set;
3625
3626 /* PAT is either a CLOBBER or a SET here. */
3627 dest = XEXP (pat, 0);
3628
3629 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3630 || GET_CODE (dest) == STRICT_LOW_PART
3631 || GET_CODE (dest) == SIGN_EXTRACT)
3632 dest = XEXP (dest, 0);
3633
3634 if (GET_CODE (dest) == REG)
3635 {
3636 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3637 {
3638 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3639 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3640 && (set = single_set (tem)))
3641 {
3642 rtx tem_dest = SET_DEST (set);
3643
3644 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3645 || GET_CODE (tem_dest) == SUBREG
3646 || GET_CODE (tem_dest) == STRICT_LOW_PART
3647 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3648 tem_dest = XEXP (tem_dest, 0);
3649
3650 if (! rtx_equal_p (tem_dest, dest))
3651 {
3652 /* Use the same scheme as combine.c: don't put both REG_DEAD
3653 and REG_UNUSED notes on the same insn. */
3654 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3655 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3656 {
3657 rtx note = rtx_alloc (EXPR_LIST);
3658 PUT_REG_NOTE_KIND (note, REG_DEAD);
3659 XEXP (note, 0) = dest;
3660 XEXP (note, 1) = REG_NOTES (tem);
3661 REG_NOTES (tem) = note;
3662 }
3663 /* The reg only dies in one insn, the last one that uses
3664 it. */
3665 break;
3666 }
3667 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3668 /* We found an instruction that both uses the register,
3669 and sets it, so no new REG_NOTE is needed for this set. */
3670 break;
3671 }
3672 }
3673 /* If this is a set, the register must die somewhere, unless it is
3674 the dest of the original insn and hence is live after the original
3675 insn. Abort if it isn't supposed to be live after the original insn.
3676
3677 If this is a clobber, then just add a REG_UNUSED note. */
3678 if (tem == insn)
3679 {
3680 int live_after_orig_insn = 0;
3681 rtx pattern = PATTERN (orig_insn);
3682 int i;
3683
3684 if (GET_CODE (pat) == CLOBBER)
3685 {
3686 rtx note = rtx_alloc (EXPR_LIST);
3687 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3688 XEXP (note, 0) = dest;
3689 XEXP (note, 1) = REG_NOTES (insn);
3690 REG_NOTES (insn) = note;
3691 return;
3692 }
3693
3694 /* The original insn could have multiple sets, so search the
3695 insn for all sets. */
3696 if (GET_CODE (pattern) == SET)
3697 {
3698 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3699 live_after_orig_insn = 1;
3700 }
3701 else if (GET_CODE (pattern) == PARALLEL)
3702 {
3703 for (i = 0; i < XVECLEN (pattern, 0); i++)
3704 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3705 && reg_overlap_mentioned_p (dest,
3706 SET_DEST (XVECEXP (pattern,
3707 0, i))))
3708 live_after_orig_insn = 1;
3709 }
3710
3711 if (! live_after_orig_insn)
3712 abort ();
3713 }
3714 }
3715 }
3716
3717 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3718 registers modified by X. INC is -1 if the containing insn is being deleted,
3719 and is 1 if the containing insn is a newly generated insn. */
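
/* For example, when (parallel [(set (reg 100) ...)
(clobber (reg 101))]) is deleted, the caller invokes this once per
element with INC == -1, decrementing REG_N_SETS for both registers;
the replacement insns then bump the counts back up with INC == 1. */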
3720
3721 static void
3722 update_n_sets (x, inc)
3723 rtx x;
3724 int inc;
3725 {
3726 rtx dest = SET_DEST (x);
3727
3728 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3729 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3730 dest = XEXP (dest, 0);
3731
3732 if (GET_CODE (dest) == REG)
3733 {
3734 int regno = REGNO (dest);
3735
3736 if (regno < FIRST_PSEUDO_REGISTER)
3737 {
3738 register int i;
3739 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3740
3741 for (i = regno; i < endregno; i++)
3742 REG_N_SETS (i) += inc;
3743 }
3744 else
3745 REG_N_SETS (regno) += inc;
3746 }
3747 }
3748
3749 /* Updates all flow-analysis related quantities (including REG_NOTES) for
3750 the insns from FIRST to LAST inclusive that were created by splitting
3751 ORIG_INSN. NOTES are the original REG_NOTES. */
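
/* Illustrative sketch of the job: a REG_DEAD note for (reg 42) on
ORIG_INSN must move to the last new insn that mentions (reg 42);
a REG_LIBCALL note must move to FIRST and a REG_RETVAL note to
LAST, with their cross-references updated; and so on for each kind
of note handled in the switch below. */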
3752
3753 static void
3754 update_flow_info (notes, first, last, orig_insn)
3755 rtx notes;
3756 rtx first, last;
3757 rtx orig_insn;
3758 {
3759 rtx insn, note;
3760 rtx next;
3761 rtx orig_dest, temp;
3762 rtx set;
3763
3764 /* Get and save the destination set by the original insn. */
3765
3766 orig_dest = single_set (orig_insn);
3767 if (orig_dest)
3768 orig_dest = SET_DEST (orig_dest);
3769
3770 /* Move REG_NOTES from the original insn to where they now belong. */
3771
3772 for (note = notes; note; note = next)
3773 {
3774 next = XEXP (note, 1);
3775 switch (REG_NOTE_KIND (note))
3776 {
3777 case REG_DEAD:
3778 case REG_UNUSED:
3779 /* Move these notes from the original insn to the last new insn
3780 that mentions the register. */
3781
3782 for (insn = last; ; insn = PREV_INSN (insn))
3783 {
3784 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3785 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3786 {
3787 /* If this note refers to a multiple word hard register, it
3788 may have been split into several smaller hard register
3789 references, so handle it specially. */
3790 temp = XEXP (note, 0);
3791 if (REG_NOTE_KIND (note) == REG_DEAD
3792 && GET_CODE (temp) == REG
3793 && REGNO (temp) < FIRST_PSEUDO_REGISTER
3794 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3795 split_hard_reg_notes (note, first, last, orig_insn);
3796 else
3797 {
3798 XEXP (note, 1) = REG_NOTES (insn);
3799 REG_NOTES (insn) = note;
3800 }
3801
3802 /* Sometimes we need to convert REG_UNUSED notes to REG_DEAD
3803 notes. */
3804 /* ??? This won't handle multiple word registers correctly,
3805 but should be good enough for now. */
3806 if (REG_NOTE_KIND (note) == REG_UNUSED
3807 && GET_CODE (XEXP (note, 0)) != SCRATCH
3808 && ! dead_or_set_p (insn, XEXP (note, 0)))
3809 PUT_REG_NOTE_KIND (note, REG_DEAD);
3810
3811 /* The reg only dies in one insn, the last one that uses
3812 it. */
3813 break;
3814 }
3815 /* It must die somewhere; fail if we couldn't find where it died.
3816
3817 If this is a REG_UNUSED note, then it must be a temporary
3818 register that was not needed by this instantiation of the
3819 pattern, so we can safely ignore it. */
3820 if (insn == first)
3821 {
3822 /* After reload, REG_DEAD notes sometimes appear one
3823 instruction after the register actually dies. */
3824 if (reload_completed && REG_NOTE_KIND (note) == REG_DEAD)
3825 {
3826 XEXP (note, 1) = REG_NOTES (insn);
3827 REG_NOTES (insn) = note;
3828 break;
3829 }
3830
3831 if (REG_NOTE_KIND (note) != REG_UNUSED)
3832 abort ();
3833
3834 break;
3835 }
3836 }
3837 break;
3838
3839 case REG_WAS_0:
3840 /* This note applies to the dest of the original insn. Find the
3841 first new insn that now has the same dest, and move the note
3842 there. */
3843
3844 if (! orig_dest)
3845 abort ();
3846
3847 for (insn = first; ; insn = NEXT_INSN (insn))
3848 {
3849 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3850 && (temp = single_set (insn))
3851 && rtx_equal_p (SET_DEST (temp), orig_dest))
3852 {
3853 XEXP (note, 1) = REG_NOTES (insn);
3854 REG_NOTES (insn) = note;
3855 /* The reg is only zero before one insn, the first
3856 that sets it. */
3857 break;
3858 }
3859 /* If this note refers to a multiple word hard
3860 register, it may have been split into several smaller
3861 hard register references. We could split the notes,
3862 but simply dropping them is good enough. */
3863 if (GET_CODE (orig_dest) == REG
3864 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3865 && HARD_REGNO_NREGS (REGNO (orig_dest),
3866 GET_MODE (orig_dest)) > 1)
3867 break;
3868 /* It must be set somewhere; fail if we couldn't find where it
3869 was set. */
3870 if (insn == last)
3871 abort ();
3872 }
3873 break;
3874
3875 case REG_EQUAL:
3876 case REG_EQUIV:
3877 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3878 set is meaningless. Just drop the note. */
3879 if (! orig_dest)
3880 break;
3881
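/* Otherwise fall through: like REG_NO_CONFLICT, the note belongs on
the last new insn that sets the original dest. */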
3882 case REG_NO_CONFLICT:
3883 /* These notes apply to the dest of the original insn. Find the last
3884 new insn that now has the same dest, and move the note there. */
3885
3886 if (! orig_dest)
3887 abort ();
3888
3889 for (insn = last; ; insn = PREV_INSN (insn))
3890 {
3891 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3892 && (temp = single_set (insn))
3893 && rtx_equal_p (SET_DEST (temp), orig_dest))
3894 {
3895 XEXP (note, 1) = REG_NOTES (insn);
3896 REG_NOTES (insn) = note;
3897 /* Only put this note on one of the new insns. */
3898 break;
3899 }
3900
3901 /* The original dest must still be set someplace. Abort if we
3902 couldn't find it. */
3903 if (insn == first)
3904 {
3905 /* However, if this note refers to a multiple word hard
3906 register, it may have been split into several smaller
3907 hard register references. We could split the notes,
3908 but simply dropping them is good enough. */
3909 if (GET_CODE (orig_dest) == REG
3910 && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3911 && HARD_REGNO_NREGS (REGNO (orig_dest),
3912 GET_MODE (orig_dest)) > 1)
3913 break;
3914 /* Likewise for multi-word memory references. */
3915 if (GET_CODE (orig_dest) == MEM
3916 && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
3917 break;
3918 abort ();
3919 }
3920 }
3921 break;
3922
3923 case REG_LIBCALL:
3924 /* Move a REG_LIBCALL note to the first insn created, and update
3925 the corresponding REG_RETVAL note. */
3926 XEXP (note, 1) = REG_NOTES (first);
3927 REG_NOTES (first) = note;
3928
3929 insn = XEXP (note, 0);
3930 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3931 if (note)
3932 XEXP (note, 0) = first;
3933 break;
3934
3935 case REG_EXEC_COUNT:
3936 /* Move a REG_EXEC_COUNT note to the first insn created. */
3937 XEXP (note, 1) = REG_NOTES (first);
3938 REG_NOTES (first) = note;
3939 break;
3940
3941 case REG_RETVAL:
3942 /* Move a REG_RETVAL note to the last insn created, and update
3943 the corresponding REG_LIBCALL note. */
3944 XEXP (note, 1) = REG_NOTES (last);
3945 REG_NOTES (last) = note;
3946
3947 insn = XEXP (note, 0);
3948 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3949 if (note)
3950 XEXP (note, 0) = last;
3951 break;
3952
3953 case REG_NONNEG:
3954 case REG_BR_PROB:
3955 /* This should be moved to whichever instruction is a JUMP_INSN. */
3956
3957 for (insn = last; ; insn = PREV_INSN (insn))
3958 {
3959 if (GET_CODE (insn) == JUMP_INSN)
3960 {
3961 XEXP (note, 1) = REG_NOTES (insn);
3962 REG_NOTES (insn) = note;
3963 /* Only put this note on one of the new insns. */
3964 break;
3965 }
3966 /* Fail if we couldn't find a JUMP_INSN. */
3967 if (insn == first)
3968 abort ();
3969 }
3970 break;
3971
3972 case REG_INC:
3973 /* reload sometimes leaves obsolete REG_INC notes around. */
3974 if (reload_completed)
3975 break;
3976 /* This should be moved to whichever instruction now has the
3977 increment operation. */
3978 abort ();
3979
3980 case REG_LABEL:
3981 /* Should be moved to the new insn(s) which use the label. */
3982 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3983 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3984 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3985 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
3986 XEXP (note, 0), REG_NOTES (insn));
3987 break;
3988
3989 case REG_CC_SETTER:
3990 case REG_CC_USER:
3991 /* These two notes will never appear until after reorg, so we don't
3992 have to handle them here. */
3993 default:
3994 abort ();
3995 }
3996 }
3997
3998 /* Each new insn created, except the last, has a new set. If the destination
3999 is a register, then this reg is now live across several insns, whereas
4000 previously the dest reg was born and died within the same insn. To
4001 reflect this, we now need a REG_DEAD note on the insn where this
4002 dest reg dies.
4003
4004 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
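
/* Illustrative example (register numbers invented): if one of the
new insns is

(parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
(clobber (reg:SI 103))])

and no later split insn reads (reg:SI 103), the CLOBBER gets a
REG_UNUSED note here. */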
4005
4006 for (insn = first; insn != last; insn = NEXT_INSN (insn))
4007 {
4008 rtx pat;
4009 int i;
4010
4011 pat = PATTERN (insn);
4012 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4013 new_insn_dead_notes (pat, insn, last, orig_insn);
4014 else if (GET_CODE (pat) == PARALLEL)
4015 {
4016 for (i = 0; i < XVECLEN (pat, 0); i++)
4017 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4018 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4019 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4020 }
4021 }
4022
4023 /* If any insn, except the last, uses the register set by the last insn,
4024 then we need a new REG_DEAD note on that insn. In this case, there
4025 would not have been a REG_DEAD note for this register in the original
4026 insn because it was used and set within one insn. */
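
/* Illustrative example (register numbers invented): if the original
insn both used and set (reg:SI 100) and was split into

(set (reg:SI 200) (plus:SI (reg:SI 100) (const_int 1)))
(set (reg:SI 100) (mult:SI (reg:SI 200) (reg:SI 200)))

then the old value of (reg:SI 100) now dies in the first insn,
which therefore needs a REG_DEAD note. */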
4027
4028 set = single_set (last);
4029 if (set)
4030 {
4031 rtx dest = SET_DEST (set);
4032
4033 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4034 || GET_CODE (dest) == STRICT_LOW_PART
4035 || GET_CODE (dest) == SIGN_EXTRACT)
4036 dest = XEXP (dest, 0);
4037
4038 if (GET_CODE (dest) == REG
4039 /* Global registers are always live, so the code below does not
4040 apply to them. */
4041 && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
4042 || ! global_regs[REGNO (dest)]))
4043 {
4044 rtx stop_insn = PREV_INSN (first);
4045
4046 /* If the last insn uses the register that it is setting, then
4047 we don't want to put a REG_DEAD note there. Search backwards
4048 to find the first insn that sets but does not use DEST. */
4049
4050 insn = last;
4051 if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
4052 {
4053 for (insn = PREV_INSN (insn); insn != first;
4054 insn = PREV_INSN (insn))
4055 {
4056 if ((set = single_set (insn))
4057 && reg_mentioned_p (dest, SET_DEST (set))
4058 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4059 break;
4060 }
4061 }
4062
4063 /* Now find the first insn that uses but does not set DEST. */
4064
4065 for (insn = PREV_INSN (insn); insn != stop_insn;
4066 insn = PREV_INSN (insn))
4067 {
4068 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4069 && reg_mentioned_p (dest, PATTERN (insn))
4070 && (set = single_set (insn)))
4071 {
4072 rtx insn_dest = SET_DEST (set);
4073
4074 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4075 || GET_CODE (insn_dest) == SUBREG
4076 || GET_CODE (insn_dest) == STRICT_LOW_PART
4077 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4078 insn_dest = XEXP (insn_dest, 0);
4079
4080 if (insn_dest != dest)
4081 {
4082 note = rtx_alloc (EXPR_LIST);
4083 PUT_REG_NOTE_KIND (note, REG_DEAD);
4084 XEXP (note, 0) = dest;
4085 XEXP (note, 1) = REG_NOTES (insn);
4086 REG_NOTES (insn) = note;
4087 /* The reg only dies in one insn, the last one
4088 that uses it. */
4089 break;
4090 }
4091 }
4092 }
4093 }
4094 }
4095
4096 /* If the original dest is modifying a multiple register target, and the
4097 original instruction was split such that the original dest is now set
4098 by two or more SUBREG sets, then the split insns no longer kill the
4099 destination of the original insn.
4100
4101 In this case, if there exists an instruction in the same basic block,
4102 before the split insn, which uses the original dest, and this use is
4103 killed by the original insn, then we must remove the REG_DEAD note on
4104 this insn, because it is now superfluous.
4105
4106 This does not apply when a hard register gets split, because the code
4107 knows how to handle overlapping hard registers properly. */
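
/* Illustrative example: if (set (reg:DI 100) ...) was split into

(set (subreg:SI (reg:DI 100) 0) ...)
(set (subreg:SI (reg:DI 100) 1) ...)

the SUBREG stores no longer kill all of (reg:DI 100), so a REG_DEAD
note for it on an earlier insn in the block is now superfluous and
must be removed. */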
4108 if (orig_dest && GET_CODE (orig_dest) == REG)
4109 {
4110 int found_orig_dest = 0;
4111 int found_split_dest = 0;
4112
4113 for (insn = first; ; insn = NEXT_INSN (insn))
4114 {
4115 rtx pat = PATTERN (insn);
4116 int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
4117 set = pat;
4118 for (;;)
4119 {
4120 if (GET_CODE (set) == SET)
4121 {
4122 if (GET_CODE (SET_DEST (set)) == REG
4123 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4124 {
4125 found_orig_dest = 1;
4126 break;
4127 }
4128 else if (GET_CODE (SET_DEST (set)) == SUBREG
4129 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4130 {
4131 found_split_dest = 1;
4132 break;
4133 }
4134 }
4135 if (--i < 0)
4136 break;
4137 set = XVECEXP (pat, 0, i);
4138 }
4139
4140 if (insn == last)
4141 break;
4142 }
4143
4144 if (found_split_dest)
4145 {
4146 /* Search backwards from FIRST, looking for the first insn that uses
4147 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4148 If we find an insn, and it has a REG_DEAD note, then delete the
4149 note. */
4150
4151 for (insn = first; insn; insn = PREV_INSN (insn))
4152 {
4153 if (GET_CODE (insn) == CODE_LABEL
4154 || GET_CODE (insn) == JUMP_INSN)
4155 break;
4156 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4157 && reg_mentioned_p (orig_dest, insn))
4158 {
4159 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4160 if (note)
4161 remove_note (insn, note);
4162 }
4163 }
4164 }
4165 else if (! found_orig_dest)
4166 {
4167 /* This should never happen. */
4168 abort ();
4169 }
4170 }
4171
4172 /* Update reg_n_sets. This is necessary to prevent local alloc from
4173 converting REG_EQUAL notes to REG_EQUIV when splitting has changed
4174 a reg from being set once to being set multiple times. */
4175
4176 {
4177 rtx x = PATTERN (orig_insn);
4178 RTX_CODE code = GET_CODE (x);
4179
4180 if (code == SET || code == CLOBBER)
4181 update_n_sets (x, -1);
4182 else if (code == PARALLEL)
4183 {
4184 int i;
4185 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4186 {
4187 code = GET_CODE (XVECEXP (x, 0, i));
4188 if (code == SET || code == CLOBBER)
4189 update_n_sets (XVECEXP (x, 0, i), -1);
4190 }
4191 }
4192
4193 for (insn = first; ; insn = NEXT_INSN (insn))
4194 {
4195 x = PATTERN (insn);
4196 code = GET_CODE (x);
4197
4198 if (code == SET || code == CLOBBER)
4199 update_n_sets (x, 1);
4200 else if (code == PARALLEL)
4201 {
4202 int i;
4203 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4204 {
4205 code = GET_CODE (XVECEXP (x, 0, i));
4206 if (code == SET || code == CLOBBER)
4207 update_n_sets (XVECEXP (x, 0, i), 1);
4208 }
4209 }
4210
4211 if (insn == last)
4212 break;
4213 }
4214 }
4215 }
4216
4217 /* The one entry point in this file. DUMP_FILE is the dump file for
4218 this pass. */
4219
4220 void
4221 schedule_insns (dump_file)
4222 FILE *dump_file;
4223 {
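/* Splitting (done while scheduling, below) can replace one insn with
several, so the per-insn arrays are sized assuming each original
insn may become up to MAX_INSNS_PER_SPLIT insns. */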
4224 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4225 int b;
4226 int i;
4227 rtx insn;
4228
4229 /* Taking care of this degenerate case makes the rest of
4230 this code simpler. */
4231 if (n_basic_blocks == 0)
4232 return;
4233
4234 /* Create an insn here so that we can hang dependencies off of it later. */
4235 sched_before_next_call
4236 = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
4237 NULL_RTX, 0, NULL_RTX, NULL_RTX);
4238
4239 /* Initialize the unused_*_lists. We can't use the ones left over from
4240 the previous function, because gcc has freed that memory. We can use
4241 the ones left over from the first sched pass in the second pass however,
4242 so only clear them on the first sched pass. The first pass is before
4243 reload if flag_schedule_insns is set, otherwise it is afterwards. */
4244
4245 if (reload_completed == 0 || ! flag_schedule_insns)
4246 {
4247 unused_insn_list = 0;
4248 unused_expr_list = 0;
4249 }
4250
4251 /* We create no insns here, only reorder them, so we
4252 remember how far we can cut back the stack on exit. */
4253
4254 /* Allocate data for this pass. See comments, above,
4255 for what these vectors do. */
4256 insn_luid = (int *) alloca (max_uid * sizeof (int));
4257 insn_priority = (int *) alloca (max_uid * sizeof (int));
4258 insn_tick = (int *) alloca (max_uid * sizeof (int));
4259 insn_costs = (short *) alloca (max_uid * sizeof (short));
4260 insn_units = (short *) alloca (max_uid * sizeof (short));
4261 insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
4262 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
4263
4264 if (reload_completed == 0)
4265 {
4266 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4267 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4268 bb_dead_regs = ALLOCA_REG_SET ();
4269 bb_live_regs = ALLOCA_REG_SET ();
4270 bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
4271 bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
4272 }
4273 else
4274 {
4275 sched_reg_n_calls_crossed = 0;
4276 sched_reg_live_length = 0;
4277 bb_dead_regs = 0;
4278 bb_live_regs = 0;
4279 }
4280 init_alias_analysis ();
4281
4282 if (write_symbols != NO_DEBUG)
4283 {
4284 rtx line;
4285
4286 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
4287 bzero ((char *) line_note, max_uid * sizeof (rtx));
4288 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4289 bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
4290
4291 /* Determine the line-number at the start of each basic block.
4292 This must be computed and saved now, because after a basic block's
4293 predecessor has been scheduled, it is impossible to accurately
4294 determine the correct line number for the first insn of the block. */
4295
4296 for (b = 0; b < n_basic_blocks; b++)
4297 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
4298 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4299 {
4300 line_note_head[b] = line;
4301 break;
4302 }
4303 }
4304
4305 bzero ((char *) insn_luid, max_uid * sizeof (int));
4306 bzero ((char *) insn_priority, max_uid * sizeof (int));
4307 bzero ((char *) insn_tick, max_uid * sizeof (int));
4308 bzero ((char *) insn_costs, max_uid * sizeof (short));
4309 bzero ((char *) insn_units, max_uid * sizeof (short));
4310 bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
4311 bzero ((char *) insn_ref_count, max_uid * sizeof (int));
4312
4313 /* Schedule each basic block, block by block. */
4314
4315 /* ??? Add a NOTE after the last insn of the last basic block. It is not
4316 known why this is done. */
4317 /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
4318 valid insn. */
4319
4320 insn = basic_block_end[n_basic_blocks-1];
4321 if (NEXT_INSN (insn) == 0
4322 || (GET_CODE (insn) != NOTE
4323 && GET_CODE (insn) != CODE_LABEL
4324 /* Don't emit a NOTE if it would end up between an unconditional
4325 jump and a BARRIER. */
4326 && ! (GET_CODE (insn) == JUMP_INSN
4327 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4328 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
4329
4330 for (b = 0; b < n_basic_blocks; b++)
4331 {
4332 rtx insn, next;
4333
4334 note_list = 0;
4335
4336 for (insn = basic_block_head[b]; ; insn = next)
4337 {
4338 rtx prev;
4339 rtx set;
4340
4341 /* Can't use `next_real_insn' because that
4342 might go across CODE_LABELS and short-circuit basic blocks. */
4343 next = NEXT_INSN (insn);
4344 if (GET_CODE (insn) != INSN)
4345 {
4346 if (insn == basic_block_end[b])
4347 break;
4348
4349 continue;
4350 }
4351
4352 /* Don't split no-op move insns. These should silently disappear
4353 later in final. Splitting such insns would break the code
4354 that handles REG_NO_CONFLICT blocks. */
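/* A no-op move is e.g. (set (reg:SI 100) (reg:SI 100)); single_set
and rtx_equal_p detect it below. */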
4355 set = single_set (insn);
4356 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4357 {
4358 if (insn == basic_block_end[b])
4359 break;
4360
4361 /* Nops get in the way while scheduling, so delete them now if
4362 register allocation has already been done. It is too risky
4363 to try to do this before register allocation, and there are
4364 unlikely to be very many nops then anyway. */
4365 if (reload_completed)
4366 {
4367 PUT_CODE (insn, NOTE);
4368 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4369 NOTE_SOURCE_FILE (insn) = 0;
4370 }
4371
4372 continue;
4373 }
4374
4375 /* Split insns here to get max fine-grain parallelism. */
4376 prev = PREV_INSN (insn);
4377 /* It is probably not worthwhile to try to split again in the
4378 second pass. However, if flag_schedule_insns is not set,
4379 the first and only (if any) scheduling pass is after reload. */
4380 if (reload_completed == 0 || ! flag_schedule_insns)
4381 {
4382 rtx last, first = PREV_INSN (insn);
4383 rtx notes = REG_NOTES (insn);
4384
4385 last = try_split (PATTERN (insn), insn, 1);
4386 if (last != insn)
4387 {
4388 /* try_split returns the NOTE that INSN became. */
4389 first = NEXT_INSN (first);
4390 update_flow_info (notes, first, last, insn);
4391
4392 PUT_CODE (insn, NOTE);
4393 NOTE_SOURCE_FILE (insn) = 0;
4394 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4395 if (insn == basic_block_head[b])
4396 basic_block_head[b] = first;
4397 if (insn == basic_block_end[b])
4398 {
4399 basic_block_end[b] = last;
4400 break;
4401 }
4402 }
4403 }
4404
4405 if (insn == basic_block_end[b])
4406 break;
4407 }
4408
4409 schedule_block (b, dump_file);
4410
4411 #ifdef USE_C_ALLOCA
4412 alloca (0);
4413 #endif
4414 }
4415
4416 /* Reposition the prologue and epilogue notes in case we moved the
4417 prologue/epilogue insns. */
4418 if (reload_completed)
4419 reposition_prologue_and_epilogue_notes (get_insns ());
4420
4421 if (write_symbols != NO_DEBUG)
4422 {
4423 rtx line = 0;
4424 rtx insn = get_insns ();
4425 int active_insn = 0;
4426 int notes = 0;
4427
4428 /* Walk the insns deleting redundant line-number notes. Many of these
4429 were already present before scheduling; the remainder tend to occur
4430 at basic block boundaries. */
4431 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4432 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4433 {
4434 /* If there are no active insns following, INSN is redundant. */
4435 if (active_insn == 0)
4436 {
4437 notes++;
4438 NOTE_SOURCE_FILE (insn) = 0;
4439 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4440 }
4441 /* If the line number is unchanged, LINE is redundant. */
4442 else if (line
4443 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4444 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4445 {
4446 notes++;
4447 NOTE_SOURCE_FILE (line) = 0;
4448 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4449 line = insn;
4450 }
4451 else
4452 line = insn;
4453 active_insn = 0;
4454 }
4455 else if (! ((GET_CODE (insn) == NOTE
4456 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4457 || (GET_CODE (insn) == INSN
4458 && (GET_CODE (PATTERN (insn)) == USE
4459 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4460 active_insn++;
4461
4462 if (dump_file && notes)
4463 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4464 }
4465
4466 if (reload_completed == 0)
4467 {
4468 int regno;
4469 for (regno = 0; regno < max_regno; regno++)
4470 if (sched_reg_live_length[regno])
4471 {
4472 if (dump_file)
4473 {
4474 if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
4475 fprintf (dump_file,
4476 ";; register %d life shortened from %d to %d\n",
4477 regno, REG_LIVE_LENGTH (regno),
4478 sched_reg_live_length[regno]);
4479 /* Negative values are special; don't overwrite the current
4480 reg_live_length value if it is negative. */
4481 else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
4482 && REG_LIVE_LENGTH (regno) >= 0)
4483 fprintf (dump_file,
4484 ";; register %d life extended from %d to %d\n",
4485 regno, REG_LIVE_LENGTH (regno),
4486 sched_reg_live_length[regno]);
4487
4488 if (! REG_N_CALLS_CROSSED (regno)
4489 && sched_reg_n_calls_crossed[regno])
4490 fprintf (dump_file,
4491 ";; register %d now crosses calls\n", regno);
4492 else if (REG_N_CALLS_CROSSED (regno)
4493 && ! sched_reg_n_calls_crossed[regno]
4494 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4495 fprintf (dump_file,
4496 ";; register %d no longer crosses calls\n", regno);
4497
4498 }
4499 /* Negative values are special; don't overwrite the current
4500 reg_live_length value if it is negative. */
4501 if (REG_LIVE_LENGTH (regno) >= 0)
4502 REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
4503
4504 /* We can't change the value of reg_n_calls_crossed to zero for
4505 pseudos which are live in more than one block.
4506
4507 This is because combine might have made an optimization which
4508 invalidated basic_block_live_at_start and reg_n_calls_crossed,
4509 but it does not update them. If we update reg_n_calls_crossed
4510 here, the two variables are now inconsistent, and this might
4511 confuse the caller-save code into saving a register that doesn't
4512 need to be saved. This is only a problem when we zero calls
4513 crossed for a pseudo live in multiple basic blocks.
4514
4515 Alternatively, we could try to correctly update basic block live
4516 at start here in sched, but that seems complicated. */
4517 if (sched_reg_n_calls_crossed[regno]
4518 || REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4519 REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
4520 }
4521 }
4522
4523 if (reload_completed == 0)
4524 {
4525 FREE_REG_SET (bb_dead_regs);
4526 FREE_REG_SET (bb_live_regs);
4527 }
4528
4529 }
4530 #endif /* INSN_SCHEDULING */