/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* Pointer to data describing the current DFA state.  */
extern state_t curr_state;

/* Forward declaration.  */
struct ready_list;

/* Describe state of dependencies used during sched_analyze phase.  */
struct deps
{
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
  rtx pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx pending_write_mems;

  /* Indicates the combined length of the two pending lists.  We must prevent
     these lists from ever growing too large since the number of dependencies
     produced is at least O(N*N), and execution time is at least O(4*N*N), as
     a function of the length of these pending lists.  */
  int pending_lists_length;

  /* Length of the pending memory flush list.  Large functions with no
     calls may build up extremely large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non-constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx last_function_call;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insns
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx sched_before_next_call;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  */
  bool in_post_call_group_p;

  /* Set to the tail insn of the outermost libcall block.

     When nonzero, we will mark each insn processed by sched_analyze_insn
     with SCHED_GROUP_P to ensure libcalls are scheduled as a unit.  */
  rtx libcall_block_tail_insn;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  */
  struct deps_reg
    {
      rtx uses;
      rtx sets;
      rtx clobbers;
      int uses_length;
      int clobbers_length;
    } *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Element N is set for each register that is conditionally set.  */
  regset_head reg_conditional_sets;
};
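
/* Illustration only (not part of the original header): the paired
   pending_*_insns / pending_*_mems lists above are walked in lock step,
   one INSN_LIST node per EXPR_LIST node.  A minimal sketch, where
   note_pending_read is a hypothetical consumer of each insn/MEM pair:

     rtx u, m;
     for (u = deps->pending_read_insns, m = deps->pending_read_mems;
          u != 0;
          u = XEXP (u, 1), m = XEXP (m, 1))
       note_pending_read (XEXP (u, 0), XEXP (m, 0));
*/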

/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (struct ready_list *);
  /* Called after taking an insn from the ready list.  Returns nonzero if
     this insn can be scheduled, zero if we should silently discard it.  */
  int (*can_schedule_ready_p) (rtx);
  /* Return nonzero if there are more insns that should be scheduled.  */
  int (*schedule_more_p) (void);
  /* Called after an insn has all its dependencies resolved.  Return nonzero
     if it should be moved to the ready list or the queue, or zero if we
     should silently discard it.  */
  int (*new_ready) (rtx);
  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx, rtx);
  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (rtx, int);
  /* Return nonzero if an insn should be included in priority
     calculations.  */
  int (*contributes_to_priority) (rtx, rtx);
  /* Called when computing dependencies for a JUMP_INSN.  This function
     should store the set of registers that must be considered as set by
     the jump in the regset.  */
  void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx prev_head, next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx head, tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;
  /* Nonzero if we should use cselib for better alias analysis.  This
     must be 0 if the dependency information is used after sched_analyze
     has completed, e.g. if we're using it to initialize state for successor
     blocks in region scheduling.  */
  unsigned int use_cselib:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;
};

extern struct sched_info *current_sched_info;
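
/* Illustration only (hypothetical, not from this file): each scheduling
   pass supplies its own callbacks and points current_sched_info at them,
   roughly:

     static struct sched_info my_sched_info =
     {
       my_init_ready_list, my_can_schedule_ready_p, my_schedule_more_p,
       my_new_ready, my_rank, my_print_insn,
       my_contributes_to_priority, my_compute_jump_reg_dependencies,
       NULL, NULL, NULL, NULL,
       0, 0, 0
     };

     current_sched_info = &my_sched_info;

   where the my_* functions are hypothetical implementations of the hooks
   declared above, and the trailing fields are filled in by the pass.  */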

/* Indexed by INSN_UID, the collection of all data associated with
   a single instruction.  */

struct haifa_insn_data
{
  /* A list of insns which depend on the instruction.  Unlike LOG_LINKS,
     it represents forward dependencies.  */
  rtx depend;

  /* The line number note in effect for each insn.  For line number
     notes, this indicates whether the note may be reused.  */
  rtx line_note;

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* An encoding of the blockage range function.  Both unit and range
     are coded.  This member is used only for the old pipeline interface.  */
  unsigned int blockage;

  /* Number of instructions referring to this insn.  */
  int ref_count;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  short cost;

  /* An encoding of the function units used.  This member is used only
     for the old pipeline interface.  */
  short units;

  /* This weight is an estimation of the insn's contribution to
     register pressure.  */
  short reg_weight;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;

  /* Set if there's DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  unsigned int is_load_insn : 1;

  /* Nonzero if priority has been computed already.  */
  unsigned int priority_known : 1;
};

extern struct haifa_insn_data *h_i_d;

/* Accessor macros for h_i_d.  There are more in haifa-sched.c and
   sched-rgn.c.  */
#define INSN_DEPEND(INSN) (h_i_d[INSN_UID (INSN)].depend)
#define INSN_LUID(INSN) (h_i_d[INSN_UID (INSN)].luid)
#define CANT_MOVE(insn) (h_i_d[INSN_UID (insn)].cant_move)
#define INSN_DEP_COUNT(INSN) (h_i_d[INSN_UID (INSN)].dep_count)
#define INSN_PRIORITY(INSN) (h_i_d[INSN_UID (INSN)].priority)
#define INSN_PRIORITY_KNOWN(INSN) (h_i_d[INSN_UID (INSN)].priority_known)
#define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost)
#define INSN_UNIT(INSN) (h_i_d[INSN_UID (INSN)].units)
#define INSN_REG_WEIGHT(INSN) (h_i_d[INSN_UID (INSN)].reg_weight)
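
/* Illustration only (hypothetical usage): the accessors above index h_i_d
   by INSN_UID, and INSN_PRIORITY is read only once it has been computed:

     if (INSN_PRIORITY_KNOWN (insn) && !CANT_MOVE (insn))
       this_priority = INSN_PRIORITY (insn);

   where insn and this_priority are hypothetical locals in the caller.  */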

#define INSN_BLOCKAGE(INSN) (h_i_d[INSN_UID (INSN)].blockage)
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U, R) \
  (((U) << BLOCKAGE_BITS \
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
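
/* Illustration only: ENCODE_BLOCKAGE packs a unit number U and a range R
   (itself a min/max cost pair) into one word, from the most significant
   field down:

     [ U | MIN_BLOCKAGE_COST (R) | MAX_BLOCKAGE_COST (R) ]
           (BLOCKAGE_BITS wide)    (BLOCKAGE_BITS wide)

   so that, assuming the fields fit,

     UNIT_BLOCKED (ENCODE_BLOCKAGE (u, r)) == u

   and BLOCKAGE_RANGE recovers the min/max pair packed back into the high
   and low halves of a HOST_BITS_PER_INT-wide value.  */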

extern FILE *sched_dump;
extern int sched_verbose;

/* Exception Free Loads:

   We define five classes of speculative loads: IFREE, IRISKY,
   PFREE, PRISKY, and MFREE.

   IFREE loads are loads that are proved to be exception-free, just
   by examining the load insn.  Examples for such loads are loads
   from TOC and loads of global data.

   IRISKY loads are loads that are proved to be exception-risky,
   just by examining the load insn.  Examples for such loads are
   volatile loads and loads from shared memory.

   PFREE loads are loads for which we can prove, by examining other
   insns, that they are exception-free.  Currently, this class consists
   of loads for which we are able to find a "similar load", either in
   the target block, or, if only one split-block exists, in that split
   block.  Load2 is similar to load1 if both have the same single base
   register.  We identify only part of the similar loads, by finding
   an insn upon which both load1 and load2 have a DEF-USE dependence.

   PRISKY loads are loads for which we can prove, by examining other
   insns, that they are exception-risky.  Currently we have two proofs for
   such loads.  The first proof detects loads that are probably guarded by a
   test on the memory address.  This proof is based on the
   backward and forward data dependence information for the region.
   Let load-insn be the examined load.
   Load-insn is PRISKY iff ALL the following hold:

   - insn1 is not in the same block as load-insn
   - there is a DEF-USE dependence chain (insn1, ..., load-insn)
   - test-insn is either a compare or a branch, not in the same block
     as load-insn
   - load-insn is reachable from test-insn
   - there is a DEF-USE dependence chain (insn1, ..., test-insn)

   This proof might fail when the compare and the load are fed
   by an insn not in the region.  To solve this, we will add to this
   group all loads that have no input DEF-USE dependence.

   The second proof detects loads that are directly or indirectly
   fed by a speculative load.  This proof is affected by the
   scheduling process.  We will use the flag fed_by_spec_load.
   Initially, all insns have this flag reset.  After a speculative
   motion of an insn, if insn is either a load, or marked as
   fed_by_spec_load, we will also mark as fed_by_spec_load every
   insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
   load which is fed_by_spec_load is also PRISKY.

   MFREE (maybe-free) loads are all the remaining loads.  They may be
   exception-free, but we cannot prove it.

   Now, all loads in IFREE and PFREE classes are considered
   exception-free, while all loads in IRISKY and PRISKY classes are
   considered exception-risky.  As for loads in the MFREE class,
   these are considered either exception-free or exception-risky,
   depending on whether we are pessimistic or optimistic.  We have
   to take the pessimistic approach to assure the safety of
   speculative scheduling, but we can take the optimistic approach
   by invoking the -fsched-spec-load-dangerous option.  */

enum INSN_TRAP_CLASS
{
  TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
  PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
};

#define WORST_CLASS(class1, class2) \
((class1 > class2) ? class1 : class2)
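
/* Illustration only: when two classifications are merged, the numerically
   larger (riskier) one wins, e.g.

     WORST_CLASS (PFREE_CANDIDATE, IRISKY) == IRISKY
     WORST_CLASS (TRAP_FREE, TRAP_RISKY) == TRAP_RISKY
*/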

#ifndef __GNUC__
#define __inline
#endif

#ifndef HAIFA_INLINE
#define HAIFA_INLINE __inline
#endif

/* Functions in sched-vis.c.  */
extern void init_target_units (void);
extern void insn_print_units (rtx);
extern void init_block_visualization (void);
extern void print_block_visualization (const char *);
extern void visualize_scheduled_insns (int);
extern void visualize_no_unit (rtx);
extern void visualize_stall_cycles (int);
extern void visualize_alloc (void);
extern void visualize_free (void);

/* Functions in sched-deps.c.  */
extern int add_dependence (rtx, rtx, enum reg_note);
extern void add_insn_mem_dependence (struct deps *, rtx *, rtx *, rtx, rtx);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void add_forward_dependence (rtx, rtx, enum reg_note);
extern void compute_forward_dependences (rtx, rtx);
extern rtx find_insn_list (rtx, rtx);
extern void init_dependency_caches (int);
extern void free_dependency_caches (void);

/* Functions in haifa-sched.c.  */
extern int haifa_classify_insn (rtx);
extern void get_block_head_tail (int, rtx *, rtx *);
extern int no_real_insns_p (rtx, rtx);

extern void rm_line_notes (rtx, rtx);
extern void save_line_notes (int, rtx, rtx);
extern void restore_line_notes (rtx, rtx);
extern void rm_redundant_line_notes (void);
extern void rm_other_notes (rtx, rtx);

extern int insn_issue_delay (rtx);
extern int set_priorities (rtx, rtx);

extern void schedule_block (int, int);
extern void sched_init (FILE *);
extern void sched_finish (void);

extern void ready_add (struct ready_list *, rtx);

/* The following are exported for the benefit of debugging functions.  It
   would be nicer to keep them private to haifa-sched.c.  */
extern int insn_unit (rtx);
extern int insn_cost (rtx, rtx, rtx);
extern rtx get_unit_last_insn (int);
extern int actual_hazard_this_instance (int, int, rtx, int, int);
extern void print_insn (char *, rtx, int);