/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "tm_p.h"
#include "regs.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "alloc-pool.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
  h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
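
/* Illustrative sketch (not part of the original file): ds_to_dk and
   dk_to_ds form a lossy round trip, because a ds_t can carry speculative
   status bits that a plain reg_note kind cannot represent:

     ds_t ds = dk_to_ds (REG_DEP_TRUE);    (yields DEP_TRUE)
     enum reg_note dk = ds_to_dk (ds);     (yields REG_DEP_TRUE)

   Any BEGIN_DATA/BE_IN_SPEC bits OR'ed into the status would not survive
   the conversion to reg_note; callers that care about speculation must
   keep the ds_t itself.  */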

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
                      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
        {
        case REG_DEP_TRUE:
          t = 't';
          break;

        case REG_DEP_OUTPUT:
          t = 'o';
          break;

        case REG_DEP_CONTROL:
          t = 'c';
          break;

        case REG_DEP_ANTI:
          t = 'a';
          break;

        default:
          gcc_unreachable ();
          break;
        }

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
        dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
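
/* Illustrative sketch (not part of the original file): sd_debug_dep is the
   entry point meant for interactive use, e.g. from a debugger session:

     (gdb) call sd_debug_dep (dep)

   which prints every field, e.g. "<123; 124; t; >", while internal code can
   restrict the output to selected fields:

     dump_dep (sched_dump, dep, DUMP_DEP_PRO | DUMP_DEP_CON);

   printing just the producer and consumer insn UIDs.  */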

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
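
/* Illustrative sketch (not part of the original file): the links use the
   "pointer to the previous node's next-pointer" idiom, so insertion and
   deletion never need to special-case the list head.  Attaching a link L
   at the front of list LIST is simply

     attach_dep_link (l, &DEPS_LIST_FIRST (list));

   and detach_dep_link below can splice L out again without knowing which
   list, or which position within it, L currently occupies.  */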

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static pool_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static pool_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
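
/* Illustrative sketch (not part of the original file): dn_pool_diff and
   dl_pool_diff act as leak counters rather than byte-accurate audits.
   Every create_dep_node ()/create_deps_list () increments one of them and
   every delete_dep_node ()/free_deps_list () decrements it, so a checking
   pass can assert

     gcc_assert (deps_pools_are_empty_p ());

   after a region is scheduled to catch dependency nodes that were never
   returned to their pool.  */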

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
        break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
        return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
        return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that admits only one register class.  Even
   if there is currently no hard register in the insn, the particular
   hard register will be in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining ones are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
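
/* Illustrative sketch (not part of the original file): each cache is an
   array of bitmaps indexed by the consumer's luid, with one bit per
   potential producer, so a duplicate-dependence query is two array
   lookups and a bit test:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                       INSN_LUID (pro)))
       ...   (a true dependence PRO -> CON is already recorded)

   instead of a walk over the consumer's backward dependence list.  */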

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
                                 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
                                          rtx_insn_list **, int, enum reg_note,
                                          bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;

      if (rev)
        *rev = true;
      return cond;
    }

  return 0;
}
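
/* Illustrative sketch (not part of the original file): for a conditional
   branch of the canonical form

     (set (pc) (if_then_else (eq (reg 17) (const_int 0))
                             (label_ref 23)
                             (pc)))

   the function returns the (eq ...) rtx with *REV left false, while the
   mirrored form with (pc) in the then-arm returns the same comparison
   with *REV set, meaning the caller must reverse it to obtain the taken
   condition.  COND_EXEC patterns simply return COND_EXEC_TEST.  */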

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
                             XEXP (cond, 0),
                             XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
        *rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
\f

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
         (rev1 == rev2
          ? reversed_comparison_code (cond2, NULL)
          : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}
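
/* Illustrative sketch (not part of the original file): two COND_EXEC insns
   predicated on (eq (reg 17) (const_int 0)) and (ne (reg 17) (const_int 0))
   execute under mutually exclusive conditions, so
   sched_insns_conditions_mutex_p lets the scheduler omit the dependence
   between them even when they touch the same location, provided neither
   insn modifies reg 17 itself.  */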
\f

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
        /* If the instruction might fault, it cannot be speculatively
           scheduled.  For control speculation it's obvious why, and for
           data speculation it's because the insn might get wrong input
           if the speculation wasn't successful.  */
        return false;

      if ((ds & BE_IN_DATA)
          && sched_has_condition_p (insn))
        /* If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  */
        return false;
    }

  return true;
}
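
/* Illustrative sketch (not part of the original file): a typical caller
   gates speculative motion on this predicate, e.g.

     if (!sched_insn_is_legitimate_for_speculation_p (insn, BEGIN_DATA))
       ...   (keep the dependence hard; INSN cannot be data-speculated)

   so only side-effect-free, non-trapping, non-grouped insns are ever
   candidates for being moved speculatively and paired with a check.  */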

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
              deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
        size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
        return false;
    }

  return true;
}
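
/* Illustrative sketch (not part of the original file): the SD_LIST_* values
   are bits, so a types mask can name several lists at once and sd_next_list
   peels off one list per call.  For example,

     int n = sd_lists_size (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK);

   counts all unresolved backward dependencies of INSN (this combination is
   what SD_LIST_BACK denotes), and FOR_EACH_DEP iterates over lists the same
   way under the hood.  */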

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
                              sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
        if (DEP_PRO (dep) == pro)
          {
            found_p = true;
            break;
          }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
        if (DEP_CON (dep) == con)
          {
            found_p = true;
            break;
          }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
        *sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
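
/* Illustrative sketch (not part of the original file): the caches can only
   prove absence, never presence, so the fast path is one-sided:

     dep_t dep = sd_find_dep_between (pro, con, false);
     if (dep != NULL)
       ...   (inspect DEP_TYPE (dep) / DEP_STATUS (dep))

   When all four bitmaps are clear the list walk is skipped entirely; when
   any bit is set the walk still runs to fetch the actual dep_t.  */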

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if a new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
              && anti_dependency_cache != NULL
              && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_CONTROL;
      else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
        /* DEP does not add anything to the existing dependence.  */
        return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
        {
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
              == present_dep_types)
            /* DEP does not add anything to the existing dependence.  */
            return DEP_PRESENT;
        }
      else
        {
          /* Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  */
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                      == present_dep_types);

          /* if (DEP is SPECULATIVE) then
               ..we should update DEP_STATUS
             else
               ..we should reset existing dep to non-speculative.  */
        }
    }

  return DEP_CHANGED;
}
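
/* Illustrative sketch (not part of the original file): the three answers
   drive add_or_update_dep_1 like this:

     switch (ask_dependency_caches (new_dep))
       {
       case DEP_CREATED: ...   (skip the list search, just sd_add_dep)
       case DEP_PRESENT: ...   (nothing to do at all)
       case DEP_CHANGED: ...   (find the old dep and merge NEW_DEP into it)
       }

   In the non-USE_DEPS_LIST mode the ">=" comparison above relies on the
   numeric ordering of the REG_DEP_* reg_notes, where a smaller value
   denotes a more restrictive dependence (REG_DEP_TRUE being the most
   restrictive).  */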

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
        {
        case REG_DEP_TRUE:
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_OUTPUT:
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
        bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
        {
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
        }
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
        {
        case REG_DEP_OUTPUT:
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
            rtx mem1 ATTRIBUTE_UNUSED,
            rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
        {
          /* Either existing dep or a dep we're adding or both are
             speculative.  */
          if (!(ds & SPECULATIVE)
              || !(dep_status & SPECULATIVE))
            /* The new dep can't be speculative.  */
            new_status &= ~SPECULATIVE;
          else
            {
              /* Both are speculative.  Merge probabilities.  */
              if (mem1 != NULL)
                {
                  dw_t dw;

                  dw = estimate_dep_weak (mem1, mem2);
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
                }

              new_status = ds_merge (dep_status, ds);
            }
        }

      ds = new_status;

      if (dep_status != ds)
        {
          DEP_STATUS (dep) = ds;
          res = DEP_CHANGED;
        }
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
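
/* Illustrative sketch (not part of the original file): merging an output
   dependence into an existing anti dependence upgrades the link in place:

     ...   (DEP_TYPE (dep) == REG_DEP_ANTI, NEW_DEP is REG_DEP_OUTPUT)
     update_dep (dep, new_dep, sd_it, NULL_RTX, NULL_RTX);
     ...   (DEP_TYPE (dep) is now REG_DEP_OUTPUT, result is DEP_CHANGED)

   and DEP_MULTIPLE records that the single dep_t now stands for more than
   one reason for ordering the two insns.  */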

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
              && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
        {
        case DEP_PRESENT:
          dep_t present_dep;
          sd_iterator_def sd_it;

          present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                      DEP_CON (new_dep),
                                                      resolved_p, &sd_it);
          DEP_MULTIPLE (present_dep) = 1;
          return DEP_PRESENT;

        case DEP_CHANGED:
          maybe_present_p = true;
          present_p = true;
          break;

        case DEP_CREATED:
          maybe_present_p = false;
          present_p = false;
          break;

        default:
          gcc_unreachable ();
          break;
        }
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                  DEP_CON (new_dep),
                                                  resolved_p, &sd_it);

      if (present_dep != NULL)
        /* We found an existing dependency between ELEM and INSN.  */
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
        /* We didn't find a dep; it shouldn't be present in the cache.  */
        gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
                                           estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
                         deps_list_t *back_list_ptr,
                         deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a dependence described by DEP.
   This function is a convenience wrapper around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
                 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
                 INSN_FORW_DEPS (pro));
}
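
/* Illustrative sketch (not part of the original file): a single dep_node
   carries both directions of one dependence, so the resolve/unresolve pair
   only moves the node's two embedded links between per-insn lists; nothing
   is allocated or freed.  A scheduler loop that commits INSN typically runs

     for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
          sd_iterator_cond (&sd_it, &dep);)
       sd_resolve_dep (sd_it);

   relying on the iterator being advanced by the removal itself, exactly as
   with sd_delete_dep below.  */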

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
                           &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
        {
          dump_dep (dump, dep, dump_dep_flags | all);
          fprintf (dump, " ");
        }
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
        real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
         the condition register than the one that was present at
         the jump.  */
      if (cond == NULL_RTX)
        dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
        {
          HARD_REG_SET uses;
          CLEAR_HARD_REG_SET (uses);
          note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
          if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
            dep_type = REG_DEP_ANTI;
        }
      if (dep_type == REG_DEP_CONTROL)
        {
          if (sched_verbose >= 5)
            fprintf (sched_dump, "making DEP_CONTROL for %d\n",
                     INSN_UID (real_pro));
          add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
                               REG_DEP_TRUE, false);
        }
    }

  add_dependence_1 (con, pro, dep_type);
}
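
/* Illustrative sketch (not part of the original file): the decision tree
   above degrades gracefully.  With DO_PREDICATION unset every control
   dependence is demoted immediately:

     add_dependence (con, jump, REG_DEP_CONTROL);
     ...   (recorded as REG_DEP_ANTI; ordering is simply enforced)

   while with predication enabled the same call keeps REG_DEP_CONTROL and
   additionally makes CON depend, as REG_DEP_TRUE, on the insns that set
   the jump's condition, so CON can later be predicated on the reverse
   condition instead of staying behind the jump.  */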

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
                     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
        add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
                              rtx_insn_list **listp,
                              int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
                                   rtx_insn_list **listp,
                                   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);

          if (pro == i)
            goto next_link;
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
                         rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
        deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
                                                        GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
1761
1762/* Make a dependency between every memory reference on the pending lists
37a0f8a5
RH
1763 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1764 dependencies for a read operation, similarly with FOR_WRITE. */
16f6ece6
BS
1765
1766static void
ce1ce33a 1767flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
46c5ad27 1768 int for_write)
16f6ece6 1769{
37a0f8a5 1770 if (for_write)
16f6ece6 1771 {
b8698a0f 1772 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1a83e602 1773 1, REG_DEP_ANTI, true);
e855c69d
AB
1774 if (!deps->readonly)
1775 {
1776 free_EXPR_LIST_list (&deps->pending_read_mems);
1777 deps->pending_read_list_length = 0;
1778 }
16f6ece6 1779 }
16f6ece6 1780
e855c69d 1781 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1a83e602
BS
1782 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1783 true);
16f6ece6 1784
b8698a0f 1785 add_dependence_list_and_free (deps, insn,
e855c69d 1786 &deps->last_pending_memory_flush, 1,
1a83e602
BS
1787 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1788 true);
e2724e63
BS
1789
1790 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1a83e602 1791 REG_DEP_ANTI, true);
e2724e63 1792
8febe3fa
MK
1793 if (DEBUG_INSN_P (insn))
1794 {
1795 if (for_write)
1796 free_INSN_LIST_list (&deps->pending_read_insns);
1797 free_INSN_LIST_list (&deps->pending_write_insns);
1798 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1799 free_INSN_LIST_list (&deps->pending_jump_insns);
1800 }
1801
e855c69d
AB
1802 if (!deps->readonly)
1803 {
1804 free_EXPR_LIST_list (&deps->pending_write_mems);
1805 deps->pending_write_list_length = 0;
1806
1807 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1808 deps->pending_flush_length = 1;
1809 }
1a83e602 1810 mark_as_hard = false;
e855c69d
AB
1811}
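
/* Editor's sketch, not part of the original file: how callers typically
   decide to flush -- compare the MAX_PENDING_LIST_LENGTH checks in
   sched_analyze_1 and sched_analyze_2 below.  The helper name is
   hypothetical.  */

static void
maybe_flush_pending_example (struct deps_desc *deps, rtx_insn *insn)
{
  if (!deps->readonly
      && (deps->pending_read_list_length + deps->pending_write_list_length
	  >= MAX_PENDING_LIST_LENGTH))
    flush_pending_lists (deps, insn, false, true);
}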
1812\f
1813/* Instruction whose dependencies we are analyzing. */
ce1ce33a 1814static rtx_insn *cur_insn = NULL;
e855c69d
AB
1815
1816/* Implement hooks for haifa scheduler. */
1817
1818static void
6144a836 1819haifa_start_insn (rtx_insn *insn)
e855c69d
AB
1820{
1821 gcc_assert (insn && !cur_insn);
1822
6144a836 1823 cur_insn = insn;
e855c69d
AB
1824}
1825
1826static void
1827haifa_finish_insn (void)
1828{
1829 cur_insn = NULL;
1830}
1831
1832void
1833haifa_note_reg_set (int regno)
1834{
1835 SET_REGNO_REG_SET (reg_pending_sets, regno);
1836}
1837
1838void
1839haifa_note_reg_clobber (int regno)
1840{
1841 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1842}
1843
1844void
1845haifa_note_reg_use (int regno)
1846{
1847 SET_REGNO_REG_SET (reg_pending_uses, regno);
1848}
1849
1850static void
6144a836 1851haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
e855c69d
AB
1852{
1853 if (!(ds & SPECULATIVE))
1854 {
1855 mem = NULL_RTX;
1856 pending_mem = NULL_RTX;
1857 }
1858 else
1859 gcc_assert (ds & BEGIN_DATA);
1860
1861 {
1862 dep_def _dep, *dep = &_dep;
b8698a0f 1863
ce18efcb 1864 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
b953c2b8 1865 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1a83e602 1866 DEP_NONREG (dep) = 1;
e855c69d
AB
1867 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1868 }
1869
1870}
1871
1872static void
6144a836 1873haifa_note_dep (rtx_insn *elem, ds_t ds)
e855c69d
AB
1874{
1875 dep_def _dep;
1876 dep_t dep = &_dep;
1877
1878 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1a83e602
BS
1879 if (mark_as_hard)
1880 DEP_NONREG (dep) = 1;
e855c69d
AB
1881 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1882}
1883
1884static void
1885note_reg_use (int r)
1886{
1887 if (sched_deps_info->note_reg_use)
1888 sched_deps_info->note_reg_use (r);
1889}
1890
1891static void
1892note_reg_set (int r)
1893{
1894 if (sched_deps_info->note_reg_set)
1895 sched_deps_info->note_reg_set (r);
1896}
1897
1898static void
1899note_reg_clobber (int r)
1900{
1901 if (sched_deps_info->note_reg_clobber)
1902 sched_deps_info->note_reg_clobber (r);
1903}
1904
1905static void
6144a836 1906note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
e855c69d
AB
1907{
1908 if (sched_deps_info->note_mem_dep)
1909 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1910}
1911
1912static void
ce1ce33a 1913note_dep (rtx_insn *e, ds_t ds)
e855c69d
AB
1914{
1915 if (sched_deps_info->note_dep)
1916 sched_deps_info->note_dep (e, ds);
1917}
1918
1919/* Return the reg_note corresponding to DS. */
1920enum reg_note
1921ds_to_dt (ds_t ds)
1922{
1923 if (ds & DEP_TRUE)
1924 return REG_DEP_TRUE;
1925 else if (ds & DEP_OUTPUT)
1926 return REG_DEP_OUTPUT;
e2724e63
BS
1927 else if (ds & DEP_ANTI)
1928 return REG_DEP_ANTI;
e855c69d
AB
1929 else
1930 {
e2724e63
BS
1931 gcc_assert (ds & DEP_CONTROL);
1932 return REG_DEP_CONTROL;
e855c69d 1933 }
16f6ece6 1934}
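
/* Editor's illustrative example, not part of the original file: when DS
   carries several status bits, the checks above pick the "major" kind in
   the order true > output > anti > control.  */

static void
ds_to_dt_example (void)
{
  gcc_assert (ds_to_dt (DEP_TRUE | DEP_OUTPUT) == REG_DEP_TRUE);
  gcc_assert (ds_to_dt (DEP_OUTPUT | DEP_ANTI) == REG_DEP_OUTPUT);
  gcc_assert (ds_to_dt (DEP_ANTI | DEP_CONTROL) == REG_DEP_ANTI);
}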
ce18efcb
VM
1935
1936\f
1937
1938/* Functions for computation of info needed for register pressure
1939 sensitive insn scheduling. */
1940
1941
1942/* Allocate and return reg_use_data structure for REGNO and INSN. */
1943static struct reg_use_data *
f57aa6b0 1944create_insn_reg_use (int regno, rtx_insn *insn)
ce18efcb
VM
1945{
1946 struct reg_use_data *use;
1947
1948 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1949 use->regno = regno;
1950 use->insn = insn;
1951 use->next_insn_use = INSN_REG_USE_LIST (insn);
1952 INSN_REG_USE_LIST (insn) = use;
1953 return use;
1954}
1955
f20f2613
VM
1956/* Allocate reg_set_data structure for REGNO and INSN. */
1957static void
ce18efcb
VM
1958create_insn_reg_set (int regno, rtx insn)
1959{
1960 struct reg_set_data *set;
1961
1962 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1963 set->regno = regno;
1964 set->insn = insn;
1965 set->next_insn_set = INSN_REG_SET_LIST (insn);
1966 INSN_REG_SET_LIST (insn) = set;
ce18efcb
VM
1967}
1968
1969/* Set up insn register uses for INSN and dependency context DEPS. */
1970static void
f57aa6b0 1971setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
ce18efcb
VM
1972{
1973 unsigned i;
1974 reg_set_iterator rsi;
ce18efcb
VM
1975 struct reg_use_data *use, *use2, *next;
1976 struct deps_reg *reg_last;
1977
1978 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1979 {
1980 if (i < FIRST_PSEUDO_REGISTER
1981 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1982 continue;
1983
1984 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1985 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1986 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1987 /* Ignore a use that is not dying. */
1988 continue;
1989
1990 use = create_insn_reg_use (i, insn);
1991 use->next_regno_use = use;
1992 reg_last = &deps->reg_last[i];
b8698a0f 1993
ce18efcb 1994 /* Create the cycle list of uses. */
c838c402 1995 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
ce18efcb 1996 {
c838c402 1997 use2 = create_insn_reg_use (i, list->insn ());
ce18efcb
VM
1998 next = use->next_regno_use;
1999 use->next_regno_use = use2;
2000 use2->next_regno_use = next;
2001 }
2002 }
2003}
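
/* Editor's sketch, not part of the original file: the next_regno_use
   pointers built above link all uses of one regno into a ring, so a
   traversal must stop when it returns to the element it started from.  */

static int
count_regno_uses_example (struct reg_use_data *start)
{
  int n = 0;
  struct reg_use_data *use = start;

  do
    {
      n++;
      use = use->next_regno_use;
    }
  while (use != start);
  return n;
}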
2004
2005/* Register pressure info for the currently processed insn. */
2006static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2007
2008/* Return TRUE if INSN has the use structure for REGNO. */
2009static bool
2010insn_use_p (rtx insn, int regno)
2011{
2012 struct reg_use_data *use;
2013
2014 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2015 if (use->regno == regno)
2016 return true;
2017 return false;
2018}
2019
2020/* Update the register pressure info after birth of pseudo register REGNO
2021 in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively,
2022 whether the register is clobbered or unused after the insn. */
2023static void
2024mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2025{
2026 int incr, new_incr;
2027 enum reg_class cl;
2028
2029 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1756cb66 2030 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2031 if (cl != NO_REGS)
2032 {
1756cb66 2033 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
ce18efcb
VM
2034 if (clobber_p)
2035 {
2036 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2037 reg_pressure_info[cl].clobber_increase = new_incr;
2038 }
2039 else if (unused_p)
2040 {
2041 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2042 reg_pressure_info[cl].unused_set_increase = new_incr;
2043 }
2044 else
2045 {
2046 new_incr = reg_pressure_info[cl].set_increase + incr;
2047 reg_pressure_info[cl].set_increase = new_incr;
2048 if (! insn_use_p (insn, regno))
2049 reg_pressure_info[cl].change += incr;
2050 create_insn_reg_set (regno, insn);
2051 }
2052 gcc_assert (new_incr < (1 << INCREASE_BITS));
2053 }
2054}
2055
2056/* Like mark_insn_pseudo_birth except that NREGS says how many
2057 hard registers are involved in the birth. */
2058static void
2059mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2060 bool clobber_p, bool unused_p)
2061{
2062 enum reg_class cl;
2063 int new_incr, last = regno + nregs;
b8698a0f 2064
ce18efcb
VM
2065 while (regno < last)
2066 {
2067 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2068 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2069 {
1756cb66 2070 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2071 if (cl != NO_REGS)
2072 {
2073 if (clobber_p)
2074 {
2075 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2076 reg_pressure_info[cl].clobber_increase = new_incr;
2077 }
2078 else if (unused_p)
2079 {
2080 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2081 reg_pressure_info[cl].unused_set_increase = new_incr;
2082 }
2083 else
2084 {
2085 new_incr = reg_pressure_info[cl].set_increase + 1;
2086 reg_pressure_info[cl].set_increase = new_incr;
2087 if (! insn_use_p (insn, regno))
2088 reg_pressure_info[cl].change += 1;
2089 create_insn_reg_set (regno, insn);
2090 }
2091 gcc_assert (new_incr < (1 << INCREASE_BITS));
2092 }
2093 }
2094 regno++;
2095 }
2096}
2097
2098/* Update the register pressure info after birth of pseudo or hard
2099 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say,
2100 respectively, whether the register is clobbered or unused after the
2101 insn. */
2102static void
2103mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2104{
2105 int regno;
2106
2107 if (GET_CODE (reg) == SUBREG)
2108 reg = SUBREG_REG (reg);
2109
2110 if (! REG_P (reg))
2111 return;
2112
2113 regno = REGNO (reg);
2114 if (regno < FIRST_PSEUDO_REGISTER)
dc8afb70 2115 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
ce18efcb
VM
2116 clobber_p, unused_p);
2117 else
2118 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2119}
2120
2121/* Update the register pressure info after death of pseudo register
2122 REGNO. */
2123static void
2124mark_pseudo_death (int regno)
2125{
2126 int incr;
2127 enum reg_class cl;
2128
2129 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1756cb66 2130 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2131 if (cl != NO_REGS)
2132 {
1756cb66 2133 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
ce18efcb
VM
2134 reg_pressure_info[cl].change -= incr;
2135 }
2136}
2137
2138/* Like mark_pseudo_death except that NREGS says how many hard
2139 registers are involved in the death. */
2140static void
2141mark_hard_regno_death (int regno, int nregs)
2142{
2143 enum reg_class cl;
2144 int last = regno + nregs;
b8698a0f 2145
ce18efcb
VM
2146 while (regno < last)
2147 {
2148 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2149 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2150 {
1756cb66 2151 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2152 if (cl != NO_REGS)
2153 reg_pressure_info[cl].change -= 1;
2154 }
2155 regno++;
2156 }
2157}
2158
2159/* Update the register pressure info after death of pseudo or hard
2160 register REG. */
2161static void
2162mark_reg_death (rtx reg)
2163{
2164 int regno;
2165
2166 if (GET_CODE (reg) == SUBREG)
2167 reg = SUBREG_REG (reg);
2168
2169 if (! REG_P (reg))
2170 return;
2171
2172 regno = REGNO (reg);
2173 if (regno < FIRST_PSEUDO_REGISTER)
dc8afb70 2174 mark_hard_regno_death (regno, REG_NREGS (reg));
ce18efcb
VM
2175 else
2176 mark_pseudo_death (regno);
2177}
2178
2179/* Process SETTER of REG. DATA is an insn containing the setter. */
2180static void
2181mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2182{
2183 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2184 return;
2185 mark_insn_reg_birth
2186 ((rtx) data, reg, false,
2187 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2188}
2189
2190/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2191static void
2192mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2193{
2194 if (GET_CODE (setter) == CLOBBER)
2195 mark_insn_reg_birth ((rtx) data, reg, true, false);
2196}
2197
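/* Editor's note, not part of the original file: the two callbacks above
   are shaped for note_stores, which invokes them once per SET or CLOBBER
   destination in a pattern -- exactly as init_insn_reg_pressure_info
   does below.  */

static void
walk_insn_stores_example (rtx_insn *insn)
{
  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
  note_stores (PATTERN (insn), mark_insn_reg_store, insn);
}
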
2198/* Set up reg pressure info related to INSN. */
19ac7892 2199void
90831096 2200init_insn_reg_pressure_info (rtx_insn *insn)
ce18efcb
VM
2201{
2202 int i, len;
2203 enum reg_class cl;
2204 static struct reg_pressure_data *pressure_info;
2205 rtx link;
2206
7a1aca9c 2207 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
ce18efcb
VM
2208
2209 if (! INSN_P (insn))
2210 return;
2211
1756cb66 2212 for (i = 0; i < ira_pressure_classes_num; i++)
ce18efcb 2213 {
1756cb66 2214 cl = ira_pressure_classes[i];
ce18efcb
VM
2215 reg_pressure_info[cl].clobber_increase = 0;
2216 reg_pressure_info[cl].set_increase = 0;
2217 reg_pressure_info[cl].unused_set_increase = 0;
2218 reg_pressure_info[cl].change = 0;
2219 }
b8698a0f 2220
ce18efcb 2221 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
b8698a0f 2222
ce18efcb 2223 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
b8698a0f 2224
ce18efcb
VM
2225#ifdef AUTO_INC_DEC
2226 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2227 if (REG_NOTE_KIND (link) == REG_INC)
2228 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2229#endif
2230
2231 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2232 if (REG_NOTE_KIND (link) == REG_DEAD)
2233 mark_reg_death (XEXP (link, 0));
b8698a0f 2234
1756cb66 2235 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
ce18efcb
VM
2236 pressure_info
2237 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
7a1aca9c
RS
2238 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2239 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2240 * sizeof (int), 1);
1756cb66 2241 for (i = 0; i < ira_pressure_classes_num; i++)
ce18efcb 2242 {
1756cb66 2243 cl = ira_pressure_classes[i];
ce18efcb
VM
2244 pressure_info[i].clobber_increase
2245 = reg_pressure_info[cl].clobber_increase;
2246 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2247 pressure_info[i].unused_set_increase
2248 = reg_pressure_info[cl].unused_set_increase;
2249 pressure_info[i].change = reg_pressure_info[cl].change;
2250 }
2251}
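
/* Editor's sketch, not part of the original file: reading the data back.
   INSN_REG_PRESSURE is indexed in the same order as ira_pressure_classes,
   so the net pressure change for one pressure class is simply:  */

static int
insn_pressure_change_example (rtx_insn *insn, int class_index)
{
  struct reg_pressure_data *info = INSN_REG_PRESSURE (insn);
  return info[class_index].change;
}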
2252
2253
16f6ece6 2254\f
e855c69d
AB
2255
2256/* Internal variable for sched_analyze_[12] () functions.
2257 If it is true, this means that sched_analyze_[12] is looking
2258 at the topmost SET. */
2259static bool can_start_lhs_rhs_p;
2260
b8698a0f 2261/* Extend reg info for the deps context DEPS given that
e855c69d
AB
2262 we have just generated a register numbered REGNO. */
2263static void
88302d54 2264extend_deps_reg_info (struct deps_desc *deps, int regno)
e855c69d
AB
2265{
2266 int max_regno = regno + 1;
2267
2268 gcc_assert (!reload_completed);
2269
2270 /* In a readonly context, it would not hurt to extend info,
2271 but it should not be needed. */
2272 if (reload_completed && deps->readonly)
2273 {
2274 deps->max_reg = max_regno;
2275 return;
2276 }
2277
2278 if (max_regno > deps->max_reg)
2279 {
b8698a0f 2280 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
e855c69d
AB
2281 max_regno);
2282 memset (&deps->reg_last[deps->max_reg],
b8698a0f 2283 0, (max_regno - deps->max_reg)
e855c69d
AB
2284 * sizeof (struct deps_reg));
2285 deps->max_reg = max_regno;
2286 }
2287}
2288
2289/* Extends REG_INFO_P if needed. */
2290void
2291maybe_extend_reg_info_p (void)
2292{
2293 /* Extend REG_INFO_P, if needed. */
2294 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2295 {
2296 size_t new_reg_info_p_size = max_regno + 128;
2297
2298 gcc_assert (!reload_completed && sel_sched_p ());
2299
2300 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2301 new_reg_info_p_size,
2302 reg_info_p_size,
2303 sizeof (*reg_info_p));
2304 reg_info_p_size = new_reg_info_p_size;
2305 }
2306}
2307
2e9182fd
EB
2308/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2309 The type of the reference is specified by REF and can be SET,
2310 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2311
2312static void
ef4bddc2 2313sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
ce1ce33a 2314 enum rtx_code ref, rtx_insn *insn)
2e9182fd 2315{
e855c69d
AB
2316 /* We could emit new pseudos in renaming. Extend the reg structures. */
2317 if (!reload_completed && sel_sched_p ()
2318 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2319 extend_deps_reg_info (deps, regno);
2320
2321 maybe_extend_reg_info_p ();
2322
2e9182fd
EB
2323 /* A hard reg in a wide mode may really be multiple registers.
2324 If so, mark all of them just like the first. */
2325 if (regno < FIRST_PSEUDO_REGISTER)
2326 {
2327 int i = hard_regno_nregs[regno][mode];
2328 if (ref == SET)
2329 {
2330 while (--i >= 0)
e855c69d 2331 note_reg_set (regno + i);
2e9182fd
EB
2332 }
2333 else if (ref == USE)
2334 {
2335 while (--i >= 0)
e855c69d 2336 note_reg_use (regno + i);
2e9182fd
EB
2337 }
2338 else
2339 {
2340 while (--i >= 0)
e855c69d 2341 note_reg_clobber (regno + i);
2e9182fd
EB
2342 }
2343 }
2344
2345 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2346 it does not reload. Ignore these as they have served their
2347 purpose already. */
2348 else if (regno >= deps->max_reg)
2349 {
2350 enum rtx_code code = GET_CODE (PATTERN (insn));
2351 gcc_assert (code == USE || code == CLOBBER);
2352 }
2353
2354 else
2355 {
2356 if (ref == SET)
e855c69d 2357 note_reg_set (regno);
2e9182fd 2358 else if (ref == USE)
e855c69d 2359 note_reg_use (regno);
2e9182fd 2360 else
e855c69d 2361 note_reg_clobber (regno);
2e9182fd
EB
2362
2363 /* Pseudos that are REG_EQUIV to something may be replaced
2364 by that during reloading. We need only add dependencies for
2365 the address in the REG_EQUIV note. */
2366 if (!reload_completed && get_reg_known_equiv_p (regno))
2367 {
2368 rtx t = get_reg_known_value (regno);
2369 if (MEM_P (t))
2370 sched_analyze_2 (deps, XEXP (t, 0), insn);
2371 }
2372
2373 /* Don't let it cross a call after scheduling if it doesn't
2374 already cross one. */
2375 if (REG_N_CALLS_CROSSED (regno) == 0)
2376 {
b5b8b0ac 2377 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2e9182fd
EB
2378 deps->sched_before_next_call
2379 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2380 else
2381 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 2382 REG_DEP_ANTI, false);
2e9182fd
EB
2383 }
2384 }
2385}
2386
16f6ece6
BS
2387/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2388 rtx, X, creating all dependencies generated by the write to the
2389 destination of X, and reads of everything mentioned. */
2390
2391static void
ce1ce33a 2392sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2393{
b3694847 2394 rtx dest = XEXP (x, 0);
16f6ece6 2395 enum rtx_code code = GET_CODE (x);
e855c69d 2396 bool cslr_p = can_start_lhs_rhs_p;
16f6ece6 2397
e855c69d
AB
2398 can_start_lhs_rhs_p = false;
2399
2400 gcc_assert (dest);
16f6ece6
BS
2401 if (dest == 0)
2402 return;
2403
e855c69d
AB
2404 if (cslr_p && sched_deps_info->start_lhs)
2405 sched_deps_info->start_lhs (dest);
2406
7193d1dc 2407 if (GET_CODE (dest) == PARALLEL)
16f6ece6 2408 {
b3694847 2409 int i;
90d036a0 2410
16f6ece6 2411 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
7193d1dc
RK
2412 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2413 sched_analyze_1 (deps,
2414 gen_rtx_CLOBBER (VOIDmode,
2415 XEXP (XVECEXP (dest, 0, i), 0)),
2416 insn);
90d036a0 2417
e855c69d
AB
2418 if (cslr_p && sched_deps_info->finish_lhs)
2419 sched_deps_info->finish_lhs ();
2420
2421 if (code == SET)
2422 {
2423 can_start_lhs_rhs_p = cslr_p;
2424
2425 sched_analyze_2 (deps, SET_SRC (x), insn);
2426
2427 can_start_lhs_rhs_p = false;
2428 }
2429
16f6ece6
BS
2430 return;
2431 }
2432
2433 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
46d096a3 2434 || GET_CODE (dest) == ZERO_EXTRACT)
16f6ece6 2435 {
37bd08f8
HP
2436 if (GET_CODE (dest) == STRICT_LOW_PART
2437 || GET_CODE (dest) == ZERO_EXTRACT
4d779342 2438 || df_read_modify_subreg_p (dest))
37bd08f8 2439 {
46c5ad27 2440 /* These both read and modify the result. We must handle
37bd08f8
HP
2441 them as writes to get proper dependencies for following
2442 instructions. We must handle them as reads to get proper
2443 dependencies from this to previous instructions.
71c0e7fc 2444 Thus we need to call sched_analyze_2. */
37bd08f8 2445
46c5ad27 2446 sched_analyze_2 (deps, XEXP (dest, 0), insn);
37bd08f8 2447 }
46d096a3 2448 if (GET_CODE (dest) == ZERO_EXTRACT)
16f6ece6
BS
2449 {
2450 /* The second and third arguments are values read by this insn. */
2451 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2452 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2453 }
2454 dest = XEXP (dest, 0);
2455 }
2456
f8cfc6aa 2457 if (REG_P (dest))
16f6ece6 2458 {
2e9182fd 2459 int regno = REGNO (dest);
ef4bddc2 2460 machine_mode mode = GET_MODE (dest);
2e9182fd
EB
2461
2462 sched_analyze_reg (deps, regno, mode, code, insn);
16f6ece6 2463
c3bdeea8
RS
2464#ifdef STACK_REGS
2465 /* Treat all writes to a stack register as modifying the TOS. */
2466 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2467 {
2e9182fd
EB
2468 /* Avoid analyzing the same register twice. */
2469 if (regno != FIRST_STACK_REG)
2470 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
ce18efcb 2471
d108e679
AS
2472 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2473 FIRST_STACK_REG);
c3bdeea8
RS
2474 }
2475#endif
16f6ece6 2476 }
3c0cb5de 2477 else if (MEM_P (dest))
16f6ece6
BS
2478 {
2479 /* Writing memory. */
4b6c5340
BS
2480 rtx t = dest;
2481
e855c69d 2482 if (sched_deps_info->use_cselib)
4b6c5340 2483 {
ef4bddc2 2484 machine_mode address_mode = get_address_mode (dest);
d4ebfa65 2485
4b6c5340 2486 t = shallow_copy_rtx (dest);
4deef538
AO
2487 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2488 GET_MODE (t), insn);
9a76e83d
JJ
2489 XEXP (t, 0)
2490 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2491 insn);
4b6c5340 2492 }
c7866154 2493 t = canon_rtx (t);
16f6ece6 2494
e855c69d
AB
2495 /* Pending lists can't get larger with a readonly context. */
2496 if (!deps->readonly
2497 && ((deps->pending_read_list_length + deps->pending_write_list_length)
43722f9f 2498 >= MAX_PENDING_LIST_LENGTH))
16f6ece6
BS
2499 {
2500 /* Flush all pending reads and writes to prevent the pending lists
2501 from getting any larger. Insn scheduling runs too slowly when
4a121cc3 2502 these lists get long. When compiling GCC with itself,
16f6ece6 2503 this flush occurs 8 times for sparc, and 10 times for m88k using
4a121cc3 2504 the default value of 32. */
37a0f8a5 2505 flush_pending_lists (deps, insn, false, true);
16f6ece6
BS
2506 }
2507 else
2508 {
c838c402
DM
2509 rtx_insn_list *pending;
2510 rtx_expr_list *pending_mem;
16f6ece6
BS
2511
2512 pending = deps->pending_read_insns;
2513 pending_mem = deps->pending_read_mems;
2514 while (pending)
2515 {
c838c402
DM
2516 if (anti_dependence (pending_mem->element (), t)
2517 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2518 note_mem_dep (t, pending_mem->element (), pending->insn (),
e855c69d 2519 DEP_ANTI);
16f6ece6 2520
c838c402
DM
2521 pending = pending->next ();
2522 pending_mem = pending_mem->next ();
16f6ece6
BS
2523 }
2524
2525 pending = deps->pending_write_insns;
2526 pending_mem = deps->pending_write_mems;
2527 while (pending)
2528 {
c838c402
DM
2529 if (output_dependence (pending_mem->element (), t)
2530 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2531 note_mem_dep (t, pending_mem->element (),
2532 pending->insn (),
e855c69d 2533 DEP_OUTPUT);
16f6ece6 2534
c838c402
DM
2535 pending = pending->next ();
2536 pending_mem = pending_mem->next ();
16f6ece6
BS
2537 }
2538
b27cedc6 2539 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1a83e602 2540 REG_DEP_ANTI, true);
e2724e63 2541 add_dependence_list (insn, deps->pending_jump_insns, 1,
1a83e602 2542 REG_DEP_CONTROL, true);
16f6ece6 2543
e855c69d
AB
2544 if (!deps->readonly)
2545 add_insn_mem_dependence (deps, false, insn, dest);
16f6ece6
BS
2546 }
2547 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2548 }
2549
e855c69d
AB
2550 if (cslr_p && sched_deps_info->finish_lhs)
2551 sched_deps_info->finish_lhs ();
2552
16f6ece6
BS
2553 /* Analyze reads. */
2554 if (GET_CODE (x) == SET)
e855c69d
AB
2555 {
2556 can_start_lhs_rhs_p = cslr_p;
2557
2558 sched_analyze_2 (deps, SET_SRC (x), insn);
2559
2560 can_start_lhs_rhs_p = false;
2561 }
16f6ece6
BS
2562}
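
/* Editor's sketch, not part of the original file: the destination shapes
   sched_analyze_1 handles after the PARALLEL case -- wrappers are peeled
   until a REG (register write) or MEM (memory write) remains.  */

static const char *
classify_sched_dest_example (rtx dest)
{
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    dest = XEXP (dest, 0);
  if (REG_P (dest))
    return "register write";
  if (MEM_P (dest))
    return "memory write";
  return "other";
}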
2563
2564/* Analyze the uses of memory and registers in rtx X in INSN. */
16f6ece6 2565static void
ce1ce33a 2566sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2567{
b3694847
SS
2568 int i;
2569 int j;
2570 enum rtx_code code;
2571 const char *fmt;
e855c69d
AB
2572 bool cslr_p = can_start_lhs_rhs_p;
2573
2574 can_start_lhs_rhs_p = false;
16f6ece6 2575
e855c69d 2576 gcc_assert (x);
16f6ece6
BS
2577 if (x == 0)
2578 return;
2579
e855c69d
AB
2580 if (cslr_p && sched_deps_info->start_rhs)
2581 sched_deps_info->start_rhs (x);
2582
16f6ece6
BS
2583 code = GET_CODE (x);
2584
2585 switch (code)
2586 {
d8116890 2587 CASE_CONST_ANY:
16f6ece6
BS
2588 case SYMBOL_REF:
2589 case CONST:
2590 case LABEL_REF:
bf520698 2591 /* Ignore constants. */
e855c69d
AB
2592 if (cslr_p && sched_deps_info->finish_rhs)
2593 sched_deps_info->finish_rhs ();
2594
16f6ece6
BS
2595 return;
2596
16f6ece6 2597 case CC0:
618f4073
TS
2598 if (!HAVE_cc0)
2599 gcc_unreachable ();
2600
16f6ece6 2601 /* User of CC0 depends on immediately preceding insn. */
4c2b2906 2602 SCHED_GROUP_P (insn) = 1;
caf819ae
VM
2603 /* Don't move CC0 setter to another block (it can set up the
2604 same flag for previous CC0 users which is safe). */
2605 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
e855c69d
AB
2606
2607 if (cslr_p && sched_deps_info->finish_rhs)
2608 sched_deps_info->finish_rhs ();
2609
16f6ece6 2610 return;
16f6ece6
BS
2611
2612 case REG:
2613 {
16f6ece6 2614 int regno = REGNO (x);
ef4bddc2 2615 machine_mode mode = GET_MODE (x);
2e9182fd
EB
2616
2617 sched_analyze_reg (deps, regno, mode, USE, insn);
c3bdeea8
RS
2618
2619#ifdef STACK_REGS
2620 /* Treat all reads of a stack register as modifying the TOS. */
2621 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2622 {
2e9182fd
EB
2623 /* Avoid analyzing the same register twice. */
2624 if (regno != FIRST_STACK_REG)
2625 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2626 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
c3bdeea8
RS
2627 }
2628#endif
e855c69d
AB
2629
2630 if (cslr_p && sched_deps_info->finish_rhs)
2631 sched_deps_info->finish_rhs ();
2632
16f6ece6
BS
2633 return;
2634 }
2635
2636 case MEM:
2637 {
2638 /* Reading memory. */
1476d1bd 2639 rtx_insn_list *u;
c838c402
DM
2640 rtx_insn_list *pending;
2641 rtx_expr_list *pending_mem;
4b6c5340 2642 rtx t = x;
16f6ece6 2643
e855c69d 2644 if (sched_deps_info->use_cselib)
4b6c5340 2645 {
ef4bddc2 2646 machine_mode address_mode = get_address_mode (t);
d4ebfa65 2647
4b6c5340 2648 t = shallow_copy_rtx (t);
4deef538
AO
2649 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2650 GET_MODE (t), insn);
9a76e83d
JJ
2651 XEXP (t, 0)
2652 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2653 insn);
4b6c5340 2654 }
16f6ece6 2655
f49b295a 2656 if (!DEBUG_INSN_P (insn))
16f6ece6 2657 {
f49b295a
AO
2658 t = canon_rtx (t);
2659 pending = deps->pending_read_insns;
2660 pending_mem = deps->pending_read_mems;
2661 while (pending)
2662 {
c838c402 2663 if (read_dependence (pending_mem->element (), t)
f49b295a 2664 && ! sched_insns_conditions_mutex_p (insn,
c838c402
DM
2665 pending->insn ()))
2666 note_mem_dep (t, pending_mem->element (),
2667 pending->insn (),
f49b295a
AO
2668 DEP_ANTI);
2669
c838c402
DM
2670 pending = pending->next ();
2671 pending_mem = pending_mem->next ();
f49b295a 2672 }
16f6ece6 2673
f49b295a
AO
2674 pending = deps->pending_write_insns;
2675 pending_mem = deps->pending_write_mems;
2676 while (pending)
e855c69d 2677 {
c838c402 2678 if (true_dependence (pending_mem->element (), VOIDmode, t)
f49b295a 2679 && ! sched_insns_conditions_mutex_p (insn,
c838c402
DM
2680 pending->insn ()))
2681 note_mem_dep (t, pending_mem->element (),
2682 pending->insn (),
f49b295a
AO
2683 sched_deps_info->generate_spec_deps
2684 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2685
c838c402
DM
2686 pending = pending->next ();
2687 pending_mem = pending_mem->next ();
f49b295a 2688 }
e855c69d 2689
1476d1bd
MM
2690 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2691 add_dependence (insn, u->insn (), REG_DEP_ANTI);
e2724e63 2692
1476d1bd 2693 for (u = deps->pending_jump_insns; u; u = u->next ())
e2724e63
BS
2694 if (deps_may_trap_p (x))
2695 {
2696 if ((sched_deps_info->generate_spec_deps)
2697 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2698 {
2699 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2700 MAX_DEP_WEAK);
2701
1476d1bd 2702 note_dep (u->insn (), ds);
e2724e63
BS
2703 }
2704 else
1476d1bd 2705 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
e2724e63 2706 }
e855c69d 2707 }
16f6ece6
BS
2708
2709 /* Always add these dependencies to pending_reads, since
2710 this insn may be followed by a write. */
1baf9159
JJ
2711 if (!deps->readonly)
2712 {
2713 if ((deps->pending_read_list_length
2714 + deps->pending_write_list_length)
43722f9f 2715 >= MAX_PENDING_LIST_LENGTH
1baf9159
JJ
2716 && !DEBUG_INSN_P (insn))
2717 flush_pending_lists (deps, insn, true, true);
2718 add_insn_mem_dependence (deps, true, insn, x);
2719 }
16f6ece6 2720
16f6ece6 2721 sched_analyze_2 (deps, XEXP (x, 0), insn);
e855c69d
AB
2722
2723 if (cslr_p && sched_deps_info->finish_rhs)
2724 sched_deps_info->finish_rhs ();
2725
16f6ece6
BS
2726 return;
2727 }
2728
2729 /* Force pending stores to memory in case a trap handler needs them. */
2730 case TRAP_IF:
37a0f8a5 2731 flush_pending_lists (deps, insn, true, false);
16f6ece6
BS
2732 break;
2733
3fce100b
AK
2734 case PREFETCH:
2735 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2736 reg_pending_barrier = TRUE_BARRIER;
0e80383f
VM
2737 /* A prefetch insn contains addresses only. So if the prefetch
2738 address has no registers, there will be no dependencies on
2739 the prefetch insn. This is wrong from a correctness point
2740 of view, since such a prefetch can be moved below a jump
2741 insn, which usually generates a MOVE_BARRIER that prevents
2742 insns containing registers or memories from moving through
2743 the barrier. It is also wrong from a performance point of
2744 view, since a prefetch without dependencies will tend to be
2745 issued later instead of earlier. It is hard to generate
2746 accurate dependencies for prefetch insns, as a prefetch
2747 carries only the start address, but it is better to have
2748 something than nothing. */
79f01c76 2749 if (!deps->readonly)
db1fb332
JJ
2750 {
2751 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2752 if (sched_deps_info->use_cselib)
2753 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2754 add_insn_mem_dependence (deps, true, insn, x);
2755 }
3fce100b
AK
2756 break;
2757
a11fe699
UB
2758 case UNSPEC_VOLATILE:
2759 flush_pending_lists (deps, insn, true, true);
2760 /* FALLTHRU */
2761
16f6ece6
BS
2762 case ASM_OPERANDS:
2763 case ASM_INPUT:
16f6ece6 2764 {
16f6ece6
BS
2765 /* Traditional and volatile asm instructions must be considered to use
2766 and clobber all hard registers, all pseudo-registers and all of
2767 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2768
2769 Consider for instance a volatile asm that changes the fpu rounding
2770 mode. An insn should not be moved across this even if it only uses
2771 pseudo-regs because it might give an incorrectly rounded result. */
bf95b629
RB
2772 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2773 && !DEBUG_INSN_P (insn))
d1c9693e 2774 reg_pending_barrier = TRUE_BARRIER;
16f6ece6
BS
2775
2776 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2777 We cannot just fall through here, since then we would be confused
2778 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2779 a traditional asm, unlike its normal usage. */
2780
2781 if (code == ASM_OPERANDS)
2782 {
2783 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2784 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
e855c69d
AB
2785
2786 if (cslr_p && sched_deps_info->finish_rhs)
2787 sched_deps_info->finish_rhs ();
2788
16f6ece6
BS
2789 return;
2790 }
2791 break;
2792 }
2793
2794 case PRE_DEC:
2795 case POST_DEC:
2796 case PRE_INC:
2797 case POST_INC:
2798 /* These both read and modify the result. We must handle them as writes
2799 to get proper dependencies for following instructions. We must handle
2800 them as reads to get proper dependencies from this to previous
2801 instructions. Thus we need to pass them to both sched_analyze_1
2802 and sched_analyze_2. We must call sched_analyze_2 first in order
2803 to get the proper antecedent for the read. */
2804 sched_analyze_2 (deps, XEXP (x, 0), insn);
2805 sched_analyze_1 (deps, x, insn);
e855c69d
AB
2806
2807 if (cslr_p && sched_deps_info->finish_rhs)
2808 sched_deps_info->finish_rhs ();
2809
16f6ece6
BS
2810 return;
2811
2812 case POST_MODIFY:
2813 case PRE_MODIFY:
2814 /* op0 = op0 + op1 */
2815 sched_analyze_2 (deps, XEXP (x, 0), insn);
2816 sched_analyze_2 (deps, XEXP (x, 1), insn);
2817 sched_analyze_1 (deps, x, insn);
e855c69d
AB
2818
2819 if (cslr_p && sched_deps_info->finish_rhs)
2820 sched_deps_info->finish_rhs ();
2821
16f6ece6
BS
2822 return;
2823
2824 default:
2825 break;
2826 }
2827
2828 /* Other cases: walk the insn. */
2829 fmt = GET_RTX_FORMAT (code);
2830 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2831 {
2832 if (fmt[i] == 'e')
2833 sched_analyze_2 (deps, XEXP (x, i), insn);
2834 else if (fmt[i] == 'E')
2835 for (j = 0; j < XVECLEN (x, i); j++)
2836 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2837 }
e855c69d
AB
2838
2839 if (cslr_p && sched_deps_info->finish_rhs)
2840 sched_deps_info->finish_rhs ();
16f6ece6
BS
2841}
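
/* Editor's note, not part of the original file: the default case above is
   the standard idiom for walking an arbitrary rtx by its format string --
   recurse into 'e' operands and into every element of 'E' vectors.  */

static void
walk_rtx_example (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);

  for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      walk_rtx_example (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (int j = 0; j < XVECLEN (x, i); j++)
	walk_rtx_example (XVECEXP (x, i, j));
}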
2842
32d48af5 2843/* Try to group two fusible insns together to prevent the scheduler
dc549f34 2844 from scheduling them apart. */
ba4fa581
WM
2845
2846static void
ac44248e 2847sched_macro_fuse_insns (rtx_insn *insn)
ba4fa581 2848{
ac44248e 2849 rtx_insn *prev;
ba4fa581 2850
dc549f34
RR
2851 if (any_condjump_p (insn))
2852 {
2853 unsigned int condreg1, condreg2;
2854 rtx cc_reg_1;
2855 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2856 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2857 prev = prev_nonnote_nondebug_insn (insn);
2858 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2859 || !prev
2860 || !modified_in_p (cc_reg_1, prev))
2861 return;
2862 }
2863 else
2864 {
2865 rtx insn_set = single_set (insn);
ba4fa581 2866
dc549f34
RR
2867 prev = prev_nonnote_nondebug_insn (insn);
2868 if (!prev
2869 || !insn_set
892d9879 2870 || !single_set (prev))
dc549f34 2871 return;
ba4fa581 2872
dc549f34
RR
2873 }
2874
2875 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2876 SCHED_GROUP_P (insn) = 1;
ba4fa581 2877
ba4fa581
WM
2878}
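
/* Editor's sketch, not part of the original file: a hypothetical
   macro_fusion_pair_p target hook in the spirit of the caller above --
   fuse a flags-setting compare with the conditional jump that consumes
   the flags.  */

static bool
macro_fusion_pair_p_example (rtx_insn *condgen, rtx_insn *condjmp)
{
  unsigned int condreg1, condreg2;
  rtx cc_reg;

  if (!any_condjump_p (condjmp))
    return false;

  targetm.fixed_condition_code_regs (&condreg1, &condreg2);
  cc_reg = gen_rtx_REG (CCmode, condreg1);

  /* CONDGEN must write the flags that the jump reads.  */
  return modified_in_p (cc_reg, condgen)
	 && reg_referenced_p (cc_reg, PATTERN (condjmp));
}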
2879
16f6ece6 2880/* Analyze an INSN with pattern X to find all dependencies. */
16f6ece6 2881static void
ce1ce33a 2882sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2883{
b3694847 2884 RTX_CODE code = GET_CODE (x);
16f6ece6 2885 rtx link;
3cd8c58a 2886 unsigned i;
a2041967 2887 reg_set_iterator rsi;
16f6ece6 2888
ce18efcb
VM
2889 if (! reload_completed)
2890 {
2891 HARD_REG_SET temp;
2892
2893 extract_insn (insn);
1145837d 2894 preprocess_constraints (insn);
ce18efcb 2895 ira_implicitly_set_insn_hard_regs (&temp);
6eeea7a7 2896 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
ce18efcb
VM
2897 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2898 }
2899
e855c69d
AB
2900 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2901 && code == SET);
2902
ba4fa581
WM
2903 /* Group compare and branch insns for macro-fusion. */
2904 if (targetm.sched.macro_fusion_p
2905 && targetm.sched.macro_fusion_p ())
dc549f34 2906 sched_macro_fuse_insns (insn);
ba4fa581 2907
1098d3a5 2908 if (may_trap_p (x))
073a8998 2909 /* Avoid moving trapping instructions across function calls that might
1098d3a5
JJ
2910 not always return. */
2911 add_dependence_list (insn, deps->last_function_call_may_noreturn,
1a83e602 2912 1, REG_DEP_ANTI, true);
1098d3a5 2913
7b4ef662
RH
2914 /* We must avoid creating a situation in which two successors of the
2915 current block have different unwind info after scheduling. If at any
2916 point the two paths re-join this leads to incorrect unwind info. */
2917 /* ??? There are certain situations involving a forced frame pointer in
2918 which, with extra effort, we could fix up the unwind info at a later
2919 CFG join. However, it seems better to notice these cases earlier
2920 during prologue generation and avoid marking the frame pointer setup
2921 as frame-related at all. */
2922 if (RTX_FRAME_RELATED_P (insn))
2db036a7
TV
2923 {
2924 /* Make sure prologue insn is scheduled before next jump. */
2925 deps->sched_before_next_jump
2926 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2927
2928 /* Make sure epilogue insn is scheduled after preceding jumps. */
1a83e602
BS
2929 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2930 true);
2db036a7 2931 }
7b4ef662 2932
16f6ece6
BS
2933 if (code == COND_EXEC)
2934 {
2935 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2936
2937 /* ??? Should be recording conditions so we reduce the number of
ff7cc307 2938 false dependencies. */
16f6ece6
BS
2939 x = COND_EXEC_CODE (x);
2940 code = GET_CODE (x);
2941 }
2942 if (code == SET || code == CLOBBER)
c2310f51
RH
2943 {
2944 sched_analyze_1 (deps, x, insn);
2945
2946 /* Bare clobber insns are used for letting life analysis, reg-stack
2947 and others know that a value is dead. Depend on the last call
2948 instruction so that reg-stack won't get confused. */
2949 if (code == CLOBBER)
ce18efcb 2950 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 2951 REG_DEP_OUTPUT, true);
c2310f51 2952 }
16f6ece6
BS
2953 else if (code == PARALLEL)
2954 {
3cd8c58a 2955 for (i = XVECLEN (x, 0); i--;)
16f6ece6
BS
2956 {
2957 rtx sub = XVECEXP (x, 0, i);
2958 code = GET_CODE (sub);
2959
2960 if (code == COND_EXEC)
2961 {
2962 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2963 sub = COND_EXEC_CODE (sub);
2964 code = GET_CODE (sub);
2965 }
2966 if (code == SET || code == CLOBBER)
2967 sched_analyze_1 (deps, sub, insn);
2968 else
2969 sched_analyze_2 (deps, sub, insn);
2970 }
2971 }
2972 else
2973 sched_analyze_2 (deps, x, insn);
2974
2975 /* Mark registers CLOBBERED or used by called function. */
4b4bf941 2976 if (CALL_P (insn))
570a98eb
JH
2977 {
2978 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2979 {
2980 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2981 sched_analyze_1 (deps, XEXP (link, 0), insn);
e384e6b5 2982 else if (GET_CODE (XEXP (link, 0)) != SET)
570a98eb
JH
2983 sched_analyze_2 (deps, XEXP (link, 0), insn);
2984 }
9790c19e
JJ
2985 /* Don't schedule anything after a tail call; a tail call needs
2986 to use at least all call-saved registers. */
2987 if (SIBLING_CALL_P (insn))
2988 reg_pending_barrier = TRUE_BARRIER;
2989 else if (find_reg_note (insn, REG_SETJMP, NULL))
d1c9693e 2990 reg_pending_barrier = MOVE_BARRIER;
570a98eb 2991 }
16f6ece6 2992
4b4bf941 2993 if (JUMP_P (insn))
18e720b3 2994 {
e67d1102 2995 rtx_insn *next = next_nonnote_nondebug_insn (insn);
4b4bf941 2996 if (next && BARRIER_P (next))
0a1766b2 2997 reg_pending_barrier = MOVE_BARRIER;
18e720b3
BS
2998 else
2999 {
c838c402
DM
3000 rtx_insn_list *pending;
3001 rtx_expr_list *pending_mem;
5a257872 3002
e855c69d
AB
3003 if (sched_deps_info->compute_jump_reg_dependencies)
3004 {
e2724e63
BS
3005 (*sched_deps_info->compute_jump_reg_dependencies)
3006 (insn, reg_pending_control_uses);
e855c69d 3007
e855c69d 3008 /* Make latency of jump equal to 0 by using anti-dependence. */
e2724e63 3009 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
e855c69d
AB
3010 {
3011 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3012 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3013 false);
ce18efcb 3014 add_dependence_list (insn, reg_last->implicit_sets,
1a83e602 3015 0, REG_DEP_ANTI, false);
e855c69d 3016 add_dependence_list (insn, reg_last->clobbers, 0,
1a83e602 3017 REG_DEP_ANTI, false);
e855c69d 3018 }
e855c69d 3019 }
79ba58ef 3020
d9214e53
RH
3021 /* All memory writes and volatile reads must happen before the
3022 jump. Non-volatile reads must happen before the jump iff
3023 the result is needed by the above register used mask. */
3024
79ba58ef
BS
3025 pending = deps->pending_write_insns;
3026 pending_mem = deps->pending_write_mems;
3027 while (pending)
3028 {
c838c402
DM
3029 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3030 add_dependence (insn, pending->insn (),
ce1ce33a 3031 REG_DEP_OUTPUT);
c838c402
DM
3032 pending = pending->next ();
3033 pending_mem = pending_mem->next ();
d9214e53 3034 }
79ba58ef 3035
d9214e53
RH
3036 pending = deps->pending_read_insns;
3037 pending_mem = deps->pending_read_mems;
3038 while (pending)
3039 {
c838c402
DM
3040 if (MEM_VOLATILE_P (pending_mem->element ())
3041 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3042 add_dependence (insn, pending->insn (),
ce1ce33a 3043 REG_DEP_OUTPUT);
c838c402
DM
3044 pending = pending->next ();
3045 pending_mem = pending_mem->next ();
79ba58ef
BS
3046 }
3047
b27cedc6 3048 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1a83e602 3049 REG_DEP_ANTI, true);
e2724e63 3050 add_dependence_list (insn, deps->pending_jump_insns, 1,
1a83e602 3051 REG_DEP_ANTI, true);
18e720b3 3052 }
18e720b3
BS
3053 }
3054
ca3c1028 3055 /* If this instruction can throw an exception, then moving it changes
786de7eb 3056 where block boundaries fall. This is mighty confusing elsewhere.
d47a8b83
EB
3057 Therefore, prevent such an instruction from being moved. Same for
3058 non-jump instructions that define block boundaries.
3059 ??? Unclear whether this is still necessary in EBB mode. If not,
3060 add_branch_dependences should be adjusted for RGN mode instead. */
3061 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3062 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
d1c9693e 3063 reg_pending_barrier = MOVE_BARRIER;
ca3c1028 3064
7a1aca9c 3065 if (sched_pressure != SCHED_PRESSURE_NONE)
ce18efcb
VM
3066 {
3067 setup_insn_reg_uses (deps, insn);
19ac7892 3068 init_insn_reg_pressure_info (insn);
ce18efcb
VM
3069 }
3070
b5b8b0ac
AO
3071 /* Add register dependencies for insn. */
3072 if (DEBUG_INSN_P (insn))
3073 {
ce1ce33a 3074 rtx_insn *prev = deps->last_debug_insn;
1476d1bd 3075 rtx_insn_list *u;
b5b8b0ac
AO
3076
3077 if (!deps->readonly)
3078 deps->last_debug_insn = insn;
3079
3080 if (prev)
3081 add_dependence (insn, prev, REG_DEP_ANTI);
3082
3083 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 3084 REG_DEP_ANTI, false);
b5b8b0ac 3085
1a83e602 3086 if (!sel_sched_p ())
1476d1bd
MM
3087 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3088 add_dependence (insn, u->insn (), REG_DEP_ANTI);
b5b8b0ac
AO
3089
3090 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3091 {
3092 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602 3093 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
e2724e63
BS
3094 /* There's no point in making REG_DEP_CONTROL dependencies for
3095 debug insns. */
1a83e602
BS
3096 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3097 false);
f49b295a
AO
3098
3099 if (!deps->readonly)
3100 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
b5b8b0ac
AO
3101 }
3102 CLEAR_REG_SET (reg_pending_uses);
3103
3104 /* Quite often, a debug insn will refer to stuff in the
3105 previous instruction, but the reason we want this
3106 dependency here is to make sure the scheduler doesn't
3107 gratuitously move a debug insn ahead. This could dirty
3108 DF flags and cause additional analysis that wouldn't have
3109 occurred in compilation without debug insns, and such
3110 additional analysis can modify the generated code. */
3111 prev = PREV_INSN (insn);
3112
3113 if (prev && NONDEBUG_INSN_P (prev))
3114 add_dependence (insn, prev, REG_DEP_ANTI);
3115 }
0a1766b2
AM
3116 else
3117 {
b0bd15f7
BS
3118 regset_head set_or_clobbered;
3119
0a1766b2 3120 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
ce18efcb
VM
3121 {
3122 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3123 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3124 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3125 false);
3126 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3127 false);
b8698a0f 3128
ce18efcb
VM
3129 if (!deps->readonly)
3130 {
3131 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3132 reg_last->uses_length++;
3133 }
3134 }
b8698a0f 3135
ce18efcb
VM
3136 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3137 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3138 {
3139 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602 3140 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
ce18efcb 3141 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3142 REG_DEP_ANTI, false);
3143 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3144 false);
b8698a0f 3145
ce18efcb
VM
3146 if (!deps->readonly)
3147 {
3148 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3149 reg_last->uses_length++;
3150 }
3151 }
e855c69d 3152
b0bd15f7
BS
3153 if (targetm.sched.exposed_pipeline)
3154 {
3155 INIT_REG_SET (&set_or_clobbered);
3156 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3157 reg_pending_sets);
3158 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3159 {
3160 struct deps_reg *reg_last = &deps->reg_last[i];
3161 rtx list;
3162 for (list = reg_last->uses; list; list = XEXP (list, 1))
3163 {
3164 rtx other = XEXP (list, 0);
f0898307 3165 if (INSN_CACHED_COND (other) != const_true_rtx
c9bd6bcd 3166 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
f0898307 3167 INSN_CACHED_COND (other) = const_true_rtx;
b0bd15f7
BS
3168 }
3169 }
3170 }
3171
ce18efcb
VM
3172 /* If the current insn is conditional, we can't free any
3173 of the lists. */
3174 if (sched_has_condition_p (insn))
3175 {
3176 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3177 {
3178 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3179 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3180 false);
ce18efcb 3181 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3182 REG_DEP_ANTI, false);
3183 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3184 false);
e2724e63 3185 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3186 REG_DEP_CONTROL, false);
b8698a0f 3187
ce18efcb
VM
3188 if (!deps->readonly)
3189 {
3190 reg_last->clobbers
3191 = alloc_INSN_LIST (insn, reg_last->clobbers);
3192 reg_last->clobbers_length++;
3193 }
3194 }
3195 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3196 {
3197 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3198 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3199 false);
ce18efcb 3200 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3201 REG_DEP_ANTI, false);
3202 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3203 false);
3204 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3205 false);
e2724e63 3206 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3207 REG_DEP_CONTROL, false);
b8698a0f 3208
ce18efcb 3209 if (!deps->readonly)
aef0e7a8 3210 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
ce18efcb
VM
3211 }
3212 }
3213 else
3214 {
3215 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3216 {
3217 struct deps_reg *reg_last = &deps->reg_last[i];
43722f9f
BC
3218 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3219 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
ce18efcb
VM
3220 {
3221 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
1a83e602 3222 REG_DEP_OUTPUT, false);
ce18efcb
VM
3223 add_dependence_list_and_free (deps, insn,
3224 &reg_last->implicit_sets, 0,
1a83e602 3225 REG_DEP_ANTI, false);
ce18efcb 3226 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3227 REG_DEP_ANTI, false);
e2724e63
BS
3228 add_dependence_list_and_free (deps, insn,
3229 &reg_last->control_uses, 0,
1a83e602
BS
3230 REG_DEP_ANTI, false);
3231 add_dependence_list_and_free (deps, insn,
3232 &reg_last->clobbers, 0,
3233 REG_DEP_OUTPUT, false);
b8698a0f 3234
ce18efcb
VM
3235 if (!deps->readonly)
3236 {
3237 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3238 reg_last->clobbers_length = 0;
3239 reg_last->uses_length = 0;
3240 }
3241 }
3242 else
3243 {
1a83e602
BS
3244 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3245 false);
ce18efcb 3246 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3247 REG_DEP_ANTI, false);
3248 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3249 false);
e2724e63 3250 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3251 REG_DEP_CONTROL, false);
ce18efcb 3252 }
b8698a0f 3253
ce18efcb
VM
3254 if (!deps->readonly)
3255 {
3256 reg_last->clobbers_length++;
3257 reg_last->clobbers
3258 = alloc_INSN_LIST (insn, reg_last->clobbers);
3259 }
3260 }
3261 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3262 {
3263 struct deps_reg *reg_last = &deps->reg_last[i];
b8698a0f 3264
ce18efcb 3265 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
1a83e602 3266 REG_DEP_OUTPUT, false);
ce18efcb
VM
3267 add_dependence_list_and_free (deps, insn,
3268 &reg_last->implicit_sets,
1a83e602 3269 0, REG_DEP_ANTI, false);
ce18efcb 3270 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
1a83e602 3271 REG_DEP_OUTPUT, false);
ce18efcb 3272 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3273 REG_DEP_ANTI, false);
e2724e63 3274 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3275 REG_DEP_CONTROL, false);
b8698a0f 3276
ce18efcb
VM
3277 if (!deps->readonly)
3278 {
3279 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3280 reg_last->uses_length = 0;
3281 reg_last->clobbers_length = 0;
ce18efcb
VM
3282 }
3283 }
3284 }
e2724e63
BS
3285 if (!deps->readonly)
3286 {
3287 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3288 {
3289 struct deps_reg *reg_last = &deps->reg_last[i];
3290 reg_last->control_uses
3291 = alloc_INSN_LIST (insn, reg_last->control_uses);
3292 }
3293 }
0a1766b2
AM
3294 }
3295
ce18efcb
VM
3296 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3297 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3298 {
3299 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3300 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3301 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3302 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3303 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3304 false);
b8698a0f 3305
ce18efcb
VM
3306 if (!deps->readonly)
3307 reg_last->implicit_sets
3308 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3309 }
3310
e855c69d
AB
3311 if (!deps->readonly)
3312 {
3313 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3314 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3315 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
ce18efcb
VM
3316 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3317 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3318 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3319 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
e855c69d
AB
3320
3321 /* Set up the pending barrier found. */
3322 deps->last_reg_pending_barrier = reg_pending_barrier;
3323 }
0a1766b2
AM
3324
3325 CLEAR_REG_SET (reg_pending_uses);
3326 CLEAR_REG_SET (reg_pending_clobbers);
3327 CLEAR_REG_SET (reg_pending_sets);
e2724e63 3328 CLEAR_REG_SET (reg_pending_control_uses);
ce18efcb
VM
3329 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3330 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
0a1766b2 3331
ca3c1028 3332 /* Add dependencies if a scheduling barrier was found. */
37a0f8a5 3333 if (reg_pending_barrier)
ca3c1028 3334 {
30028c85
VM
3335 /* In the case of barrier the most added dependencies are not
3336 real, so we use anti-dependence here. */
e855c69d 3337 if (sched_has_condition_p (insn))
16f6ece6 3338 {
a2041967 3339 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
37a0f8a5
RH
3340 {
3341 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3342 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3343 true);
ce18efcb
VM
3344 add_dependence_list (insn, reg_last->sets, 0,
3345 reg_pending_barrier == TRUE_BARRIER
1a83e602 3346 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
ce18efcb 3347 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602 3348 REG_DEP_ANTI, true);
ce18efcb
VM
3349 add_dependence_list (insn, reg_last->clobbers, 0,
3350 reg_pending_barrier == TRUE_BARRIER
1a83e602 3351 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
a2041967 3352 }
37a0f8a5
RH
3353 }
3354 else
3355 {
a2041967 3356 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
37a0f8a5
RH
3357 {
3358 struct deps_reg *reg_last = &deps->reg_last[i];
e855c69d 3359 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3360 REG_DEP_ANTI, true);
e2724e63
BS
3361 add_dependence_list_and_free (deps, insn,
3362 &reg_last->control_uses, 0,
1a83e602 3363 REG_DEP_CONTROL, true);
ce18efcb
VM
3364 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3365 reg_pending_barrier == TRUE_BARRIER
1a83e602
BS
3366 ? REG_DEP_TRUE : REG_DEP_ANTI,
3367 true);
ce18efcb
VM
3368 add_dependence_list_and_free (deps, insn,
3369 &reg_last->implicit_sets, 0,
1a83e602 3370 REG_DEP_ANTI, true);
ce18efcb
VM
3371 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3372 reg_pending_barrier == TRUE_BARRIER
1a83e602
BS
3373 ? REG_DEP_TRUE : REG_DEP_ANTI,
3374 true);
16f6ece6 3375
e855c69d
AB
3376 if (!deps->readonly)
3377 {
3378 reg_last->uses_length = 0;
3379 reg_last->clobbers_length = 0;
3380 }
3381 }
0a1bb917 3382 }
37a0f8a5 3383
e855c69d
AB
3384 if (!deps->readonly)
3385 for (i = 0; i < (unsigned)deps->max_reg; i++)
3386 {
3387 struct deps_reg *reg_last = &deps->reg_last[i];
3388 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3389 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3390 }
3391
ed358aea 3392 /* Don't flush pending lists on speculative checks for
f45e9053
AB
3393 selective scheduling. */
3394 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
e855c69d 3395 flush_pending_lists (deps, insn, true, true);
b8698a0f 3396
d1c9693e 3397 reg_pending_barrier = NOT_A_BARRIER;
4ba478b8 3398 }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	{
	  if (DEBUG_INSN_P (insn))
	    /* We don't want to mark debug insns as part of the same
	       sched group.  We know they really aren't, but if we use
	       debug insns to tell that a call group is over, we'll
	       get different code if debug insns are not there and
	       instructions that follow seem like they should be part
	       of the call group.

	       Also, if we did, chain_to_prev_insn would move the
	       deps of the debug insn to the call insn, modifying
	       non-debug post-dependency counts of the debug insn
	       dependencies and otherwise messing with the scheduling
	       order.

	       Instead, let such debug insns be scheduled freely, but
	       keep the call group open in case there are insns that
	       should be part of it afterwards.  Since we grant debug
	       insns higher priority than even sched group insns, it
	       will all turn out all right.  */
	    goto debug_dont_end_call_group;
	  else
	    goto end_call_group;
	}

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (!deps->readonly
	      && deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

	  if (!sel_sched_p () || sched_emulate_haifa_p)
	    {
	      SCHED_GROUP_P (insn) = 1;
	      CANT_MOVE (insn) = 1;
	    }
	}
      else
	{
	end_call_group:
	  if (!deps->readonly)
	    deps->in_post_call_group_p = not_post_call;
	}
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
	sel_mark_hard_insn (insn);
      else
	{
	  sd_iterator_def sd_it;
	  dep_t dep;

	  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	       sd_iterator_cond (&sd_it, &dep);)
	    change_spec_dep_to_hard (sd_it);
	}
    }

  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor the original ordering of the insns that carry such notes.  */
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      deps->last_args_size = insn;
    }
}

/* Return TRUE if INSN might not always return normally (e.g. it calls
   exit or longjmp, or loops forever).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
	{
	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
	      == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
	      {
	      case BUILT_IN_BCMP:
	      case BUILT_IN_BCOPY:
	      case BUILT_IN_BZERO:
	      case BUILT_IN_INDEX:
	      case BUILT_IN_MEMCHR:
	      case BUILT_IN_MEMCMP:
	      case BUILT_IN_MEMCPY:
	      case BUILT_IN_MEMMOVE:
	      case BUILT_IN_MEMPCPY:
	      case BUILT_IN_MEMSET:
	      case BUILT_IN_RINDEX:
	      case BUILT_IN_STPCPY:
	      case BUILT_IN_STPNCPY:
	      case BUILT_IN_STRCAT:
	      case BUILT_IN_STRCHR:
	      case BUILT_IN_STRCMP:
	      case BUILT_IN_STRCPY:
	      case BUILT_IN_STRCSPN:
	      case BUILT_IN_STRLEN:
	      case BUILT_IN_STRNCAT:
	      case BUILT_IN_STRNCMP:
	      case BUILT_IN_STRNCPY:
	      case BUILT_IN_STRPBRK:
	      case BUILT_IN_STRRCHR:
	      case BUILT_IN_STRSPN:
	      case BUILT_IN_STRSTR:
		/* Assume certain string/memory builtins always return.  */
		return false;
	      default:
		break;
	      }
	}
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}

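/* Illustrative consequence of the whitelist above (an added example, not
   part of the original source): a direct call to a recognized builtin such
   as memcpy is treated as always returning, while an indirect call or a
   call to an unknown extern function conservatively remains "may not
   return" and therefore limits how far trapping insns may be moved.  */
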
/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
  rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      rtx x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
	return true;
    }

  return false;
}
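
/* Illustrative only (an added sketch, not part of the original source): the
   clobber/partial-set shape recognized above looks like

     (insn 10 ... (clobber (reg:DI 0)))                <- PREV
     (insn 11 ... (set (subreg:SI (reg:DI 0) 0) ...))  <- INSN

   set_of detects that INSN writes part of the clobbered register, so INSN
   is chained to PREV instead of being scheduled away from it.  */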

/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
	 a scheduling barrier for memory references.  */
      if (!deps->readonly
	  && !(sel_sched_p ()
	       && sel_insn_is_speculation_check (insn)))
	{
	  /* Keep the list a reasonable size.  */
	  if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
	    flush_pending_lists (deps, insn, true, true);
	  else
	    deps->pending_jump_insns
	      = alloc_INSN_LIST (insn, deps->pending_jump_insns);
	}

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
	{
	  /* This is setjmp.  Assume that all registers, not just
	     hard registers, may be clobbered by this call.  */
	  reg_pending_barrier = MOVE_BARRIER;
	}
      else
	{
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    /* A call may read and modify global register variables.  */
	    if (global_regs[i])
	      {
		SET_REGNO_REG_SET (reg_pending_sets, i);
		SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	      }
	    /* Other call-clobbered hard regs may be clobbered.
	       Since we only have a choice between 'might be clobbered'
	       and 'definitely not clobbered', we must include all
	       partly call-clobbered registers here.  */
	    else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
		     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
	      SET_REGNO_REG_SET (reg_pending_clobbers, i);
	    /* We don't know what set of fixed registers might be used
	       by the function, but it is certain that the stack pointer
	       is among them; be conservative.  */
	    else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	    /* The frame pointer is normally not used by the function
	       itself, but by the debugger.  */
	    /* ??? MIPS o32 is an exception.  It uses the frame pointer
	       in the macro expansion of jal but does not represent this
	       fact in the call_insn rtl.  */
	    else if (i == FRAME_POINTER_REGNUM
		     || (i == HARD_FRAME_POINTER_REGNUM
			 && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	}

      /* For each insn which shouldn't cross a call, add a dependence
	 between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_call, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate the
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
	 all pending reads and writes, and start new dependencies starting
	 from here.  But only flush writes for constant calls (which may
	 be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
	{
	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fix up the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx_insn *head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx_insn *insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* And initialize deps_lists.  */
	  sd_init_insn (insn);
	  /* Clean up SCHED_GROUP_P, which may have been set by the last
	     scheduler pass.  */
	  if (SCHED_GROUP_P (insn))
	    SCHED_GROUP_P (insn) = 0;
	}

      deps_analyze_insn (deps, insn);

      if (insn == tail)
	{
	  if (sched_deps_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}

/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear forward deps and leave the dep_nodes to the
	   corresponding back_deps list.  */
	if (resolved_p)
	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
	else
	  clear_deps_list (INSN_FORW_DEPS (insn));
      }
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
	/* Clear resolved back deps together with their dep_nodes.  */
	delete_dep_nodes_in_back_deps (insn, resolved_p);

	sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate the reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}

/* Initialize only the reg_last field of DEPS, which was not allocated
   before because we initialized DEPS lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
	free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
	free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;
}

/* Remove INSN from the dependence context DEPS.  */
void
remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
					       &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
					       &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
	 which could reduce both the number of vectors in the cache and the
	 size of each vector.  Instead we just avoid the cache entirely unless
	 the average number of instructions in a basic block is very high.  See
	 the comment before the declaration of true_dependency_cache for
	 what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new pool_allocator<_deps_list> ("deps_list",
						/* Allocate lists for one block
						   at a time.  */
						insns_in_block);
      dn_pool = new pool_allocator<_dep_node> ("dep_node",
					       /* Allocate nodes for one block
						  at a time.  We assume that an
						  average insn has 5 producers.  */
					       5 * insns_in_block);
    }
}

/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head,
					     control_dependency_cache, luid);

      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
	  bitmap_initialize (&control_dependency_cache[i], 0);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
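
/* A minimal sketch (an added example; the shapes mirror how these caches
   are consulted elsewhere in this file): each cache row is indexed by the
   consumer's LUID and holds a bitmap of producer LUIDs.  */
static bool ATTRIBUTE_UNUSED
cached_true_dep_example_p (rtx_insn *consumer, rtx_insn *producer)
{
  if (true_dependency_cache == NULL)
    return false;
  return bitmap_bit_p (&true_dependency_cache[INSN_LUID (consumer)],
		       INSN_LUID (producer));
}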

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  dn_pool->release_if_empty ();
  dn_pool = NULL;
  dl_pool->release_if_empty ();
  dl_pool = NULL;

  h_d_i_d.release ();
  cache_size = 0;

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);
	  bitmap_clear (&control_dependency_cache[i]);

	  if (sched_deps_info->generate_spec_deps)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
	  && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
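
/* Illustrative check of the three outcomes above (an added sketch, not
   part of the original source; regnos 1 and 2 and SImode are arbitrary).  */
static void ATTRIBUTE_UNUSED
estimate_dep_weak_example (void)
{
  rtx m1 = gen_rtx_MEM (SImode, gen_rtx_REG (Pmode, 1));
  rtx m2 = gen_rtx_MEM (SImode, gen_rtx_REG (Pmode, 2));

  /* Identical MEMs: surely dependent, so don't speculate.  */
  gcc_assert (estimate_dep_weak (m1, m1) == MIN_DEP_WEAK);
  /* Same address shape, different base regs: no information.  */
  gcc_assert (estimate_dep_weak (m1, m2) == UNCERTAIN_DEP_WEAK);
}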

/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
static void
add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
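
/* A small round-trip sketch (an added example, not part of the original
   source): pack a weakness into the BEGIN_DATA field of a dep status and
   read it back with the accessors above.  */
static void ATTRIBUTE_UNUSED
dep_weak_roundtrip_example (void)
{
  ds_t ds = DEP_TRUE;
  ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK / 2);
  gcc_assert (get_dep_weak (ds, BEGIN_DATA) == MAX_DEP_WEAK / 2);
}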

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
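
/* Worked example for the multiplicative case above (an added note with
   made-up numbers; MAX_DEP_WEAK == 255 is assumed only for illustration):
   weaknesses 200 and 150 merge to 200 * 150 / 255 == 117, so combining two
   speculative estimates never yields something stronger than either input,
   and the MIN_DEP_WEAK clamp keeps the result a valid weakness.  */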

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of dep_statuses DS and DS2; if both are speculative and
   MEM1 is nonnull, re-estimate the data-speculation weakness from MEM1
   and MEM2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merge the probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

4521
ddbd5439
MK
4522#ifdef ENABLE_CHECKING
4523/* Verify that dependence type and status are consistent.
4524 If RELAXED_P is true, then skip dep_weakness checks. */
4525static void
e2f6ff94 4526check_dep (dep_t dep, bool relaxed_p)
ddbd5439 4527{
e2f6ff94
MK
4528 enum reg_note dt = DEP_TYPE (dep);
4529 ds_t ds = DEP_STATUS (dep);
4530
4531 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4532
4533 if (!(current_sched_info->flags & USE_DEPS_LIST))
4534 {
b953c2b8 4535 gcc_assert (ds == 0);
e2f6ff94
MK
4536 return;
4537 }
4538
ddbd5439
MK
4539 /* Check that dependence type contains the same bits as the status. */
4540 if (dt == REG_DEP_TRUE)
4541 gcc_assert (ds & DEP_TRUE);
4542 else if (dt == REG_DEP_OUTPUT)
4543 gcc_assert ((ds & DEP_OUTPUT)
b8698a0f 4544 && !(ds & DEP_TRUE));
e2724e63
BS
4545 else if (dt == REG_DEP_ANTI)
4546 gcc_assert ((ds & DEP_ANTI)
ddbd5439 4547 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
e2724e63
BS
4548 else
4549 gcc_assert (dt == REG_DEP_CONTROL
4550 && (ds & DEP_CONTROL)
4551 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
ddbd5439
MK
4552
4553 /* HARD_DEP can not appear in dep_status of a link. */
b8698a0f 4554 gcc_assert (!(ds & HARD_DEP));
ddbd5439
MK
4555
4556 /* Check that dependence status is set correctly when speculation is not
4557 supported. */
e855c69d 4558 if (!sched_deps_info->generate_spec_deps)
ddbd5439
MK
4559 gcc_assert (!(ds & SPECULATIVE));
4560 else if (ds & SPECULATIVE)
4561 {
4562 if (!relaxed_p)
4563 {
4564 ds_t type = FIRST_SPEC_TYPE;
4565
4566 /* Check that dependence weakness is in proper range. */
4567 do
4568 {
4569 if (ds & type)
4570 get_dep_weak (ds, type);
4571
4572 if (type == LAST_SPEC_TYPE)
4573 break;
4574 type <<= SPEC_TYPE_SHIFT;
4575 }
4576 while (1);
4577 }
4578
4579 if (ds & BEGIN_SPEC)
4580 {
4581 /* Only true dependence can be data speculative. */
4582 if (ds & BEGIN_DATA)
4583 gcc_assert (ds & DEP_TRUE);
4584
4585 /* Control dependencies in the insn scheduler are represented by
4586 anti-dependencies, therefore only anti dependence can be
4587 control speculative. */
4588 if (ds & BEGIN_CONTROL)
4589 gcc_assert (ds & DEP_ANTI);
4590 }
4591 else
4592 {
4593 /* Subsequent speculations should resolve true dependencies. */
4594 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4595 }
b8698a0f
L
4596
4597 /* Check that true and anti dependencies can't have other speculative
ddbd5439
MK
4598 statuses. */
4599 if (ds & DEP_TRUE)
4600 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4601 /* An output dependence can't be speculative at all. */
4602 gcc_assert (!(ds & DEP_OUTPUT));
4603 if (ds & DEP_ANTI)
4604 gcc_assert (ds & BEGIN_CONTROL);
4605 }
4606}
a750daa2
MK
4607#endif /* ENABLE_CHECKING */

/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from
     mem_reg0 if the increment occurs before the memory address.  */
  rtx inc_input;
};

/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */

static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
	fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}

/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */

static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
	return mii->inc_constant > 0;
      else
	return mii->inc_constant < 0;
    }
  return true;
}
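
/* Illustrative shapes accepted above (an added sketch, not part of the
   original source), with N standing for any CONST_INT:

     BEFORE_MEM == true   (set (reg A) (plus (reg B) (const_int N)))
			  A must equal MII->mem_reg0; B may differ.
     BEFORE_MEM == false  (set (reg A) (plus (reg A) (const_int N)))
			  source and destination must be the same register,
			  and N's sign is flipped in MII->inc_constant.  */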

/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;

  sd_it = sd_iterator_start (mii->mem_insn,
			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand = backwards ? pro : con;
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;
      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* Need to ensure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}

/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
	 that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}
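
/* Address shapes decomposed above (an added illustration, not part of the
   original source):

     (mem (reg R))                          mem_reg0 = R
     (mem (plus (reg R) (const_int 4)))     mem_reg0 = R, mem_constant = 4
     (mem (plus (plus (reg R) (reg I))
		(const_int 4)))             mem_reg0 = R, mem_index = I,
					    mem_constant = 4  */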

/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */

void
find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
	continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
	success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
	     success_in_block);
}

#endif /* INSN_SCHEDULING */