/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
99
100/* Functions to operate with dependence information container - dep_t. */
101
102/* Init DEP with the arguments. */
93f6b030 103void
fc5ad70a 104init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
9997bd27 105{
fc5ad70a 106 DEP_PRO (dep) = pro;
107 DEP_CON (dep) = con;
93f6b030 108 DEP_TYPE (dep) = type;
9997bd27 109 DEP_STATUS (dep) = ds;
38354bb6 110 DEP_COST (dep) = UNKNOWN_DEP_COST;
d452a169 111 DEP_NONREG (dep) = 0;
112 DEP_MULTIPLE (dep) = 0;
113 DEP_REPLACE (dep) = NULL;
9997bd27 114}
115
116/* Init DEP with the arguments.
117 While most of the scheduler (including targets) only need the major type
f2b32076 118 of the dependency, it is convenient to hide full dep_status from them. */
9997bd27 119void
fc5ad70a 120init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
9997bd27 121{
122 ds_t ds;
123
93f6b030 124 if ((current_sched_info->flags & USE_DEPS_LIST))
9997bd27 125 ds = dk_to_ds (kind);
126 else
2261c559 127 ds = 0;
9997bd27 128
129 init_dep_1 (dep, pro, con, kind, ds);
130}
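
/* Illustrative sketch: a dep is ordinarily built on the stack and then
   handed to sd_add_dep () or sd_add_or_update_dep () below, which copy
   it into a pool-allocated dep_node.  PRODUCER and CONSUMER here are
   placeholder rtx_insn pointers:

     dep_def _dep, *dep = &_dep;
     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_dep (dep, false);  */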

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
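
/* Illustrative note on the representation used above: instead of a "prev"
   pointer, each link stores the address of whichever "next" field points
   at it (DEP_LINK_PREV_NEXTP).  Unlinking therefore needs no special case
   for the list head.  In plain C the same idiom is:

     struct node { struct node *next, **prev_nextp; };

     void
     unlink_node (struct node *n)
     {
       *n->prev_nextp = n->next;
       if (n->next != NULL)
	 n->next->prev_nextp = n->prev_nextp;
     }

   detach_dep_link () is exactly this, followed by clearing the fields.  */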

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}
409
93f6b030 410/* Remove all elements from L. */
411static void
412clear_deps_list (deps_list_t l)
9997bd27 413{
93f6b030 414 do
415 {
416 dep_link_t link = DEPS_LIST_FIRST (l);
9997bd27 417
93f6b030 418 if (link == NULL)
419 break;
9997bd27 420
93f6b030 421 remove_from_deps_list (link, l);
9997bd27 422 }
93f6b030 423 while (1);
9997bd27 424}
6adce0fb 425
2261c559 426/* Decide whether a dependency should be treated as a hard or a speculative
427 dependency. */
428static bool
429dep_spec_p (dep_t dep)
430{
431 if (current_sched_info->flags & DO_SPECULATION)
effd1640 432 {
433 if (DEP_STATUS (dep) & SPECULATIVE)
434 return true;
435 }
436 if (current_sched_info->flags & DO_PREDICATION)
437 {
438 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
439 return true;
440 }
d452a169 441 if (DEP_REPLACE (dep) != NULL)
442 return true;
2261c559 443 return false;
444}
445
6adce0fb 446static regset reg_pending_sets;
447static regset reg_pending_clobbers;
5deaeb50 448static regset reg_pending_uses;
effd1640 449static regset reg_pending_control_uses;
c14a239f 450static enum reg_pending_barrier_mode reg_pending_barrier;
6adce0fb 451
a7dcf969 452/* Hard registers implicitly clobbered or used (or may be implicitly
453 clobbered or used) by the currently analyzed insn. For example,
454 insn in its constraint has one register class. Even if there is
455 currently no hard register in the insn, the particular hard
456 register will be in the insn after reload pass because the
457 constraint requires it. */
458static HARD_REG_SET implicit_reg_pending_clobbers;
459static HARD_REG_SET implicit_reg_pending_uses;
460

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
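
/* Illustrative sketch of how the caches above are used: each is an array
   of bitmaps indexed by the luid of the consumer insn, with one bit per
   potential producer.  Testing for an already-recorded true dependence of
   CON on PRO (placeholder insns here) looks like:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		       INSN_LUID (pro)))
       ...dependence already known...

   See ask_dependency_caches () below for the real lookup logic.  */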

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const_rtx insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
\f

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
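
/* Illustrative example: on a predicated target, two COND_EXEC insns such
   as

     (cond_exec (eq (reg:CC cc) (const_int 0)) (set (reg A) ...))
     (cond_exec (ne (reg:CC cc) (const_int 0)) (set (reg A) ...))

   test the same operands with reversed comparison codes, so
   sched_insns_conditions_mutex_p () returns true and no output dependence
   needs to be created between them, even though both write register A.  */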
\f

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot themselves be speculatively scheduled.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If the instruction might fault, it cannot be speculatively
	   scheduled.  For control speculation it's obvious why and for
	   data speculation it's because the insn might get wrong input
	   if speculation wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
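
/* Illustrative sketch: the usual way to walk these lists is the
   FOR_EACH_DEP iterator from sched-int.h, which uses sd_next_list () to
   advance through every requested list type.  For example, counting the
   hard backward deps of a placeholder INSN:

     sd_iterator_def sd_it;
     dep_t dep;
     int n = 0;

     FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
       n++;  */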

/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
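
/* Illustrative sketch: querying and then inspecting a dependence between
   two placeholder insns PRO and CON:

     dep_t dep = sd_find_dep_between (pro, con, false);
     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ...CON really uses a value PRO computes...

   The caches make the common "no dependence" answer cheap; only a cache
   hit falls back to the list walk above.  */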

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a backward dependence between INSN and ELEM with the
   given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
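
/* Illustrative sketch of the usual call sequence, with PRODUCER and
   CONSUMER as placeholder insns.  A stack dep is built and then either
   merged into an existing one or added fresh:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_OUTPUT);
     if (sd_add_or_update_dep (dep, false) == DEP_CREATED)
       ...a new dep_node now links the two insns...  */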

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The cycle below could be optimized to clear the caches and back_deps
     in one call, but that would duplicate code from delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
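
/* Illustrative sketch: during analysis of a store, a write is queued on
   the pending lists roughly like this (DEPS is the current deps_desc and
   INSN a placeholder store insn whose PATTERN is (set (mem) ...)):

     add_insn_mem_dependence (deps, false, insn, SET_DEST (PATTERN (insn)));

   Later reads are then compared against the queued MEMs for aliasing, and
   flush_pending_lists () below drains the lists when they grow too long or
   a barrier is reached.  */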
1752
1753/* Make a dependency between every memory reference on the pending lists
5deaeb50 1754 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1755 dependencies for a read operation, similarly with FOR_WRITE. */
6adce0fb 1756
1757static void
b24ef467 1758flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
60b8c5b3 1759 int for_write)
6adce0fb 1760{
5deaeb50 1761 if (for_write)
6adce0fb 1762 {
48e1416a 1763 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
d452a169 1764 1, REG_DEP_ANTI, true);
e1ab7874 1765 if (!deps->readonly)
1766 {
1767 free_EXPR_LIST_list (&deps->pending_read_mems);
1768 deps->pending_read_list_length = 0;
1769 }
6adce0fb 1770 }
6adce0fb 1771
e1ab7874 1772 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
d452a169 1773 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1774 true);
6adce0fb 1775
48e1416a 1776 add_dependence_list_and_free (deps, insn,
e1ab7874 1777 &deps->last_pending_memory_flush, 1,
d452a169 1778 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1779 true);
effd1640 1780
1781 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
d452a169 1782 REG_DEP_ANTI, true);
effd1640 1783
a7a503a5 1784 if (DEBUG_INSN_P (insn))
1785 {
1786 if (for_write)
1787 free_INSN_LIST_list (&deps->pending_read_insns);
1788 free_INSN_LIST_list (&deps->pending_write_insns);
1789 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1790 free_INSN_LIST_list (&deps->pending_jump_insns);
1791 }
1792
e1ab7874 1793 if (!deps->readonly)
1794 {
1795 free_EXPR_LIST_list (&deps->pending_write_mems);
1796 deps->pending_write_list_length = 0;
1797
1798 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1799 deps->pending_flush_length = 1;
1800 }
d452a169 1801 mark_as_hard = false;
e1ab7874 1802}
1803\f
1804/* Instruction which dependencies we are analyzing. */
b24ef467 1805static rtx_insn *cur_insn = NULL;
e1ab7874 1806
	1807	/* Implement hooks for the Haifa scheduler.  */
1808
1809static void
2f3c9801 1810haifa_start_insn (rtx_insn *insn)
e1ab7874 1811{
1812 gcc_assert (insn && !cur_insn);
1813
2f3c9801 1814 cur_insn = insn;
e1ab7874 1815}
1816
1817static void
1818haifa_finish_insn (void)
1819{
1820 cur_insn = NULL;
1821}
1822
1823void
1824haifa_note_reg_set (int regno)
1825{
1826 SET_REGNO_REG_SET (reg_pending_sets, regno);
1827}
1828
1829void
1830haifa_note_reg_clobber (int regno)
1831{
1832 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1833}
1834
1835void
1836haifa_note_reg_use (int regno)
1837{
1838 SET_REGNO_REG_SET (reg_pending_uses, regno);
1839}
1840
1841static void
2f3c9801 1842haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
e1ab7874 1843{
1844 if (!(ds & SPECULATIVE))
1845 {
1846 mem = NULL_RTX;
1847 pending_mem = NULL_RTX;
1848 }
1849 else
1850 gcc_assert (ds & BEGIN_DATA);
1851
1852 {
1853 dep_def _dep, *dep = &_dep;
48e1416a 1854
a7dcf969 1855 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
2261c559 1856 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
d452a169 1857 DEP_NONREG (dep) = 1;
e1ab7874 1858 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1859 }
1860
1861}
1862
1863static void
2f3c9801 1864haifa_note_dep (rtx_insn *elem, ds_t ds)
e1ab7874 1865{
1866 dep_def _dep;
1867 dep_t dep = &_dep;
1868
1869 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
d452a169 1870 if (mark_as_hard)
1871 DEP_NONREG (dep) = 1;
e1ab7874 1872 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1873}
1874
1875static void
1876note_reg_use (int r)
1877{
1878 if (sched_deps_info->note_reg_use)
1879 sched_deps_info->note_reg_use (r);
1880}
1881
1882static void
1883note_reg_set (int r)
1884{
1885 if (sched_deps_info->note_reg_set)
1886 sched_deps_info->note_reg_set (r);
1887}
1888
1889static void
1890note_reg_clobber (int r)
1891{
1892 if (sched_deps_info->note_reg_clobber)
1893 sched_deps_info->note_reg_clobber (r);
1894}
1895
1896static void
2f3c9801 1897note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
e1ab7874 1898{
1899 if (sched_deps_info->note_mem_dep)
1900 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1901}
1902
1903static void
b24ef467 1904note_dep (rtx_insn *e, ds_t ds)
e1ab7874 1905{
1906 if (sched_deps_info->note_dep)
1907 sched_deps_info->note_dep (e, ds);
1908}
1909
	1910	/* Return the reg_note corresponding to DS.  */
1911enum reg_note
1912ds_to_dt (ds_t ds)
1913{
1914 if (ds & DEP_TRUE)
1915 return REG_DEP_TRUE;
1916 else if (ds & DEP_OUTPUT)
1917 return REG_DEP_OUTPUT;
effd1640 1918 else if (ds & DEP_ANTI)
1919 return REG_DEP_ANTI;
e1ab7874 1920 else
1921 {
effd1640 1922 gcc_assert (ds & DEP_CONTROL);
1923 return REG_DEP_CONTROL;
e1ab7874 1924 }
6adce0fb 1925}
a7dcf969 1926
1927\f
1928
	1929	/* Functions to compute the info needed for register-pressure-sensitive
	1930	   insn scheduling.  */
1931
1932
1933/* Allocate and return reg_use_data structure for REGNO and INSN. */
1934static struct reg_use_data *
73e15687 1935create_insn_reg_use (int regno, rtx_insn *insn)
a7dcf969 1936{
1937 struct reg_use_data *use;
1938
1939 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1940 use->regno = regno;
1941 use->insn = insn;
1942 use->next_insn_use = INSN_REG_USE_LIST (insn);
1943 INSN_REG_USE_LIST (insn) = use;
1944 return use;
1945}
1946
57a8bf1b 1947/* Allocate reg_set_data structure for REGNO and INSN. */
1948static void
a7dcf969 1949create_insn_reg_set (int regno, rtx insn)
1950{
1951 struct reg_set_data *set;
1952
1953 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1954 set->regno = regno;
1955 set->insn = insn;
1956 set->next_insn_set = INSN_REG_SET_LIST (insn);
1957 INSN_REG_SET_LIST (insn) = set;
a7dcf969 1958}
1959
1960/* Set up insn register uses for INSN and dependency context DEPS. */
1961static void
73e15687 1962setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
a7dcf969 1963{
1964 unsigned i;
1965 reg_set_iterator rsi;
1966 rtx list;
1967 struct reg_use_data *use, *use2, *next;
1968 struct deps_reg *reg_last;
1969
1970 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1971 {
1972 if (i < FIRST_PSEUDO_REGISTER
1973 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1974 continue;
1975
1976 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1977 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1978 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	1979	/* Ignore a use that is not dying.  */
	1980	continue;
1981
1982 use = create_insn_reg_use (i, insn);
1983 use->next_regno_use = use;
1984 reg_last = &deps->reg_last[i];
48e1416a 1985
a7dcf969 1986	      /* Create the circular list of uses.  */
1987 for (list = reg_last->uses; list; list = XEXP (list, 1))
1988 {
73e15687 1989 use2 = create_insn_reg_use (i, as_a <rtx_insn *> (XEXP (list, 0)));
a7dcf969 1990 next = use->next_regno_use;
1991 use->next_regno_use = use2;
1992 use2->next_regno_use = next;
1993 }
1994 }
1995}
1996
1997/* Register pressure info for the currently processed insn. */
1998static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1999
2000/* Return TRUE if INSN has the use structure for REGNO. */
2001static bool
2002insn_use_p (rtx insn, int regno)
2003{
2004 struct reg_use_data *use;
2005
2006 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2007 if (use->regno == regno)
2008 return true;
2009 return false;
2010}
2011
	2012	/* Update the register pressure info after the birth of pseudo register
	2013	   REGNO in INSN.  CLOBBER_P and UNUSED_P say, respectively, whether
	2014	   the register is clobbered by or unused after the insn.  */
2015static void
2016mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2017{
2018 int incr, new_incr;
2019 enum reg_class cl;
2020
2021 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
66d9a7b9 2022 cl = sched_regno_pressure_class[regno];
a7dcf969 2023 if (cl != NO_REGS)
2024 {
66d9a7b9 2025 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
a7dcf969 2026 if (clobber_p)
2027 {
2028 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2029 reg_pressure_info[cl].clobber_increase = new_incr;
2030 }
2031 else if (unused_p)
2032 {
2033 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2034 reg_pressure_info[cl].unused_set_increase = new_incr;
2035 }
2036 else
2037 {
2038 new_incr = reg_pressure_info[cl].set_increase + incr;
2039 reg_pressure_info[cl].set_increase = new_incr;
2040 if (! insn_use_p (insn, regno))
2041 reg_pressure_info[cl].change += incr;
2042 create_insn_reg_set (regno, insn);
2043 }
2044 gcc_assert (new_incr < (1 << INCREASE_BITS));
2045 }
2046}
2047
	2048	/* Like mark_insn_pseudo_birth, except that NREGS says how many
	2049	   hard registers are involved in the birth.  */
2050static void
2051mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2052 bool clobber_p, bool unused_p)
2053{
2054 enum reg_class cl;
2055 int new_incr, last = regno + nregs;
48e1416a 2056
a7dcf969 2057 while (regno < last)
2058 {
2059 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2060 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2061 {
66d9a7b9 2062 cl = sched_regno_pressure_class[regno];
a7dcf969 2063 if (cl != NO_REGS)
2064 {
2065 if (clobber_p)
2066 {
2067 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2068 reg_pressure_info[cl].clobber_increase = new_incr;
2069 }
2070 else if (unused_p)
2071 {
2072 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2073 reg_pressure_info[cl].unused_set_increase = new_incr;
2074 }
2075 else
2076 {
2077 new_incr = reg_pressure_info[cl].set_increase + 1;
2078 reg_pressure_info[cl].set_increase = new_incr;
2079 if (! insn_use_p (insn, regno))
2080 reg_pressure_info[cl].change += 1;
2081 create_insn_reg_set (regno, insn);
2082 }
2083 gcc_assert (new_incr < (1 << INCREASE_BITS));
2084 }
2085 }
2086 regno++;
2087 }
2088}
2089
	2090	/* Update the register pressure info after the birth of pseudo or
	2091	   hard register REG in INSN.  CLOBBER_P and UNUSED_P say,
	2092	   respectively, whether the register is clobbered by or unused
	2093	   after the insn.  */
2094static void
2095mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2096{
2097 int regno;
2098
2099 if (GET_CODE (reg) == SUBREG)
2100 reg = SUBREG_REG (reg);
2101
2102 if (! REG_P (reg))
2103 return;
2104
2105 regno = REGNO (reg);
2106 if (regno < FIRST_PSEUDO_REGISTER)
2107 mark_insn_hard_regno_birth (insn, regno,
2108 hard_regno_nregs[regno][GET_MODE (reg)],
2109 clobber_p, unused_p);
2110 else
2111 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2112}
2113
2114/* Update the register pressure info after death of pseudo register
2115 REGNO. */
2116static void
2117mark_pseudo_death (int regno)
2118{
2119 int incr;
2120 enum reg_class cl;
2121
2122 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
66d9a7b9 2123 cl = sched_regno_pressure_class[regno];
a7dcf969 2124 if (cl != NO_REGS)
2125 {
66d9a7b9 2126 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
a7dcf969 2127 reg_pressure_info[cl].change -= incr;
2128 }
2129}
2130
	2131	/* Like mark_pseudo_death, except that NREGS says how many hard
	2132	   registers are involved in the death.  */
2133static void
2134mark_hard_regno_death (int regno, int nregs)
2135{
2136 enum reg_class cl;
2137 int last = regno + nregs;
48e1416a 2138
a7dcf969 2139 while (regno < last)
2140 {
2141 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2142 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2143 {
66d9a7b9 2144 cl = sched_regno_pressure_class[regno];
a7dcf969 2145 if (cl != NO_REGS)
2146 reg_pressure_info[cl].change -= 1;
2147 }
2148 regno++;
2149 }
2150}
2151
2152/* Update the register pressure info after death of pseudo or hard
2153 register REG. */
2154static void
2155mark_reg_death (rtx reg)
2156{
2157 int regno;
2158
2159 if (GET_CODE (reg) == SUBREG)
2160 reg = SUBREG_REG (reg);
2161
2162 if (! REG_P (reg))
2163 return;
2164
2165 regno = REGNO (reg);
2166 if (regno < FIRST_PSEUDO_REGISTER)
2167 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
2168 else
2169 mark_pseudo_death (regno);
2170}
2171
2172/* Process SETTER of REG. DATA is an insn containing the setter. */
2173static void
2174mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2175{
2176 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2177 return;
2178 mark_insn_reg_birth
2179 ((rtx) data, reg, false,
2180 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2181}
2182
2183/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2184static void
2185mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2186{
2187 if (GET_CODE (setter) == CLOBBER)
2188 mark_insn_reg_birth ((rtx) data, reg, true, false);
2189}
2190
2191/* Set up reg pressure info related to INSN. */
c15d7785 2192void
2193init_insn_reg_pressure_info (rtx insn)
a7dcf969 2194{
2195 int i, len;
2196 enum reg_class cl;
2197 static struct reg_pressure_data *pressure_info;
2198 rtx link;
2199
b30b031c 2200 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
a7dcf969 2201
2202 if (! INSN_P (insn))
2203 return;
2204
66d9a7b9 2205 for (i = 0; i < ira_pressure_classes_num; i++)
a7dcf969 2206 {
66d9a7b9 2207 cl = ira_pressure_classes[i];
a7dcf969 2208 reg_pressure_info[cl].clobber_increase = 0;
2209 reg_pressure_info[cl].set_increase = 0;
2210 reg_pressure_info[cl].unused_set_increase = 0;
2211 reg_pressure_info[cl].change = 0;
2212 }
48e1416a 2213
a7dcf969 2214 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
48e1416a 2215
a7dcf969 2216 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
48e1416a 2217
a7dcf969 2218#ifdef AUTO_INC_DEC
2219 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2220 if (REG_NOTE_KIND (link) == REG_INC)
2221 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2222#endif
2223
2224 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2225 if (REG_NOTE_KIND (link) == REG_DEAD)
2226 mark_reg_death (XEXP (link, 0));
48e1416a 2227
66d9a7b9 2228 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
a7dcf969 2229 pressure_info
2230 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
b30b031c 2231 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2232 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2233 * sizeof (int), 1);
66d9a7b9 2234 for (i = 0; i < ira_pressure_classes_num; i++)
a7dcf969 2235 {
66d9a7b9 2236 cl = ira_pressure_classes[i];
a7dcf969 2237 pressure_info[i].clobber_increase
2238 = reg_pressure_info[cl].clobber_increase;
2239 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2240 pressure_info[i].unused_set_increase
2241 = reg_pressure_info[cl].unused_set_increase;
2242 pressure_info[i].change = reg_pressure_info[cl].change;
2243 }
2244}
2245
2246
6adce0fb 2247\f
e1ab7874 2248
	2249	/* Internal variable for the sched_analyze_[12] () functions.
	2250	   If it is nonzero, sched_analyze_[12] is looking at the
	2251	   outermost SET.  */
2252static bool can_start_lhs_rhs_p;
2253
48e1416a 2254/* Extend reg info for the deps context DEPS given that
e1ab7874 2255 we have just generated a register numbered REGNO. */
2256static void
68e419a1 2257extend_deps_reg_info (struct deps_desc *deps, int regno)
e1ab7874 2258{
2259 int max_regno = regno + 1;
2260
2261 gcc_assert (!reload_completed);
2262
2263 /* In a readonly context, it would not hurt to extend info,
2264 but it should not be needed. */
2265 if (reload_completed && deps->readonly)
2266 {
2267 deps->max_reg = max_regno;
2268 return;
2269 }
2270
2271 if (max_regno > deps->max_reg)
2272 {
48e1416a 2273 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
e1ab7874 2274 max_regno);
2275 memset (&deps->reg_last[deps->max_reg],
48e1416a 2276 0, (max_regno - deps->max_reg)
e1ab7874 2277 * sizeof (struct deps_reg));
2278 deps->max_reg = max_regno;
2279 }
2280}
2281
2282/* Extends REG_INFO_P if needed. */
2283void
2284maybe_extend_reg_info_p (void)
2285{
2287 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2288 {
2289 size_t new_reg_info_p_size = max_regno + 128;
2290
2291 gcc_assert (!reload_completed && sel_sched_p ());
2292
2293 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2294 new_reg_info_p_size,
2295 reg_info_p_size,
2296 sizeof (*reg_info_p));
2297 reg_info_p_size = new_reg_info_p_size;
2298 }
2299}
2300
8ad6f16d 2301/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2302 The type of the reference is specified by REF and can be SET,
2303 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2304
2305static void
68e419a1 2306sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
b24ef467 2307 enum rtx_code ref, rtx_insn *insn)
8ad6f16d 2308{
e1ab7874 2309 /* We could emit new pseudos in renaming. Extend the reg structures. */
2310 if (!reload_completed && sel_sched_p ()
2311 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2312 extend_deps_reg_info (deps, regno);
2313
2314 maybe_extend_reg_info_p ();
2315
8ad6f16d 2316 /* A hard reg in a wide mode may really be multiple registers.
2317 If so, mark all of them just like the first. */
2318 if (regno < FIRST_PSEUDO_REGISTER)
2319 {
2320 int i = hard_regno_nregs[regno][mode];
2321 if (ref == SET)
2322 {
2323 while (--i >= 0)
e1ab7874 2324 note_reg_set (regno + i);
8ad6f16d 2325 }
2326 else if (ref == USE)
2327 {
2328 while (--i >= 0)
e1ab7874 2329 note_reg_use (regno + i);
8ad6f16d 2330 }
2331 else
2332 {
2333 while (--i >= 0)
e1ab7874 2334 note_reg_clobber (regno + i);
8ad6f16d 2335 }
2336 }
2337
2338 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2339 it does not reload. Ignore these as they have served their
2340 purpose already. */
2341 else if (regno >= deps->max_reg)
2342 {
2343 enum rtx_code code = GET_CODE (PATTERN (insn));
2344 gcc_assert (code == USE || code == CLOBBER);
2345 }
2346
2347 else
2348 {
2349 if (ref == SET)
e1ab7874 2350 note_reg_set (regno);
8ad6f16d 2351 else if (ref == USE)
e1ab7874 2352 note_reg_use (regno);
8ad6f16d 2353 else
e1ab7874 2354 note_reg_clobber (regno);
8ad6f16d 2355
2356 /* Pseudos that are REG_EQUIV to something may be replaced
2357 by that during reloading. We need only add dependencies for
2358 the address in the REG_EQUIV note. */
2359 if (!reload_completed && get_reg_known_equiv_p (regno))
2360 {
2361 rtx t = get_reg_known_value (regno);
2362 if (MEM_P (t))
2363 sched_analyze_2 (deps, XEXP (t, 0), insn);
2364 }
2365
2366 /* Don't let it cross a call after scheduling if it doesn't
2367 already cross one. */
2368 if (REG_N_CALLS_CROSSED (regno) == 0)
2369 {
9845d120 2370 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
8ad6f16d 2371 deps->sched_before_next_call
2372 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2373 else
2374 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 2375 REG_DEP_ANTI, false);
8ad6f16d 2376 }
2377 }
2378}
2379
6adce0fb 2380/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2381 rtx, X, creating all dependencies generated by the write to the
2382 destination of X, and reads of everything mentioned. */
2383
2384static void
b24ef467 2385sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2386{
19cb6b50 2387 rtx dest = XEXP (x, 0);
6adce0fb 2388 enum rtx_code code = GET_CODE (x);
e1ab7874 2389 bool cslr_p = can_start_lhs_rhs_p;
6adce0fb 2390
e1ab7874 2391 can_start_lhs_rhs_p = false;
2392
2393 gcc_assert (dest);
6adce0fb 2394 if (dest == 0)
2395 return;
2396
e1ab7874 2397 if (cslr_p && sched_deps_info->start_lhs)
2398 sched_deps_info->start_lhs (dest);
2399
4b303227 2400 if (GET_CODE (dest) == PARALLEL)
6adce0fb 2401 {
19cb6b50 2402 int i;
216b2683 2403
6adce0fb 2404 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
4b303227 2405 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2406 sched_analyze_1 (deps,
2407 gen_rtx_CLOBBER (VOIDmode,
2408 XEXP (XVECEXP (dest, 0, i), 0)),
2409 insn);
216b2683 2410
e1ab7874 2411 if (cslr_p && sched_deps_info->finish_lhs)
2412 sched_deps_info->finish_lhs ();
2413
2414 if (code == SET)
2415 {
2416 can_start_lhs_rhs_p = cslr_p;
2417
2418 sched_analyze_2 (deps, SET_SRC (x), insn);
2419
2420 can_start_lhs_rhs_p = false;
2421 }
2422
6adce0fb 2423 return;
2424 }
2425
2426 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
476d094d 2427 || GET_CODE (dest) == ZERO_EXTRACT)
6adce0fb 2428 {
b69139c7 2429 if (GET_CODE (dest) == STRICT_LOW_PART
2430 || GET_CODE (dest) == ZERO_EXTRACT
e011eba9 2431 || df_read_modify_subreg_p (dest))
b69139c7 2432 {
60b8c5b3 2433 /* These both read and modify the result. We must handle
b69139c7 2434 them as writes to get proper dependencies for following
2435 instructions. We must handle them as reads to get proper
2436 dependencies from this to previous instructions.
6473f3f4 2437 Thus we need to call sched_analyze_2. */
b69139c7 2438
60b8c5b3 2439 sched_analyze_2 (deps, XEXP (dest, 0), insn);
b69139c7 2440 }
476d094d 2441 if (GET_CODE (dest) == ZERO_EXTRACT)
6adce0fb 2442 {
2443 /* The second and third arguments are values read by this insn. */
2444 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2445 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2446 }
2447 dest = XEXP (dest, 0);
2448 }
2449
8ad4c111 2450 if (REG_P (dest))
6adce0fb 2451 {
8ad6f16d 2452 int regno = REGNO (dest);
2453 enum machine_mode mode = GET_MODE (dest);
2454
2455 sched_analyze_reg (deps, regno, mode, code, insn);
6adce0fb 2456
1541bfd5 2457#ifdef STACK_REGS
2458 /* Treat all writes to a stack register as modifying the TOS. */
2459 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2460 {
8ad6f16d 2461 /* Avoid analyzing the same register twice. */
2462 if (regno != FIRST_STACK_REG)
2463 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
a7dcf969 2464
d82cf2b2 2465 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2466 FIRST_STACK_REG);
1541bfd5 2467 }
2468#endif
6adce0fb 2469 }
e16ceb8e 2470 else if (MEM_P (dest))
6adce0fb 2471 {
2472 /* Writing memory. */
503f2817 2473 rtx t = dest;
2474
e1ab7874 2475 if (sched_deps_info->use_cselib)
503f2817 2476 {
87cf5753 2477 enum machine_mode address_mode = get_address_mode (dest);
98155838 2478
503f2817 2479 t = shallow_copy_rtx (dest);
1f864115 2480 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2481 GET_MODE (t), insn);
2af89801 2482 XEXP (t, 0)
2483 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2484 insn);
503f2817 2485 }
faa80ce8 2486 t = canon_rtx (t);
6adce0fb 2487
e1ab7874 2488 /* Pending lists can't get larger with a readonly context. */
2489 if (!deps->readonly
2490 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2491 > MAX_PENDING_LIST_LENGTH))
6adce0fb 2492 {
2493 /* Flush all pending reads and writes to prevent the pending lists
2494 from getting any larger. Insn scheduling runs too slowly when
85de291e 2495 these lists get long. When compiling GCC with itself,
6adce0fb 2496 this flush occurs 8 times for sparc, and 10 times for m88k using
85de291e 2497 the default value of 32. */
5deaeb50 2498 flush_pending_lists (deps, insn, false, true);
6adce0fb 2499 }
2500 else
2501 {
6adce0fb 2502 rtx pending, pending_mem;
2503
2504 pending = deps->pending_read_insns;
2505 pending_mem = deps->pending_read_mems;
2506 while (pending)
2507 {
e6a25dc9 2508 if (anti_dependence (XEXP (pending_mem, 0), t)
2509 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2f3c9801 2510 note_mem_dep (t, XEXP (pending_mem, 0), as_a <rtx_insn *> (XEXP (pending, 0)),
e1ab7874 2511 DEP_ANTI);
6adce0fb 2512
2513 pending = XEXP (pending, 1);
2514 pending_mem = XEXP (pending_mem, 1);
2515 }
2516
2517 pending = deps->pending_write_insns;
2518 pending_mem = deps->pending_write_mems;
2519 while (pending)
2520 {
e6a25dc9 2521 if (output_dependence (XEXP (pending_mem, 0), t)
2522 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2f3c9801 2523 note_mem_dep (t, XEXP (pending_mem, 0),
2524 as_a <rtx_insn *> (XEXP (pending, 0)),
e1ab7874 2525 DEP_OUTPUT);
6adce0fb 2526
2527 pending = XEXP (pending, 1);
2528 pending_mem = XEXP (pending_mem, 1);
2529 }
2530
e228dd5d 2531 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
d452a169 2532 REG_DEP_ANTI, true);
effd1640 2533 add_dependence_list (insn, deps->pending_jump_insns, 1,
d452a169 2534 REG_DEP_CONTROL, true);
6adce0fb 2535
e1ab7874 2536 if (!deps->readonly)
2537 add_insn_mem_dependence (deps, false, insn, dest);
6adce0fb 2538 }
2539 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2540 }
2541
e1ab7874 2542 if (cslr_p && sched_deps_info->finish_lhs)
2543 sched_deps_info->finish_lhs ();
2544
6adce0fb 2545 /* Analyze reads. */
2546 if (GET_CODE (x) == SET)
e1ab7874 2547 {
2548 can_start_lhs_rhs_p = cslr_p;
2549
2550 sched_analyze_2 (deps, SET_SRC (x), insn);
2551
2552 can_start_lhs_rhs_p = false;
2553 }
6adce0fb 2554}
2555
2556/* Analyze the uses of memory and registers in rtx X in INSN. */
6adce0fb 2557static void
b24ef467 2558sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2559{
19cb6b50 2560 int i;
2561 int j;
2562 enum rtx_code code;
2563 const char *fmt;
e1ab7874 2564 bool cslr_p = can_start_lhs_rhs_p;
2565
2566 can_start_lhs_rhs_p = false;
6adce0fb 2567
e1ab7874 2568 gcc_assert (x);
6adce0fb 2569 if (x == 0)
2570 return;
2571
e1ab7874 2572 if (cslr_p && sched_deps_info->start_rhs)
2573 sched_deps_info->start_rhs (x);
2574
6adce0fb 2575 code = GET_CODE (x);
2576
2577 switch (code)
2578 {
0349edce 2579 CASE_CONST_ANY:
6adce0fb 2580 case SYMBOL_REF:
2581 case CONST:
2582 case LABEL_REF:
8990536a 2583 /* Ignore constants. */
e1ab7874 2584 if (cslr_p && sched_deps_info->finish_rhs)
2585 sched_deps_info->finish_rhs ();
2586
6adce0fb 2587 return;
2588
2589#ifdef HAVE_cc0
2590 case CC0:
2591 /* User of CC0 depends on immediately preceding insn. */
828f2394 2592 SCHED_GROUP_P (insn) = 1;
f61f0d0c 2593	      /* Don't move the CC0 setter to another block (it can set up the
	2594	         same flag for previous CC0 users, which is safe).  */
2595 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
e1ab7874 2596
2597 if (cslr_p && sched_deps_info->finish_rhs)
2598 sched_deps_info->finish_rhs ();
2599
6adce0fb 2600 return;
2601#endif
2602
2603 case REG:
2604 {
6adce0fb 2605 int regno = REGNO (x);
8ad6f16d 2606 enum machine_mode mode = GET_MODE (x);
2607
2608 sched_analyze_reg (deps, regno, mode, USE, insn);
1541bfd5 2609
2610#ifdef STACK_REGS
2611 /* Treat all reads of a stack register as modifying the TOS. */
2612 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2613 {
8ad6f16d 2614 /* Avoid analyzing the same register twice. */
2615 if (regno != FIRST_STACK_REG)
2616 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2617 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
1541bfd5 2618 }
2619#endif
e1ab7874 2620
2621 if (cslr_p && sched_deps_info->finish_rhs)
2622 sched_deps_info->finish_rhs ();
2623
6adce0fb 2624 return;
2625 }
2626
2627 case MEM:
2628 {
2629 /* Reading memory. */
2630 rtx u;
2631 rtx pending, pending_mem;
503f2817 2632 rtx t = x;
6adce0fb 2633
e1ab7874 2634 if (sched_deps_info->use_cselib)
503f2817 2635 {
87cf5753 2636 enum machine_mode address_mode = get_address_mode (t);
98155838 2637
503f2817 2638 t = shallow_copy_rtx (t);
1f864115 2639 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2640 GET_MODE (t), insn);
2af89801 2641 XEXP (t, 0)
2642 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2643 insn);
503f2817 2644 }
6adce0fb 2645
c971b445 2646 if (!DEBUG_INSN_P (insn))
6adce0fb 2647 {
c971b445 2648 t = canon_rtx (t);
2649 pending = deps->pending_read_insns;
2650 pending_mem = deps->pending_read_mems;
2651 while (pending)
2652 {
2653 if (read_dependence (XEXP (pending_mem, 0), t)
2654 && ! sched_insns_conditions_mutex_p (insn,
2655 XEXP (pending, 0)))
2f3c9801 2656 note_mem_dep (t, XEXP (pending_mem, 0),
2657 as_a <rtx_insn *> (XEXP (pending, 0)),
c971b445 2658 DEP_ANTI);
2659
2660 pending = XEXP (pending, 1);
2661 pending_mem = XEXP (pending_mem, 1);
2662 }
6adce0fb 2663
c971b445 2664 pending = deps->pending_write_insns;
2665 pending_mem = deps->pending_write_mems;
2666 while (pending)
e1ab7874 2667 {
376a287d 2668 if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
c971b445 2669 && ! sched_insns_conditions_mutex_p (insn,
2670 XEXP (pending, 0)))
2f3c9801 2671 note_mem_dep (t, XEXP (pending_mem, 0),
2672 as_a <rtx_insn *> (XEXP (pending, 0)),
c971b445 2673 sched_deps_info->generate_spec_deps
2674 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2675
2676 pending = XEXP (pending, 1);
2677 pending_mem = XEXP (pending_mem, 1);
2678 }
e1ab7874 2679
c971b445 2680 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
b24ef467 2681 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2682 REG_DEP_ANTI);
effd1640 2683
2684 for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
2685 if (deps_may_trap_p (x))
2686 {
2687 if ((sched_deps_info->generate_spec_deps)
2688 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2689 {
2690 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2691 MAX_DEP_WEAK);
2692
b24ef467 2693 note_dep (as_a <rtx_insn *> (XEXP (u, 0)), ds);
effd1640 2694 }
2695 else
b24ef467 2696 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
2697 REG_DEP_CONTROL);
effd1640 2698 }
e1ab7874 2699 }
6adce0fb 2700
2701 /* Always add these dependencies to pending_reads, since
2702 this insn may be followed by a write. */
c2066d40 2703 if (!deps->readonly)
2704 {
2705 if ((deps->pending_read_list_length
2706 + deps->pending_write_list_length)
2707 > MAX_PENDING_LIST_LENGTH
2708 && !DEBUG_INSN_P (insn))
2709 flush_pending_lists (deps, insn, true, true);
2710 add_insn_mem_dependence (deps, true, insn, x);
2711 }
6adce0fb 2712
6adce0fb 2713 sched_analyze_2 (deps, XEXP (x, 0), insn);
e1ab7874 2714
2715 if (cslr_p && sched_deps_info->finish_rhs)
2716 sched_deps_info->finish_rhs ();
2717
6adce0fb 2718 return;
2719 }
2720
2721 /* Force pending stores to memory in case a trap handler needs them. */
2722 case TRAP_IF:
5deaeb50 2723 flush_pending_lists (deps, insn, true, false);
6adce0fb 2724 break;
2725
6540132c 2726 case PREFETCH:
2727 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2728 reg_pending_barrier = TRUE_BARRIER;
42fb263f 2729	      /* A prefetch insn contains only an address, so if that
	2730	         address has no registers, the prefetch gets no dependencies
	2731	         at all.  That is wrong for correctness, because such a
	2732	         prefetch could then be moved below a jump insn, which
	2733	         usually generates a MOVE_BARRIER that prevents insns
	2734	         containing registers or memories from moving through it.
	2735	         It is also wrong for performance, because a prefetch
	2736	         without dependencies tends to be issued later rather than
	2737	         earlier.  It is hard to generate accurate dependencies for
	2738	         prefetch insns, since a prefetch carries only its start
	2739	         address, but it is better to have something than
	2740	         nothing.  */
a2e20fd2 2741 if (!deps->readonly)
12e51a1d 2742 {
2743 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2744 if (sched_deps_info->use_cselib)
2745 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2746 add_insn_mem_dependence (deps, true, insn, x);
2747 }
6540132c 2748 break;
2749
e5e67ff9 2750 case UNSPEC_VOLATILE:
2751 flush_pending_lists (deps, insn, true, true);
2752 /* FALLTHRU */
2753
6adce0fb 2754 case ASM_OPERANDS:
2755 case ASM_INPUT:
6adce0fb 2756 {
6adce0fb 2757 /* Traditional and volatile asm instructions must be considered to use
2758 and clobber all hard registers, all pseudo-registers and all of
2759 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2760
2761 Consider for instance a volatile asm that changes the fpu rounding
2762 mode. An insn should not be moved across this even if it only uses
2763 pseudo-regs because it might give an incorrectly rounded result. */
5185189e 2764 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2765 && !DEBUG_INSN_P (insn))
c14a239f 2766 reg_pending_barrier = TRUE_BARRIER;
6adce0fb 2767
	2768	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	2769	   We cannot just fall through here since then we would be confused
	2770	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	2771	   a traditional asm, unlike its normal usage.  */
2772
2773 if (code == ASM_OPERANDS)
2774 {
2775 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2776 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
e1ab7874 2777
2778 if (cslr_p && sched_deps_info->finish_rhs)
2779 sched_deps_info->finish_rhs ();
2780
6adce0fb 2781 return;
2782 }
2783 break;
2784 }
2785
2786 case PRE_DEC:
2787 case POST_DEC:
2788 case PRE_INC:
2789 case POST_INC:
2790 /* These both read and modify the result. We must handle them as writes
2791 to get proper dependencies for following instructions. We must handle
2792 them as reads to get proper dependencies from this to previous
2793 instructions. Thus we need to pass them to both sched_analyze_1
2794 and sched_analyze_2. We must call sched_analyze_2 first in order
2795 to get the proper antecedent for the read. */
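	      /* E.g. for a hypothetical address (post_inc:SI (reg:SI 100)),
	         the read of pseudo 100 is analyzed first so that the
	         increment correctly follows it.  */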
2796 sched_analyze_2 (deps, XEXP (x, 0), insn);
2797 sched_analyze_1 (deps, x, insn);
e1ab7874 2798
2799 if (cslr_p && sched_deps_info->finish_rhs)
2800 sched_deps_info->finish_rhs ();
2801
6adce0fb 2802 return;
2803
2804 case POST_MODIFY:
2805 case PRE_MODIFY:
2806 /* op0 = op0 + op1 */
2807 sched_analyze_2 (deps, XEXP (x, 0), insn);
2808 sched_analyze_2 (deps, XEXP (x, 1), insn);
2809 sched_analyze_1 (deps, x, insn);
e1ab7874 2810
2811 if (cslr_p && sched_deps_info->finish_rhs)
2812 sched_deps_info->finish_rhs ();
2813
6adce0fb 2814 return;
2815
2816 default:
2817 break;
2818 }
2819
2820 /* Other cases: walk the insn. */
2821 fmt = GET_RTX_FORMAT (code);
2822 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2823 {
2824 if (fmt[i] == 'e')
2825 sched_analyze_2 (deps, XEXP (x, i), insn);
2826 else if (fmt[i] == 'E')
2827 for (j = 0; j < XVECLEN (x, i); j++)
2828 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2829 }
e1ab7874 2830
2831 if (cslr_p && sched_deps_info->finish_rhs)
2832 sched_deps_info->finish_rhs ();
6adce0fb 2833}
2834
b0aa049b 2835/* Try to group two fuseable insns together to prevent scheduler
2836 from scheduling them apart. */
55e7138c 2837
2838static void
18282db0 2839sched_macro_fuse_insns (rtx_insn *insn)
55e7138c 2840{
18282db0 2841 rtx_insn *prev;
55e7138c 2842
b0aa049b 2843 if (any_condjump_p (insn))
2844 {
2845 unsigned int condreg1, condreg2;
2846 rtx cc_reg_1;
2847 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2848 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2849 prev = prev_nonnote_nondebug_insn (insn);
2850 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2851 || !prev
2852 || !modified_in_p (cc_reg_1, prev))
2853 return;
2854 }
2855 else
2856 {
2857 rtx insn_set = single_set (insn);
55e7138c 2858
b0aa049b 2859 prev = prev_nonnote_nondebug_insn (insn);
2860 if (!prev
2861 || !insn_set
2862 || !single_set (prev)
2863 || !modified_in_p (SET_DEST (insn_set), prev))
2864 return;
55e7138c 2865
b0aa049b 2866 }
2867
2868 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2869 SCHED_GROUP_P (insn) = 1;
55e7138c 2870
55e7138c 2871}
2872
6adce0fb 2873/* Analyze an INSN with pattern X to find all dependencies. */
6adce0fb 2874static void
b24ef467 2875sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2876{
19cb6b50 2877 RTX_CODE code = GET_CODE (x);
6adce0fb 2878 rtx link;
4f917ffe 2879 unsigned i;
8c97cf13 2880 reg_set_iterator rsi;
6adce0fb 2881
a7dcf969 2882 if (! reload_completed)
2883 {
2884 HARD_REG_SET temp;
2885
2886 extract_insn (insn);
8eaaac4d 2887 preprocess_constraints (insn);
a7dcf969 2888 ira_implicitly_set_insn_hard_regs (&temp);
9dab02d9 2889 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
a7dcf969 2890 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2891 }
2892
e1ab7874 2893 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2894 && code == SET);
2895
55e7138c 2896 /* Group compare and branch insns for macro-fusion. */
2897 if (targetm.sched.macro_fusion_p
2898 && targetm.sched.macro_fusion_p ())
b0aa049b 2899 sched_macro_fuse_insns (insn);
55e7138c 2900
326d0c19 2901 if (may_trap_p (x))
9d75589a 2902 /* Avoid moving trapping instructions across function calls that might
326d0c19 2903 not always return. */
2904 add_dependence_list (insn, deps->last_function_call_may_noreturn,
d452a169 2905 1, REG_DEP_ANTI, true);
326d0c19 2906
0741df1e 2907 /* We must avoid creating a situation in which two successors of the
2908 current block have different unwind info after scheduling. If at any
2909 point the two paths re-join this leads to incorrect unwind info. */
2910 /* ??? There are certain situations involving a forced frame pointer in
2911 which, with extra effort, we could fix up the unwind info at a later
2912 CFG join. However, it seems better to notice these cases earlier
2913 during prologue generation and avoid marking the frame pointer setup
2914 as frame-related at all. */
2915 if (RTX_FRAME_RELATED_P (insn))
420a9b66 2916 {
2917 /* Make sure prologue insn is scheduled before next jump. */
2918 deps->sched_before_next_jump
2919 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2920
2921 /* Make sure epilogue insn is scheduled after preceding jumps. */
d452a169 2922 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2923 true);
420a9b66 2924 }
0741df1e 2925
6adce0fb 2926 if (code == COND_EXEC)
2927 {
2928 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2929
2930 /* ??? Should be recording conditions so we reduce the number of
424da949 2931 false dependencies. */
6adce0fb 2932 x = COND_EXEC_CODE (x);
2933 code = GET_CODE (x);
2934 }
2935 if (code == SET || code == CLOBBER)
a35ad173 2936 {
2937 sched_analyze_1 (deps, x, insn);
2938
	2939	      /* Bare clobber insns are used to let life analysis, reg-stack
	2940	         and others know that a value is dead.  Depend on the last call
	2941	         instruction so that reg-stack won't get confused.  */
2942 if (code == CLOBBER)
a7dcf969 2943 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 2944 REG_DEP_OUTPUT, true);
a35ad173 2945 }
6adce0fb 2946 else if (code == PARALLEL)
2947 {
4f917ffe 2948 for (i = XVECLEN (x, 0); i--;)
6adce0fb 2949 {
2950 rtx sub = XVECEXP (x, 0, i);
2951 code = GET_CODE (sub);
2952
2953 if (code == COND_EXEC)
2954 {
2955 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2956 sub = COND_EXEC_CODE (sub);
2957 code = GET_CODE (sub);
2958 }
2959 if (code == SET || code == CLOBBER)
2960 sched_analyze_1 (deps, sub, insn);
2961 else
2962 sched_analyze_2 (deps, sub, insn);
2963 }
2964 }
2965 else
2966 sched_analyze_2 (deps, x, insn);
2967
2968 /* Mark registers CLOBBERED or used by called function. */
6d7dc5b9 2969 if (CALL_P (insn))
9239aee6 2970 {
2971 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2972 {
2973 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2974 sched_analyze_1 (deps, XEXP (link, 0), insn);
c8010b80 2975 else if (GET_CODE (XEXP (link, 0)) != SET)
9239aee6 2976 sched_analyze_2 (deps, XEXP (link, 0), insn);
2977 }
e703e1fc 2978	      /* Don't schedule anything after a tail call; a tail call needs
	2979	         to use at least all call-saved registers.  */
2980 if (SIBLING_CALL_P (insn))
2981 reg_pending_barrier = TRUE_BARRIER;
2982 else if (find_reg_note (insn, REG_SETJMP, NULL))
c14a239f 2983 reg_pending_barrier = MOVE_BARRIER;
9239aee6 2984 }
6adce0fb 2985
6d7dc5b9 2986 if (JUMP_P (insn))
d6141c0c 2987 {
097ef0af 2988 rtx next;
5b8537a8 2989 next = next_nonnote_nondebug_insn (insn);
6d7dc5b9 2990 if (next && BARRIER_P (next))
ca93e19d 2991 reg_pending_barrier = MOVE_BARRIER;
d6141c0c 2992 else
2993 {
5deaeb50 2994 rtx pending, pending_mem;
31c5c470 2995
e1ab7874 2996 if (sched_deps_info->compute_jump_reg_dependencies)
2997 {
effd1640 2998 (*sched_deps_info->compute_jump_reg_dependencies)
2999 (insn, reg_pending_control_uses);
e1ab7874 3000
e1ab7874 3001 /* Make latency of jump equal to 0 by using anti-dependence. */
effd1640 3002 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
e1ab7874 3003 {
3004 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3005 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3006 false);
a7dcf969 3007 add_dependence_list (insn, reg_last->implicit_sets,
d452a169 3008 0, REG_DEP_ANTI, false);
e1ab7874 3009 add_dependence_list (insn, reg_last->clobbers, 0,
d452a169 3010 REG_DEP_ANTI, false);
e1ab7874 3011 }
e1ab7874 3012 }
097ef0af 3013
9807cfe1 3014	  /* All memory writes and volatile reads must happen before the
	3015	     jump.  Non-volatile reads must happen before the jump iff
	3016	     their result is needed by the register use mask computed above.  */
3017
097ef0af 3018 pending = deps->pending_write_insns;
3019 pending_mem = deps->pending_write_mems;
3020 while (pending)
3021 {
e6a25dc9 3022 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
b24ef467 3023 add_dependence (insn, as_a <rtx_insn *> (XEXP (pending, 0)),
3024 REG_DEP_OUTPUT);
9807cfe1 3025 pending = XEXP (pending, 1);
3026 pending_mem = XEXP (pending_mem, 1);
3027 }
097ef0af 3028
9807cfe1 3029 pending = deps->pending_read_insns;
3030 pending_mem = deps->pending_read_mems;
3031 while (pending)
3032 {
e6a25dc9 3033 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
3034 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
b24ef467 3035 add_dependence (insn, as_a <rtx_insn *> (XEXP (pending, 0)),
3036 REG_DEP_OUTPUT);
097ef0af 3037 pending = XEXP (pending, 1);
3038 pending_mem = XEXP (pending_mem, 1);
3039 }
3040
e228dd5d 3041 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
d452a169 3042 REG_DEP_ANTI, true);
effd1640 3043 add_dependence_list (insn, deps->pending_jump_insns, 1,
d452a169 3044 REG_DEP_ANTI, true);
d6141c0c 3045 }
d6141c0c 3046 }
3047
f787dd2b 3048 /* If this instruction can throw an exception, then moving it changes
40734805 3049 where block boundaries fall. This is mighty confusing elsewhere.
88a00894 3050 Therefore, prevent such an instruction from being moved. Same for
3051 non-jump instructions that define block boundaries.
3052 ??? Unclear whether this is still necessary in EBB mode. If not,
3053 add_branch_dependences should be adjusted for RGN mode instead. */
3054 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3055 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
c14a239f 3056 reg_pending_barrier = MOVE_BARRIER;
f787dd2b 3057
b30b031c 3058 if (sched_pressure != SCHED_PRESSURE_NONE)
a7dcf969 3059 {
3060 setup_insn_reg_uses (deps, insn);
c15d7785 3061 init_insn_reg_pressure_info (insn);
a7dcf969 3062 }
3063
9845d120 3064 /* Add register dependencies for insn. */
3065 if (DEBUG_INSN_P (insn))
3066 {
b24ef467 3067 rtx_insn *prev = deps->last_debug_insn;
9845d120 3068 rtx u;
3069
3070 if (!deps->readonly)
3071 deps->last_debug_insn = insn;
3072
3073 if (prev)
3074 add_dependence (insn, prev, REG_DEP_ANTI);
3075
3076 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 3077 REG_DEP_ANTI, false);
9845d120 3078
d452a169 3079 if (!sel_sched_p ())
3080 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
b24ef467 3081 add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)), REG_DEP_ANTI);
9845d120 3082
3083 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3084 {
3085 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3086 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
effd1640 3087 /* There's no point in making REG_DEP_CONTROL dependencies for
3088 debug insns. */
d452a169 3089 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3090 false);
c971b445 3091
3092 if (!deps->readonly)
3093 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
9845d120 3094 }
3095 CLEAR_REG_SET (reg_pending_uses);
3096
3097 /* Quite often, a debug insn will refer to stuff in the
3098 previous instruction, but the reason we want this
3099 dependency here is to make sure the scheduler doesn't
3100 gratuitously move a debug insn ahead. This could dirty
3101 DF flags and cause additional analysis that wouldn't have
3102 occurred in compilation without debug insns, and such
3103 additional analysis can modify the generated code. */
3104 prev = PREV_INSN (insn);
3105
3106 if (prev && NONDEBUG_INSN_P (prev))
3107 add_dependence (insn, prev, REG_DEP_ANTI);
3108 }
ca93e19d 3109 else
3110 {
6c2d9e41 3111 regset_head set_or_clobbered;
3112
ca93e19d 3113 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
a7dcf969 3114 {
3115 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3116 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3117 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3118 false);
3119 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3120 false);
48e1416a 3121
a7dcf969 3122 if (!deps->readonly)
3123 {
3124 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3125 reg_last->uses_length++;
3126 }
3127 }
48e1416a 3128
a7dcf969 3129 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3130 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3131 {
3132 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3133 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
a7dcf969 3134 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3135 REG_DEP_ANTI, false);
3136 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3137 false);
48e1416a 3138
a7dcf969 3139 if (!deps->readonly)
3140 {
3141 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3142 reg_last->uses_length++;
3143 }
3144 }
e1ab7874 3145
6c2d9e41 3146 if (targetm.sched.exposed_pipeline)
3147 {
3148 INIT_REG_SET (&set_or_clobbered);
3149 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3150 reg_pending_sets);
3151 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3152 {
3153 struct deps_reg *reg_last = &deps->reg_last[i];
3154 rtx list;
3155 for (list = reg_last->uses; list; list = XEXP (list, 1))
3156 {
3157 rtx other = XEXP (list, 0);
60ba0654 3158 if (INSN_CACHED_COND (other) != const_true_rtx
3159 && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
3160 INSN_CACHED_COND (other) = const_true_rtx;
6c2d9e41 3161 }
3162 }
3163 }
3164
a7dcf969 3165 /* If the current insn is conditional, we can't free any
3166 of the lists. */
3167 if (sched_has_condition_p (insn))
3168 {
3169 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3170 {
3171 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3172 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3173 false);
a7dcf969 3174 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3175 REG_DEP_ANTI, false);
3176 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3177 false);
effd1640 3178 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3179 REG_DEP_CONTROL, false);
48e1416a 3180
a7dcf969 3181 if (!deps->readonly)
3182 {
3183 reg_last->clobbers
3184 = alloc_INSN_LIST (insn, reg_last->clobbers);
3185 reg_last->clobbers_length++;
3186 }
3187 }
3188 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3189 {
3190 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3191 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3192 false);
a7dcf969 3193 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3194 REG_DEP_ANTI, false);
3195 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3196 false);
3197 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3198 false);
effd1640 3199 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3200 REG_DEP_CONTROL, false);
48e1416a 3201
a7dcf969 3202 if (!deps->readonly)
6aed13f1 3203 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
a7dcf969 3204 }
3205 }
3206 else
3207 {
3208 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3209 {
3210 struct deps_reg *reg_last = &deps->reg_last[i];
3211 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
3212 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
3213 {
3214 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
d452a169 3215 REG_DEP_OUTPUT, false);
a7dcf969 3216 add_dependence_list_and_free (deps, insn,
3217 &reg_last->implicit_sets, 0,
d452a169 3218 REG_DEP_ANTI, false);
a7dcf969 3219 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3220 REG_DEP_ANTI, false);
effd1640 3221 add_dependence_list_and_free (deps, insn,
3222 &reg_last->control_uses, 0,
d452a169 3223 REG_DEP_ANTI, false);
3224 add_dependence_list_and_free (deps, insn,
3225 &reg_last->clobbers, 0,
3226 REG_DEP_OUTPUT, false);
48e1416a 3227
a7dcf969 3228 if (!deps->readonly)
3229 {
3230 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3231 reg_last->clobbers_length = 0;
3232 reg_last->uses_length = 0;
3233 }
3234 }
3235 else
3236 {
d452a169 3237 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3238 false);
a7dcf969 3239 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3240 REG_DEP_ANTI, false);
3241 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3242 false);
effd1640 3243 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3244 REG_DEP_CONTROL, false);
a7dcf969 3245 }
48e1416a 3246
a7dcf969 3247 if (!deps->readonly)
3248 {
3249 reg_last->clobbers_length++;
3250 reg_last->clobbers
3251 = alloc_INSN_LIST (insn, reg_last->clobbers);
3252 }
3253 }
3254 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3255 {
3256 struct deps_reg *reg_last = &deps->reg_last[i];
48e1416a 3257
a7dcf969 3258 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
d452a169 3259 REG_DEP_OUTPUT, false);
a7dcf969 3260 add_dependence_list_and_free (deps, insn,
3261 &reg_last->implicit_sets,
d452a169 3262 0, REG_DEP_ANTI, false);
a7dcf969 3263 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
d452a169 3264 REG_DEP_OUTPUT, false);
a7dcf969 3265 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3266 REG_DEP_ANTI, false);
effd1640 3267 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3268 REG_DEP_CONTROL, false);
48e1416a 3269
a7dcf969 3270 if (!deps->readonly)
3271 {
3272 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3273 reg_last->uses_length = 0;
3274 reg_last->clobbers_length = 0;
a7dcf969 3275 }
3276 }
3277 }
effd1640 3278 if (!deps->readonly)
3279 {
3280 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3281 {
3282 struct deps_reg *reg_last = &deps->reg_last[i];
3283 reg_last->control_uses
3284 = alloc_INSN_LIST (insn, reg_last->control_uses);
3285 }
3286 }
ca93e19d 3287 }
3288
a7dcf969 3289 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3290 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3291 {
3292 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3293 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3294 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3295 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3296 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3297 false);
48e1416a 3298
a7dcf969 3299 if (!deps->readonly)
3300 reg_last->implicit_sets
3301 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3302 }
3303
e1ab7874 3304 if (!deps->readonly)
3305 {
3306 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3307 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3308 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
a7dcf969 3309 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3310 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3311 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3312 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
e1ab7874 3313
3314 /* Set up the pending barrier found. */
3315 deps->last_reg_pending_barrier = reg_pending_barrier;
3316 }
ca93e19d 3317
3318 CLEAR_REG_SET (reg_pending_uses);
3319 CLEAR_REG_SET (reg_pending_clobbers);
3320 CLEAR_REG_SET (reg_pending_sets);
effd1640 3321 CLEAR_REG_SET (reg_pending_control_uses);
a7dcf969 3322 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3323 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
ca93e19d 3324
f787dd2b 3325 /* Add dependencies if a scheduling barrier was found. */
5deaeb50 3326 if (reg_pending_barrier)
f787dd2b 3327 {
58ada791 3328 /* In the case of barrier the most added dependencies are not
3329 real, so we use anti-dependence here. */
e1ab7874 3330 if (sched_has_condition_p (insn))
6adce0fb 3331 {
8c97cf13 3332 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
5deaeb50 3333 {
3334 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3335 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3336 true);
a7dcf969 3337 add_dependence_list (insn, reg_last->sets, 0,
3338 reg_pending_barrier == TRUE_BARRIER
d452a169 3339 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
a7dcf969 3340 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3341 REG_DEP_ANTI, true);
a7dcf969 3342 add_dependence_list (insn, reg_last->clobbers, 0,
3343 reg_pending_barrier == TRUE_BARRIER
d452a169 3344 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
8c97cf13 3345 }
5deaeb50 3346 }
3347 else
3348 {
8c97cf13 3349 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
5deaeb50 3350 {
3351 struct deps_reg *reg_last = &deps->reg_last[i];
e1ab7874 3352 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3353 REG_DEP_ANTI, true);
effd1640 3354 add_dependence_list_and_free (deps, insn,
3355 &reg_last->control_uses, 0,
d452a169 3356 REG_DEP_CONTROL, true);
a7dcf969 3357 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3358 reg_pending_barrier == TRUE_BARRIER
d452a169 3359 ? REG_DEP_TRUE : REG_DEP_ANTI,
3360 true);
a7dcf969 3361 add_dependence_list_and_free (deps, insn,
3362 &reg_last->implicit_sets, 0,
d452a169 3363 REG_DEP_ANTI, true);
a7dcf969 3364 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3365 reg_pending_barrier == TRUE_BARRIER
d452a169 3366 ? REG_DEP_TRUE : REG_DEP_ANTI,
3367 true);
6adce0fb 3368
e1ab7874 3369 if (!deps->readonly)
3370 {
3371 reg_last->uses_length = 0;
3372 reg_last->clobbers_length = 0;
3373 }
3374 }
4164e76b 3375 }
5deaeb50 3376
e1ab7874 3377 if (!deps->readonly)
3378 for (i = 0; i < (unsigned)deps->max_reg; i++)
3379 {
3380 struct deps_reg *reg_last = &deps->reg_last[i];
3381 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3382 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3383 }
3384
f22426d2 3385 /* Don't flush pending lists on speculative checks for
e965825b 3386 selective scheduling. */
3387 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
e1ab7874 3388 flush_pending_lists (deps, insn, true, true);
48e1416a 3389
c14a239f 3390 reg_pending_barrier = NOT_A_BARRIER;
749c6f58 3391 }
6adce0fb 3392
3393 /* If a post-call group is still open, see if it should remain so.
3394 This insn must be a simple move of a hard reg to a pseudo or
3395 vice-versa.
3396
ed5527ca 3397 We must avoid moving these insns for correctness on targets
3398 with small register classes, and for special registers like
6adce0fb 3399 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3400 hard regs for all targets. */
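	  /* For example (hypothetical; hard reg 0 is the return register):

	       (call_insn ...)
	       (set (reg:SI 200) (reg:SI 0))   ; copy return value to a pseudo

	     The copy passes the hard-reg/pseudo test below, so it stays in
	     the call's scheduling group rather than drifting away from the
	     call.  */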
3401
3402 if (deps->in_post_call_group_p)
3403 {
3404 rtx tmp, set = single_set (insn);
3405 int src_regno, dest_regno;
3406
3407 if (set == NULL)
9845d120 3408 {
3409 if (DEBUG_INSN_P (insn))
3410 /* We don't want to mark debug insns as part of the same
3411 sched group. We know they really aren't, but if we use
3412 debug insns to tell that a call group is over, we'll
3413 get different code if debug insns are not there and
3414 instructions that follow seem like they should be part
3415 of the call group.
3416
fdb1b2b1 3417 Also, if we did, chain_to_prev_insn would move the
9845d120 3418 deps of the debug insn to the call insn, modifying
3419 non-debug post-dependency counts of the debug insn
3420 dependencies and otherwise messing with the scheduling
3421 order.
3422
3423 Instead, let such debug insns be scheduled freely, but
3424 keep the call group open in case there are insns that
3425 should be part of it afterwards. Since we grant debug
3426 insns higher priority than even sched group insns, it
3427 will all turn out all right. */
3428 goto debug_dont_end_call_group;
3429 else
3430 goto end_call_group;
3431 }
6adce0fb 3432
3433 tmp = SET_DEST (set);
3434 if (GET_CODE (tmp) == SUBREG)
3435 tmp = SUBREG_REG (tmp);
8ad4c111 3436 if (REG_P (tmp))
6adce0fb 3437 dest_regno = REGNO (tmp);
3438 else
3439 goto end_call_group;
3440
3441 tmp = SET_SRC (set);
3442 if (GET_CODE (tmp) == SUBREG)
3443 tmp = SUBREG_REG (tmp);
0cbad263 3444 if ((GET_CODE (tmp) == PLUS
3445 || GET_CODE (tmp) == MINUS)
8ad4c111 3446 && REG_P (XEXP (tmp, 0))
0cbad263 3447 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3448 && dest_regno == STACK_POINTER_REGNUM)
3449 src_regno = STACK_POINTER_REGNUM;
8ad4c111 3450 else if (REG_P (tmp))
6adce0fb 3451 src_regno = REGNO (tmp);
3452 else
3453 goto end_call_group;
3454
3455 if (src_regno < FIRST_PSEUDO_REGISTER
3456 || dest_regno < FIRST_PSEUDO_REGISTER)
3457 {
e1ab7874 3458 if (!deps->readonly
3459 && deps->in_post_call_group_p == post_call_initial)
828f2394 3460 deps->in_post_call_group_p = post_call;
3461
48e1416a 3462 if (!sel_sched_p () || sched_emulate_haifa_p)
e1ab7874 3463 {
3464 SCHED_GROUP_P (insn) = 1;
3465 CANT_MOVE (insn) = 1;
3466 }
6adce0fb 3467 }
3468 else
3469 {
3470 end_call_group:
e1ab7874 3471 if (!deps->readonly)
3472 deps->in_post_call_group_p = not_post_call;
6adce0fb 3473 }
3474 }
828f2394 3475
9845d120 3476 debug_dont_end_call_group:
93f6b030 3477 if ((current_sched_info->flags & DO_SPECULATION)
3478 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3479 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3480 be speculated. */
3481 {
e1ab7874 3482 if (sel_sched_p ())
3483 sel_mark_hard_insn (insn);
3484 else
3485 {
3486 sd_iterator_def sd_it;
3487 dep_t dep;
48e1416a 3488
e1ab7874 3489 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3490 sd_iterator_cond (&sd_it, &dep);)
3491 change_spec_dep_to_hard (sd_it);
3492 }
93f6b030 3493 }
617529e8 3494
3495 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3496 honor the original ordering of the insns that carry such notes. */
3497 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3498 {
3499 if (deps->last_args_size)
3500 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3501 deps->last_args_size = insn;
3502 }
6adce0fb 3503}
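
/* Illustrative example (invented RTL, not from any target): the typical
   insn kept in a post-call group is a plain move between a hard register
   and a pseudo, e.g.

     (set (reg/v:SI 120 [ x ]) (reg:SI 0))   ; copy the return value

   which the code above marks with SCHED_GROUP_P and CANT_MOVE so that it
   stays glued to the preceding call.  */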
3504
326d0c19 3505/* Return TRUE if INSN might not always return normally (e.g. call exit,
3506 longjmp, loop forever, ...). */
4a020a8c 3507/* FIXME: Why can't this function just use flags_from_decl_or_type and
3508 test for ECF_NORETURN? */
326d0c19 3509static bool
3510call_may_noreturn_p (rtx insn)
3511{
3512 rtx call;
3513
3514 /* const or pure calls that aren't looping will always return. */
3515 if (RTL_CONST_OR_PURE_CALL_P (insn)
3516 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3517 return false;
3518
cf7fb72d 3519 call = get_call_rtx_from (insn);
3520 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
326d0c19 3521 {
3522 rtx symbol = XEXP (XEXP (call, 0), 0);
3523 if (SYMBOL_REF_DECL (symbol)
3524 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3525 {
3526 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3527 == BUILT_IN_NORMAL)
3528 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3529 {
3530 case BUILT_IN_BCMP:
3531 case BUILT_IN_BCOPY:
3532 case BUILT_IN_BZERO:
3533 case BUILT_IN_INDEX:
3534 case BUILT_IN_MEMCHR:
3535 case BUILT_IN_MEMCMP:
3536 case BUILT_IN_MEMCPY:
3537 case BUILT_IN_MEMMOVE:
3538 case BUILT_IN_MEMPCPY:
3539 case BUILT_IN_MEMSET:
3540 case BUILT_IN_RINDEX:
3541 case BUILT_IN_STPCPY:
3542 case BUILT_IN_STPNCPY:
3543 case BUILT_IN_STRCAT:
3544 case BUILT_IN_STRCHR:
3545 case BUILT_IN_STRCMP:
3546 case BUILT_IN_STRCPY:
3547 case BUILT_IN_STRCSPN:
3548 case BUILT_IN_STRLEN:
3549 case BUILT_IN_STRNCAT:
3550 case BUILT_IN_STRNCMP:
3551 case BUILT_IN_STRNCPY:
3552 case BUILT_IN_STRPBRK:
3553 case BUILT_IN_STRRCHR:
3554 case BUILT_IN_STRSPN:
3555 case BUILT_IN_STRSTR:
3556 /* Assume certain string/memory builtins always return. */
3557 return false;
3558 default:
3559 break;
3560 }
3561 }
3562 }
3563
3564 /* For all other calls assume that they might not always return. */
3565 return true;
3566}
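
/* A sketch of what the FIXME above suggests (illustrative only, not a
   drop-in replacement):

     tree decl = SYMBOL_REF_DECL (symbol);
     if (decl && (flags_from_decl_or_type (decl) & ECF_NORETURN))
       return true;

   Note that ECF_NORETURN describes calls that never return, whereas this
   predicate asks whether a call might fail to return normally, so the
   two are not obviously equivalent - which may be why the FIXME remains
   open.  */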
3567
fdb1b2b1 3568/* Return true if INSN should be made dependent on the previous instruction
3569 group, and if all INSN's dependencies should be moved to the first
3570 instruction of that group. */
3571
3572static bool
3573chain_to_prev_insn_p (rtx insn)
3574{
3575 rtx prev, x;
3576
3577 /* INSN forms a group with the previous instruction. */
3578 if (SCHED_GROUP_P (insn))
3579 return true;
3580
3581 /* If the previous instruction clobbers a register R and this one sets
3582 part of R, the clobber was added specifically to help us track the
3583 liveness of R. There's no point scheduling the clobber and leaving
3584 INSN behind, especially if we move the clobber to another block. */
3585 prev = prev_nonnote_nondebug_insn (insn);
3586 if (prev
3587 && INSN_P (prev)
3588 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3589 && GET_CODE (PATTERN (prev)) == CLOBBER)
3590 {
3591 x = XEXP (PATTERN (prev), 0);
3592 if (set_of (x, insn))
3593 return true;
3594 }
3595
3596 return false;
3597}
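
/* Illustrative example (invented RTL): given

     prev: (clobber (reg:DI 100))
     insn: (set (subreg:SI (reg:DI 100) 0) (reg:SI 1))

   chain_to_prev_insn_p returns true, since the clobber exists only to
   keep the liveness of (reg:DI 100) visible and should stay with the
   partial set.  */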
3598
e1ab7874 3599/* Analyze INSN with DEPS as a context. */
6adce0fb 3600void
b24ef467 3601deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
6adce0fb 3602{
e1ab7874 3603 if (sched_deps_info->start_insn)
3604 sched_deps_info->start_insn (insn);
6adce0fb 3605
6c2d9e41 3606 /* Record the condition for this insn. */
3607 if (NONDEBUG_INSN_P (insn))
effd1640 3608 {
3609 rtx t;
3610 sched_get_condition_with_rev (insn, NULL);
3611 t = INSN_CACHED_COND (insn);
54267fdf 3612 INSN_COND_DEPS (insn) = NULL;
effd1640 3613 if (reload_completed
3614 && (current_sched_info->flags & DO_PREDICATION)
3615 && COMPARISON_P (t)
3616 && REG_P (XEXP (t, 0))
3617 && CONSTANT_P (XEXP (t, 1)))
3618 {
3619 unsigned int regno;
3620 int nregs;
54267fdf 3621 rtx_insn_list *cond_deps = NULL;
effd1640 3622 t = XEXP (t, 0);
3623 regno = REGNO (t);
3624 nregs = hard_regno_nregs[regno][GET_MODE (t)];
effd1640 3625 while (nregs-- > 0)
3626 {
3627 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
54267fdf 3628 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3629 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3630 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
effd1640 3631 }
54267fdf 3632 INSN_COND_DEPS (insn) = cond_deps;
effd1640 3633 }
3634 }
6c2d9e41 3635
0741df1e 3636 if (JUMP_P (insn))
e1ab7874 3637 {
48e1416a 3638 /* Make each JUMP_INSN (but not a speculative check)
e1ab7874 3639 a scheduling barrier for memory references. */
3640 if (!deps->readonly
48e1416a 3641 && !(sel_sched_p ()
e1ab7874 3642 && sel_insn_is_speculation_check (insn)))
3643 {
3644 /* Keep the list a reasonable size. */
3645 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3646 flush_pending_lists (deps, insn, true, true);
3647 else
effd1640 3648 deps->pending_jump_insns
3649 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
e1ab7874 3650 }
3651
0741df1e 3652 /* For each insn which shouldn't cross a jump, add a dependence. */
3653 add_dependence_list_and_free (deps, insn,
3654 &deps->sched_before_next_jump, 1,
d452a169 3655 REG_DEP_ANTI, true);
0741df1e 3656
3657 sched_analyze_insn (deps, PATTERN (insn), insn);
3658 }
3659 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3660 {
e1ab7874 3661 sched_analyze_insn (deps, PATTERN (insn), insn);
3662 }
3663 else if (CALL_P (insn))
3664 {
3665 int i;
3666
3667 CANT_MOVE (insn) = 1;
3668
3669 if (find_reg_note (insn, REG_SETJMP, NULL))
3670 {
3671 /* This is setjmp. Assume that all registers, not just
3672 hard registers, may be clobbered by this call. */
3673 reg_pending_barrier = MOVE_BARRIER;
3674 }
3675 else
3676 {
3677 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3678 /* A call may read and modify global register variables. */
3679 if (global_regs[i])
3680 {
3681 SET_REGNO_REG_SET (reg_pending_sets, i);
a7dcf969 3682 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3683 }
3684 /* Other call-clobbered hard regs may be clobbered.
3685 Since we only have a choice between 'might be clobbered'
3686 and 'definitely not clobbered', we must include all
3687 partly call-clobbered registers here. */
3688 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3689 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3690 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3691 /* We don't know what set of fixed registers might be used
3692 by the function, but the stack pointer is certainly among
3693 them; be conservative and treat them all as implicitly used. */
3694 else if (fixed_regs[i])
a7dcf969 3695 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3696 /* The frame pointer is normally not used by the function
3697 itself, but by the debugger. */
3698 /* ??? MIPS o32 is an exception. It uses the frame pointer
3699 in the macro expansion of jal but does not represent this
3700 fact in the call_insn rtl. */
3701 else if (i == FRAME_POINTER_REGNUM
3702 || (i == HARD_FRAME_POINTER_REGNUM
3703 && (! reload_completed || frame_pointer_needed)))
a7dcf969 3704 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3705 }
3706
3707 /* For each insn which shouldn't cross a call, add a dependence
3708 between that insn and this call insn. */
48e1416a 3709 add_dependence_list_and_free (deps, insn,
e1ab7874 3710 &deps->sched_before_next_call, 1,
d452a169 3711 REG_DEP_ANTI, true);
e1ab7874 3712
3713 sched_analyze_insn (deps, PATTERN (insn), insn);
3714
3715 /* If this CALL were in a sched group, it would violate the
3716 convention that sched group insns have dependencies only on the
3717 previous instruction.
3718
3719 Of course one can say: "Hey! What about head of the sched group?"
3720 And I will answer: "Basic principles (one dep per insn) are always
3721 the same." */
3722 gcc_assert (!SCHED_GROUP_P (insn));
3723
3724 /* In the absence of interprocedural alias analysis, we must flush
3725 all pending reads and writes, and start new dependencies starting
3726 from here. But only flush writes for constant calls (which may
3727 be passed a pointer to something we haven't written yet). */
3728 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3729
3730 if (!deps->readonly)
3731 {
3732 /* Remember the last function call for limiting lifetimes. */
3733 free_INSN_LIST_list (&deps->last_function_call);
3734 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
326d0c19 3735
3736 if (call_may_noreturn_p (insn))
3737 {
3738 /* Remember the last function call that might not always return
3739 normally for limiting moves of trapping insns. */
3740 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3741 deps->last_function_call_may_noreturn
3742 = alloc_INSN_LIST (insn, NULL_RTX);
3743 }
3744
e1ab7874 3745 /* Before reload, begin a post-call group, so as to keep the
3746 lifetimes of hard registers correct. */
3747 if (! reload_completed)
3748 deps->in_post_call_group_p = post_call;
3749 }
3750 }
3751
3752 if (sched_deps_info->use_cselib)
3753 cselib_process_insn (insn);
3754
e1ab7874 3755 if (sched_deps_info->finish_insn)
3756 sched_deps_info->finish_insn ();
3757
3758 /* Fixup the dependencies in the sched group. */
48e1416a 3759 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
fdb1b2b1 3760 && chain_to_prev_insn_p (insn)
3761 && !sel_sched_p ())
3762 chain_to_prev_insn (insn);
e1ab7874 3763}
3764
3765/* Initialize DEPS for the new block beginning with HEAD. */
3766void
68e419a1 3767deps_start_bb (struct deps_desc *deps, rtx head)
e1ab7874 3768{
3769 gcc_assert (!deps->readonly);
503f2817 3770
865b3698 3771 /* Before reload, if the previous block ended in a call, show that
3772 we are inside a post-call group, so as to keep the lifetimes of
3773 hard registers correct. */
6d7dc5b9 3774 if (! reload_completed && !LABEL_P (head))
865b3698 3775 {
5b8537a8 3776 rtx insn = prev_nonnote_nondebug_insn (head);
e1ab7874 3777
6d7dc5b9 3778 if (insn && CALL_P (insn))
865b3698 3779 deps->in_post_call_group_p = post_call_initial;
3780 }
e1ab7874 3781}
3782
3783/* Analyze every insn between HEAD and TAIL inclusive, creating backward
3784 dependencies for each insn. */
3785void
b24ef467 3786sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
e1ab7874 3787{
b24ef467 3788 rtx_insn *insn;
e1ab7874 3789
3790 if (sched_deps_info->use_cselib)
35af0188 3791 cselib_init (CSELIB_RECORD_MEMORY);
e1ab7874 3792
3793 deps_start_bb (deps, head);
3794
6adce0fb 3795 for (insn = head;; insn = NEXT_INSN (insn))
3796 {
e1ab7874 3797
9997bd27 3798 if (INSN_P (insn))
6adce0fb 3799 {
93f6b030 3800 /* And initialize deps_lists. */
3801 sd_init_insn (insn);
55e7138c 3802 /* Clean up SCHED_GROUP_P which may be set by last
3803 scheduler pass. */
3804 if (SCHED_GROUP_P (insn))
3805 SCHED_GROUP_P (insn) = 0;
9997bd27 3806 }
3807
e1ab7874 3808 deps_analyze_insn (deps, insn);
37cde9fb 3809
6adce0fb 3810 if (insn == tail)
503f2817 3811 {
e1ab7874 3812 if (sched_deps_info->use_cselib)
503f2817 3813 cselib_finish ();
3814 return;
3815 }
6adce0fb 3816 }
04e579b6 3817 gcc_unreachable ();
6adce0fb 3818}
9b7c6f02 3819
93f6b030 3820/* Helper for sched_free_deps ().
3821 Delete INSN's (RESOLVED_P) backward dependencies. */
3822static void
3823delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
9b7c6f02 3824{
93f6b030 3825 sd_iterator_def sd_it;
3826 dep_t dep;
3827 sd_list_types_def types;
9b7c6f02 3828
93f6b030 3829 if (resolved_p)
3830 types = SD_LIST_RES_BACK;
3831 else
3832 types = SD_LIST_BACK;
3833
3834 for (sd_it = sd_iterator_start (insn, types);
3835 sd_iterator_cond (&sd_it, &dep);)
4d64d9a4 3836 {
93f6b030 3837 dep_link_t link = *sd_it.linkp;
3838 dep_node_t node = DEP_LINK_NODE (link);
3839 deps_list_t back_list;
3840 deps_list_t forw_list;
3841
3842 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3843 remove_from_deps_list (link, back_list);
3844 delete_dep_node (node);
4d64d9a4 3845 }
9b7c6f02 3846}
3847
93f6b030 3848/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3849 deps_lists. */
6adce0fb 3850void
93f6b030 3851sched_free_deps (rtx head, rtx tail, bool resolved_p)
6adce0fb 3852{
4d64d9a4 3853 rtx insn;
93f6b030 3854 rtx next_tail = NEXT_INSN (tail);
6adce0fb 3855
2261c559 3856 /* We make two passes since some insns may be scheduled before their
3857 dependencies are resolved. */
6adce0fb 3858 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
93f6b030 3859 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3860 {
93f6b030 3861 /* Clear forward deps and leave the dep_nodes to the
3862 corresponding back_deps list. */
3863 if (resolved_p)
3864 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3865 else
3866 clear_deps_list (INSN_FORW_DEPS (insn));
2261c559 3867 }
3868 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3869 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3870 {
3871 /* Clear resolved back deps together with their dep_nodes. */
3872 delete_dep_nodes_in_back_deps (insn, resolved_p);
6adce0fb 3873
93f6b030 3874 sd_finish_insn (insn);
3875 }
6adce0fb 3876}
3877\f
3878/* Initialize variables for region data dependence analysis.
48e1416a 3879 When LAZY_REG_LAST is true, do not allocate reg_last array
68e419a1 3880 of struct deps_desc immediately. */
6adce0fb 3881
3882void
68e419a1 3883init_deps (struct deps_desc *deps, bool lazy_reg_last)
6adce0fb 3884{
749c6f58 3885 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3886
3887 deps->max_reg = max_reg;
d9ab2038 3888 if (lazy_reg_last)
3889 deps->reg_last = NULL;
3890 else
3891 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
749c6f58 3892 INIT_REG_SET (&deps->reg_last_in_use);
6adce0fb 3893
3894 deps->pending_read_insns = 0;
3895 deps->pending_read_mems = 0;
3896 deps->pending_write_insns = 0;
3897 deps->pending_write_mems = 0;
effd1640 3898 deps->pending_jump_insns = 0;
c5947ab7 3899 deps->pending_read_list_length = 0;
3900 deps->pending_write_list_length = 0;
85de291e 3901 deps->pending_flush_length = 0;
6adce0fb 3902 deps->last_pending_memory_flush = 0;
3903 deps->last_function_call = 0;
326d0c19 3904 deps->last_function_call_may_noreturn = 0;
5deaeb50 3905 deps->sched_before_next_call = 0;
0741df1e 3906 deps->sched_before_next_jump = 0;
865b3698 3907 deps->in_post_call_group_p = not_post_call;
9845d120 3908 deps->last_debug_insn = 0;
617529e8 3909 deps->last_args_size = 0;
e1ab7874 3910 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3911 deps->readonly = 0;
6adce0fb 3912}
3913
48e1416a 3914/* Init only the reg_last field of DEPS; it was not allocated before
d9ab2038 3915 because DEPS was initialized lazily. */
3916void
68e419a1 3917init_deps_reg_last (struct deps_desc *deps)
d9ab2038 3918{
3919 gcc_assert (deps && deps->max_reg > 0);
3920 gcc_assert (deps->reg_last == NULL);
3921
3922 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3923}
3924
3925
6adce0fb 3926/* Free insn lists found in DEPS. */
3927
3928void
68e419a1 3929free_deps (struct deps_desc *deps)
6adce0fb 3930{
4f917ffe 3931 unsigned i;
8c97cf13 3932 reg_set_iterator rsi;
6adce0fb 3933
d9ab2038 3934 /* We set max_reg to 0 when this context was already freed. */
3935 if (deps->max_reg == 0)
3936 {
3937 gcc_assert (deps->reg_last == NULL);
3938 return;
3939 }
3940 deps->max_reg = 0;
48e1416a 3941
5deaeb50 3942 free_INSN_LIST_list (&deps->pending_read_insns);
3943 free_EXPR_LIST_list (&deps->pending_read_mems);
3944 free_INSN_LIST_list (&deps->pending_write_insns);
3945 free_EXPR_LIST_list (&deps->pending_write_mems);
3946 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3947
749c6f58 3948 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
de44dcb9 3949 times. For a testcase with 42000 regs and 8000 small basic blocks,
749c6f58 3950 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
8c97cf13 3951 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
6adce0fb 3952 {
749c6f58 3953 struct deps_reg *reg_last = &deps->reg_last[i];
b479b8be 3954 if (reg_last->uses)
3955 free_INSN_LIST_list (&reg_last->uses);
3956 if (reg_last->sets)
3957 free_INSN_LIST_list (&reg_last->sets);
a7dcf969 3958 if (reg_last->implicit_sets)
3959 free_INSN_LIST_list (&reg_last->implicit_sets);
effd1640 3960 if (reg_last->control_uses)
3961 free_INSN_LIST_list (&reg_last->control_uses);
b479b8be 3962 if (reg_last->clobbers)
3963 free_INSN_LIST_list (&reg_last->clobbers);
8c97cf13 3964 }
749c6f58 3965 CLEAR_REG_SET (&deps->reg_last_in_use);
3966
48e1416a 3967 /* As we initialize reg_last lazily, it is possible that we didn't allocate
d9ab2038 3968 it at all. */
dd045aee 3969 free (deps->reg_last);
e1ab7874 3970 deps->reg_last = NULL;
3971
3972 deps = NULL;
6adce0fb 3973}
3974
6aed13f1 3975/* Remove INSN from the dependence context DEPS. */
e1ab7874 3976void
54267fdf 3977remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
e1ab7874 3978{
3979 int removed;
3980 unsigned i;
3981 reg_set_iterator rsi;
48e1416a 3982
e1ab7874 3983 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3984 &deps->pending_read_mems);
c971b445 3985 if (!DEBUG_INSN_P (insn))
3986 deps->pending_read_list_length -= removed;
e1ab7874 3987 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3988 &deps->pending_write_mems);
3989 deps->pending_write_list_length -= removed;
effd1640 3990
3991 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3992 deps->pending_flush_length -= removed;
e1ab7874 3993 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3994 deps->pending_flush_length -= removed;
6adce0fb 3995
e1ab7874 3996 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3997 {
3998 struct deps_reg *reg_last = &deps->reg_last[i];
3999 if (reg_last->uses)
4000 remove_from_dependence_list (insn, &reg_last->uses);
4001 if (reg_last->sets)
4002 remove_from_dependence_list (insn, &reg_last->sets);
a7dcf969 4003 if (reg_last->implicit_sets)
4004 remove_from_dependence_list (insn, &reg_last->implicit_sets);
e1ab7874 4005 if (reg_last->clobbers)
4006 remove_from_dependence_list (insn, &reg_last->clobbers);
a7dcf969 4007 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4008 && !reg_last->clobbers)
e1ab7874 4009 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4010 }
4011
4012 if (CALL_P (insn))
326d0c19 4013 {
4014 remove_from_dependence_list (insn, &deps->last_function_call);
48e1416a 4015 remove_from_dependence_list (insn,
326d0c19 4016 &deps->last_function_call_may_noreturn);
4017 }
e1ab7874 4018 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4019}
4020
4021/* Init deps data vector. */
4022static void
4023init_deps_data_vector (void)
4024{
f1f41a6c 4025 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4026 if (reserve > 0 && ! h_d_i_d.space (reserve))
4027 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
e1ab7874 4028}
4029
4030/* If it is profitable to use them, initialize or extend (depending on
4031 GLOBAL_P) dependency data. */
6adce0fb 4032void
e1ab7874 4033sched_deps_init (bool global_p)
6adce0fb 4034{
93f6b030 4035 /* Average number of insns in the basic block.
4036 '+ 1' is used to make it nonzero. */
a28770e1 4037 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
93f6b030 4038
e1ab7874 4039 init_deps_data_vector ();
48e1416a 4040
4041 /* We use another caching mechanism for selective scheduling, so
e1ab7874 4042 we don't use this one. */
4043 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4044 {
4045 /* ?!? We could save some memory by computing a per-region luid mapping
4046 which could reduce both the number of vectors in the cache and the
4047 size of each vector. Instead we just avoid the cache entirely unless
4048 the average number of instructions in a basic block is very high. See
4049 the comment before the declaration of true_dependency_cache for
4050 what we consider "very high". */
6a1cdb4d 4051 cache_size = 0;
e1ab7874 4052 extend_dependency_caches (sched_max_luid, true);
6a1cdb4d 4053 }
9997bd27 4054
48e1416a 4055 if (global_p)
e1ab7874 4056 {
4057 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4058 /* Allocate lists for one block at a time. */
4059 insns_in_block);
4060 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4061 /* Allocate nodes for one block at a time.
4062 We assume that an average insn has
4063 5 producers. */
4064 5 * insns_in_block);
4065 }
6a1cdb4d 4066}
4d64d9a4 4067
e1ab7874 4068
6a1cdb4d 4069/* Create or extend (depending on CREATE_P) dependency caches to
4070 size N. */
4071void
4072extend_dependency_caches (int n, bool create_p)
4073{
4074 if (create_p || true_dependency_cache)
4075 {
4076 int i, luid = cache_size + n;
4077
4078 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4079 luid);
4080 output_dependency_cache = XRESIZEVEC (bitmap_head,
4081 output_dependency_cache, luid);
4082 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4083 luid);
effd1640 4084 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4085 luid);
93f6b030 4086
4d64d9a4 4087 if (current_sched_info->flags & DO_SPECULATION)
4088 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4089 luid);
4090
6a1cdb4d 4091 for (i = cache_size; i < luid; i++)
d4f68afb 4092 {
4093 bitmap_initialize (&true_dependency_cache[i], 0);
d4f68afb 4094 bitmap_initialize (&output_dependency_cache[i], 0);
4d64d9a4 4095 bitmap_initialize (&anti_dependency_cache[i], 0);
effd1640 4096 bitmap_initialize (&control_dependency_cache[i], 0);
93f6b030 4097
4d64d9a4 4098 if (current_sched_info->flags & DO_SPECULATION)
4099 bitmap_initialize (&spec_dependency_cache[i], 0);
d4f68afb 4100 }
4101 cache_size = luid;
6adce0fb 4102 }
4103}
4104
e1ab7874 4105/* Finalize dependency information for the whole function. */
6adce0fb 4106void
e1ab7874 4107sched_deps_finish (void)
6adce0fb 4108{
93f6b030 4109 gcc_assert (deps_pools_are_empty_p ());
4110 free_alloc_pool_if_empty (&dn_pool);
4111 free_alloc_pool_if_empty (&dl_pool);
4112 gcc_assert (dn_pool == NULL && dl_pool == NULL);
9997bd27 4113
f1f41a6c 4114 h_d_i_d.release ();
e1ab7874 4115 cache_size = 0;
48e1416a 4116
6adce0fb 4117 if (true_dependency_cache)
4118 {
d4f68afb 4119 int i;
4120
4121 for (i = 0; i < cache_size; i++)
4122 {
4123 bitmap_clear (&true_dependency_cache[i]);
d4f68afb 4124 bitmap_clear (&output_dependency_cache[i]);
4d64d9a4 4125 bitmap_clear (&anti_dependency_cache[i]);
effd1640 4126 bitmap_clear (&control_dependency_cache[i]);
93f6b030 4127
e1ab7874 4128 if (sched_deps_info->generate_spec_deps)
4d64d9a4 4129 bitmap_clear (&spec_dependency_cache[i]);
d4f68afb 4130 }
4131 free (true_dependency_cache);
6adce0fb 4132 true_dependency_cache = NULL;
d4f68afb 4133 free (output_dependency_cache);
6adce0fb 4134 output_dependency_cache = NULL;
4d64d9a4 4135 free (anti_dependency_cache);
4136 anti_dependency_cache = NULL;
effd1640 4137 free (control_dependency_cache);
4138 control_dependency_cache = NULL;
93f6b030 4139
e1ab7874 4140 if (sched_deps_info->generate_spec_deps)
4d64d9a4 4141 {
4142 free (spec_dependency_cache);
4143 spec_dependency_cache = NULL;
4144 }
e1ab7874 4145
6adce0fb 4146 }
4147}
4148
4149/* Initialize some global variables needed by the dependency analysis
4150 code. */
4151
4152void
60b8c5b3 4153init_deps_global (void)
6adce0fb 4154{
a7dcf969 4155 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4156 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
ae85a37a 4157 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4158 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4159 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
effd1640 4160 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
c14a239f 4161 reg_pending_barrier = NOT_A_BARRIER;
e1ab7874 4162
4163 if (!sel_sched_p () || sched_emulate_haifa_p)
4164 {
4165 sched_deps_info->start_insn = haifa_start_insn;
4166 sched_deps_info->finish_insn = haifa_finish_insn;
4167
4168 sched_deps_info->note_reg_set = haifa_note_reg_set;
4169 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4170 sched_deps_info->note_reg_use = haifa_note_reg_use;
4171
4172 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4173 sched_deps_info->note_dep = haifa_note_dep;
4174 }
6adce0fb 4175}
4176
4177/* Free everything used by the dependency analysis code. */
4178
4179void
60b8c5b3 4180finish_deps_global (void)
6adce0fb 4181{
4182 FREE_REG_SET (reg_pending_sets);
4183 FREE_REG_SET (reg_pending_clobbers);
5deaeb50 4184 FREE_REG_SET (reg_pending_uses);
effd1640 4185 FREE_REG_SET (reg_pending_control_uses);
6adce0fb 4186}
4d64d9a4 4187
4d64d9a4 4188/* Estimate the weakness of dependence between MEM1 and MEM2. */
e1ab7874 4189dw_t
4d64d9a4 4190estimate_dep_weak (rtx mem1, rtx mem2)
4191{
4192 rtx r1, r2;
4193
4194 if (mem1 == mem2)
4195 /* MEMs are the same - don't speculate. */
4196 return MIN_DEP_WEAK;
4197
4198 r1 = XEXP (mem1, 0);
4199 r2 = XEXP (mem2, 0);
4200
4201 if (r1 == r2
4202 || (REG_P (r1) && REG_P (r2)
4203 && REGNO (r1) == REGNO (r2)))
4204 /* Again, MEMs are the same. */
4205 return MIN_DEP_WEAK;
4206 else if ((REG_P (r1) && !REG_P (r2))
4207 || (!REG_P (r1) && REG_P (r2)))
4208 /* Different addressing modes - a reason to be more speculative
4209 than usual. */
4210 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4211 else
4212 /* We can't say anything about the dependence. */
4213 return UNCERTAIN_DEP_WEAK;
4214}
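
/* These results live on the dependence-weakness scale from sched-int.h,
   where MIN_DEP_WEAK means "do not speculate" and larger values mean a
   better chance that speculation succeeds.  E.g. the mixed REG/non-REG
   case above returns the midpoint between UNCERTAIN_DEP_WEAK and
   NO_DEP_WEAK, i.e. somewhat more optimism than the plain
   UNCERTAIN_DEP_WEAK default.  (Orientation only; the authoritative
   definitions are in sched-int.h.)  */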
4215
4216/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4217 This function can handle same INSN and ELEM (INSN == ELEM).
4218 It is a convenience wrapper. */
effd1640 4219static void
b24ef467 4220add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4d64d9a4 4221{
e1ab7874 4222 ds_t ds;
4223 bool internal;
4d64d9a4 4224
e1ab7874 4225 if (dep_type == REG_DEP_TRUE)
4226 ds = DEP_TRUE;
4227 else if (dep_type == REG_DEP_OUTPUT)
4228 ds = DEP_OUTPUT;
effd1640 4229 else if (dep_type == REG_DEP_CONTROL)
4230 ds = DEP_CONTROL;
e1ab7874 4231 else
4232 {
4233 gcc_assert (dep_type == REG_DEP_ANTI);
4234 ds = DEP_ANTI;
4235 }
4236
4237 /* When add_dependence is called from inside sched-deps.c, we expect
4238 cur_insn to be non-null. */
4239 internal = cur_insn != NULL;
4240 if (internal)
4241 gcc_assert (insn == cur_insn);
4242 else
4243 cur_insn = insn;
48e1416a 4244
e1ab7874 4245 note_dep (elem, ds);
4246 if (!internal)
4247 cur_insn = NULL;
4d64d9a4 4248}
4249
242d1ee6 4250/* Return the weakness of speculative type TYPE in the dep_status DS,
4251 skipping the range check so malformed input can be dumped without an ICE. */
4252static dw_t
93f6b030 4253get_dep_weak_1 (ds_t ds, ds_t type)
4d64d9a4 4254{
4255 ds = ds & type;
e1ab7874 4256
4d64d9a4 4257 switch (type)
4258 {
4259 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4260 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4261 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4262 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4263 default: gcc_unreachable ();
4264 }
4265
4d64d9a4 4266 return (dw_t) ds;
4267}
4268
242d1ee6 4269/* Return weakness of speculative type TYPE in the dep_status DS. */
93f6b030 4270dw_t
4271get_dep_weak (ds_t ds, ds_t type)
4272{
4273 dw_t dw = get_dep_weak_1 (ds, type);
4274
4275 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
93f6b030 4276 return dw;
4277}
4278
4d64d9a4 4279/* Return the dep_status, which has the same parameters as DS, except for
4280 speculative type TYPE, that will have weakness DW. */
4281ds_t
4282set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4283{
4284 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4285
4286 ds &= ~type;
4287 switch (type)
4288 {
4289 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4290 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4291 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4292 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4293 default: gcc_unreachable ();
4294 }
4295 return ds;
4296}
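
/* Usage sketch (illustrative): the two accessors round-trip, so

     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK / 2);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == MAX_DEP_WEAK / 2);

   holds for any weakness within [MIN_DEP_WEAK, MAX_DEP_WEAK].  */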
4297
e1ab7874 4298/* Return the join of two dep_statuses DS1 and DS2.
4299 If MAX_P is true then choose the greater probability,
4300 otherwise multiply probabilities.
4301 This function assumes that both DS1 and DS2 contain speculative bits. */
4302static ds_t
4303ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4d64d9a4 4304{
4305 ds_t ds, t;
4306
4307 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4308
4309 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4310
4311 t = FIRST_SPEC_TYPE;
4312 do
4313 {
4314 if ((ds1 & t) && !(ds2 & t))
4315 ds |= ds1 & t;
4316 else if (!(ds1 & t) && (ds2 & t))
4317 ds |= ds2 & t;
4318 else if ((ds1 & t) && (ds2 & t))
4319 {
e1ab7874 4320 dw_t dw1 = get_dep_weak (ds1, t);
4321 dw_t dw2 = get_dep_weak (ds2, t);
4d64d9a4 4322 ds_t dw;
4323
e1ab7874 4324 if (!max_p)
4325 {
4326 dw = ((ds_t) dw1) * ((ds_t) dw2);
4327 dw /= MAX_DEP_WEAK;
4328 if (dw < MIN_DEP_WEAK)
4329 dw = MIN_DEP_WEAK;
4330 }
4331 else
4332 {
4333 if (dw1 >= dw2)
4334 dw = dw1;
4335 else
4336 dw = dw2;
4337 }
4d64d9a4 4338
4339 ds = set_dep_weak (ds, t, (dw_t) dw);
4340 }
4341
4342 if (t == LAST_SPEC_TYPE)
4343 break;
4344 t <<= SPEC_TYPE_SHIFT;
4345 }
4346 while (1);
4347
4348 return ds;
4349}
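
/* When !MAX_P the weaknesses combine like fixed-point probabilities:
   dw = dw1 * dw2 / MAX_DEP_WEAK, clamped to MIN_DEP_WEAK from below.
   For instance, merging two weaknesses of about MAX_DEP_WEAK / 2 yields
   roughly MAX_DEP_WEAK / 4 - two 50% chances multiply to 25%.
   (Illustrative arithmetic only.)  */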
4350
e1ab7874 4351/* Return the join of two dep_statuses DS1 and DS2.
4352 This function assumes that both DS1 and DS2 contain speculative bits. */
4353ds_t
4354ds_merge (ds_t ds1, ds_t ds2)
4355{
4356 return ds_merge_1 (ds1, ds2, false);
4357}
4358
4359/* Return the join of dep_statuses DS and DS2; if MEM1 is nonnull, MEM1 and MEM2 are used to estimate the data-speculation weakness. */
4360ds_t
4361ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4362{
4363 ds_t new_status = ds | ds2;
4364
4365 if (new_status & SPECULATIVE)
4366 {
4367 if ((ds && !(ds & SPECULATIVE))
4368 || (ds2 && !(ds2 & SPECULATIVE)))
4369 /* Then this dep can't be speculative. */
4370 new_status &= ~SPECULATIVE;
4371 else
4372 {
4373 /* Both are speculative. Merge the probabilities. */
4374 if (mem1)
4375 {
4376 dw_t dw;
4377
4378 dw = estimate_dep_weak (mem1, mem2);
4379 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4380 }
4381
4382 if (!ds)
4383 new_status = ds2;
4384 else if (!ds2)
4385 new_status = ds;
4386 else
4387 new_status = ds_merge (ds2, ds);
4388 }
4389 }
4390
4391 return new_status;
4392}
4393
4394/* Return the join of DS1 and DS2. Use maximum instead of multiplying
4395 probabilities. */
4396ds_t
4397ds_max_merge (ds_t ds1, ds_t ds2)
4398{
4399 if (ds1 == 0 && ds2 == 0)
4400 return 0;
4401
4402 if (ds1 == 0 && ds2 != 0)
4403 return ds2;
4404
4405 if (ds1 != 0 && ds2 == 0)
4406 return ds1;
4407
4408 return ds_merge_1 (ds1, ds2, true);
4409}
4410
4411/* Return the probability of speculation success for the speculation
4412 status DS. */
4413dw_t
4414ds_weak (ds_t ds)
4415{
4416 ds_t res = 1, dt;
4417 int n = 0;
4418
4419 dt = FIRST_SPEC_TYPE;
4420 do
4421 {
4422 if (ds & dt)
4423 {
4424 res *= (ds_t) get_dep_weak (ds, dt);
4425 n++;
4426 }
4427
4428 if (dt == LAST_SPEC_TYPE)
4429 break;
4430 dt <<= SPEC_TYPE_SHIFT;
4431 }
4432 while (1);
4433
4434 gcc_assert (n);
4435 while (--n)
4436 res /= MAX_DEP_WEAK;
4437
4438 if (res < MIN_DEP_WEAK)
4439 res = MIN_DEP_WEAK;
4440
4441 gcc_assert (res <= MAX_DEP_WEAK);
4442
4443 return (dw_t) res;
4444}
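
/* Equivalently, for the speculation types t1 ... tn present in DS:

     ds_weak (ds) = w (t1) * ... * w (tn) / MAX_DEP_WEAK^(n-1)

   i.e. the individual success probabilities multiplied together on the
   same fixed-point scale, clamped to MIN_DEP_WEAK from below.  */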
4445
4446/* Return a dep status that contains all speculation types of DS. */
4447ds_t
4448ds_get_speculation_types (ds_t ds)
4449{
4450 if (ds & BEGIN_DATA)
4451 ds |= BEGIN_DATA;
4452 if (ds & BE_IN_DATA)
4453 ds |= BE_IN_DATA;
4454 if (ds & BEGIN_CONTROL)
4455 ds |= BEGIN_CONTROL;
4456 if (ds & BE_IN_CONTROL)
4457 ds |= BE_IN_CONTROL;
4458
4459 return ds & SPECULATIVE;
4460}
4461
4462/* Return a dep status that contains maximal weakness for each speculation
4463 type present in DS. */
4464ds_t
4465ds_get_max_dep_weak (ds_t ds)
4466{
4467 if (ds & BEGIN_DATA)
4468 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4469 if (ds & BE_IN_DATA)
4470 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4471 if (ds & BEGIN_CONTROL)
4472 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4473 if (ds & BE_IN_CONTROL)
4474 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4475
4476 return ds;
4477}
4478
93f6b030 4479/* Dump information about the dependence status S. */
4480static void
4481dump_ds (FILE *f, ds_t s)
4482{
4483 fprintf (f, "{");
4484
4485 if (s & BEGIN_DATA)
4486 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4487 if (s & BE_IN_DATA)
4488 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4489 if (s & BEGIN_CONTROL)
4490 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4491 if (s & BE_IN_CONTROL)
4492 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4493
4494 if (s & HARD_DEP)
4495 fprintf (f, "HARD_DEP; ");
4496
4497 if (s & DEP_TRUE)
4498 fprintf (f, "DEP_TRUE; ");
93f6b030 4499 if (s & DEP_OUTPUT)
4500 fprintf (f, "DEP_OUTPUT; ");
effd1640 4501 if (s & DEP_ANTI)
4502 fprintf (f, "DEP_ANTI; ");
4503 if (s & DEP_CONTROL)
4504 fprintf (f, "DEP_CONTROL; ");
93f6b030 4505
4506 fprintf (f, "}");
4507}
4508
4b987fac 4509DEBUG_FUNCTION void
93f6b030 4510debug_ds (ds_t s)
4511{
4512 dump_ds (stderr, s);
4513 fprintf (stderr, "\n");
4514}
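
/* Example output (weakness value invented): a data-speculative true
   dependence might print as

     {BEGIN_DATA: 42; DEP_TRUE; }  */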
4515
4d64d9a4 4516#ifdef ENABLE_CHECKING
4517/* Verify that dependence type and status are consistent.
4518 If RELAXED_P is true, then skip dep_weakness checks. */
4519static void
93f6b030 4520check_dep (dep_t dep, bool relaxed_p)
4d64d9a4 4521{
93f6b030 4522 enum reg_note dt = DEP_TYPE (dep);
4523 ds_t ds = DEP_STATUS (dep);
4524
4525 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4526
4527 if (!(current_sched_info->flags & USE_DEPS_LIST))
4528 {
2261c559 4529 gcc_assert (ds == 0);
93f6b030 4530 return;
4531 }
4532
4d64d9a4 4533 /* Check that dependence type contains the same bits as the status. */
4534 if (dt == REG_DEP_TRUE)
4535 gcc_assert (ds & DEP_TRUE);
4536 else if (dt == REG_DEP_OUTPUT)
4537 gcc_assert ((ds & DEP_OUTPUT)
48e1416a 4538 && !(ds & DEP_TRUE));
effd1640 4539 else if (dt == REG_DEP_ANTI)
4540 gcc_assert ((ds & DEP_ANTI)
4d64d9a4 4541 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
effd1640 4542 else
4543 gcc_assert (dt == REG_DEP_CONTROL
4544 && (ds & DEP_CONTROL)
4545 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4d64d9a4 4546
4547 /* HARD_DEP cannot appear in the dep_status of a link. */
48e1416a 4548 gcc_assert (!(ds & HARD_DEP));
4d64d9a4 4549
4550 /* Check that dependence status is set correctly when speculation is not
4551 supported. */
e1ab7874 4552 if (!sched_deps_info->generate_spec_deps)
4d64d9a4 4553 gcc_assert (!(ds & SPECULATIVE));
4554 else if (ds & SPECULATIVE)
4555 {
4556 if (!relaxed_p)
4557 {
4558 ds_t type = FIRST_SPEC_TYPE;
4559
4560 /* Check that dependence weakness is in proper range. */
4561 do
4562 {
4563 if (ds & type)
4564 get_dep_weak (ds, type);
4565
4566 if (type == LAST_SPEC_TYPE)
4567 break;
4568 type <<= SPEC_TYPE_SHIFT;
4569 }
4570 while (1);
4571 }
4572
4573 if (ds & BEGIN_SPEC)
4574 {
4575 /* Only true dependence can be data speculative. */
4576 if (ds & BEGIN_DATA)
4577 gcc_assert (ds & DEP_TRUE);
4578
4579 /* Control dependencies in the insn scheduler are represented by
4580 anti-dependencies, therefore only anti dependence can be
4581 control speculative. */
4582 if (ds & BEGIN_CONTROL)
4583 gcc_assert (ds & DEP_ANTI);
4584 }
4585 else
4586 {
4587 /* Subsequent speculations should resolve true dependencies. */
4588 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4589 }
48e1416a 4590
4591 /* Check that true and anti dependencies can't have other speculative
4d64d9a4 4592 statuses. */
4593 if (ds & DEP_TRUE)
4594 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4595 /* An output dependence can't be speculative at all. */
4596 gcc_assert (!(ds & DEP_OUTPUT));
4597 if (ds & DEP_ANTI)
4598 gcc_assert (ds & BEGIN_CONTROL);
4599 }
4600}
db982eeb 4601#endif /* ENABLE_CHECKING */
4602
d452a169 4603/* The following code discovers opportunities to switch a memory reference
4604 and an increment by modifying the address. We ensure that this is done
4605 only for dependencies that are only used to show a single register
4606 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4607 instruction involved is subject to only one dep that can cause a pattern
4608 change.
4609
4610 When we discover a suitable dependency, we fill in the dep_replacement
4611 structure to show how to modify the memory reference. */
4612
4613/* Holds information about a pair of memory reference and register increment
4614 insns which depend on each other, but could possibly be interchanged. */
4615struct mem_inc_info
4616{
b24ef467 4617 rtx_insn *inc_insn;
4618 rtx_insn *mem_insn;
d452a169 4619
4620 rtx *mem_loc;
4621 /* A register occurring in the memory address for which we wish to break
4622 the dependence. This must be identical to the destination register of
4623 the increment. */
4624 rtx mem_reg0;
4625 /* Any kind of index that is added to that register. */
4626 rtx mem_index;
4627 /* The constant offset used in the memory address. */
4628 HOST_WIDE_INT mem_constant;
4629 /* The constant added in the increment insn. Negated if the increment is
4630 after the memory address. */
4631 HOST_WIDE_INT inc_constant;
4632 /* The source register used in the increment. May be different from mem_reg0
4633 if the increment occurs before the memory address. */
4634 rtx inc_input;
4635};
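
/* Worked example (invented RTL): for the pair

     mem_insn: ... (mem:SI (plus:SI (reg:SI 100) (const_int 4))) ...
     inc_insn: (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 8)))

   with the increment after the memory reference, the fields become
   mem_reg0 = (reg:SI 100), mem_constant = 4, inc_input = (reg:SI 100)
   and inc_constant = -8 (negated as documented above), so the
   replacement address computed in find_inc is reg 100 + 4 - 8, i.e. the
   same location expressed in terms of the incremented register.  */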
4636
4637/* Verify that the memory location described in MII can be replaced with
4638 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4639 insn remains unchanged by this function. */
4640
4641static rtx
4642attempt_change (struct mem_inc_info *mii, rtx new_addr)
4643{
4644 rtx mem = *mii->mem_loc;
4645 rtx new_mem;
4646
75de4aa2 4647 /* Jump through a lot of hoops to keep the attributes up to date. We
d452a169 4648 do not want to call one of the change address variants that take
4649 an offset even though we know the offset in many cases. These
4650 assume you are changing where the address is pointing by the
4651 offset. */
4652 new_mem = replace_equiv_address_nv (mem, new_addr);
4653 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4654 {
4655 if (sched_verbose >= 5)
4656 fprintf (sched_dump, "validation failure\n");
4657 return NULL_RTX;
4658 }
4659
4660 /* Put back the old one. */
4661 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4662
4663 return new_mem;
4664}
4665
4666/* Return true if INSN is of a form "a = b op c" where a and b are
4667 regs. op is + if c is a reg and +|- if c is a const. Fill in
4668 information in MII about what is found.
4669 BEFORE_MEM indicates whether the increment is found before or after
4670 a corresponding memory reference. */
4671
4672static bool
b24ef467 4673parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
d452a169 4674{
4675 rtx pat = single_set (insn);
4676 rtx src, cst;
4677 bool regs_equal;
4678
4679 if (RTX_FRAME_RELATED_P (insn) || !pat)
4680 return false;
4681
4682 /* Result must be single reg. */
4683 if (!REG_P (SET_DEST (pat)))
4684 return false;
4685
36faf01b 4686 if (GET_CODE (SET_SRC (pat)) != PLUS)
d452a169 4687 return false;
4688
4689 mii->inc_insn = insn;
4690 src = SET_SRC (pat);
4691 mii->inc_input = XEXP (src, 0);
4692
4693 if (!REG_P (XEXP (src, 0)))
4694 return false;
4695
4696 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4697 return false;
4698
4699 cst = XEXP (src, 1);
4700 if (!CONST_INT_P (cst))
4701 return false;
4702 mii->inc_constant = INTVAL (cst);
4703
4704 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4705
4706 if (!before_mem)
4707 {
4708 mii->inc_constant = -mii->inc_constant;
4709 if (!regs_equal)
4710 return false;
4711 }
4712
4713 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
36faf01b 4714 {
4715 /* Note that the sign has already been reversed for !before_mem. */
4716#ifdef STACK_GROWS_DOWNWARD
4717 return mii->inc_constant > 0;
4718#else
4719 return mii->inc_constant < 0;
4720#endif
4721 }
d452a169 4722 return true;
4723}
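
/* Note that with the checks above, the only shape currently accepted is

     (set (reg A) (plus (reg B) (const_int N)))

   where (reg A) must equal MII->mem_reg0; a REG third operand fails the
   CONST_INT_P test.  (Summary of the code above, for orientation.)  */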
4724
4725/* Once a suitable mem reference has been found and the corresponding data
4726 in MII has been filled in, this function is called to find a suitable
4727 add or inc insn involving the register we found in the memory
4728 reference. */
4729
4730static bool
4731find_inc (struct mem_inc_info *mii, bool backwards)
4732{
4733 sd_iterator_def sd_it;
4734 dep_t dep;
4735
4736 sd_it = sd_iterator_start (mii->mem_insn,
4737 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4738 while (sd_iterator_cond (&sd_it, &dep))
4739 {
4740 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
9d8bf733 4741 rtx_insn *pro = DEP_PRO (dep);
4742 rtx_insn *con = DEP_CON (dep);
b24ef467 4743 rtx_insn *inc_cand = backwards ? pro : con;
d452a169 4744 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4745 goto next;
4746 if (parse_add_or_inc (mii, inc_cand, backwards))
4747 {
4748 struct dep_replacement *desc;
be10bb5a 4749 df_ref def;
d452a169 4750 rtx newaddr, newmem;
4751
4752 if (sched_verbose >= 5)
4753 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4754 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4755
4756 /* Need to ensure that none of the operands of the inc
4757 instruction are assigned to by the mem insn. */
be10bb5a 4758 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4759 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4760 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4761 {
4762 if (sched_verbose >= 5)
4763 fprintf (sched_dump,
4764 "inc conflicts with store failure.\n");
4765 goto next;
4766 }
490aa3a5 4767
4768 /* The inc instruction could have clobbers, make sure those
4769 registers are not used in mem insn. */
4770 FOR_EACH_INSN_DEF (def, mii->inc_insn)
4771 if (!reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4772 {
4773 df_ref use;
4774 FOR_EACH_INSN_USE (use, mii->mem_insn)
4775 if (reg_overlap_mentioned_p (DF_REF_REG (def),
4776 DF_REF_REG (use)))
4777 {
4778 if (sched_verbose >= 5)
4779 fprintf (sched_dump,
4780 "inc clobber used in store failure.\n");
4781 goto next;
4782 }
4783 }
4784
d452a169 4785 newaddr = mii->inc_input;
4786 if (mii->mem_index != NULL_RTX)
4787 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4788 mii->mem_index);
4789 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4790 mii->mem_constant + mii->inc_constant);
4791 newmem = attempt_change (mii, newaddr);
4792 if (newmem == NULL_RTX)
4793 goto next;
4794 if (sched_verbose >= 5)
4795 fprintf (sched_dump, "successful address replacement\n");
4796 desc = XCNEW (struct dep_replacement);
4797 DEP_REPLACE (dep) = desc;
4798 desc->loc = mii->mem_loc;
4799 desc->newval = newmem;
4800 desc->orig = *desc->loc;
4801 desc->insn = mii->mem_insn;
4802 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4803 INSN_SPEC_BACK_DEPS (con));
4804 if (backwards)
4805 {
4806 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
f00069ed 4807 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4808 REG_DEP_TRUE);
d452a169 4809 }
4810 else
4811 {
4812 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
f00069ed 4813 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4814 REG_DEP_ANTI);
d452a169 4815 }
4816 return true;
4817 }
4818 next:
4819 sd_iterator_next (&sd_it);
4820 }
4821 return false;
4822}
4823
4824/* A recursive function that walks ADDRESS_OF_X to find memory references
4825 which could be modified during scheduling. We call find_inc for each
4826 one we find that has a recognizable form. MII holds information about
4827 the pair of memory/increment instructions.
4828 We ensure that every instruction with a memory reference (which will be
4829 the location of the replacement) is assigned at most one breakable
4830 dependency. */
4831
4832static bool
4833find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4834{
4835 rtx x = *address_of_x;
4836 enum rtx_code code = GET_CODE (x);
4837 const char *const fmt = GET_RTX_FORMAT (code);
4838 int i;
4839
4840 if (code == MEM)
4841 {
4842 rtx reg0 = XEXP (x, 0);
4843
4844 mii->mem_loc = address_of_x;
4845 mii->mem_index = NULL_RTX;
4846 mii->mem_constant = 0;
4847 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4848 {
4849 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4850 reg0 = XEXP (reg0, 0);
4851 }
4852 if (GET_CODE (reg0) == PLUS)
4853 {
4854 mii->mem_index = XEXP (reg0, 1);
4855 reg0 = XEXP (reg0, 0);
4856 }
4857 if (REG_P (reg0))
4858 {
be10bb5a 4859 df_ref use;
d452a169 4860 int occurrences = 0;
4861
4862 /* Make sure this reg appears only once in this insn. Can't use
4863 count_occurrences since that only works for pseudos. */
be10bb5a 4864 FOR_EACH_INSN_USE (use, mii->mem_insn)
4865 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4866 if (++occurrences > 1)
4867 {
4868 if (sched_verbose >= 5)
4869 fprintf (sched_dump, "mem count failure\n");
4870 return false;
4871 }
d452a169 4872
4873 mii->mem_reg0 = reg0;
4874 return find_inc (mii, true) || find_inc (mii, false);
4875 }
4876 return false;
4877 }
4878
4879 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4880 {
4881 /* If REG occurs inside a MEM used in a bit-field reference,
4882 that is unacceptable. */
4883 return false;
4884 }
4885
4886 /* Time for some deep diving. */
4887 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4888 {
4889 if (fmt[i] == 'e')
4890 {
4891 if (find_mem (mii, &XEXP (x, i)))
4892 return true;
4893 }
4894 else if (fmt[i] == 'E')
4895 {
4896 int j;
4897 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4898 if (find_mem (mii, &XVECEXP (x, i, j)))
4899 return true;
4900 }
4901 }
4902 return false;
4903}
4904
4905
4906/* Examine the instructions between HEAD and TAIL and try to find
4907 dependencies that can be broken by modifying one of the patterns. */
4908
4909void
b24ef467 4910find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
d452a169 4911{
b24ef467 4912 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
d452a169 4913 int success_in_block = 0;
4914
1c5ae16e 4915 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
d452a169 4916 {
4917 struct mem_inc_info mii;
4918
4919 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4920 continue;
4921
4922 mii.mem_insn = insn;
4923 if (find_mem (&mii, &PATTERN (insn)))
4924 success_in_block++;
4925 }
4926 if (success_in_block && sched_verbose >= 5)
4927 fprintf (sched_dump, "%d candidates for address modification found.\n",
4928 success_in_block);
4929}
4930
db982eeb 4931#endif /* INSN_SCHEDULING */