/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "params.h"
#include "alloc-pool.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
  h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
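/* Illustrative sketch, not live code: a dependence is normally built on
   the stack with init_dep and then recorded with sd_add_dep (defined
   below), which is the pattern haifa_note_dep at the end of this file
   follows.  PRO and CON stand in for two existing insns.  */
#if 0
static void
example_add_true_dep (rtx_insn *pro, rtx_insn *con)
{
  dep_def _dep, *dep = &_dep;

  init_dep (dep, pro, con, REG_DEP_TRUE);
  sd_add_dep (dep, /*resolved_p=*/false);
}
#endif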
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
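/* Illustrative sketch, not live code: dump_dep can also be asked for
   individual fields by or-ing the DUMP_DEP_* flags, e.g. to print only
   the endpoints and the type of a dependence:

     dump_dep (stderr, dep, DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE);

   sd_debug_dep above is the dump-everything variant, convenient to
   call from a debugger.  */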
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
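/* Illustrative sketch, not live code: because every link records the
   address of its predecessor's "next" field, a link can be detached in
   O(1) without knowing which list it is on, and moving a link between
   lists is just a detach/attach pair (cf. move_dep_link below):

     detach_dep_link (l);
     attach_dep_link (l, &DEPS_LIST_FIRST (some_list));
*/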
/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static pool_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static pool_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] have been returned to their pools.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn's constraint may admit only a single register class.  Even if there
   is currently no hard register in the insn, the particular hard register
   will appear in the insn after the reload pass because the constraint
   requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, then the other caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}


/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1 == rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
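/* Illustrative example: two insns predicated on a condition and its
   inverse, e.g. (on a target with a condition-code register)

     (cond_exec (eq (reg:CC cc) (const_int 0)) (set (reg:SI r0) ...))
     (cond_exec (ne (reg:CC cc) (const_int 0)) (set (reg:SI r0) ...))

   are recognized as mutually exclusive by
   sched_insns_conditions_mutex_p above, so no dependence needs to be
   recorded between them even though both write the same register.  */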
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along with it.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If the instruction might fault, it cannot be speculatively
	   scheduled.  For control speculation it's obvious why, and for
	   data speculation it's because the insn might get wrong input
	   if the speculation wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of the returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the combined size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
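/* Illustrative sketch, not live code: these lists are normally walked
   through the sd_iterator interface, e.g. visiting every backward
   dependence of INSN:  */
#if 0
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    fprintf (stderr, "insn %d depends on insn %d\n",
	     INSN_UID (DEP_CON (dep)), INSN_UID (DEP_PRO (dep)));
#endif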
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
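/* Illustrative sketch, not live code: querying for an existing
   dependence between two insns PRO and CON and inspecting its type.  */
#if 0
  dep_t dep = sd_find_dep_between (pro, con, /*resolved_p=*/false);

  if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
    {
      /* CON consumes a value produced by PRO.  */
    }
#endif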
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask the dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if a new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either the existing dep, the dep we're adding, or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed,
   a new entry has been added to insn's backward deps, or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true,
   initialize with the lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add the dependence described by DEP.
   If RESOLVED_P is true, treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a backward dependence described by DEP.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
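/* Illustrative sketch, not live code: unlike sd_add_dep, the
   add-or-update entry point may be called repeatedly for the same pair
   of insns; repeated calls are merged into a single link.  */
#if 0
  dep_def _dep, *dep = &_dep;
  enum DEPS_ADJUST_RESULT res;

  init_dep (dep, pro, con, REG_DEP_OUTPUT);
  res = sd_add_or_update_dep (dep, /*resolved_p=*/false);
  /* res is DEP_CREATED, DEP_CHANGED or DEP_PRESENT.  */
#endif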
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
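/* Illustrative sketch, not live code: resolving every backward
   dependence of INSN once it has been scheduled.  Because
   sd_resolve_dep moves the current link out of the list being walked,
   the iterator is not advanced explicitly (the same pattern
   delete_all_dependences below uses with sd_delete_dep).  */
#if 0
  sd_iterator_def sd_it;
  dep_t dep;

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_resolve_dep (sd_it);
#endif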
/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true, add the dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **) &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **) &XEXP (*listp, 1);
      exprp = (rtx_expr_list **) &XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would duplicate code from sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
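/* For example, given a memory location *p and the sequence

     x = *p;	// may be freely reordered with the read below
     y = *p;	// (0) read dependence on the read above
     *p = a;	// (3) anti dependence on both reads above
     *p = b;	// (2) output dependence on the store above
     z = *p;	// (1) true dependence on the store above

   only kinds (1)-(3) actually constrain the scheduler.  */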
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
1768/* Make a dependency between every memory reference on the pending lists
5deaeb50 1769 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1770 dependencies for a read operation, similarly with FOR_WRITE. */
6adce0fb 1771
1772static void
b24ef467 1773flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
60b8c5b3 1774 int for_write)
6adce0fb 1775{
5deaeb50 1776 if (for_write)
6adce0fb 1777 {
48e1416a 1778 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
d452a169 1779 1, REG_DEP_ANTI, true);
e1ab7874 1780 if (!deps->readonly)
1781 {
1782 free_EXPR_LIST_list (&deps->pending_read_mems);
1783 deps->pending_read_list_length = 0;
1784 }
6adce0fb 1785 }
6adce0fb 1786
e1ab7874 1787 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
d452a169 1788 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1789 true);
6adce0fb 1790
48e1416a 1791 add_dependence_list_and_free (deps, insn,
e1ab7874 1792 &deps->last_pending_memory_flush, 1,
d452a169 1793 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1794 true);
effd1640 1795
1796 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
d452a169 1797 REG_DEP_ANTI, true);
effd1640 1798
a7a503a5 1799 if (DEBUG_INSN_P (insn))
1800 {
1801 if (for_write)
1802 free_INSN_LIST_list (&deps->pending_read_insns);
1803 free_INSN_LIST_list (&deps->pending_write_insns);
1804 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1805 free_INSN_LIST_list (&deps->pending_jump_insns);
1806 }
1807
e1ab7874 1808 if (!deps->readonly)
1809 {
1810 free_EXPR_LIST_list (&deps->pending_write_mems);
1811 deps->pending_write_list_length = 0;
1812
1813 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1814 deps->pending_flush_length = 1;
1815 }
d452a169 1816 mark_as_hard = false;
e1ab7874 1817}
1818\f
1819/* Instruction whose dependencies we are analyzing. */
b24ef467 1820static rtx_insn *cur_insn = NULL;
e1ab7874 1821
1822/* Implement hooks for haifa scheduler. */
1823
1824static void
2f3c9801 1825haifa_start_insn (rtx_insn *insn)
e1ab7874 1826{
1827 gcc_assert (insn && !cur_insn);
1828
2f3c9801 1829 cur_insn = insn;
e1ab7874 1830}
1831
1832static void
1833haifa_finish_insn (void)
1834{
1835 cur_insn = NULL;
1836}
1837
1838void
1839haifa_note_reg_set (int regno)
1840{
1841 SET_REGNO_REG_SET (reg_pending_sets, regno);
1842}
1843
1844void
1845haifa_note_reg_clobber (int regno)
1846{
1847 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1848}
1849
1850void
1851haifa_note_reg_use (int regno)
1852{
1853 SET_REGNO_REG_SET (reg_pending_uses, regno);
1854}
1855
1856static void
2f3c9801 1857haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
e1ab7874 1858{
1859 if (!(ds & SPECULATIVE))
1860 {
1861 mem = NULL_RTX;
1862 pending_mem = NULL_RTX;
1863 }
1864 else
1865 gcc_assert (ds & BEGIN_DATA);
1866
1867 {
1868 dep_def _dep, *dep = &_dep;
48e1416a 1869
a7dcf969 1870 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
2261c559 1871 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
d452a169 1872 DEP_NONREG (dep) = 1;
e1ab7874 1873 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1874 }
1875
1876}
1877
1878static void
2f3c9801 1879haifa_note_dep (rtx_insn *elem, ds_t ds)
e1ab7874 1880{
1881 dep_def _dep;
1882 dep_t dep = &_dep;
1883
1884 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
d452a169 1885 if (mark_as_hard)
1886 DEP_NONREG (dep) = 1;
e1ab7874 1887 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1888}
1889
1890static void
1891note_reg_use (int r)
1892{
1893 if (sched_deps_info->note_reg_use)
1894 sched_deps_info->note_reg_use (r);
1895}
1896
1897static void
1898note_reg_set (int r)
1899{
1900 if (sched_deps_info->note_reg_set)
1901 sched_deps_info->note_reg_set (r);
1902}
1903
1904static void
1905note_reg_clobber (int r)
1906{
1907 if (sched_deps_info->note_reg_clobber)
1908 sched_deps_info->note_reg_clobber (r);
1909}
1910
1911static void
2f3c9801 1912note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
e1ab7874 1913{
1914 if (sched_deps_info->note_mem_dep)
1915 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1916}
1917
1918static void
b24ef467 1919note_dep (rtx_insn *e, ds_t ds)
e1ab7874 1920{
1921 if (sched_deps_info->note_dep)
1922 sched_deps_info->note_dep (e, ds);
1923}
1924
1925/* Return the reg_note corresponding to DS. */
1926enum reg_note
1927ds_to_dt (ds_t ds)
1928{
1929 if (ds & DEP_TRUE)
1930 return REG_DEP_TRUE;
1931 else if (ds & DEP_OUTPUT)
1932 return REG_DEP_OUTPUT;
effd1640 1933 else if (ds & DEP_ANTI)
1934 return REG_DEP_ANTI;
e1ab7874 1935 else
1936 {
effd1640 1937 gcc_assert (ds & DEP_CONTROL);
1938 return REG_DEP_CONTROL;
e1ab7874 1939 }
6adce0fb 1940}
a7dcf969 1941
1942\f
1943
1944/* Functions for computation of info needed for register pressure
1945 sensitive insn scheduling. */
1946
1947
1948/* Allocate and return reg_use_data structure for REGNO and INSN. */
1949static struct reg_use_data *
73e15687 1950create_insn_reg_use (int regno, rtx_insn *insn)
a7dcf969 1951{
1952 struct reg_use_data *use;
1953
1954 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1955 use->regno = regno;
1956 use->insn = insn;
1957 use->next_insn_use = INSN_REG_USE_LIST (insn);
1958 INSN_REG_USE_LIST (insn) = use;
1959 return use;
1960}
1961
57a8bf1b 1962/* Allocate reg_set_data structure for REGNO and INSN. */
1963static void
a7dcf969 1964create_insn_reg_set (int regno, rtx insn)
1965{
1966 struct reg_set_data *set;
1967
1968 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1969 set->regno = regno;
1970 set->insn = insn;
1971 set->next_insn_set = INSN_REG_SET_LIST (insn);
1972 INSN_REG_SET_LIST (insn) = set;
a7dcf969 1973}
1974
1975/* Set up insn register uses for INSN and dependency context DEPS. */
1976static void
73e15687 1977setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
a7dcf969 1978{
1979 unsigned i;
1980 reg_set_iterator rsi;
a7dcf969 1981 struct reg_use_data *use, *use2, *next;
1982 struct deps_reg *reg_last;
1983
1984 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1985 {
1986 if (i < FIRST_PSEUDO_REGISTER
1987 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1988 continue;
1989
1990 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1991 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1992 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1993 /* Ignore use which is not dying. */
1994 continue;
1995
1996 use = create_insn_reg_use (i, insn);
1997 use->next_regno_use = use;
1998 reg_last = &deps->reg_last[i];
48e1416a 1999
a7dcf969 2000 /* Create the cycle list of uses. */
355af201 2001 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
a7dcf969 2002 {
355af201 2003 use2 = create_insn_reg_use (i, list->insn ());
a7dcf969 2004 next = use->next_regno_use;
2005 use->next_regno_use = use2;
2006 use2->next_regno_use = next;
2007 }
2008 }
2009}
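
/* A sketch of the cycle built above: with hypothetical insns i1, i2, i3
   all using pseudo 100, each reg_use_data record for regno 100 is
   linked into a circular list via next_regno_use,

       use (i1) -> use (i2) -> use (i3) -> use (i1) -> ...

   so that starting from any one use the scheduler can reach every
   other insn that also uses the register.  */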
2010
2011/* Register pressure info for the currently processed insn. */
2012static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
2013
2014/* Return TRUE if INSN has the use structure for REGNO. */
2015static bool
2016insn_use_p (rtx insn, int regno)
2017{
2018 struct reg_use_data *use;
2019
2020 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2021 if (use->regno == regno)
2022 return true;
2023 return false;
2024}
2025
2026/* Update the register pressure info after birth of pseudo register REGNO
2027 in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2028 the register is clobbered or left unused after the insn. */
2029static void
2030mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2031{
2032 int incr, new_incr;
2033 enum reg_class cl;
2034
2035 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
66d9a7b9 2036 cl = sched_regno_pressure_class[regno];
a7dcf969 2037 if (cl != NO_REGS)
2038 {
66d9a7b9 2039 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
a7dcf969 2040 if (clobber_p)
2041 {
2042 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2043 reg_pressure_info[cl].clobber_increase = new_incr;
2044 }
2045 else if (unused_p)
2046 {
2047 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2048 reg_pressure_info[cl].unused_set_increase = new_incr;
2049 }
2050 else
2051 {
2052 new_incr = reg_pressure_info[cl].set_increase + incr;
2053 reg_pressure_info[cl].set_increase = new_incr;
2054 if (! insn_use_p (insn, regno))
2055 reg_pressure_info[cl].change += incr;
2056 create_insn_reg_set (regno, insn);
2057 }
2058 gcc_assert (new_incr < (1 << INCREASE_BITS));
2059 }
2060}
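
/* A worked example (hypothetical numbers): for a pseudo 200 whose
   pressure class is GENERAL_REGS and whose mode needs two hard
   registers, incr == 2; a plain SET then bumps set_increase by 2 and,
   if INSN has no use of pseudo 200, also adds 2 to the net pressure
   change for GENERAL_REGS.  */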
2061
2062/* Like mark_insn_pseudo_birth except that NREGS says how many
2063 hard registers are involved in the birth. */
2064static void
2065mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2066 bool clobber_p, bool unused_p)
2067{
2068 enum reg_class cl;
2069 int new_incr, last = regno + nregs;
48e1416a 2070
a7dcf969 2071 while (regno < last)
2072 {
2073 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2074 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2075 {
66d9a7b9 2076 cl = sched_regno_pressure_class[regno];
a7dcf969 2077 if (cl != NO_REGS)
2078 {
2079 if (clobber_p)
2080 {
2081 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2082 reg_pressure_info[cl].clobber_increase = new_incr;
2083 }
2084 else if (unused_p)
2085 {
2086 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2087 reg_pressure_info[cl].unused_set_increase = new_incr;
2088 }
2089 else
2090 {
2091 new_incr = reg_pressure_info[cl].set_increase + 1;
2092 reg_pressure_info[cl].set_increase = new_incr;
2093 if (! insn_use_p (insn, regno))
2094 reg_pressure_info[cl].change += 1;
2095 create_insn_reg_set (regno, insn);
2096 }
2097 gcc_assert (new_incr < (1 << INCREASE_BITS));
2098 }
2099 }
2100 regno++;
2101 }
2102}
2103
2104/* Update the register pressure info after birth of pseudo or hard
2105 register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2106 respectively, whether the register is clobbered or left unused
2107 after the insn. */
2108static void
2109mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2110{
2111 int regno;
2112
2113 if (GET_CODE (reg) == SUBREG)
2114 reg = SUBREG_REG (reg);
2115
2116 if (! REG_P (reg))
2117 return;
2118
2119 regno = REGNO (reg);
2120 if (regno < FIRST_PSEUDO_REGISTER)
0933f1d9 2121 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
a7dcf969 2122 clobber_p, unused_p);
2123 else
2124 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2125}
2126
2127/* Update the register pressure info after death of pseudo register
2128 REGNO. */
2129static void
2130mark_pseudo_death (int regno)
2131{
2132 int incr;
2133 enum reg_class cl;
2134
2135 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
66d9a7b9 2136 cl = sched_regno_pressure_class[regno];
a7dcf969 2137 if (cl != NO_REGS)
2138 {
66d9a7b9 2139 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
a7dcf969 2140 reg_pressure_info[cl].change -= incr;
2141 }
2142}
2143
2144/* Like mark_pseudo_death except that NREGS says how many hard
2145 registers are involved in the death. */
2146static void
2147mark_hard_regno_death (int regno, int nregs)
2148{
2149 enum reg_class cl;
2150 int last = regno + nregs;
48e1416a 2151
a7dcf969 2152 while (regno < last)
2153 {
2154 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2155 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2156 {
66d9a7b9 2157 cl = sched_regno_pressure_class[regno];
a7dcf969 2158 if (cl != NO_REGS)
2159 reg_pressure_info[cl].change -= 1;
2160 }
2161 regno++;
2162 }
2163}
2164
2165/* Update the register pressure info after death of pseudo or hard
2166 register REG. */
2167static void
2168mark_reg_death (rtx reg)
2169{
2170 int regno;
2171
2172 if (GET_CODE (reg) == SUBREG)
2173 reg = SUBREG_REG (reg);
2174
2175 if (! REG_P (reg))
2176 return;
2177
2178 regno = REGNO (reg);
2179 if (regno < FIRST_PSEUDO_REGISTER)
0933f1d9 2180 mark_hard_regno_death (regno, REG_NREGS (reg));
a7dcf969 2181 else
2182 mark_pseudo_death (regno);
2183}
2184
2185/* Process SETTER of REG. DATA is an insn containing the setter. */
2186static void
2187mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2188{
2189 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2190 return;
2191 mark_insn_reg_birth
2192 ((rtx) data, reg, false,
2193 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2194}
2195
2196/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2197static void
2198mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2199{
2200 if (GET_CODE (setter) == CLOBBER)
2201 mark_insn_reg_birth ((rtx) data, reg, true, false);
2202}
2203
2204/* Set up reg pressure info related to INSN. */
c15d7785 2205void
71ce7f59 2206init_insn_reg_pressure_info (rtx_insn *insn)
a7dcf969 2207{
2208 int i, len;
2209 enum reg_class cl;
2210 static struct reg_pressure_data *pressure_info;
2211 rtx link;
2212
b30b031c 2213 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
a7dcf969 2214
2215 if (! INSN_P (insn))
2216 return;
2217
66d9a7b9 2218 for (i = 0; i < ira_pressure_classes_num; i++)
a7dcf969 2219 {
66d9a7b9 2220 cl = ira_pressure_classes[i];
a7dcf969 2221 reg_pressure_info[cl].clobber_increase = 0;
2222 reg_pressure_info[cl].set_increase = 0;
2223 reg_pressure_info[cl].unused_set_increase = 0;
2224 reg_pressure_info[cl].change = 0;
2225 }
48e1416a 2226
a7dcf969 2227 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
48e1416a 2228
a7dcf969 2229 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
48e1416a 2230
a7dcf969 2231#ifdef AUTO_INC_DEC
2232 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2233 if (REG_NOTE_KIND (link) == REG_INC)
2234 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2235#endif
2236
2237 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2238 if (REG_NOTE_KIND (link) == REG_DEAD)
2239 mark_reg_death (XEXP (link, 0));
48e1416a 2240
66d9a7b9 2241 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
a7dcf969 2242 pressure_info
2243 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
b30b031c 2244 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2245 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2246 * sizeof (int), 1);
66d9a7b9 2247 for (i = 0; i < ira_pressure_classes_num; i++)
a7dcf969 2248 {
66d9a7b9 2249 cl = ira_pressure_classes[i];
a7dcf969 2250 pressure_info[i].clobber_increase
2251 = reg_pressure_info[cl].clobber_increase;
2252 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2253 pressure_info[i].unused_set_increase
2254 = reg_pressure_info[cl].unused_set_increase;
2255 pressure_info[i].change = reg_pressure_info[cl].change;
2256 }
2257}
2258
2259
6adce0fb 2260\f
e1ab7874 2261
2262/* Internal variable for sched_analyze_[12] () functions.
2263 If it is nonzero, this means that sched_analyze_[12] is looking
2264 at the outermost SET. */
2265static bool can_start_lhs_rhs_p;
2266
48e1416a 2267/* Extend reg info for the deps context DEPS given that
e1ab7874 2268 we have just generated a register numbered REGNO. */
2269static void
68e419a1 2270extend_deps_reg_info (struct deps_desc *deps, int regno)
e1ab7874 2271{
2272 int max_regno = regno + 1;
2273
2274 gcc_assert (!reload_completed);
2275
2276 /* In a readonly context, it would not hurt to extend info,
2277 but it should not be needed. */
2278 if (reload_completed && deps->readonly)
2279 {
2280 deps->max_reg = max_regno;
2281 return;
2282 }
2283
2284 if (max_regno > deps->max_reg)
2285 {
48e1416a 2286 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
e1ab7874 2287 max_regno);
2288 memset (&deps->reg_last[deps->max_reg],
48e1416a 2289 0, (max_regno - deps->max_reg)
e1ab7874 2290 * sizeof (struct deps_reg));
2291 deps->max_reg = max_regno;
2292 }
2293}
2294
2295/* Extends REG_INFO_P if needed. */
2296void
2297maybe_extend_reg_info_p (void)
2298{
2299 /* Extend REG_INFO_P, if needed. */
2300 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2301 {
2302 size_t new_reg_info_p_size = max_regno + 128;
2303
2304 gcc_assert (!reload_completed && sel_sched_p ());
2305
2306 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2307 new_reg_info_p_size,
2308 reg_info_p_size,
2309 sizeof (*reg_info_p));
2310 reg_info_p_size = new_reg_info_p_size;
2311 }
2312}
2313
8ad6f16d 2314/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2315 The type of the reference is specified by REF and can be SET,
2316 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2317
2318static void
3754d046 2319sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
b24ef467 2320 enum rtx_code ref, rtx_insn *insn)
8ad6f16d 2321{
e1ab7874 2322 /* We could emit new pseudos in renaming. Extend the reg structures. */
2323 if (!reload_completed && sel_sched_p ()
2324 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2325 extend_deps_reg_info (deps, regno);
2326
2327 maybe_extend_reg_info_p ();
2328
8ad6f16d 2329 /* A hard reg in a wide mode may really be multiple registers.
2330 If so, mark all of them just like the first. */
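  /* For example (a sketch, assuming a 32-bit target where
     hard_regno_nregs[0][DImode] == 2): a SET of (reg:DI 0) calls
     note_reg_set (0) and note_reg_set (1), marking both halves.  */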
2331 if (regno < FIRST_PSEUDO_REGISTER)
2332 {
2333 int i = hard_regno_nregs[regno][mode];
2334 if (ref == SET)
2335 {
2336 while (--i >= 0)
e1ab7874 2337 note_reg_set (regno + i);
8ad6f16d 2338 }
2339 else if (ref == USE)
2340 {
2341 while (--i >= 0)
e1ab7874 2342 note_reg_use (regno + i);
8ad6f16d 2343 }
2344 else
2345 {
2346 while (--i >= 0)
e1ab7874 2347 note_reg_clobber (regno + i);
8ad6f16d 2348 }
2349 }
2350
2351 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2352 it does not reload. Ignore these as they have served their
2353 purpose already. */
2354 else if (regno >= deps->max_reg)
2355 {
2356 enum rtx_code code = GET_CODE (PATTERN (insn));
2357 gcc_assert (code == USE || code == CLOBBER);
2358 }
2359
2360 else
2361 {
2362 if (ref == SET)
e1ab7874 2363 note_reg_set (regno);
8ad6f16d 2364 else if (ref == USE)
e1ab7874 2365 note_reg_use (regno);
8ad6f16d 2366 else
e1ab7874 2367 note_reg_clobber (regno);
8ad6f16d 2368
2369 /* Pseudos that are REG_EQUIV to something may be replaced
2370 by that during reloading. We need only add dependencies for
2371 the address in the REG_EQUIV note. */
2372 if (!reload_completed && get_reg_known_equiv_p (regno))
2373 {
2374 rtx t = get_reg_known_value (regno);
2375 if (MEM_P (t))
2376 sched_analyze_2 (deps, XEXP (t, 0), insn);
2377 }
2378
2379 /* Don't let it cross a call after scheduling if it doesn't
2380 already cross one. */
2381 if (REG_N_CALLS_CROSSED (regno) == 0)
2382 {
9845d120 2383 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
8ad6f16d 2384 deps->sched_before_next_call
2385 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2386 else
2387 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 2388 REG_DEP_ANTI, false);
8ad6f16d 2389 }
2390 }
2391}
2392
6adce0fb 2393/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2394 rtx, X, creating all dependencies generated by the write to the
2395 destination of X, and reads of everything mentioned. */
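
/* For instance (an illustrative sketch): given
       (set (mem:SI (reg:SI 100)) (reg:SI 101))
   sched_analyze_1 records the write through (reg:SI 100) -- adding
   anti and output dependencies against pending reads and writes that
   may alias -- and then calls sched_analyze_2 on the address
   (reg:SI 100) and on SET_SRC (reg:SI 101) to record the uses.  */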
2396
2397static void
b24ef467 2398sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2399{
19cb6b50 2400 rtx dest = XEXP (x, 0);
6adce0fb 2401 enum rtx_code code = GET_CODE (x);
e1ab7874 2402 bool cslr_p = can_start_lhs_rhs_p;
6adce0fb 2403
e1ab7874 2404 can_start_lhs_rhs_p = false;
2405
2406 gcc_assert (dest);
6adce0fb 2407 if (dest == 0)
2408 return;
2409
e1ab7874 2410 if (cslr_p && sched_deps_info->start_lhs)
2411 sched_deps_info->start_lhs (dest);
2412
4b303227 2413 if (GET_CODE (dest) == PARALLEL)
6adce0fb 2414 {
19cb6b50 2415 int i;
216b2683 2416
6adce0fb 2417 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
4b303227 2418 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2419 sched_analyze_1 (deps,
2420 gen_rtx_CLOBBER (VOIDmode,
2421 XEXP (XVECEXP (dest, 0, i), 0)),
2422 insn);
216b2683 2423
e1ab7874 2424 if (cslr_p && sched_deps_info->finish_lhs)
2425 sched_deps_info->finish_lhs ();
2426
2427 if (code == SET)
2428 {
2429 can_start_lhs_rhs_p = cslr_p;
2430
2431 sched_analyze_2 (deps, SET_SRC (x), insn);
2432
2433 can_start_lhs_rhs_p = false;
2434 }
2435
6adce0fb 2436 return;
2437 }
2438
2439 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
476d094d 2440 || GET_CODE (dest) == ZERO_EXTRACT)
6adce0fb 2441 {
b69139c7 2442 if (GET_CODE (dest) == STRICT_LOW_PART
2443 || GET_CODE (dest) == ZERO_EXTRACT
e011eba9 2444 || df_read_modify_subreg_p (dest))
b69139c7 2445 {
60b8c5b3 2446 /* These both read and modify the result. We must handle
b69139c7 2447 them as writes to get proper dependencies for following
2448 instructions. We must handle them as reads to get proper
2449 dependencies from this to previous instructions.
6473f3f4 2450 Thus we need to call sched_analyze_2. */
b69139c7 2451
60b8c5b3 2452 sched_analyze_2 (deps, XEXP (dest, 0), insn);
b69139c7 2453 }
476d094d 2454 if (GET_CODE (dest) == ZERO_EXTRACT)
6adce0fb 2455 {
2456 /* The second and third arguments are values read by this insn. */
2457 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2458 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2459 }
2460 dest = XEXP (dest, 0);
2461 }
2462
8ad4c111 2463 if (REG_P (dest))
6adce0fb 2464 {
8ad6f16d 2465 int regno = REGNO (dest);
3754d046 2466 machine_mode mode = GET_MODE (dest);
8ad6f16d 2467
2468 sched_analyze_reg (deps, regno, mode, code, insn);
6adce0fb 2469
1541bfd5 2470#ifdef STACK_REGS
2471 /* Treat all writes to a stack register as modifying the TOS. */
2472 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2473 {
8ad6f16d 2474 /* Avoid analyzing the same register twice. */
2475 if (regno != FIRST_STACK_REG)
2476 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
a7dcf969 2477
d82cf2b2 2478 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2479 FIRST_STACK_REG);
1541bfd5 2480 }
2481#endif
6adce0fb 2482 }
e16ceb8e 2483 else if (MEM_P (dest))
6adce0fb 2484 {
2485 /* Writing memory. */
503f2817 2486 rtx t = dest;
2487
e1ab7874 2488 if (sched_deps_info->use_cselib)
503f2817 2489 {
3754d046 2490 machine_mode address_mode = get_address_mode (dest);
98155838 2491
503f2817 2492 t = shallow_copy_rtx (dest);
1f864115 2493 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2494 GET_MODE (t), insn);
2af89801 2495 XEXP (t, 0)
2496 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2497 insn);
503f2817 2498 }
faa80ce8 2499 t = canon_rtx (t);
6adce0fb 2500
e1ab7874 2501 /* Pending lists can't get larger with a readonly context. */
2502 if (!deps->readonly
2503 && ((deps->pending_read_list_length + deps->pending_write_list_length)
1deb811a 2504 >= MAX_PENDING_LIST_LENGTH))
6adce0fb 2505 {
2506 /* Flush all pending reads and writes to prevent the pending lists
2507 from getting any larger. Insn scheduling runs too slowly when
85de291e 2508 these lists get long. When compiling GCC with itself,
6adce0fb 2509 this flush occurs 8 times for sparc, and 10 times for m88k using
85de291e 2510 the default value of 32. */
5deaeb50 2511 flush_pending_lists (deps, insn, false, true);
6adce0fb 2512 }
2513 else
2514 {
355af201 2515 rtx_insn_list *pending;
2516 rtx_expr_list *pending_mem;
6adce0fb 2517
2518 pending = deps->pending_read_insns;
2519 pending_mem = deps->pending_read_mems;
2520 while (pending)
2521 {
355af201 2522 if (anti_dependence (pending_mem->element (), t)
2523 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2524 note_mem_dep (t, pending_mem->element (), pending->insn (),
e1ab7874 2525 DEP_ANTI);
6adce0fb 2526
355af201 2527 pending = pending->next ();
2528 pending_mem = pending_mem->next ();
6adce0fb 2529 }
2530
2531 pending = deps->pending_write_insns;
2532 pending_mem = deps->pending_write_mems;
2533 while (pending)
2534 {
355af201 2535 if (output_dependence (pending_mem->element (), t)
2536 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2537 note_mem_dep (t, pending_mem->element (),
2538 pending->insn (),
e1ab7874 2539 DEP_OUTPUT);
6adce0fb 2540
355af201 2541 pending = pending->next ();
2542 pending_mem = pending_mem->next ();
6adce0fb 2543 }
2544
e228dd5d 2545 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
d452a169 2546 REG_DEP_ANTI, true);
effd1640 2547 add_dependence_list (insn, deps->pending_jump_insns, 1,
d452a169 2548 REG_DEP_CONTROL, true);
6adce0fb 2549
e1ab7874 2550 if (!deps->readonly)
2551 add_insn_mem_dependence (deps, false, insn, dest);
6adce0fb 2552 }
2553 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2554 }
2555
e1ab7874 2556 if (cslr_p && sched_deps_info->finish_lhs)
2557 sched_deps_info->finish_lhs ();
2558
6adce0fb 2559 /* Analyze reads. */
2560 if (GET_CODE (x) == SET)
e1ab7874 2561 {
2562 can_start_lhs_rhs_p = cslr_p;
2563
2564 sched_analyze_2 (deps, SET_SRC (x), insn);
2565
2566 can_start_lhs_rhs_p = false;
2567 }
6adce0fb 2568}
2569
2570/* Analyze the uses of memory and registers in rtx X in INSN. */
6adce0fb 2571static void
b24ef467 2572sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2573{
19cb6b50 2574 int i;
2575 int j;
2576 enum rtx_code code;
2577 const char *fmt;
e1ab7874 2578 bool cslr_p = can_start_lhs_rhs_p;
2579
2580 can_start_lhs_rhs_p = false;
6adce0fb 2581
e1ab7874 2582 gcc_assert (x);
6adce0fb 2583 if (x == 0)
2584 return;
2585
e1ab7874 2586 if (cslr_p && sched_deps_info->start_rhs)
2587 sched_deps_info->start_rhs (x);
2588
6adce0fb 2589 code = GET_CODE (x);
2590
2591 switch (code)
2592 {
0349edce 2593 CASE_CONST_ANY:
6adce0fb 2594 case SYMBOL_REF:
2595 case CONST:
2596 case LABEL_REF:
8990536a 2597 /* Ignore constants. */
e1ab7874 2598 if (cslr_p && sched_deps_info->finish_rhs)
2599 sched_deps_info->finish_rhs ();
2600
6adce0fb 2601 return;
2602
6adce0fb 2603 case CC0:
693c9f42 2604 if (!HAVE_cc0)
2605 gcc_unreachable ();
2606
6adce0fb 2607 /* User of CC0 depends on immediately preceding insn. */
828f2394 2608 SCHED_GROUP_P (insn) = 1;
f61f0d0c 2609 /* Don't move CC0 setter to another block (it can set up the
2610 same flag for previous CC0 users which is safe). */
2611 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
e1ab7874 2612
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2615
6adce0fb 2616 return;
6adce0fb 2617
2618 case REG:
2619 {
6adce0fb 2620 int regno = REGNO (x);
3754d046 2621 machine_mode mode = GET_MODE (x);
8ad6f16d 2622
2623 sched_analyze_reg (deps, regno, mode, USE, insn);
1541bfd5 2624
2625#ifdef STACK_REGS
2626 /* Treat all reads of a stack register as modifying the TOS. */
2627 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2628 {
8ad6f16d 2629 /* Avoid analyzing the same register twice. */
2630 if (regno != FIRST_STACK_REG)
2631 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2632 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
1541bfd5 2633 }
2634#endif
e1ab7874 2635
2636 if (cslr_p && sched_deps_info->finish_rhs)
2637 sched_deps_info->finish_rhs ();
2638
6adce0fb 2639 return;
2640 }
2641
2642 case MEM:
2643 {
2644 /* Reading memory. */
f9a00e9e 2645 rtx_insn_list *u;
355af201 2646 rtx_insn_list *pending;
2647 rtx_expr_list *pending_mem;
503f2817 2648 rtx t = x;
6adce0fb 2649
e1ab7874 2650 if (sched_deps_info->use_cselib)
503f2817 2651 {
3754d046 2652 machine_mode address_mode = get_address_mode (t);
98155838 2653
503f2817 2654 t = shallow_copy_rtx (t);
1f864115 2655 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2656 GET_MODE (t), insn);
2af89801 2657 XEXP (t, 0)
2658 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2659 insn);
503f2817 2660 }
6adce0fb 2661
c971b445 2662 if (!DEBUG_INSN_P (insn))
6adce0fb 2663 {
c971b445 2664 t = canon_rtx (t);
2665 pending = deps->pending_read_insns;
2666 pending_mem = deps->pending_read_mems;
2667 while (pending)
2668 {
355af201 2669 if (read_dependence (pending_mem->element (), t)
c971b445 2670 && ! sched_insns_conditions_mutex_p (insn,
355af201 2671 pending->insn ()))
2672 note_mem_dep (t, pending_mem->element (),
2673 pending->insn (),
c971b445 2674 DEP_ANTI);
2675
355af201 2676 pending = pending->next ();
2677 pending_mem = pending_mem->next ();
c971b445 2678 }
6adce0fb 2679
c971b445 2680 pending = deps->pending_write_insns;
2681 pending_mem = deps->pending_write_mems;
2682 while (pending)
e1ab7874 2683 {
355af201 2684 if (true_dependence (pending_mem->element (), VOIDmode, t)
c971b445 2685 && ! sched_insns_conditions_mutex_p (insn,
355af201 2686 pending->insn ()))
2687 note_mem_dep (t, pending_mem->element (),
2688 pending->insn (),
c971b445 2689 sched_deps_info->generate_spec_deps
2690 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2691
355af201 2692 pending = pending->next ();
2693 pending_mem = pending_mem->next ();
c971b445 2694 }
e1ab7874 2695
f9a00e9e 2696 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2697 add_dependence (insn, u->insn (), REG_DEP_ANTI);
effd1640 2698
f9a00e9e 2699 for (u = deps->pending_jump_insns; u; u = u->next ())
effd1640 2700 if (deps_may_trap_p (x))
2701 {
2702 if ((sched_deps_info->generate_spec_deps)
2703 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2704 {
2705 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2706 MAX_DEP_WEAK);
2707
f9a00e9e 2708 note_dep (u->insn (), ds);
effd1640 2709 }
2710 else
f9a00e9e 2711 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
effd1640 2712 }
e1ab7874 2713 }
6adce0fb 2714
2715 /* Always add these dependencies to pending_reads, since
2716 this insn may be followed by a write. */
c2066d40 2717 if (!deps->readonly)
2718 {
2719 if ((deps->pending_read_list_length
2720 + deps->pending_write_list_length)
1deb811a 2721 >= MAX_PENDING_LIST_LENGTH
c2066d40 2722 && !DEBUG_INSN_P (insn))
2723 flush_pending_lists (deps, insn, true, true);
2724 add_insn_mem_dependence (deps, true, insn, x);
2725 }
6adce0fb 2726
6adce0fb 2727 sched_analyze_2 (deps, XEXP (x, 0), insn);
e1ab7874 2728
2729 if (cslr_p && sched_deps_info->finish_rhs)
2730 sched_deps_info->finish_rhs ();
2731
6adce0fb 2732 return;
2733 }
2734
2735 /* Force pending stores to memory in case a trap handler needs them. */
2736 case TRAP_IF:
5deaeb50 2737 flush_pending_lists (deps, insn, true, false);
6adce0fb 2738 break;
2739
6540132c 2740 case PREFETCH:
2741 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2742 reg_pending_barrier = TRUE_BARRIER;
42fb263f 2743 /* A prefetch insn contains addresses only.  So if the prefetch
2744 address has no registers, there will be no dependencies on
2745 the prefetch insn.  That is wrong from the point of view of
2746 result-code correctness, since such a prefetch can be moved
2747 below a jump insn, which usually generates a MOVE_BARRIER
2748 that keeps insns containing registers or memories from moving
2749 through the barrier.  It is also wrong from the point of view
2750 of generated-code performance, since a prefetch without
2751 dependencies tends to be issued later instead of earlier.  It
2752 is hard to generate accurate dependencies for prefetch insns,
2753 as a prefetch has only the start address, but it is better to
2754 have something than nothing. */
a2e20fd2 2755 if (!deps->readonly)
12e51a1d 2756 {
2757 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2758 if (sched_deps_info->use_cselib)
2759 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2760 add_insn_mem_dependence (deps, true, insn, x);
2761 }
6540132c 2762 break;
2763
e5e67ff9 2764 case UNSPEC_VOLATILE:
2765 flush_pending_lists (deps, insn, true, true);
2766 /* FALLTHRU */
2767
6adce0fb 2768 case ASM_OPERANDS:
2769 case ASM_INPUT:
6adce0fb 2770 {
6adce0fb 2771 /* Traditional and volatile asm instructions must be considered to use
2772 and clobber all hard registers, all pseudo-registers and all of
2773 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2774
2775 Consider for instance a volatile asm that changes the fpu rounding
2776 mode. An insn should not be moved across this even if it only uses
2777 pseudo-regs because it might give an incorrectly rounded result. */
5185189e 2778 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2779 && !DEBUG_INSN_P (insn))
c14a239f 2780 reg_pending_barrier = TRUE_BARRIER;
6adce0fb 2781
2782 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2783 We cannot just fall through here, since then we would be confused
2784 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2785 a traditional asm, unlike its normal usage. */
2786
2787 if (code == ASM_OPERANDS)
2788 {
2789 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2790 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
e1ab7874 2791
2792 if (cslr_p && sched_deps_info->finish_rhs)
2793 sched_deps_info->finish_rhs ();
2794
6adce0fb 2795 return;
2796 }
2797 break;
2798 }
2799
2800 case PRE_DEC:
2801 case POST_DEC:
2802 case PRE_INC:
2803 case POST_INC:
2804 /* These both read and modify the result. We must handle them as writes
2805 to get proper dependencies for following instructions. We must handle
2806 them as reads to get proper dependencies from this to previous
2807 instructions. Thus we need to pass them to both sched_analyze_1
2808 and sched_analyze_2. We must call sched_analyze_2 first in order
2809 to get the proper antecedent for the read. */
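    /* E.g. (a sketch) for (post_inc:SI (reg:SI 1)) inside a MEM:
       sched_analyze_2 records the use of (reg:SI 1) as an address,
       and sched_analyze_1 then records the increment as a write of
       (reg:SI 1), so the register is seen as both read and set.  */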
2810 sched_analyze_2 (deps, XEXP (x, 0), insn);
2811 sched_analyze_1 (deps, x, insn);
e1ab7874 2812
2813 if (cslr_p && sched_deps_info->finish_rhs)
2814 sched_deps_info->finish_rhs ();
2815
6adce0fb 2816 return;
2817
2818 case POST_MODIFY:
2819 case PRE_MODIFY:
2820 /* op0 = op0 + op1 */
2821 sched_analyze_2 (deps, XEXP (x, 0), insn);
2822 sched_analyze_2 (deps, XEXP (x, 1), insn);
2823 sched_analyze_1 (deps, x, insn);
e1ab7874 2824
2825 if (cslr_p && sched_deps_info->finish_rhs)
2826 sched_deps_info->finish_rhs ();
2827
6adce0fb 2828 return;
2829
2830 default:
2831 break;
2832 }
2833
2834 /* Other cases: walk the insn. */
2835 fmt = GET_RTX_FORMAT (code);
2836 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2837 {
2838 if (fmt[i] == 'e')
2839 sched_analyze_2 (deps, XEXP (x, i), insn);
2840 else if (fmt[i] == 'E')
2841 for (j = 0; j < XVECLEN (x, i); j++)
2842 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2843 }
e1ab7874 2844
2845 if (cslr_p && sched_deps_info->finish_rhs)
2846 sched_deps_info->finish_rhs ();
6adce0fb 2847}
2848
6da3491c 2849/* Try to group two fusible insns together to prevent the scheduler
b0aa049b 2850 from scheduling them apart. */
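
/* For example (a sketch; which pairs fuse is target-specific, via
   targetm.sched.macro_fusion_pair_p):

       i1: (set (reg:CC flags) (compare ...))
       i2: conditional jump testing (reg:CC flags)

   If the target reports (i1, i2) as a fusible pair, i2 gets
   SCHED_GROUP_P set and the scheduler keeps it glued to i1.  */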
55e7138c 2851
2852static void
18282db0 2853sched_macro_fuse_insns (rtx_insn *insn)
55e7138c 2854{
18282db0 2855 rtx_insn *prev;
55e7138c 2856
b0aa049b 2857 if (any_condjump_p (insn))
2858 {
2859 unsigned int condreg1, condreg2;
2860 rtx cc_reg_1;
2861 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2862 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2863 prev = prev_nonnote_nondebug_insn (insn);
2864 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2865 || !prev
2866 || !modified_in_p (cc_reg_1, prev))
2867 return;
2868 }
2869 else
2870 {
2871 rtx insn_set = single_set (insn);
55e7138c 2872
b0aa049b 2873 prev = prev_nonnote_nondebug_insn (insn);
2874 if (!prev
2875 || !insn_set
524474b3 2876 || !single_set (prev))
b0aa049b 2877 return;
55e7138c 2878
b0aa049b 2879 }
2880
2881 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2882 SCHED_GROUP_P (insn) = 1;
55e7138c 2883
55e7138c 2884}
2885
6adce0fb 2886/* Analyze an INSN with pattern X to find all dependencies. */
6adce0fb 2887static void
b24ef467 2888sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
6adce0fb 2889{
19cb6b50 2890 RTX_CODE code = GET_CODE (x);
6adce0fb 2891 rtx link;
4f917ffe 2892 unsigned i;
8c97cf13 2893 reg_set_iterator rsi;
6adce0fb 2894
a7dcf969 2895 if (! reload_completed)
2896 {
2897 HARD_REG_SET temp;
2898
2899 extract_insn (insn);
8eaaac4d 2900 preprocess_constraints (insn);
a7dcf969 2901 ira_implicitly_set_insn_hard_regs (&temp);
9dab02d9 2902 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
a7dcf969 2903 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2904 }
2905
e1ab7874 2906 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2907 && code == SET);
2908
55e7138c 2909 /* Group compare and branch insns for macro-fusion. */
2910 if (targetm.sched.macro_fusion_p
2911 && targetm.sched.macro_fusion_p ())
b0aa049b 2912 sched_macro_fuse_insns (insn);
55e7138c 2913
326d0c19 2914 if (may_trap_p (x))
9d75589a 2915 /* Avoid moving trapping instructions across function calls that might
326d0c19 2916 not always return. */
2917 add_dependence_list (insn, deps->last_function_call_may_noreturn,
d452a169 2918 1, REG_DEP_ANTI, true);
326d0c19 2919
0741df1e 2920 /* We must avoid creating a situation in which two successors of the
2921 current block have different unwind info after scheduling. If at any
2922 point the two paths re-join this leads to incorrect unwind info. */
2923 /* ??? There are certain situations involving a forced frame pointer in
2924 which, with extra effort, we could fix up the unwind info at a later
2925 CFG join. However, it seems better to notice these cases earlier
2926 during prologue generation and avoid marking the frame pointer setup
2927 as frame-related at all. */
2928 if (RTX_FRAME_RELATED_P (insn))
420a9b66 2929 {
2930 /* Make sure prologue insn is scheduled before next jump. */
2931 deps->sched_before_next_jump
2932 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2933
2934 /* Make sure epilogue insn is scheduled after preceding jumps. */
d452a169 2935 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2936 true);
420a9b66 2937 }
0741df1e 2938
6adce0fb 2939 if (code == COND_EXEC)
2940 {
2941 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2942
2943 /* ??? Should be recording conditions so we reduce the number of
424da949 2944 false dependencies. */
6adce0fb 2945 x = COND_EXEC_CODE (x);
2946 code = GET_CODE (x);
2947 }
2948 if (code == SET || code == CLOBBER)
a35ad173 2949 {
2950 sched_analyze_1 (deps, x, insn);
2951
2952 /* Bare clobber insns are used for letting life analysis, reg-stack
2953 and others know that a value is dead. Depend on the last call
2954 instruction so that reg-stack won't get confused. */
2955 if (code == CLOBBER)
a7dcf969 2956 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 2957 REG_DEP_OUTPUT, true);
a35ad173 2958 }
6adce0fb 2959 else if (code == PARALLEL)
2960 {
4f917ffe 2961 for (i = XVECLEN (x, 0); i--;)
6adce0fb 2962 {
2963 rtx sub = XVECEXP (x, 0, i);
2964 code = GET_CODE (sub);
2965
2966 if (code == COND_EXEC)
2967 {
2968 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2969 sub = COND_EXEC_CODE (sub);
2970 code = GET_CODE (sub);
2971 }
2972 if (code == SET || code == CLOBBER)
2973 sched_analyze_1 (deps, sub, insn);
2974 else
2975 sched_analyze_2 (deps, sub, insn);
2976 }
2977 }
2978 else
2979 sched_analyze_2 (deps, x, insn);
2980
2981 /* Mark registers CLOBBERED or used by called function. */
6d7dc5b9 2982 if (CALL_P (insn))
9239aee6 2983 {
2984 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2985 {
2986 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2987 sched_analyze_1 (deps, XEXP (link, 0), insn);
c8010b80 2988 else if (GET_CODE (XEXP (link, 0)) != SET)
9239aee6 2989 sched_analyze_2 (deps, XEXP (link, 0), insn);
2990 }
e703e1fc 2991 /* Don't schedule anything after a tail call; the tail call needs
2992 to use at least all call-saved registers. */
2993 if (SIBLING_CALL_P (insn))
2994 reg_pending_barrier = TRUE_BARRIER;
2995 else if (find_reg_note (insn, REG_SETJMP, NULL))
c14a239f 2996 reg_pending_barrier = MOVE_BARRIER;
9239aee6 2997 }
6adce0fb 2998
6d7dc5b9 2999 if (JUMP_P (insn))
d6141c0c 3000 {
9ed997be 3001 rtx_insn *next = next_nonnote_nondebug_insn (insn);
6d7dc5b9 3002 if (next && BARRIER_P (next))
ca93e19d 3003 reg_pending_barrier = MOVE_BARRIER;
d6141c0c 3004 else
3005 {
355af201 3006 rtx_insn_list *pending;
3007 rtx_expr_list *pending_mem;
31c5c470 3008
e1ab7874 3009 if (sched_deps_info->compute_jump_reg_dependencies)
3010 {
effd1640 3011 (*sched_deps_info->compute_jump_reg_dependencies)
3012 (insn, reg_pending_control_uses);
e1ab7874 3013
e1ab7874 3014 /* Make latency of jump equal to 0 by using anti-dependence. */
effd1640 3015 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
e1ab7874 3016 {
3017 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3018 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3019 false);
a7dcf969 3020 add_dependence_list (insn, reg_last->implicit_sets,
d452a169 3021 0, REG_DEP_ANTI, false);
e1ab7874 3022 add_dependence_list (insn, reg_last->clobbers, 0,
d452a169 3023 REG_DEP_ANTI, false);
e1ab7874 3024 }
e1ab7874 3025 }
097ef0af 3026
9807cfe1 3027 /* All memory writes and volatile reads must happen before the
3028 jump. Non-volatile reads must happen before the jump iff
3029 their result feeds a register in the use mask computed above. */
3030
097ef0af 3031 pending = deps->pending_write_insns;
3032 pending_mem = deps->pending_write_mems;
3033 while (pending)
3034 {
355af201 3035 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3036 add_dependence (insn, pending->insn (),
b24ef467 3037 REG_DEP_OUTPUT);
355af201 3038 pending = pending->next ();
3039 pending_mem = pending_mem->next ();
9807cfe1 3040 }
097ef0af 3041
9807cfe1 3042 pending = deps->pending_read_insns;
3043 pending_mem = deps->pending_read_mems;
3044 while (pending)
3045 {
355af201 3046 if (MEM_VOLATILE_P (pending_mem->element ())
3047 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3048 add_dependence (insn, pending->insn (),
b24ef467 3049 REG_DEP_OUTPUT);
355af201 3050 pending = pending->next ();
3051 pending_mem = pending_mem->next ();
097ef0af 3052 }
3053
e228dd5d 3054 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
d452a169 3055 REG_DEP_ANTI, true);
effd1640 3056 add_dependence_list (insn, deps->pending_jump_insns, 1,
d452a169 3057 REG_DEP_ANTI, true);
d6141c0c 3058 }
d6141c0c 3059 }
3060
f787dd2b 3061 /* If this instruction can throw an exception, then moving it changes
40734805 3062 where block boundaries fall. This is mighty confusing elsewhere.
88a00894 3063 Therefore, prevent such an instruction from being moved. Same for
3064 non-jump instructions that define block boundaries.
3065 ??? Unclear whether this is still necessary in EBB mode. If not,
3066 add_branch_dependences should be adjusted for RGN mode instead. */
3067 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3068 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
c14a239f 3069 reg_pending_barrier = MOVE_BARRIER;
f787dd2b 3070
b30b031c 3071 if (sched_pressure != SCHED_PRESSURE_NONE)
a7dcf969 3072 {
3073 setup_insn_reg_uses (deps, insn);
c15d7785 3074 init_insn_reg_pressure_info (insn);
a7dcf969 3075 }
3076
9845d120 3077 /* Add register dependencies for insn. */
3078 if (DEBUG_INSN_P (insn))
3079 {
b24ef467 3080 rtx_insn *prev = deps->last_debug_insn;
f9a00e9e 3081 rtx_insn_list *u;
9845d120 3082
3083 if (!deps->readonly)
3084 deps->last_debug_insn = insn;
3085
3086 if (prev)
3087 add_dependence (insn, prev, REG_DEP_ANTI);
3088
3089 add_dependence_list (insn, deps->last_function_call, 1,
d452a169 3090 REG_DEP_ANTI, false);
9845d120 3091
d452a169 3092 if (!sel_sched_p ())
f9a00e9e 3093 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3094 add_dependence (insn, u->insn (), REG_DEP_ANTI);
9845d120 3095
3096 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3097 {
3098 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3099 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
effd1640 3100 /* There's no point in making REG_DEP_CONTROL dependencies for
3101 debug insns. */
d452a169 3102 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3103 false);
c971b445 3104
3105 if (!deps->readonly)
3106 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
9845d120 3107 }
3108 CLEAR_REG_SET (reg_pending_uses);
3109
3110 /* Quite often, a debug insn will refer to stuff in the
3111 previous instruction, but the reason we want this
3112 dependency here is to make sure the scheduler doesn't
3113 gratuitously move a debug insn ahead. This could dirty
3114 DF flags and cause additional analysis that wouldn't have
3115 occurred in compilation without debug insns, and such
3116 additional analysis can modify the generated code. */
3117 prev = PREV_INSN (insn);
3118
3119 if (prev && NONDEBUG_INSN_P (prev))
3120 add_dependence (insn, prev, REG_DEP_ANTI);
3121 }
ca93e19d 3122 else
3123 {
6c2d9e41 3124 regset_head set_or_clobbered;
3125
ca93e19d 3126 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
a7dcf969 3127 {
3128 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3129 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3130 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3131 false);
3132 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3133 false);
48e1416a 3134
a7dcf969 3135 if (!deps->readonly)
3136 {
3137 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3138 reg_last->uses_length++;
3139 }
3140 }
48e1416a 3141
a7dcf969 3142 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3143 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3144 {
3145 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3146 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
a7dcf969 3147 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3148 REG_DEP_ANTI, false);
3149 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3150 false);
48e1416a 3151
a7dcf969 3152 if (!deps->readonly)
3153 {
3154 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3155 reg_last->uses_length++;
3156 }
3157 }
e1ab7874 3158
6c2d9e41 3159 if (targetm.sched.exposed_pipeline)
3160 {
3161 INIT_REG_SET (&set_or_clobbered);
3162 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3163 reg_pending_sets);
3164 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3165 {
3166 struct deps_reg *reg_last = &deps->reg_last[i];
3167 rtx list;
3168 for (list = reg_last->uses; list; list = XEXP (list, 1))
3169 {
3170 rtx other = XEXP (list, 0);
60ba0654 3171 if (INSN_CACHED_COND (other) != const_true_rtx
2ec77a7c 3172 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
60ba0654 3173 INSN_CACHED_COND (other) = const_true_rtx;
6c2d9e41 3174 }
3175 }
3176 }
3177
a7dcf969 3178 /* If the current insn is conditional, we can't free any
3179 of the lists. */
3180 if (sched_has_condition_p (insn))
3181 {
3182 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3183 {
3184 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3185 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3186 false);
a7dcf969 3187 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3188 REG_DEP_ANTI, false);
3189 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3190 false);
effd1640 3191 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3192 REG_DEP_CONTROL, false);
48e1416a 3193
a7dcf969 3194 if (!deps->readonly)
3195 {
3196 reg_last->clobbers
3197 = alloc_INSN_LIST (insn, reg_last->clobbers);
3198 reg_last->clobbers_length++;
3199 }
3200 }
3201 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3202 {
3203 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3204 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3205 false);
a7dcf969 3206 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3207 REG_DEP_ANTI, false);
3208 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3209 false);
3210 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3211 false);
effd1640 3212 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3213 REG_DEP_CONTROL, false);
48e1416a 3214
a7dcf969 3215 if (!deps->readonly)
6aed13f1 3216 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
a7dcf969 3217 }
3218 }
3219 else
3220 {
3221 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3222 {
3223 struct deps_reg *reg_last = &deps->reg_last[i];
1deb811a 3224 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3225 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
a7dcf969 3226 {
3227 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
d452a169 3228 REG_DEP_OUTPUT, false);
a7dcf969 3229 add_dependence_list_and_free (deps, insn,
3230 &reg_last->implicit_sets, 0,
d452a169 3231 REG_DEP_ANTI, false);
a7dcf969 3232 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3233 REG_DEP_ANTI, false);
effd1640 3234 add_dependence_list_and_free (deps, insn,
3235 &reg_last->control_uses, 0,
d452a169 3236 REG_DEP_ANTI, false);
3237 add_dependence_list_and_free (deps, insn,
3238 &reg_last->clobbers, 0,
3239 REG_DEP_OUTPUT, false);
48e1416a 3240
a7dcf969 3241 if (!deps->readonly)
3242 {
3243 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3244 reg_last->clobbers_length = 0;
3245 reg_last->uses_length = 0;
3246 }
3247 }
3248 else
3249 {
d452a169 3250 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3251 false);
a7dcf969 3252 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3253 REG_DEP_ANTI, false);
3254 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3255 false);
effd1640 3256 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3257 REG_DEP_CONTROL, false);
a7dcf969 3258 }
48e1416a 3259
a7dcf969 3260 if (!deps->readonly)
3261 {
3262 reg_last->clobbers_length++;
3263 reg_last->clobbers
3264 = alloc_INSN_LIST (insn, reg_last->clobbers);
3265 }
3266 }
3267 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3268 {
3269 struct deps_reg *reg_last = &deps->reg_last[i];
48e1416a 3270
a7dcf969 3271 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
d452a169 3272 REG_DEP_OUTPUT, false);
a7dcf969 3273 add_dependence_list_and_free (deps, insn,
3274 &reg_last->implicit_sets,
d452a169 3275 0, REG_DEP_ANTI, false);
a7dcf969 3276 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
d452a169 3277 REG_DEP_OUTPUT, false);
a7dcf969 3278 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3279 REG_DEP_ANTI, false);
effd1640 3280 add_dependence_list (insn, reg_last->control_uses, 0,
d452a169 3281 REG_DEP_CONTROL, false);
48e1416a 3282
a7dcf969 3283 if (!deps->readonly)
3284 {
3285 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3286 reg_last->uses_length = 0;
3287 reg_last->clobbers_length = 0;
a7dcf969 3288 }
3289 }
3290 }
effd1640 3291 if (!deps->readonly)
3292 {
3293 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3294 {
3295 struct deps_reg *reg_last = &deps->reg_last[i];
3296 reg_last->control_uses
3297 = alloc_INSN_LIST (insn, reg_last->control_uses);
3298 }
3299 }
ca93e19d 3300 }
3301
a7dcf969 3302 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3303 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3304 {
3305 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3306 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3307 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3308 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3309 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3310 false);
48e1416a 3311
a7dcf969 3312 if (!deps->readonly)
3313 reg_last->implicit_sets
3314 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3315 }
3316
e1ab7874 3317 if (!deps->readonly)
3318 {
3319 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3320 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3321 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
a7dcf969 3322 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3323 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3324 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3325 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
e1ab7874 3326
3327 /* Set up the pending barrier found. */
3328 deps->last_reg_pending_barrier = reg_pending_barrier;
3329 }
ca93e19d 3330
3331 CLEAR_REG_SET (reg_pending_uses);
3332 CLEAR_REG_SET (reg_pending_clobbers);
3333 CLEAR_REG_SET (reg_pending_sets);
effd1640 3334 CLEAR_REG_SET (reg_pending_control_uses);
a7dcf969 3335 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3336 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
ca93e19d 3337
f787dd2b 3338 /* Add dependencies if a scheduling barrier was found. */
5deaeb50 3339 if (reg_pending_barrier)
f787dd2b 3340 {
58ada791 3341 /* In the case of a barrier, most of the added dependencies are
3342 not real, so we use anti-dependence here. */
e1ab7874 3343 if (sched_has_condition_p (insn))
6adce0fb 3344 {
8c97cf13 3345 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
5deaeb50 3346 {
3347 struct deps_reg *reg_last = &deps->reg_last[i];
d452a169 3348 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3349 true);
a7dcf969 3350 add_dependence_list (insn, reg_last->sets, 0,
3351 reg_pending_barrier == TRUE_BARRIER
d452a169 3352 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
a7dcf969 3353 add_dependence_list (insn, reg_last->implicit_sets, 0,
d452a169 3354 REG_DEP_ANTI, true);
a7dcf969 3355 add_dependence_list (insn, reg_last->clobbers, 0,
3356 reg_pending_barrier == TRUE_BARRIER
d452a169 3357 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
8c97cf13 3358 }
5deaeb50 3359 }
3360 else
3361 {
8c97cf13 3362 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
5deaeb50 3363 {
3364 struct deps_reg *reg_last = &deps->reg_last[i];
e1ab7874 3365 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
d452a169 3366 REG_DEP_ANTI, true);
effd1640 3367 add_dependence_list_and_free (deps, insn,
3368 &reg_last->control_uses, 0,
d452a169 3369 REG_DEP_CONTROL, true);
a7dcf969 3370 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3371 reg_pending_barrier == TRUE_BARRIER
d452a169 3372 ? REG_DEP_TRUE : REG_DEP_ANTI,
3373 true);
a7dcf969 3374 add_dependence_list_and_free (deps, insn,
3375 &reg_last->implicit_sets, 0,
d452a169 3376 REG_DEP_ANTI, true);
a7dcf969 3377 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3378 reg_pending_barrier == TRUE_BARRIER
d452a169 3379 ? REG_DEP_TRUE : REG_DEP_ANTI,
3380 true);
6adce0fb 3381
e1ab7874 3382 if (!deps->readonly)
3383 {
3384 reg_last->uses_length = 0;
3385 reg_last->clobbers_length = 0;
3386 }
3387 }
4164e76b 3388 }
5deaeb50 3389
e1ab7874 3390 if (!deps->readonly)
3391 for (i = 0; i < (unsigned)deps->max_reg; i++)
3392 {
3393 struct deps_reg *reg_last = &deps->reg_last[i];
3394 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3395 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3396 }
3397
f22426d2 3398 /* Don't flush pending lists on speculative checks for
e965825b 3399 selective scheduling. */
3400 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
e1ab7874 3401 flush_pending_lists (deps, insn, true, true);
48e1416a 3402
c14a239f 3403 reg_pending_barrier = NOT_A_BARRIER;
749c6f58 3404 }
6adce0fb 3405
3406 /* If a post-call group is still open, see if it should remain so.
3407 This insn must be a simple move of a hard reg to a pseudo or
3408 vice-versa.
3409
ed5527ca 3410 We must avoid moving these insns for correctness on targets
3411 with small register classes, and for special registers like
6adce0fb 3412 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3413 hard regs for all targets. */
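
   /* E.g. (illustrative): after a call, a copy such as
          (set (reg:SI 100) (reg:SI 0))
      that moves the hard return-value register into a pseudo stays in
      the post-call group, so the scheduler cannot separate it from
      the call.  */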
3414
3415 if (deps->in_post_call_group_p)
3416 {
3417 rtx tmp, set = single_set (insn);
3418 int src_regno, dest_regno;
3419
3420 if (set == NULL)
9845d120 3421 {
3422 if (DEBUG_INSN_P (insn))
3423 /* We don't want to mark debug insns as part of the same
3424 sched group. We know they really aren't, but if we use
3425 debug insns to tell that a call group is over, we'll
3426 get different code if debug insns are not there and
3427 instructions that follow seem like they should be part
3428 of the call group.
3429
fdb1b2b1 3430 Also, if we did, chain_to_prev_insn would move the
9845d120 3431 deps of the debug insn to the call insn, modifying
3432 non-debug post-dependency counts of the debug insn
3433 dependencies and otherwise messing with the scheduling
3434 order.
3435
3436 Instead, let such debug insns be scheduled freely, but
3437 keep the call group open in case there are insns that
3438 should be part of it afterwards. Since we grant debug
3439 insns higher priority than even sched group insns, it
3440 will all turn out all right. */
3441 goto debug_dont_end_call_group;
3442 else
3443 goto end_call_group;
3444 }
6adce0fb 3445
3446 tmp = SET_DEST (set);
3447 if (GET_CODE (tmp) == SUBREG)
3448 tmp = SUBREG_REG (tmp);
8ad4c111 3449 if (REG_P (tmp))
6adce0fb 3450 dest_regno = REGNO (tmp);
3451 else
3452 goto end_call_group;
3453
3454 tmp = SET_SRC (set);
3455 if (GET_CODE (tmp) == SUBREG)
3456 tmp = SUBREG_REG (tmp);
0cbad263 3457 if ((GET_CODE (tmp) == PLUS
3458 || GET_CODE (tmp) == MINUS)
8ad4c111 3459 && REG_P (XEXP (tmp, 0))
0cbad263 3460 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3461 && dest_regno == STACK_POINTER_REGNUM)
3462 src_regno = STACK_POINTER_REGNUM;
8ad4c111 3463 else if (REG_P (tmp))
6adce0fb 3464 src_regno = REGNO (tmp);
3465 else
3466 goto end_call_group;
3467
3468 if (src_regno < FIRST_PSEUDO_REGISTER
3469 || dest_regno < FIRST_PSEUDO_REGISTER)
3470 {
e1ab7874 3471 if (!deps->readonly
3472 && deps->in_post_call_group_p == post_call_initial)
828f2394 3473 deps->in_post_call_group_p = post_call;
3474
48e1416a 3475 if (!sel_sched_p () || sched_emulate_haifa_p)
e1ab7874 3476 {
3477 SCHED_GROUP_P (insn) = 1;
3478 CANT_MOVE (insn) = 1;
3479 }
6adce0fb 3480 }
3481 else
3482 {
3483 end_call_group:
e1ab7874 3484 if (!deps->readonly)
3485 deps->in_post_call_group_p = not_post_call;
6adce0fb 3486 }
3487 }
828f2394 3488
9845d120 3489 debug_dont_end_call_group:
93f6b030 3490 if ((current_sched_info->flags & DO_SPECULATION)
3491 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3492 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3493 be speculated. */
3494 {
e1ab7874 3495 if (sel_sched_p ())
3496 sel_mark_hard_insn (insn);
3497 else
3498 {
3499 sd_iterator_def sd_it;
3500 dep_t dep;
48e1416a 3501
e1ab7874 3502 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3503 sd_iterator_cond (&sd_it, &dep);)
3504 change_spec_dep_to_hard (sd_it);
3505 }
93f6b030 3506 }
617529e8 3507
3508 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3509 honor their original ordering. */
3510 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3511 {
3512 if (deps->last_args_size)
3513 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3514 deps->last_args_size = insn;
3515 }
6adce0fb 3516}
3517
326d0c19 3518/* Return TRUE if INSN might not always return normally (e.g. call exit,
3519 longjmp, loop forever, ...). */
4a020a8c 3520/* FIXME: Why can't this function just use flags_from_decl_or_type and
3521 test for ECF_NORETURN? */
326d0c19 3522static bool
71ce7f59 3523call_may_noreturn_p (rtx_insn *insn)
326d0c19 3524{
3525 rtx call;
3526
3527 /* const or pure calls that aren't looping will always return. */
3528 if (RTL_CONST_OR_PURE_CALL_P (insn)
3529 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3530 return false;
3531
cf7fb72d 3532 call = get_call_rtx_from (insn);
3533 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
326d0c19 3534 {
3535 rtx symbol = XEXP (XEXP (call, 0), 0);
3536 if (SYMBOL_REF_DECL (symbol)
3537 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3538 {
3539 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3540 == BUILT_IN_NORMAL)
3541 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3542 {
3543 case BUILT_IN_BCMP:
3544 case BUILT_IN_BCOPY:
3545 case BUILT_IN_BZERO:
3546 case BUILT_IN_INDEX:
3547 case BUILT_IN_MEMCHR:
3548 case BUILT_IN_MEMCMP:
3549 case BUILT_IN_MEMCPY:
3550 case BUILT_IN_MEMMOVE:
3551 case BUILT_IN_MEMPCPY:
3552 case BUILT_IN_MEMSET:
3553 case BUILT_IN_RINDEX:
3554 case BUILT_IN_STPCPY:
3555 case BUILT_IN_STPNCPY:
3556 case BUILT_IN_STRCAT:
3557 case BUILT_IN_STRCHR:
3558 case BUILT_IN_STRCMP:
3559 case BUILT_IN_STRCPY:
3560 case BUILT_IN_STRCSPN:
3561 case BUILT_IN_STRLEN:
3562 case BUILT_IN_STRNCAT:
3563 case BUILT_IN_STRNCMP:
3564 case BUILT_IN_STRNCPY:
3565 case BUILT_IN_STRPBRK:
3566 case BUILT_IN_STRRCHR:
3567 case BUILT_IN_STRSPN:
3568 case BUILT_IN_STRSTR:
3569 /* Assume certain string/memory builtins always return. */
3570 return false;
3571 default:
3572 break;
3573 }
3574 }
3575 }
3576
3577 /* For all other calls assume that they might not always return. */
3578 return true;
3579}
3580
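/* Illustration (not part of the original sources): a call to memcpy
   is matched by BUILT_IN_MEMCPY above and reported as always
   returning, whereas a call through a function pointer (no
   SYMBOL_REF) or to a function such as exit or longjmp (not in the
   switch) falls through to the final "return true" and is treated
   as possibly not returning.  */
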
fdb1b2b1 3581/* Return true if INSN should be made dependent on the previous instruction
3582 group, and if all INSN's dependencies should be moved to the first
3583 instruction of that group. */
3584
3585static bool
71ce7f59 3586chain_to_prev_insn_p (rtx_insn *insn)
fdb1b2b1 3587{
fdb1b2b1 3588 /* INSN forms a group with the previous instruction. */
3589 if (SCHED_GROUP_P (insn))
3590 return true;
3591
3592 /* If the previous instruction clobbers a register R and this one sets
3593 part of R, the clobber was added specifically to help us track the
3594 liveness of R. There's no point scheduling the clobber and leaving
3595 INSN behind, especially if we move the clobber to another block. */
9ed997be 3596 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
fdb1b2b1 3597 if (prev
3598 && INSN_P (prev)
3599 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3600 && GET_CODE (PATTERN (prev)) == CLOBBER)
3601 {
9ed997be 3602 rtx x = XEXP (PATTERN (prev), 0);
fdb1b2b1 3603 if (set_of (x, insn))
3604 return true;
3605 }
3606
3607 return false;
3608}
3609
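/* Illustration (not part of the original sources): the clobber test
   above matches sequences such as

       (insn 10 ... (clobber (reg:TI 100)))
       (insn 11 ... (set (subreg:DI (reg:TI 100) 8) ...))

   where insn 10 exists purely to describe the liveness of reg 100,
   so insn 11 is kept chained to it.  */
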
e1ab7874 3610/* Analyze INSN with DEPS as a context. */
6adce0fb 3611void
b24ef467 3612deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
6adce0fb 3613{
e1ab7874 3614 if (sched_deps_info->start_insn)
3615 sched_deps_info->start_insn (insn);
6adce0fb 3616
6c2d9e41 3617 /* Record the condition for this insn. */
3618 if (NONDEBUG_INSN_P (insn))
effd1640 3619 {
3620 rtx t;
3621 sched_get_condition_with_rev (insn, NULL);
3622 t = INSN_CACHED_COND (insn);
54267fdf 3623 INSN_COND_DEPS (insn) = NULL;
effd1640 3624 if (reload_completed
3625 && (current_sched_info->flags & DO_PREDICATION)
3626 && COMPARISON_P (t)
3627 && REG_P (XEXP (t, 0))
3628 && CONSTANT_P (XEXP (t, 1)))
3629 {
3630 unsigned int regno;
3631 int nregs;
54267fdf 3632 rtx_insn_list *cond_deps = NULL;
effd1640 3633 t = XEXP (t, 0);
3634 regno = REGNO (t);
0933f1d9 3635 nregs = REG_NREGS (t);
effd1640 3636 while (nregs-- > 0)
3637 {
3638 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
54267fdf 3639 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3640 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3641 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
effd1640 3642 }
54267fdf 3643 INSN_COND_DEPS (insn) = cond_deps;
effd1640 3644 }
3645 }
6c2d9e41 3646
0741df1e 3647 if (JUMP_P (insn))
e1ab7874 3648 {
48e1416a 3649 /* Make each JUMP_INSN (but not a speculative check)
e1ab7874 3650 a scheduling barrier for memory references. */
3651 if (!deps->readonly
48e1416a 3652 && !(sel_sched_p ()
e1ab7874 3653 && sel_insn_is_speculation_check (insn)))
3654 {
3655 /* Keep the list a reasonable size. */
1deb811a 3656 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
e1ab7874 3657 flush_pending_lists (deps, insn, true, true);
3658 else
effd1640 3659 deps->pending_jump_insns
3660 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
e1ab7874 3661 }
3662
0741df1e 3663 /* For each insn which shouldn't cross a jump, add a dependence. */
3664 add_dependence_list_and_free (deps, insn,
3665 &deps->sched_before_next_jump, 1,
d452a169 3666 REG_DEP_ANTI, true);
0741df1e 3667
3668 sched_analyze_insn (deps, PATTERN (insn), insn);
3669 }
3670 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3671 {
e1ab7874 3672 sched_analyze_insn (deps, PATTERN (insn), insn);
3673 }
3674 else if (CALL_P (insn))
3675 {
3676 int i;
3677
3678 CANT_MOVE (insn) = 1;
3679
3680 if (find_reg_note (insn, REG_SETJMP, NULL))
3681 {
3682 /* This is setjmp. Assume that all registers, not just
3683 hard registers, may be clobbered by this call. */
3684 reg_pending_barrier = MOVE_BARRIER;
3685 }
3686 else
3687 {
3688 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3689 /* A call may read and modify global register variables. */
3690 if (global_regs[i])
3691 {
3692 SET_REGNO_REG_SET (reg_pending_sets, i);
a7dcf969 3693 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3694 }
3695 /* Other call-clobbered hard regs may be clobbered.
3696 Since we only have a choice between 'might be clobbered'
3697 and 'definitely not clobbered', we must include all
3698 partly call-clobbered registers here. */
3699 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3700 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3701 SET_REGNO_REG_SET (reg_pending_clobbers, i);
 3702	 	    /* We don't know what set of fixed registers might be used
 3703	 	       by the function, so be conservative; it is certain that
 3704	 	       the stack pointer is among them.  */
3705 else if (fixed_regs[i])
a7dcf969 3706 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3707 /* The frame pointer is normally not used by the function
3708 itself, but by the debugger. */
3709 /* ??? MIPS o32 is an exception. It uses the frame pointer
3710 in the macro expansion of jal but does not represent this
3711 fact in the call_insn rtl. */
3712 else if (i == FRAME_POINTER_REGNUM
3713 || (i == HARD_FRAME_POINTER_REGNUM
3714 && (! reload_completed || frame_pointer_needed)))
a7dcf969 3715 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e1ab7874 3716 }
3717
3718 /* For each insn which shouldn't cross a call, add a dependence
3719 between that insn and this call insn. */
48e1416a 3720 add_dependence_list_and_free (deps, insn,
e1ab7874 3721 &deps->sched_before_next_call, 1,
d452a169 3722 REG_DEP_ANTI, true);
e1ab7874 3723
3724 sched_analyze_insn (deps, PATTERN (insn), insn);
3725
 3726	 	  /* If CALL were in a sched group, it would violate the
 3727	 	     convention that sched group insns have dependencies only on the
 3728	 	     previous instruction.
3729
3730 Of course one can say: "Hey! What about head of the sched group?"
3731 And I will answer: "Basic principles (one dep per insn) are always
3732 the same." */
3733 gcc_assert (!SCHED_GROUP_P (insn));
3734
3735 /* In the absence of interprocedural alias analysis, we must flush
3736 all pending reads and writes, and start new dependencies starting
3737 from here. But only flush writes for constant calls (which may
3738 be passed a pointer to something we haven't written yet). */
3739 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3740
3741 if (!deps->readonly)
3742 {
3743 /* Remember the last function call for limiting lifetimes. */
3744 free_INSN_LIST_list (&deps->last_function_call);
3745 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
326d0c19 3746
3747 if (call_may_noreturn_p (insn))
3748 {
3749 /* Remember the last function call that might not always return
3750 normally for limiting moves of trapping insns. */
3751 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3752 deps->last_function_call_may_noreturn
3753 = alloc_INSN_LIST (insn, NULL_RTX);
3754 }
3755
e1ab7874 3756 /* Before reload, begin a post-call group, so as to keep the
3757 lifetimes of hard registers correct. */
3758 if (! reload_completed)
3759 deps->in_post_call_group_p = post_call;
3760 }
3761 }
3762
3763 if (sched_deps_info->use_cselib)
3764 cselib_process_insn (insn);
3765
e1ab7874 3766 if (sched_deps_info->finish_insn)
3767 sched_deps_info->finish_insn ();
3768
3769 /* Fixup the dependencies in the sched group. */
48e1416a 3770 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
fdb1b2b1 3771 && chain_to_prev_insn_p (insn)
3772 && !sel_sched_p ())
3773 chain_to_prev_insn (insn);
e1ab7874 3774}
3775
3776/* Initialize DEPS for the new block beginning with HEAD. */
3777void
e55e25f6 3778deps_start_bb (struct deps_desc *deps, rtx_insn *head)
e1ab7874 3779{
3780 gcc_assert (!deps->readonly);
503f2817 3781
865b3698 3782 /* Before reload, if the previous block ended in a call, show that
3783 we are inside a post-call group, so as to keep the lifetimes of
3784 hard registers correct. */
6d7dc5b9 3785 if (! reload_completed && !LABEL_P (head))
865b3698 3786 {
e55e25f6 3787 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
e1ab7874 3788
6d7dc5b9 3789 if (insn && CALL_P (insn))
865b3698 3790 deps->in_post_call_group_p = post_call_initial;
3791 }
e1ab7874 3792}
3793
3794/* Analyze every insn between HEAD and TAIL inclusive, creating backward
3795 dependencies for each insn. */
3796void
b24ef467 3797sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
e1ab7874 3798{
b24ef467 3799 rtx_insn *insn;
e1ab7874 3800
3801 if (sched_deps_info->use_cselib)
35af0188 3802 cselib_init (CSELIB_RECORD_MEMORY);
e1ab7874 3803
3804 deps_start_bb (deps, head);
3805
6adce0fb 3806 for (insn = head;; insn = NEXT_INSN (insn))
3807 {
e1ab7874 3808
9997bd27 3809 if (INSN_P (insn))
6adce0fb 3810 {
93f6b030 3811	  /* Initialize the insn's deps_lists.  */
3812 sd_init_insn (insn);
55e7138c 3813	  /* Clean up SCHED_GROUP_P, which may have been set by the
 3814	 	     last scheduler pass.  */
3815 if (SCHED_GROUP_P (insn))
3816 SCHED_GROUP_P (insn) = 0;
9997bd27 3817 }
3818
e1ab7874 3819 deps_analyze_insn (deps, insn);
37cde9fb 3820
6adce0fb 3821 if (insn == tail)
503f2817 3822 {
e1ab7874 3823 if (sched_deps_info->use_cselib)
503f2817 3824 cselib_finish ();
3825 return;
3826 }
6adce0fb 3827 }
04e579b6 3828 gcc_unreachable ();
6adce0fb 3829}
9b7c6f02 3830
93f6b030 3831/* Helper for sched_free_deps ().
3832 Delete INSN's (RESOLVED_P) backward dependencies. */
3833static void
71ce7f59 3834delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
9b7c6f02 3835{
93f6b030 3836 sd_iterator_def sd_it;
3837 dep_t dep;
3838 sd_list_types_def types;
9b7c6f02 3839
93f6b030 3840 if (resolved_p)
3841 types = SD_LIST_RES_BACK;
3842 else
3843 types = SD_LIST_BACK;
3844
3845 for (sd_it = sd_iterator_start (insn, types);
3846 sd_iterator_cond (&sd_it, &dep);)
4d64d9a4 3847 {
93f6b030 3848 dep_link_t link = *sd_it.linkp;
3849 dep_node_t node = DEP_LINK_NODE (link);
3850 deps_list_t back_list;
3851 deps_list_t forw_list;
3852
3853 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3854 remove_from_deps_list (link, back_list);
3855 delete_dep_node (node);
4d64d9a4 3856 }
9b7c6f02 3857}
3858
93f6b030 3859/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3860 deps_lists. */
6adce0fb 3861void
4cd001d5 3862sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
6adce0fb 3863{
4cd001d5 3864 rtx_insn *insn;
3865 rtx_insn *next_tail = NEXT_INSN (tail);
6adce0fb 3866
2261c559 3867 /* We make two passes since some insns may be scheduled before their
3868 dependencies are resolved. */
6adce0fb 3869 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
93f6b030 3870 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3871 {
93f6b030 3872 /* Clear forward deps and leave the dep_nodes to the
3873 corresponding back_deps list. */
3874 if (resolved_p)
3875 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3876 else
3877 clear_deps_list (INSN_FORW_DEPS (insn));
2261c559 3878 }
3879 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3880 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3881 {
3882 /* Clear resolved back deps together with its dep_nodes. */
3883 delete_dep_nodes_in_back_deps (insn, resolved_p);
6adce0fb 3884
93f6b030 3885 sd_finish_insn (insn);
3886 }
6adce0fb 3887}
3888\f
3889/* Initialize variables for region data dependence analysis.
48e1416a 3890 When LAZY_REG_LAST is true, do not allocate reg_last array
68e419a1 3891 of struct deps_desc immediately. */
6adce0fb 3892
3893void
68e419a1 3894init_deps (struct deps_desc *deps, bool lazy_reg_last)
6adce0fb 3895{
749c6f58 3896 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3897
3898 deps->max_reg = max_reg;
d9ab2038 3899 if (lazy_reg_last)
3900 deps->reg_last = NULL;
3901 else
3902 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
749c6f58 3903 INIT_REG_SET (&deps->reg_last_in_use);
6adce0fb 3904
3905 deps->pending_read_insns = 0;
3906 deps->pending_read_mems = 0;
3907 deps->pending_write_insns = 0;
3908 deps->pending_write_mems = 0;
effd1640 3909 deps->pending_jump_insns = 0;
c5947ab7 3910 deps->pending_read_list_length = 0;
3911 deps->pending_write_list_length = 0;
85de291e 3912 deps->pending_flush_length = 0;
6adce0fb 3913 deps->last_pending_memory_flush = 0;
3914 deps->last_function_call = 0;
326d0c19 3915 deps->last_function_call_may_noreturn = 0;
5deaeb50 3916 deps->sched_before_next_call = 0;
0741df1e 3917 deps->sched_before_next_jump = 0;
865b3698 3918 deps->in_post_call_group_p = not_post_call;
9845d120 3919 deps->last_debug_insn = 0;
617529e8 3920 deps->last_args_size = 0;
e1ab7874 3921 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3922 deps->readonly = 0;
6adce0fb 3923}
3924
48e1416a 3925/* Initialize only the reg_last field of DEPS, which was not allocated
d9ab2038 3926   before because DEPS was initialized lazily.  */
3927void
68e419a1 3928init_deps_reg_last (struct deps_desc *deps)
d9ab2038 3929{
3930 gcc_assert (deps && deps->max_reg > 0);
3931 gcc_assert (deps->reg_last == NULL);
3932
3933 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3934}
3935
3936
6adce0fb 3937/* Free insn lists found in DEPS. */
3938
3939void
68e419a1 3940free_deps (struct deps_desc *deps)
6adce0fb 3941{
4f917ffe 3942 unsigned i;
8c97cf13 3943 reg_set_iterator rsi;
6adce0fb 3944
d9ab2038 3945 /* We set max_reg to 0 when this context was already freed. */
3946 if (deps->max_reg == 0)
3947 {
3948 gcc_assert (deps->reg_last == NULL);
3949 return;
3950 }
3951 deps->max_reg = 0;
48e1416a 3952
5deaeb50 3953 free_INSN_LIST_list (&deps->pending_read_insns);
3954 free_EXPR_LIST_list (&deps->pending_read_mems);
3955 free_INSN_LIST_list (&deps->pending_write_insns);
3956 free_EXPR_LIST_list (&deps->pending_write_mems);
3957 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3958
749c6f58 3959 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
de44dcb9 3960 times. For a testcase with 42000 regs and 8000 small basic blocks,
749c6f58 3961 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
8c97cf13 3962 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
6adce0fb 3963 {
749c6f58 3964 struct deps_reg *reg_last = &deps->reg_last[i];
b479b8be 3965 if (reg_last->uses)
3966 free_INSN_LIST_list (&reg_last->uses);
3967 if (reg_last->sets)
3968 free_INSN_LIST_list (&reg_last->sets);
a7dcf969 3969 if (reg_last->implicit_sets)
3970 free_INSN_LIST_list (&reg_last->implicit_sets);
effd1640 3971 if (reg_last->control_uses)
3972 free_INSN_LIST_list (&reg_last->control_uses);
b479b8be 3973 if (reg_last->clobbers)
3974 free_INSN_LIST_list (&reg_last->clobbers);
8c97cf13 3975 }
749c6f58 3976 CLEAR_REG_SET (&deps->reg_last_in_use);
3977
48e1416a 3978 /* As we initialize reg_last lazily, it is possible that we didn't allocate
d9ab2038 3979 it at all. */
dd045aee 3980 free (deps->reg_last);
e1ab7874 3981 deps->reg_last = NULL;
3982
3983 deps = NULL;
6adce0fb 3984}
3985
6aed13f1 3986/* Remove INSN from the dependence context DEPS.  */
e1ab7874 3987void
54267fdf 3988remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
e1ab7874 3989{
3990 int removed;
3991 unsigned i;
3992 reg_set_iterator rsi;
48e1416a 3993
e1ab7874 3994 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3995 &deps->pending_read_mems);
c971b445 3996 if (!DEBUG_INSN_P (insn))
3997 deps->pending_read_list_length -= removed;
e1ab7874 3998 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3999 &deps->pending_write_mems);
4000 deps->pending_write_list_length -= removed;
effd1640 4001
4002 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4003 deps->pending_flush_length -= removed;
e1ab7874 4004 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4005 deps->pending_flush_length -= removed;
6adce0fb 4006
e1ab7874 4007 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4008 {
4009 struct deps_reg *reg_last = &deps->reg_last[i];
4010 if (reg_last->uses)
4011 remove_from_dependence_list (insn, &reg_last->uses);
4012 if (reg_last->sets)
4013 remove_from_dependence_list (insn, &reg_last->sets);
a7dcf969 4014 if (reg_last->implicit_sets)
4015 remove_from_dependence_list (insn, &reg_last->implicit_sets);
e1ab7874 4016 if (reg_last->clobbers)
4017 remove_from_dependence_list (insn, &reg_last->clobbers);
a7dcf969 4018 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4019 && !reg_last->clobbers)
e1ab7874 4020 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4021 }
4022
4023 if (CALL_P (insn))
326d0c19 4024 {
4025 remove_from_dependence_list (insn, &deps->last_function_call);
48e1416a 4026 remove_from_dependence_list (insn,
326d0c19 4027 &deps->last_function_call_may_noreturn);
4028 }
e1ab7874 4029 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4030}
4031
4032/* Init deps data vector. */
4033static void
4034init_deps_data_vector (void)
4035{
f1f41a6c 4036 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4037 if (reserve > 0 && ! h_d_i_d.space (reserve))
4038 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
e1ab7874 4039}
4040
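/* Worked example (illustration only, not in the original sources):
   with sched_max_luid == 100 and h_d_i_d.length () == 80, reserve
   is 21; if the vector lacks space for that many elements it is
   grown, zero-initialized, to 3 * 100 / 2 == 150 entries, leaving
   headroom for luids created later.  */
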
4041/* If it is profitable to use them, initialize or extend (depending on
4042 GLOBAL_P) dependency data. */
6adce0fb 4043void
e1ab7874 4044sched_deps_init (bool global_p)
6adce0fb 4045{
93f6b030 4046  /* Average number of insns per basic block.
 4047	 	  '+ 1' is used to make it nonzero.  */
a28770e1 4048 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
93f6b030 4049
e1ab7874 4050 init_deps_data_vector ();
48e1416a 4051
4052 /* We use another caching mechanism for selective scheduling, so
e1ab7874 4053 we don't use this one. */
4054 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4055 {
4056 /* ?!? We could save some memory by computing a per-region luid mapping
4057 which could reduce both the number of vectors in the cache and the
4058 size of each vector. Instead we just avoid the cache entirely unless
4059 the average number of instructions in a basic block is very high. See
4060 the comment before the declaration of true_dependency_cache for
4061 what we consider "very high". */
6a1cdb4d 4062 cache_size = 0;
e1ab7874 4063 extend_dependency_caches (sched_max_luid, true);
6a1cdb4d 4064 }
9997bd27 4065
48e1416a 4066 if (global_p)
e1ab7874 4067 {
3da86c6b 4068 dl_pool = new pool_allocator<_deps_list> ("deps_list",
e1ab7874 4069 /* Allocate lists for one block at a time. */
4070 insns_in_block);
3da86c6b 4071 dn_pool = new pool_allocator<_dep_node> ("dep_node",
e1ab7874 4072 /* Allocate nodes for one block at a time.
4073 We assume that average insn has
4074 5 producers. */
4075 5 * insns_in_block);
4076 }
6a1cdb4d 4077}
4d64d9a4 4078
e1ab7874 4079
6a1cdb4d 4080/* Create or extend (depending on CREATE_P) dependency caches to
4081 size N. */
4082void
4083extend_dependency_caches (int n, bool create_p)
4084{
4085 if (create_p || true_dependency_cache)
4086 {
4087 int i, luid = cache_size + n;
4088
4089 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4090 luid);
4091 output_dependency_cache = XRESIZEVEC (bitmap_head,
4092 output_dependency_cache, luid);
4093 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4094 luid);
effd1640 4095 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4096 luid);
93f6b030 4097
4d64d9a4 4098 if (current_sched_info->flags & DO_SPECULATION)
4099 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4100 luid);
4101
6a1cdb4d 4102 for (i = cache_size; i < luid; i++)
d4f68afb 4103 {
4104 bitmap_initialize (&true_dependency_cache[i], 0);
d4f68afb 4105 bitmap_initialize (&output_dependency_cache[i], 0);
4d64d9a4 4106 bitmap_initialize (&anti_dependency_cache[i], 0);
effd1640 4107 bitmap_initialize (&control_dependency_cache[i], 0);
93f6b030 4108
4d64d9a4 4109 if (current_sched_info->flags & DO_SPECULATION)
4110 bitmap_initialize (&spec_dependency_cache[i], 0);
d4f68afb 4111 }
4112 cache_size = luid;
6adce0fb 4113 }
4114}
4115
e1ab7874 4116/* Finalize dependency information for the whole function. */
6adce0fb 4117void
e1ab7874 4118sched_deps_finish (void)
6adce0fb 4119{
93f6b030 4120 gcc_assert (deps_pools_are_empty_p ());
3da86c6b 4121 dn_pool->release_if_empty ();
4122 dn_pool = NULL;
4123 dl_pool->release_if_empty ();
4124 dl_pool = NULL;
9997bd27 4125
f1f41a6c 4126 h_d_i_d.release ();
e1ab7874 4127 cache_size = 0;
48e1416a 4128
6adce0fb 4129 if (true_dependency_cache)
4130 {
d4f68afb 4131 int i;
4132
4133 for (i = 0; i < cache_size; i++)
4134 {
4135 bitmap_clear (&true_dependency_cache[i]);
d4f68afb 4136 bitmap_clear (&output_dependency_cache[i]);
4d64d9a4 4137 bitmap_clear (&anti_dependency_cache[i]);
effd1640 4138 bitmap_clear (&control_dependency_cache[i]);
93f6b030 4139
e1ab7874 4140 if (sched_deps_info->generate_spec_deps)
4d64d9a4 4141 bitmap_clear (&spec_dependency_cache[i]);
d4f68afb 4142 }
4143 free (true_dependency_cache);
6adce0fb 4144 true_dependency_cache = NULL;
d4f68afb 4145 free (output_dependency_cache);
6adce0fb 4146 output_dependency_cache = NULL;
4d64d9a4 4147 free (anti_dependency_cache);
4148 anti_dependency_cache = NULL;
effd1640 4149 free (control_dependency_cache);
4150 control_dependency_cache = NULL;
93f6b030 4151
e1ab7874 4152 if (sched_deps_info->generate_spec_deps)
4d64d9a4 4153 {
4154 free (spec_dependency_cache);
4155 spec_dependency_cache = NULL;
4156 }
e1ab7874 4157
6adce0fb 4158 }
4159}
4160
4161/* Initialize some global variables needed by the dependency analysis
4162 code. */
4163
4164void
60b8c5b3 4165init_deps_global (void)
6adce0fb 4166{
a7dcf969 4167 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4168 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
ae85a37a 4169 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4170 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4171 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
effd1640 4172 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
c14a239f 4173 reg_pending_barrier = NOT_A_BARRIER;
e1ab7874 4174
4175 if (!sel_sched_p () || sched_emulate_haifa_p)
4176 {
4177 sched_deps_info->start_insn = haifa_start_insn;
4178 sched_deps_info->finish_insn = haifa_finish_insn;
4179
4180 sched_deps_info->note_reg_set = haifa_note_reg_set;
4181 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4182 sched_deps_info->note_reg_use = haifa_note_reg_use;
4183
4184 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4185 sched_deps_info->note_dep = haifa_note_dep;
4186 }
6adce0fb 4187}
4188
4189/* Free everything used by the dependency analysis code. */
4190
4191void
60b8c5b3 4192finish_deps_global (void)
6adce0fb 4193{
4194 FREE_REG_SET (reg_pending_sets);
4195 FREE_REG_SET (reg_pending_clobbers);
5deaeb50 4196 FREE_REG_SET (reg_pending_uses);
effd1640 4197 FREE_REG_SET (reg_pending_control_uses);
6adce0fb 4198}
4d64d9a4 4199
4d64d9a4 4200/* Estimate the weakness of dependence between MEM1 and MEM2. */
e1ab7874 4201dw_t
4d64d9a4 4202estimate_dep_weak (rtx mem1, rtx mem2)
4203{
4204 rtx r1, r2;
4205
4206 if (mem1 == mem2)
4207 /* MEMs are the same - don't speculate. */
4208 return MIN_DEP_WEAK;
4209
4210 r1 = XEXP (mem1, 0);
4211 r2 = XEXP (mem2, 0);
4212
4213 if (r1 == r2
4214 || (REG_P (r1) && REG_P (r2)
4215 && REGNO (r1) == REGNO (r2)))
4216 /* Again, MEMs are the same. */
4217 return MIN_DEP_WEAK;
4218 else if ((REG_P (r1) && !REG_P (r2))
4219 || (!REG_P (r1) && REG_P (r2)))
 4221	 	    /* Different addressing modes - a reason to be more
 4222	 	       speculative than usual.  */
4222 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4223 else
4224 /* We can't say anything about the dependence. */
4225 return UNCERTAIN_DEP_WEAK;
4226}
4227
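/* Example (illustration only, not in the original sources):
   comparing (mem (reg 100)) against (mem (plus (reg 101)
   (const_int 4))) hits the mixed-addressing-mode case above and
   returns the midpoint between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK,
   i.e. the dependence is considered weaker than in the default
   case, encouraging speculation.  */
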
4228/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4229 This function can handle same INSN and ELEM (INSN == ELEM).
4230 It is a convenience wrapper. */
effd1640 4231static void
b24ef467 4232add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4d64d9a4 4233{
e1ab7874 4234 ds_t ds;
4235 bool internal;
4d64d9a4 4236
e1ab7874 4237 if (dep_type == REG_DEP_TRUE)
4238 ds = DEP_TRUE;
4239 else if (dep_type == REG_DEP_OUTPUT)
4240 ds = DEP_OUTPUT;
effd1640 4241 else if (dep_type == REG_DEP_CONTROL)
4242 ds = DEP_CONTROL;
e1ab7874 4243 else
4244 {
4245 gcc_assert (dep_type == REG_DEP_ANTI);
4246 ds = DEP_ANTI;
4247 }
4248
4249 /* When add_dependence is called from inside sched-deps.c, we expect
4250 cur_insn to be non-null. */
4251 internal = cur_insn != NULL;
4252 if (internal)
4253 gcc_assert (insn == cur_insn);
4254 else
4255 cur_insn = insn;
48e1416a 4256
e1ab7874 4257 note_dep (elem, ds);
4258 if (!internal)
4259 cur_insn = NULL;
4d64d9a4 4260}
4261
242d1ee6 4262/* Return the weakness of speculative type TYPE in the dep_status DS,
 4263	 	skipping the consistency checks so that malformed input does not ICE.  */
4264static dw_t
93f6b030 4265get_dep_weak_1 (ds_t ds, ds_t type)
4d64d9a4 4266{
4267 ds = ds & type;
e1ab7874 4268
4d64d9a4 4269 switch (type)
4270 {
4271 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4272 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4273 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4274 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4275 default: gcc_unreachable ();
4276 }
4277
4d64d9a4 4278 return (dw_t) ds;
4279}
4280
242d1ee6 4281/* Return weakness of speculative type TYPE in the dep_status DS. */
93f6b030 4282dw_t
4283get_dep_weak (ds_t ds, ds_t type)
4284{
4285 dw_t dw = get_dep_weak_1 (ds, type);
4286
4287 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
93f6b030 4288 return dw;
4289}
4290
4d64d9a4 4291/* Return the dep_status, which has the same parameters as DS, except for
4292 speculative type TYPE, that will have weakness DW. */
4293ds_t
4294set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4295{
4296 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4297
4298 ds &= ~type;
4299 switch (type)
4300 {
4301 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4302 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4303 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4304 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4305 default: gcc_unreachable ();
4306 }
4307 return ds;
4308}
4309
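/* Usage sketch (illustration only, not in the original sources):
   to mark a true dependence as data-speculative with weakness DW,

       ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, dw);

   after which get_dep_weak (ds, BEGIN_DATA) == dw, and for any
   dw >= MIN_DEP_WEAK the status tests nonzero against SPECULATIVE.  */
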
e1ab7874 4310/* Return the join of two dep_statuses DS1 and DS2.
4311 If MAX_P is true then choose the greater probability,
4312 otherwise multiply probabilities.
4313 This function assumes that both DS1 and DS2 contain speculative bits. */
4314static ds_t
4315ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4d64d9a4 4316{
4317 ds_t ds, t;
4318
4319 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4320
4321 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4322
4323 t = FIRST_SPEC_TYPE;
4324 do
4325 {
4326 if ((ds1 & t) && !(ds2 & t))
4327 ds |= ds1 & t;
4328 else if (!(ds1 & t) && (ds2 & t))
4329 ds |= ds2 & t;
4330 else if ((ds1 & t) && (ds2 & t))
4331 {
e1ab7874 4332 dw_t dw1 = get_dep_weak (ds1, t);
4333 dw_t dw2 = get_dep_weak (ds2, t);
4d64d9a4 4334 ds_t dw;
4335
e1ab7874 4336 if (!max_p)
4337 {
4338 dw = ((ds_t) dw1) * ((ds_t) dw2);
4339 dw /= MAX_DEP_WEAK;
4340 if (dw < MIN_DEP_WEAK)
4341 dw = MIN_DEP_WEAK;
4342 }
4343 else
4344 {
4345 if (dw1 >= dw2)
4346 dw = dw1;
4347 else
4348 dw = dw2;
4349 }
4d64d9a4 4350
4351 ds = set_dep_weak (ds, t, (dw_t) dw);
4352 }
4353
4354 if (t == LAST_SPEC_TYPE)
4355 break;
4356 t <<= SPEC_TYPE_SHIFT;
4357 }
4358 while (1);
4359
4360 return ds;
4361}
4362
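/* Worked example (illustration only, not in the original sources):
   if DS1 and DS2 both carry BEGIN_DATA with weakness
   MAX_DEP_WEAK / 2, the multiplicative merge above computes

       dw = (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
          == MAX_DEP_WEAK / 4  (up to rounding),

   treating joint success as a product of independent probabilities;
   with MAX_P true the result would stay at MAX_DEP_WEAK / 2.  */
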
e1ab7874 4363/* Return the join of two dep_statuses DS1 and DS2.
4364 This function assumes that both DS1 and DS2 contain speculative bits. */
4365ds_t
4366ds_merge (ds_t ds1, ds_t ds2)
4367{
4368 return ds_merge_1 (ds1, ds2, false);
4369}
4370
4371/* Return the join of two dep_statuses DS1 and DS2. */
4372ds_t
4373ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4374{
4375 ds_t new_status = ds | ds2;
4376
4377 if (new_status & SPECULATIVE)
4378 {
4379 if ((ds && !(ds & SPECULATIVE))
4380 || (ds2 && !(ds2 & SPECULATIVE)))
4381 /* Then this dep can't be speculative. */
4382 new_status &= ~SPECULATIVE;
4383 else
4384 {
4385 /* Both are speculative. Merging probabilities. */
4386 if (mem1)
4387 {
4388 dw_t dw;
4389
4390 dw = estimate_dep_weak (mem1, mem2);
4391 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4392 }
4393
4394 if (!ds)
4395 new_status = ds2;
4396 else if (!ds2)
4397 new_status = ds;
4398 else
4399 new_status = ds_merge (ds2, ds);
4400 }
4401 }
4402
4403 return new_status;
4404}
4405
4406/* Return the join of DS1 and DS2. Use maximum instead of multiplying
4407 probabilities. */
4408ds_t
4409ds_max_merge (ds_t ds1, ds_t ds2)
4410{
4411 if (ds1 == 0 && ds2 == 0)
4412 return 0;
4413
4414 if (ds1 == 0 && ds2 != 0)
4415 return ds2;
4416
4417 if (ds1 != 0 && ds2 == 0)
4418 return ds1;
4419
4420 return ds_merge_1 (ds1, ds2, true);
4421}
4422
4423/* Return the probability of speculation success for the speculation
4424 status DS. */
4425dw_t
4426ds_weak (ds_t ds)
4427{
4428 ds_t res = 1, dt;
4429 int n = 0;
4430
4431 dt = FIRST_SPEC_TYPE;
4432 do
4433 {
4434 if (ds & dt)
4435 {
4436 res *= (ds_t) get_dep_weak (ds, dt);
4437 n++;
4438 }
4439
4440 if (dt == LAST_SPEC_TYPE)
4441 break;
4442 dt <<= SPEC_TYPE_SHIFT;
4443 }
4444 while (1);
4445
4446 gcc_assert (n);
4447 while (--n)
4448 res /= MAX_DEP_WEAK;
4449
4450 if (res < MIN_DEP_WEAK)
4451 res = MIN_DEP_WEAK;
4452
4453 gcc_assert (res <= MAX_DEP_WEAK);
4454
4455 return (dw_t) res;
4456}
4457
4458/* Return a dep status that contains all speculation types of DS. */
4459ds_t
4460ds_get_speculation_types (ds_t ds)
4461{
4462 if (ds & BEGIN_DATA)
4463 ds |= BEGIN_DATA;
4464 if (ds & BE_IN_DATA)
4465 ds |= BE_IN_DATA;
4466 if (ds & BEGIN_CONTROL)
4467 ds |= BEGIN_CONTROL;
4468 if (ds & BE_IN_CONTROL)
4469 ds |= BE_IN_CONTROL;
4470
4471 return ds & SPECULATIVE;
4472}
4473
4474/* Return a dep status that contains maximal weakness for each speculation
4475 type present in DS. */
4476ds_t
4477ds_get_max_dep_weak (ds_t ds)
4478{
4479 if (ds & BEGIN_DATA)
4480 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4481 if (ds & BE_IN_DATA)
4482 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4483 if (ds & BEGIN_CONTROL)
4484 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4485 if (ds & BE_IN_CONTROL)
4486 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4487
4488 return ds;
4489}
4490
93f6b030 4491/* Dump information about the dependence status S. */
4492static void
4493dump_ds (FILE *f, ds_t s)
4494{
4495 fprintf (f, "{");
4496
4497 if (s & BEGIN_DATA)
4498 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4499 if (s & BE_IN_DATA)
4500 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4501 if (s & BEGIN_CONTROL)
4502 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4503 if (s & BE_IN_CONTROL)
4504 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4505
4506 if (s & HARD_DEP)
4507 fprintf (f, "HARD_DEP; ");
4508
4509 if (s & DEP_TRUE)
4510 fprintf (f, "DEP_TRUE; ");
93f6b030 4511 if (s & DEP_OUTPUT)
4512 fprintf (f, "DEP_OUTPUT; ");
effd1640 4513 if (s & DEP_ANTI)
4514 fprintf (f, "DEP_ANTI; ");
4515 if (s & DEP_CONTROL)
4516 fprintf (f, "DEP_CONTROL; ");
93f6b030 4517
4518 fprintf (f, "}");
4519}
4520
4b987fac 4521DEBUG_FUNCTION void
93f6b030 4522debug_ds (ds_t s)
4523{
4524 dump_ds (stderr, s);
4525 fprintf (stderr, "\n");
4526}
4527
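/* For instance (illustration only, not in the original sources),
   debug_ds (DEP_TRUE | DEP_OUTPUT) prints

       {DEP_TRUE; DEP_OUTPUT; }

   and a data-speculative status would additionally print its
   BEGIN_DATA weakness as a decimal value.  */
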
4d64d9a4 4528#ifdef ENABLE_CHECKING
4529/* Verify that dependence type and status are consistent.
4530 If RELAXED_P is true, then skip dep_weakness checks. */
4531static void
93f6b030 4532check_dep (dep_t dep, bool relaxed_p)
4d64d9a4 4533{
93f6b030 4534 enum reg_note dt = DEP_TYPE (dep);
4535 ds_t ds = DEP_STATUS (dep);
4536
4537 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4538
4539 if (!(current_sched_info->flags & USE_DEPS_LIST))
4540 {
2261c559 4541 gcc_assert (ds == 0);
93f6b030 4542 return;
4543 }
4544
4d64d9a4 4545 /* Check that dependence type contains the same bits as the status. */
4546 if (dt == REG_DEP_TRUE)
4547 gcc_assert (ds & DEP_TRUE);
4548 else if (dt == REG_DEP_OUTPUT)
4549 gcc_assert ((ds & DEP_OUTPUT)
48e1416a 4550 && !(ds & DEP_TRUE));
effd1640 4551 else if (dt == REG_DEP_ANTI)
4552 gcc_assert ((ds & DEP_ANTI)
4d64d9a4 4553 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
effd1640 4554 else
4555 gcc_assert (dt == REG_DEP_CONTROL
4556 && (ds & DEP_CONTROL)
4557 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4d64d9a4 4558
 4559	   /* HARD_DEP cannot appear in the dep_status of a link.  */
48e1416a 4560 gcc_assert (!(ds & HARD_DEP));
4d64d9a4 4561
4562 /* Check that dependence status is set correctly when speculation is not
4563 supported. */
e1ab7874 4564 if (!sched_deps_info->generate_spec_deps)
4d64d9a4 4565 gcc_assert (!(ds & SPECULATIVE));
4566 else if (ds & SPECULATIVE)
4567 {
4568 if (!relaxed_p)
4569 {
4570 ds_t type = FIRST_SPEC_TYPE;
4571
4572 /* Check that dependence weakness is in proper range. */
4573 do
4574 {
4575 if (ds & type)
4576 get_dep_weak (ds, type);
4577
4578 if (type == LAST_SPEC_TYPE)
4579 break;
4580 type <<= SPEC_TYPE_SHIFT;
4581 }
4582 while (1);
4583 }
4584
4585 if (ds & BEGIN_SPEC)
4586 {
4587 /* Only true dependence can be data speculative. */
4588 if (ds & BEGIN_DATA)
4589 gcc_assert (ds & DEP_TRUE);
4590
4591 /* Control dependencies in the insn scheduler are represented by
4592 anti-dependencies, therefore only anti dependence can be
4593 control speculative. */
4594 if (ds & BEGIN_CONTROL)
4595 gcc_assert (ds & DEP_ANTI);
4596 }
4597 else
4598 {
4599 /* Subsequent speculations should resolve true dependencies. */
4600 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4601 }
48e1416a 4602
4603 /* Check that true and anti dependencies can't have other speculative
4d64d9a4 4604 statuses. */
4605 if (ds & DEP_TRUE)
4606 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4607 /* An output dependence can't be speculative at all. */
4608 gcc_assert (!(ds & DEP_OUTPUT));
4609 if (ds & DEP_ANTI)
4610 gcc_assert (ds & BEGIN_CONTROL);
4611 }
4612}
db982eeb 4613#endif /* ENABLE_CHECKING */
4614
d452a169 4615/* The following code discovers opportunities to switch a memory reference
4616 and an increment by modifying the address. We ensure that this is done
4617 only for dependencies that are only used to show a single register
4618 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4619 instruction involved is subject to only one dep that can cause a pattern
4620 change.
4621
4622 When we discover a suitable dependency, we fill in the dep_replacement
4623 structure to show how to modify the memory reference. */
4624
4625/* Holds information about a pair of memory reference and register increment
4626 insns which depend on each other, but could possibly be interchanged. */
4627struct mem_inc_info
4628{
b24ef467 4629 rtx_insn *inc_insn;
4630 rtx_insn *mem_insn;
d452a169 4631
4632 rtx *mem_loc;
4633 /* A register occurring in the memory address for which we wish to break
4634 the dependence. This must be identical to the destination register of
4635 the increment. */
4636 rtx mem_reg0;
4637 /* Any kind of index that is added to that register. */
4638 rtx mem_index;
4639 /* The constant offset used in the memory address. */
4640 HOST_WIDE_INT mem_constant;
4641 /* The constant added in the increment insn. Negated if the increment is
4642 after the memory address. */
4643 HOST_WIDE_INT inc_constant;
4644 /* The source register used in the increment. May be different from mem_reg0
4645 if the increment occurs before the memory address. */
4646 rtx inc_input;
4647};
4648
4649/* Verify that the memory location described in MII can be replaced with
4650 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4651 insn remains unchanged by this function. */
4652
4653static rtx
4654attempt_change (struct mem_inc_info *mii, rtx new_addr)
4655{
4656 rtx mem = *mii->mem_loc;
4657 rtx new_mem;
4658
75de4aa2 4659 /* Jump through a lot of hoops to keep the attributes up to date. We
d452a169 4660 do not want to call one of the change address variants that take
4661 an offset even though we know the offset in many cases. These
4662 assume you are changing where the address is pointing by the
4663 offset. */
4664 new_mem = replace_equiv_address_nv (mem, new_addr);
4665 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4666 {
4667 if (sched_verbose >= 5)
4668 fprintf (sched_dump, "validation failure\n");
4669 return NULL_RTX;
4670 }
4671
4672 /* Put back the old one. */
4673 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4674
4675 return new_mem;
4676}
4677
 4678/* Return true if INSN has the form "a = b op c" where a and b are
 4679	    regs.  op is + if c is a reg and +|- if c is a const.  Fill in
 4680	    information in MII about what is found.
4681 BEFORE_MEM indicates whether the increment is found before or after
4682 a corresponding memory reference. */
4683
4684static bool
b24ef467 4685parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
d452a169 4686{
4687 rtx pat = single_set (insn);
4688 rtx src, cst;
4689 bool regs_equal;
4690
4691 if (RTX_FRAME_RELATED_P (insn) || !pat)
4692 return false;
4693
4694 /* Result must be single reg. */
4695 if (!REG_P (SET_DEST (pat)))
4696 return false;
4697
36faf01b 4698 if (GET_CODE (SET_SRC (pat)) != PLUS)
d452a169 4699 return false;
4700
4701 mii->inc_insn = insn;
4702 src = SET_SRC (pat);
4703 mii->inc_input = XEXP (src, 0);
4704
4705 if (!REG_P (XEXP (src, 0)))
4706 return false;
4707
4708 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4709 return false;
4710
4711 cst = XEXP (src, 1);
4712 if (!CONST_INT_P (cst))
4713 return false;
4714 mii->inc_constant = INTVAL (cst);
4715
4716 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4717
4718 if (!before_mem)
4719 {
4720 mii->inc_constant = -mii->inc_constant;
4721 if (!regs_equal)
4722 return false;
4723 }
4724
4725 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
36faf01b 4726 {
4727 /* Note that the sign has already been reversed for !before_mem. */
3764c94e 4728 if (STACK_GROWS_DOWNWARD)
4729 return mii->inc_constant > 0;
4730 else
4731 return mii->inc_constant < 0;
36faf01b 4732 }
d452a169 4733 return true;
4734}
4735
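/* Illustration (not part of the original sources): with
   mii->mem_reg0 == (reg A), the insn

       (set (reg A) (plus (reg A) (const_int 4)))

   occurring after the memory reference (BEFORE_MEM == false) is
   accepted with inc_input == (reg A) and inc_constant == -4; the
   sign flip accounts for the increment moving ahead of the use.  */
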
4736/* Once a suitable mem reference has been found and the corresponding data
4737 in MII has been filled in, this function is called to find a suitable
4738 add or inc insn involving the register we found in the memory
4739 reference. */
4740
4741static bool
4742find_inc (struct mem_inc_info *mii, bool backwards)
4743{
4744 sd_iterator_def sd_it;
4745 dep_t dep;
4746
4747 sd_it = sd_iterator_start (mii->mem_insn,
4748 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4749 while (sd_iterator_cond (&sd_it, &dep))
4750 {
4751 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
9d8bf733 4752 rtx_insn *pro = DEP_PRO (dep);
4753 rtx_insn *con = DEP_CON (dep);
b24ef467 4754 rtx_insn *inc_cand = backwards ? pro : con;
d452a169 4755 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4756 goto next;
4757 if (parse_add_or_inc (mii, inc_cand, backwards))
4758 {
4759 struct dep_replacement *desc;
be10bb5a 4760 df_ref def;
d452a169 4761 rtx newaddr, newmem;
4762
4763 if (sched_verbose >= 5)
4764 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4765 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4766
 4767	 	  /* Need to ensure that none of the operands of the inc
4768 instruction are assigned to by the mem insn. */
be10bb5a 4769 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4770 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4771 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4772 {
4773 if (sched_verbose >= 5)
4774 fprintf (sched_dump,
4775 "inc conflicts with store failure.\n");
4776 goto next;
4777 }
490aa3a5 4778
d452a169 4779 newaddr = mii->inc_input;
4780 if (mii->mem_index != NULL_RTX)
4781 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4782 mii->mem_index);
4783 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4784 mii->mem_constant + mii->inc_constant);
4785 newmem = attempt_change (mii, newaddr);
4786 if (newmem == NULL_RTX)
4787 goto next;
4788 if (sched_verbose >= 5)
4789 fprintf (sched_dump, "successful address replacement\n");
4790 desc = XCNEW (struct dep_replacement);
4791 DEP_REPLACE (dep) = desc;
4792 desc->loc = mii->mem_loc;
4793 desc->newval = newmem;
4794 desc->orig = *desc->loc;
4795 desc->insn = mii->mem_insn;
4796 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4797 INSN_SPEC_BACK_DEPS (con));
4798 if (backwards)
4799 {
4800 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
f00069ed 4801 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4802 REG_DEP_TRUE);
d452a169 4803 }
4804 else
4805 {
4806 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
f00069ed 4807 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4808 REG_DEP_ANTI);
d452a169 4809 }
4810 return true;
4811 }
4812 next:
4813 sd_iterator_next (&sd_it);
4814 }
4815 return false;
4816}
4817
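/* Putting the previous two functions together (a sketch, not from
   the original sources): for the pair

       (set (reg B) (mem (reg A)))                 ; mii->mem_insn
       (set (reg A) (plus (reg A) (const_int 4)))  ; the increment

   the new address computed above is (plus (reg A) (const_int -4)),
   so the load can be rewritten to follow the increment; the
   dep_replacement records the edit without applying it yet.  */
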
4818/* A recursive function that walks ADDRESS_OF_X to find memory references
4819 which could be modified during scheduling. We call find_inc for each
4820 one we find that has a recognizable form. MII holds information about
4821 the pair of memory/increment instructions.
4822 We ensure that every instruction with a memory reference (which will be
4823 the location of the replacement) is assigned at most one breakable
4824 dependency. */
4825
4826static bool
4827find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4828{
4829 rtx x = *address_of_x;
4830 enum rtx_code code = GET_CODE (x);
4831 const char *const fmt = GET_RTX_FORMAT (code);
4832 int i;
4833
4834 if (code == MEM)
4835 {
4836 rtx reg0 = XEXP (x, 0);
4837
4838 mii->mem_loc = address_of_x;
4839 mii->mem_index = NULL_RTX;
4840 mii->mem_constant = 0;
4841 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4842 {
4843 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4844 reg0 = XEXP (reg0, 0);
4845 }
4846 if (GET_CODE (reg0) == PLUS)
4847 {
4848 mii->mem_index = XEXP (reg0, 1);
4849 reg0 = XEXP (reg0, 0);
4850 }
4851 if (REG_P (reg0))
4852 {
be10bb5a 4853 df_ref use;
d452a169 4854 int occurrences = 0;
4855
4856 /* Make sure this reg appears only once in this insn. Can't use
4857 count_occurrences since that only works for pseudos. */
be10bb5a 4858 FOR_EACH_INSN_USE (use, mii->mem_insn)
4859 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4860 if (++occurrences > 1)
4861 {
4862 if (sched_verbose >= 5)
4863 fprintf (sched_dump, "mem count failure\n");
4864 return false;
4865 }
d452a169 4866
4867 mii->mem_reg0 = reg0;
4868 return find_inc (mii, true) || find_inc (mii, false);
4869 }
4870 return false;
4871 }
4872
4873 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4874 {
4875 /* If REG occurs inside a MEM used in a bit-field reference,
4876 that is unacceptable. */
4877 return false;
4878 }
4879
4880 /* Time for some deep diving. */
4881 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4882 {
4883 if (fmt[i] == 'e')
4884 {
4885 if (find_mem (mii, &XEXP (x, i)))
4886 return true;
4887 }
4888 else if (fmt[i] == 'E')
4889 {
4890 int j;
4891 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4892 if (find_mem (mii, &XVECEXP (x, i, j)))
4893 return true;
4894 }
4895 }
4896 return false;
4897}
4898
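/* Address decomposition example (illustration only, not in the
   original sources): for x == (mem (plus (plus (reg A) (reg I))
   (const_int 8))) the MEM case above records

       mii->mem_constant == 8
       mii->mem_index    == (reg I)
       mii->mem_reg0     == (reg A)

   before calling find_inc in both directions.  */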
4899
4900/* Examine the instructions between HEAD and TAIL and try to find
4901 dependencies that can be broken by modifying one of the patterns. */
4902
4903void
b24ef467 4904find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
d452a169 4905{
b24ef467 4906 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
d452a169 4907 int success_in_block = 0;
4908
1c5ae16e 4909 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
d452a169 4910 {
4911 struct mem_inc_info mii;
4912
4913 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4914 continue;
4915
4916 mii.mem_insn = insn;
4917 if (find_mem (&mii, &PATTERN (insn)))
4918 success_in_block++;
4919 }
4920 if (success_in_block && sched_verbose >= 5)
4921 fprintf (sched_dump, "%d candidates for address modification found.\n",
4922 success_in_block);
4923}
4924
db982eeb 4925#endif /* INSN_SCHEDULING */