/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2016 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}

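/* Note: a dep_status (ds_t) is a bitmask that can carry several
   dependence-type bits at once (plus speculation status), while a
   reg_note names only the single major type.  The round trip above is
   therefore lossy: for example, ds_to_dk (DEP_OUTPUT | DEP_ANTI)
   yields REG_DEP_OUTPUT (the first bit tested wins), and
   dk_to_ds (REG_DEP_OUTPUT) gives back just DEP_OUTPUT.  */
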
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      |DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

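/* Note: dep links form an intrusive doubly-linked list, but instead
   of a "prev" pointer each node stores PREV_NEXTP -- the address of
   whichever "next" field (or list head) points at it.  This lets
   attach_dep_link and detach_dep_link below splice a node in O(1)
   without special-casing the head of the list.  */
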
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

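/* Note: dn_pool_diff (and dl_pool_diff below) count live objects --
   incremented on each create and decremented on each delete or free --
   so deps_pools_are_empty_p can verify that a scheduled region
   returned everything to the pools.  */
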
/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   an insn may have a constraint that requires a particular register
   class.  Even if there is currently no hard register in the insn,
   the particular hard register will be in the insn after the reload
   pass because the constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);

\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

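/* Note: const_true_rtx doubles as a sentinel in the cache above: a
   NULL INSN_CACHED_COND means "not computed yet", while const_true_rtx
   means "computed, and no condition was found".  */
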
/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}

\f
e855c69d 618/* Return nonzero if conditions COND1 and COND2 can never be both true. */
0a1bb917 619static int
e855c69d 620conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
0a1bb917 621{
ec8e098d
PB
622 if (COMPARISON_P (cond1)
623 && COMPARISON_P (cond2)
e855c69d
AB
624 && GET_CODE (cond1) ==
625 (rev1==rev2
626 ? reversed_comparison_code (cond2, NULL)
627 : GET_CODE (cond2))
aef0e7a8 628 && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
0a1bb917
BS
629 && XEXP (cond1, 1) == XEXP (cond2, 1))
630 return 1;
631 return 0;
632}
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
\f
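/* Illustrative example (hypothetical insns): if INSN1 executes under
   (eq r1 0) and INSN2 under (ne r1 0), and neither insn modifies r1,
   the two can never execute on the same path, so no dependence needs
   to be recorded between them.  */
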
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of the returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}

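/* Example: SD_LIST_BACK is the union of the hard and speculative
   backward lists, so a typical walk over all producers of an insn (a
   pattern used throughout this file) looks like:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       process (DEP_PRO (dep));

   where process stands for whatever action the caller performs.  */
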
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether the dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

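/* Note: each cache consulted above is an array of bitmaps indexed by
   the consumer's luid; bit ELEM_LUID set in
   true_dependency_cache[INSN_LUID] means a true dependence of INSN on
   ELEM was already recorded.  The caches only accelerate lookups; the
   deps lists remain the authoritative representation.  */
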
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep; it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

1312
1313/* Add dependence described by DEP.
1314 If RESOLVED_P is true treat the dependence as a resolved one. */
1315void
1316sd_add_dep (dep_t dep, bool resolved_p)
1317{
1318 dep_node_t n = create_dep_node ();
1319 deps_list_t con_back_deps;
1320 deps_list_t pro_forw_deps;
974c43e9
DM
1321 rtx_insn *elem = DEP_PRO (dep);
1322 rtx_insn *insn = DEP_CON (dep);
e2f6ff94
MK
1323
1324 gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1325
b953c2b8
BS
1326 if ((current_sched_info->flags & DO_SPECULATION) == 0
1327 || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
e2f6ff94
MK
1328 DEP_STATUS (dep) &= ~SPECULATIVE;
1329
1330 copy_dep (DEP_NODE_DEP (n), dep);
1331
1332 get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
b198261f 1333
e2f6ff94 1334 add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
b198261f 1335
b2b29377
MM
1336 if (flag_checking)
1337 check_dep (dep, false);
ddbd5439 1338
e2f6ff94
MK
1339 add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1340
16f6ece6 1341 /* If we are adding a dependency to INSN's LOG_LINKs, then note that
2d76cb1a 1342 in the bitmap caches of dependency information. */
16f6ece6 1343 if (true_dependency_cache != NULL)
e2f6ff94 1344 set_dependency_caches (dep);
e2f6ff94
MK
1345}
1346
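/* Typical usage (mirroring sd_copy_back_deps and haifa_note_dep
   below): build a dep_def on the stack, initialize it, and hand it to
   sd_add_dep, which copies it into a pooled dep_node:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_dep (dep, false);

   Here producer and consumer are placeholder rtx_insn pointers.  */
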
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

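/* Note: resolving a dependence only moves its two links between the
   unresolved and resolved variants of the same lists; the dep_node
   itself stays allocated, which is what makes sd_unresolve_dep an
   exact inverse of sd_resolve_dep.  */
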
/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}

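/* Note: the pending_read_* and pending_write_* lists run in parallel:
   entry N of the insn list corresponds to entry N of the mem list,
   which is why remove_from_both_dependence_lists above walks both in
   lockstep.  */
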
1746/* Make a dependency between every memory reference on the pending lists
37a0f8a5
RH
1747 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1748 dependencies for a read operation, similarly with FOR_WRITE. */
16f6ece6
BS
1749
1750static void
ce1ce33a 1751flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
46c5ad27 1752 int for_write)
16f6ece6 1753{
37a0f8a5 1754 if (for_write)
16f6ece6 1755 {
b8698a0f 1756 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1a83e602 1757 1, REG_DEP_ANTI, true);
e855c69d
AB
1758 if (!deps->readonly)
1759 {
1760 free_EXPR_LIST_list (&deps->pending_read_mems);
1761 deps->pending_read_list_length = 0;
1762 }
16f6ece6 1763 }
16f6ece6 1764
e855c69d 1765 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1a83e602
BS
1766 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1767 true);
16f6ece6 1768
b8698a0f 1769 add_dependence_list_and_free (deps, insn,
e855c69d 1770 &deps->last_pending_memory_flush, 1,
1a83e602
BS
1771 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1772 true);
e2724e63
BS
1773
1774 add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1a83e602 1775 REG_DEP_ANTI, true);
e2724e63 1776
8febe3fa
MK
1777 if (DEBUG_INSN_P (insn))
1778 {
1779 if (for_write)
1780 free_INSN_LIST_list (&deps->pending_read_insns);
1781 free_INSN_LIST_list (&deps->pending_write_insns);
1782 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1783 free_INSN_LIST_list (&deps->pending_jump_insns);
1784 }
1785
e855c69d
AB
1786 if (!deps->readonly)
1787 {
1788 free_EXPR_LIST_list (&deps->pending_write_mems);
1789 deps->pending_write_list_length = 0;
1790
1791 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1792 deps->pending_flush_length = 1;
1793 }
1a83e602 1794 mark_as_hard = false;
e855c69d
AB
1795}
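
/* For example (a hypothetical flush): when a call insn triggers a
   flush, every insn still on pending_read_insns gains an anti
   dependence on the call, every insn on pending_write_insns gains an
   anti or output dependence (depending on FOR_READ), and afterwards
   the call is the sole entry on last_pending_memory_flush, so later
   memory insns need only depend on it rather than on the whole
   history of memory references.  */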
1796\f
1797/* Instruction which dependencies we are analyzing. */
ce1ce33a 1798static rtx_insn *cur_insn = NULL;
e855c69d
AB
1799
1800/* Implement hooks for haifa scheduler. */
1801
1802static void
6144a836 1803haifa_start_insn (rtx_insn *insn)
e855c69d
AB
1804{
1805 gcc_assert (insn && !cur_insn);
1806
6144a836 1807 cur_insn = insn;
e855c69d
AB
1808}
1809
1810static void
1811haifa_finish_insn (void)
1812{
1813 cur_insn = NULL;
1814}
1815
1816void
1817haifa_note_reg_set (int regno)
1818{
1819 SET_REGNO_REG_SET (reg_pending_sets, regno);
1820}
1821
1822void
1823haifa_note_reg_clobber (int regno)
1824{
1825 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1826}
1827
1828void
1829haifa_note_reg_use (int regno)
1830{
1831 SET_REGNO_REG_SET (reg_pending_uses, regno);
1832}
1833
1834static void
6144a836 1835haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
e855c69d
AB
1836{
1837 if (!(ds & SPECULATIVE))
1838 {
1839 mem = NULL_RTX;
1840 pending_mem = NULL_RTX;
1841 }
1842 else
1843 gcc_assert (ds & BEGIN_DATA);
1844
1845 {
1846 dep_def _dep, *dep = &_dep;
b8698a0f 1847
ce18efcb 1848 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
b953c2b8 1849 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1a83e602 1850 DEP_NONREG (dep) = 1;
e855c69d
AB
1851 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1852 }
1853
1854}
1855
1856static void
6144a836 1857haifa_note_dep (rtx_insn *elem, ds_t ds)
e855c69d
AB
1858{
1859 dep_def _dep;
1860 dep_t dep = &_dep;
1861
1862 init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1a83e602
BS
1863 if (mark_as_hard)
1864 DEP_NONREG (dep) = 1;
e855c69d
AB
1865 maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1866}
1867
1868static void
1869note_reg_use (int r)
1870{
1871 if (sched_deps_info->note_reg_use)
1872 sched_deps_info->note_reg_use (r);
1873}
1874
1875static void
1876note_reg_set (int r)
1877{
1878 if (sched_deps_info->note_reg_set)
1879 sched_deps_info->note_reg_set (r);
1880}
1881
1882static void
1883note_reg_clobber (int r)
1884{
1885 if (sched_deps_info->note_reg_clobber)
1886 sched_deps_info->note_reg_clobber (r);
1887}
1888
1889static void
6144a836 1890note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
e855c69d
AB
1891{
1892 if (sched_deps_info->note_mem_dep)
1893 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1894}
1895
1896static void
ce1ce33a 1897note_dep (rtx_insn *e, ds_t ds)
e855c69d
AB
1898{
1899 if (sched_deps_info->note_dep)
1900 sched_deps_info->note_dep (e, ds);
1901}
1902
1903/* Return corresponding to DS reg_note. */
1904enum reg_note
1905ds_to_dt (ds_t ds)
1906{
1907 if (ds & DEP_TRUE)
1908 return REG_DEP_TRUE;
1909 else if (ds & DEP_OUTPUT)
1910 return REG_DEP_OUTPUT;
e2724e63
BS
1911 else if (ds & DEP_ANTI)
1912 return REG_DEP_ANTI;
e855c69d
AB
1913 else
1914 {
e2724e63
BS
1915 gcc_assert (ds & DEP_CONTROL);
1916 return REG_DEP_CONTROL;
e855c69d 1917 }
16f6ece6 1918}

ce18efcb
VM
1919
1920\f
1921
1922/* Functions for computation of info needed for register pressure
1923 sensitive insn scheduling. */
1924
1925
1926/* Allocate and return reg_use_data structure for REGNO and INSN. */
1927static struct reg_use_data *
f57aa6b0 1928create_insn_reg_use (int regno, rtx_insn *insn)
ce18efcb
VM
1929{
1930 struct reg_use_data *use;
1931
1932 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1933 use->regno = regno;
1934 use->insn = insn;
1935 use->next_insn_use = INSN_REG_USE_LIST (insn);
1936 INSN_REG_USE_LIST (insn) = use;
1937 return use;
1938}
1939
f20f2613
VM
1940/* Allocate reg_set_data structure for REGNO and INSN. */
1941static void
ce18efcb
VM
1942create_insn_reg_set (int regno, rtx insn)
1943{
1944 struct reg_set_data *set;
1945
1946 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1947 set->regno = regno;
1948 set->insn = insn;
1949 set->next_insn_set = INSN_REG_SET_LIST (insn);
1950 INSN_REG_SET_LIST (insn) = set;
ce18efcb
VM
1951}
1952
1953/* Set up insn register uses for INSN and dependency context DEPS. */
1954static void
f57aa6b0 1955setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
ce18efcb
VM
1956{
1957 unsigned i;
1958 reg_set_iterator rsi;
ce18efcb
VM
1959 struct reg_use_data *use, *use2, *next;
1960 struct deps_reg *reg_last;
1961
1962 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1963 {
1964 if (i < FIRST_PSEUDO_REGISTER
1965 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1966 continue;
1967
1968 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1969 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1970 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1971 /* Ignore a use that is not dying. */
1972 continue;
1973
1974 use = create_insn_reg_use (i, insn);
1975 use->next_regno_use = use;
1976 reg_last = &deps->reg_last[i];
b8698a0f 1977
ce18efcb 1978 /* Create the cycle list of uses. */
c838c402 1979 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
ce18efcb 1980 {
c838c402 1981 use2 = create_insn_reg_use (i, list->insn ());
ce18efcb
VM
1982 next = use->next_regno_use;
1983 use->next_regno_use = use2;
1984 use2->next_regno_use = next;
1985 }
1986 }
1987}
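
/* A minimal sketch (illustration only, nothing in this file calls
   it): the next_regno_use links built above form a circular list, so
   all uses of a dying register can be visited starting from any one
   of them.  */
#if 0
static void
walk_regno_uses (struct reg_use_data *use)
{
  struct reg_use_data *p = use;
  do
    {
      /* P->insn contains a dying use of P->regno.  */
      p = p->next_regno_use;
    }
  while (p != use);
}
#endif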
1988
1989/* Register pressure info for the currently processed insn. */
1990static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1991
1992/* Return TRUE if INSN has the use structure for REGNO. */
1993static bool
1994insn_use_p (rtx insn, int regno)
1995{
1996 struct reg_use_data *use;
1997
1998 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1999 if (use->regno == regno)
2000 return true;
2001 return false;
2002}
2003
2004/* Update the register pressure info after birth of pseudo register REGNO
2005 in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2006 the register is clobbered or unused after the insn. */
2007static void
2008mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2009{
2010 int incr, new_incr;
2011 enum reg_class cl;
2012
2013 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1756cb66 2014 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2015 if (cl != NO_REGS)
2016 {
1756cb66 2017 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
ce18efcb
VM
2018 if (clobber_p)
2019 {
2020 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2021 reg_pressure_info[cl].clobber_increase = new_incr;
2022 }
2023 else if (unused_p)
2024 {
2025 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2026 reg_pressure_info[cl].unused_set_increase = new_incr;
2027 }
2028 else
2029 {
2030 new_incr = reg_pressure_info[cl].set_increase + incr;
2031 reg_pressure_info[cl].set_increase = new_incr;
2032 if (! insn_use_p (insn, regno))
2033 reg_pressure_info[cl].change += incr;
2034 create_insn_reg_set (regno, insn);
2035 }
2036 gcc_assert (new_incr < (1 << INCREASE_BITS));
2037 }
2038}
2039
2040/* Like mark_insn_pseudo_birth except that NREGS says how many
2041 hard registers are involved in the birth. */
2042static void
2043mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2044 bool clobber_p, bool unused_p)
2045{
2046 enum reg_class cl;
2047 int new_incr, last = regno + nregs;
b8698a0f 2048
ce18efcb
VM
2049 while (regno < last)
2050 {
2051 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2052 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2053 {
1756cb66 2054 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2055 if (cl != NO_REGS)
2056 {
2057 if (clobber_p)
2058 {
2059 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2060 reg_pressure_info[cl].clobber_increase = new_incr;
2061 }
2062 else if (unused_p)
2063 {
2064 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2065 reg_pressure_info[cl].unused_set_increase = new_incr;
2066 }
2067 else
2068 {
2069 new_incr = reg_pressure_info[cl].set_increase + 1;
2070 reg_pressure_info[cl].set_increase = new_incr;
2071 if (! insn_use_p (insn, regno))
2072 reg_pressure_info[cl].change += 1;
2073 create_insn_reg_set (regno, insn);
2074 }
2075 gcc_assert (new_incr < (1 << INCREASE_BITS));
2076 }
2077 }
2078 regno++;
2079 }
2080}
2081
2082/* Update the register pressure info after birth of pseudo or hard
2083 register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2084 respectively, whether the register is clobbered or unused after the
2085 insn. */
2086static void
2087mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2088{
2089 int regno;
2090
2091 if (GET_CODE (reg) == SUBREG)
2092 reg = SUBREG_REG (reg);
2093
2094 if (! REG_P (reg))
2095 return;
2096
2097 regno = REGNO (reg);
2098 if (regno < FIRST_PSEUDO_REGISTER)
dc8afb70 2099 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
ce18efcb
VM
2100 clobber_p, unused_p);
2101 else
2102 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2103}
2104
2105/* Update the register pressure info after death of pseudo register
2106 REGNO. */
2107static void
2108mark_pseudo_death (int regno)
2109{
2110 int incr;
2111 enum reg_class cl;
2112
2113 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1756cb66 2114 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2115 if (cl != NO_REGS)
2116 {
1756cb66 2117 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
ce18efcb
VM
2118 reg_pressure_info[cl].change -= incr;
2119 }
2120}
2121
2122/* Like mark_pseudo_death except that NREGS says how many hard
2123 registers are involved in the death. */
2124static void
2125mark_hard_regno_death (int regno, int nregs)
2126{
2127 enum reg_class cl;
2128 int last = regno + nregs;
b8698a0f 2129
ce18efcb
VM
2130 while (regno < last)
2131 {
2132 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2133 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2134 {
1756cb66 2135 cl = sched_regno_pressure_class[regno];
ce18efcb
VM
2136 if (cl != NO_REGS)
2137 reg_pressure_info[cl].change -= 1;
2138 }
2139 regno++;
2140 }
2141}
2142
2143/* Update the register pressure info after death of pseudo or hard
2144 register REG. */
2145static void
2146mark_reg_death (rtx reg)
2147{
2148 int regno;
2149
2150 if (GET_CODE (reg) == SUBREG)
2151 reg = SUBREG_REG (reg);
2152
2153 if (! REG_P (reg))
2154 return;
2155
2156 regno = REGNO (reg);
2157 if (regno < FIRST_PSEUDO_REGISTER)
dc8afb70 2158 mark_hard_regno_death (regno, REG_NREGS (reg));
ce18efcb
VM
2159 else
2160 mark_pseudo_death (regno);
2161}
2162
2163/* Process SETTER of REG. DATA is an insn containing the setter. */
2164static void
2165mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2166{
2167 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2168 return;
2169 mark_insn_reg_birth
2170 ((rtx) data, reg, false,
2171 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2172}
2173
2174/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2175static void
2176mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2177{
2178 if (GET_CODE (setter) == CLOBBER)
2179 mark_insn_reg_birth ((rtx) data, reg, true, false);
2180}
2181
2182/* Set up reg pressure info related to INSN. */
19ac7892 2183void
90831096 2184init_insn_reg_pressure_info (rtx_insn *insn)
ce18efcb
VM
2185{
2186 int i, len;
2187 enum reg_class cl;
2188 static struct reg_pressure_data *pressure_info;
2189 rtx link;
2190
7a1aca9c 2191 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
ce18efcb
VM
2192
2193 if (! INSN_P (insn))
2194 return;
2195
1756cb66 2196 for (i = 0; i < ira_pressure_classes_num; i++)
ce18efcb 2197 {
1756cb66 2198 cl = ira_pressure_classes[i];
ce18efcb
VM
2199 reg_pressure_info[cl].clobber_increase = 0;
2200 reg_pressure_info[cl].set_increase = 0;
2201 reg_pressure_info[cl].unused_set_increase = 0;
2202 reg_pressure_info[cl].change = 0;
2203 }
b8698a0f 2204
ce18efcb 2205 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
b8698a0f 2206
ce18efcb 2207 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
b8698a0f 2208
760edf20
TS
2209 if (AUTO_INC_DEC)
2210 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2211 if (REG_NOTE_KIND (link) == REG_INC)
2212 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
ce18efcb
VM
2213
2214 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2215 if (REG_NOTE_KIND (link) == REG_DEAD)
2216 mark_reg_death (XEXP (link, 0));
b8698a0f 2217
1756cb66 2218 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
ce18efcb
VM
2219 pressure_info
2220 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
7a1aca9c
RS
2221 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2222 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2223 * sizeof (int), 1);
1756cb66 2224 for (i = 0; i < ira_pressure_classes_num; i++)
ce18efcb 2225 {
1756cb66 2226 cl = ira_pressure_classes[i];
ce18efcb
VM
2227 pressure_info[i].clobber_increase
2228 = reg_pressure_info[cl].clobber_increase;
2229 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2230 pressure_info[i].unused_set_increase
2231 = reg_pressure_info[cl].unused_set_increase;
2232 pressure_info[i].change = reg_pressure_info[cl].change;
2233 }
2234}
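
/* Worked example (hypothetical insn and pressure class): for
   "r100 = r101 + r102" where r101 dies and all three pseudos need
   one register of the same pressure class CL, the code above records
   set_increase = 1 for the birth of r100 and change = 1 - 1 = 0,
   because the death of r101 releases a register of CL in the same
   insn.  */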
2235
2236
16f6ece6 2237\f
e855c69d
AB
2238
2239/* Internal variable for sched_analyze_[12] () functions.
2240 If it is true, this means that sched_analyze_[12] is looking
2241 at the outermost SET. */
2242static bool can_start_lhs_rhs_p;
2243
b8698a0f 2244/* Extend reg info for the deps context DEPS given that
e855c69d
AB
2245 we have just generated a register numbered REGNO. */
2246static void
88302d54 2247extend_deps_reg_info (struct deps_desc *deps, int regno)
e855c69d
AB
2248{
2249 int max_regno = regno + 1;
2250
2251 gcc_assert (!reload_completed);
2252
2253 /* In a readonly context, it would not hurt to extend info,
2254 but it should not be needed. */
2255 if (reload_completed && deps->readonly)
2256 {
2257 deps->max_reg = max_regno;
2258 return;
2259 }
2260
2261 if (max_regno > deps->max_reg)
2262 {
b8698a0f 2263 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
e855c69d
AB
2264 max_regno);
2265 memset (&deps->reg_last[deps->max_reg],
b8698a0f 2266 0, (max_regno - deps->max_reg)
e855c69d
AB
2267 * sizeof (struct deps_reg));
2268 deps->max_reg = max_regno;
2269 }
2270}
2271
2272/* Extends REG_INFO_P if needed. */
2273void
2274maybe_extend_reg_info_p (void)
2275{
2276 /* Extend REG_INFO_P, if needed. */
2277 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2278 {
2279 size_t new_reg_info_p_size = max_regno + 128;
2280
2281 gcc_assert (!reload_completed && sel_sched_p ());
2282
2283 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2284 new_reg_info_p_size,
2285 reg_info_p_size,
2286 sizeof (*reg_info_p));
2287 reg_info_p_size = new_reg_info_p_size;
2288 }
2289}
2290
2e9182fd
EB
2291/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2292 The type of the reference is specified by REF and can be SET,
2293 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2294
2295static void
ef4bddc2 2296sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
ce1ce33a 2297 enum rtx_code ref, rtx_insn *insn)
2e9182fd 2298{
e855c69d
AB
2299 /* We could emit new pseudos in renaming. Extend the reg structures. */
2300 if (!reload_completed && sel_sched_p ()
2301 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2302 extend_deps_reg_info (deps, regno);
2303
2304 maybe_extend_reg_info_p ();
2305
2e9182fd
EB
2306 /* A hard reg in a wide mode may really be multiple registers.
2307 If so, mark all of them just like the first. */
2308 if (regno < FIRST_PSEUDO_REGISTER)
2309 {
2310 int i = hard_regno_nregs[regno][mode];
2311 if (ref == SET)
2312 {
2313 while (--i >= 0)
e855c69d 2314 note_reg_set (regno + i);
2e9182fd
EB
2315 }
2316 else if (ref == USE)
2317 {
2318 while (--i >= 0)
e855c69d 2319 note_reg_use (regno + i);
2e9182fd
EB
2320 }
2321 else
2322 {
2323 while (--i >= 0)
e855c69d 2324 note_reg_clobber (regno + i);
2e9182fd
EB
2325 }
2326 }
2327
2328 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2329 it does not reload. Ignore these as they have served their
2330 purpose already. */
2331 else if (regno >= deps->max_reg)
2332 {
2333 enum rtx_code code = GET_CODE (PATTERN (insn));
2334 gcc_assert (code == USE || code == CLOBBER);
2335 }
2336
2337 else
2338 {
2339 if (ref == SET)
e855c69d 2340 note_reg_set (regno);
2e9182fd 2341 else if (ref == USE)
e855c69d 2342 note_reg_use (regno);
2e9182fd 2343 else
e855c69d 2344 note_reg_clobber (regno);
2e9182fd
EB
2345
2346 /* Pseudos that are REG_EQUIV to something may be replaced
2347 by that during reloading. We need only add dependencies for
2348 the address in the REG_EQUIV note. */
2349 if (!reload_completed && get_reg_known_equiv_p (regno))
2350 {
2351 rtx t = get_reg_known_value (regno);
2352 if (MEM_P (t))
2353 sched_analyze_2 (deps, XEXP (t, 0), insn);
2354 }
2355
2356 /* Don't let it cross a call after scheduling if it doesn't
2357 already cross one. */
2358 if (REG_N_CALLS_CROSSED (regno) == 0)
2359 {
b5b8b0ac 2360 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2e9182fd
EB
2361 deps->sched_before_next_call
2362 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2363 else
2364 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 2365 REG_DEP_ANTI, false);
2e9182fd
EB
2366 }
2367 }
2368}
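
/* For illustration (hypothetical target): a SET of (reg:DI 0) on a
   32-bit target where DImode occupies two hard registers makes the
   hard-register loop above call note_reg_set for both regno 0 and
   regno 1, so the dependence lists see the wide access as two
   ordinary sets.  */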
2369
16f6ece6
BS
2370/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2371 rtx, X, creating all dependencies generated by the write to the
2372 destination of X, and reads of everything mentioned. */
2373
2374static void
ce1ce33a 2375sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2376{
b3694847 2377 rtx dest = XEXP (x, 0);
16f6ece6 2378 enum rtx_code code = GET_CODE (x);
e855c69d 2379 bool cslr_p = can_start_lhs_rhs_p;
16f6ece6 2380
e855c69d
AB
2381 can_start_lhs_rhs_p = false;
2382
2383 gcc_assert (dest);
16f6ece6
BS
2384 if (dest == 0)
2385 return;
2386
e855c69d
AB
2387 if (cslr_p && sched_deps_info->start_lhs)
2388 sched_deps_info->start_lhs (dest);
2389
7193d1dc 2390 if (GET_CODE (dest) == PARALLEL)
16f6ece6 2391 {
b3694847 2392 int i;
90d036a0 2393
16f6ece6 2394 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
7193d1dc
RK
2395 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2396 sched_analyze_1 (deps,
2397 gen_rtx_CLOBBER (VOIDmode,
2398 XEXP (XVECEXP (dest, 0, i), 0)),
2399 insn);
90d036a0 2400
e855c69d
AB
2401 if (cslr_p && sched_deps_info->finish_lhs)
2402 sched_deps_info->finish_lhs ();
2403
2404 if (code == SET)
2405 {
2406 can_start_lhs_rhs_p = cslr_p;
2407
2408 sched_analyze_2 (deps, SET_SRC (x), insn);
2409
2410 can_start_lhs_rhs_p = false;
2411 }
2412
16f6ece6
BS
2413 return;
2414 }
2415
2416 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
46d096a3 2417 || GET_CODE (dest) == ZERO_EXTRACT)
16f6ece6 2418 {
37bd08f8
HP
2419 if (GET_CODE (dest) == STRICT_LOW_PART
2420 || GET_CODE (dest) == ZERO_EXTRACT
4d779342 2421 || df_read_modify_subreg_p (dest))
37bd08f8 2422 {
46c5ad27 2423 /* These both read and modify the result. We must handle
37bd08f8
HP
2424 them as writes to get proper dependencies for following
2425 instructions. We must handle them as reads to get proper
2426 dependencies from this to previous instructions.
71c0e7fc 2427 Thus we need to call sched_analyze_2. */
37bd08f8 2428
46c5ad27 2429 sched_analyze_2 (deps, XEXP (dest, 0), insn);
37bd08f8 2430 }
46d096a3 2431 if (GET_CODE (dest) == ZERO_EXTRACT)
16f6ece6
BS
2432 {
2433 /* The second and third arguments are values read by this insn. */
2434 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2435 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2436 }
2437 dest = XEXP (dest, 0);
2438 }
2439
f8cfc6aa 2440 if (REG_P (dest))
16f6ece6 2441 {
2e9182fd 2442 int regno = REGNO (dest);
ef4bddc2 2443 machine_mode mode = GET_MODE (dest);
2e9182fd
EB
2444
2445 sched_analyze_reg (deps, regno, mode, code, insn);
16f6ece6 2446
c3bdeea8
RS
2447#ifdef STACK_REGS
2448 /* Treat all writes to a stack register as modifying the TOS. */
2449 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2450 {
2e9182fd
EB
2451 /* Avoid analyzing the same register twice. */
2452 if (regno != FIRST_STACK_REG)
2453 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
ce18efcb 2454
d108e679
AS
2455 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2456 FIRST_STACK_REG);
c3bdeea8
RS
2457 }
2458#endif
16f6ece6 2459 }
3c0cb5de 2460 else if (MEM_P (dest))
16f6ece6
BS
2461 {
2462 /* Writing memory. */
4b6c5340
BS
2463 rtx t = dest;
2464
e855c69d 2465 if (sched_deps_info->use_cselib)
4b6c5340 2466 {
ef4bddc2 2467 machine_mode address_mode = get_address_mode (dest);
d4ebfa65 2468
4b6c5340 2469 t = shallow_copy_rtx (dest);
4deef538
AO
2470 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2471 GET_MODE (t), insn);
9a76e83d
JJ
2472 XEXP (t, 0)
2473 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2474 insn);
4b6c5340 2475 }
c7866154 2476 t = canon_rtx (t);
16f6ece6 2477
e855c69d
AB
2478 /* Pending lists can't get larger with a readonly context. */
2479 if (!deps->readonly
2480 && ((deps->pending_read_list_length + deps->pending_write_list_length)
43722f9f 2481 >= MAX_PENDING_LIST_LENGTH))
16f6ece6
BS
2482 {
2483 /* Flush all pending reads and writes to prevent the pending lists
2484 from getting any larger. Insn scheduling runs too slowly when
4a121cc3 2485 these lists get long. When compiling GCC with itself,
16f6ece6 2486 this flush occurs 8 times for sparc, and 10 times for m88k using
4a121cc3 2487 the default value of 32. */
37a0f8a5 2488 flush_pending_lists (deps, insn, false, true);
16f6ece6
BS
2489 }
2490 else
2491 {
c838c402
DM
2492 rtx_insn_list *pending;
2493 rtx_expr_list *pending_mem;
16f6ece6
BS
2494
2495 pending = deps->pending_read_insns;
2496 pending_mem = deps->pending_read_mems;
2497 while (pending)
2498 {
c838c402
DM
2499 if (anti_dependence (pending_mem->element (), t)
2500 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2501 note_mem_dep (t, pending_mem->element (), pending->insn (),
e855c69d 2502 DEP_ANTI);
16f6ece6 2503
c838c402
DM
2504 pending = pending->next ();
2505 pending_mem = pending_mem->next ();
16f6ece6
BS
2506 }
2507
2508 pending = deps->pending_write_insns;
2509 pending_mem = deps->pending_write_mems;
2510 while (pending)
2511 {
c838c402
DM
2512 if (output_dependence (pending_mem->element (), t)
2513 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2514 note_mem_dep (t, pending_mem->element (),
2515 pending->insn (),
e855c69d 2516 DEP_OUTPUT);
16f6ece6 2517
c838c402
DM
2518 pending = pending->next ();
2519 pending_mem = pending_mem->next ();
16f6ece6
BS
2520 }
2521
b27cedc6 2522 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1a83e602 2523 REG_DEP_ANTI, true);
e2724e63 2524 add_dependence_list (insn, deps->pending_jump_insns, 1,
1a83e602 2525 REG_DEP_CONTROL, true);
16f6ece6 2526
e855c69d
AB
2527 if (!deps->readonly)
2528 add_insn_mem_dependence (deps, false, insn, dest);
16f6ece6
BS
2529 }
2530 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2531 }
2532
e855c69d
AB
2533 if (cslr_p && sched_deps_info->finish_lhs)
2534 sched_deps_info->finish_lhs ();
2535
16f6ece6
BS
2536 /* Analyze reads. */
2537 if (GET_CODE (x) == SET)
e855c69d
AB
2538 {
2539 can_start_lhs_rhs_p = cslr_p;
2540
2541 sched_analyze_2 (deps, SET_SRC (x), insn);
2542
2543 can_start_lhs_rhs_p = false;
2544 }
16f6ece6
BS
2545}
2546
2547/* Analyze the uses of memory and registers in rtx X in INSN. */
16f6ece6 2548static void
ce1ce33a 2549sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2550{
b3694847
SS
2551 int i;
2552 int j;
2553 enum rtx_code code;
2554 const char *fmt;
e855c69d
AB
2555 bool cslr_p = can_start_lhs_rhs_p;
2556
2557 can_start_lhs_rhs_p = false;
16f6ece6 2558
e855c69d 2559 gcc_assert (x);
16f6ece6
BS
2560 if (x == 0)
2561 return;
2562
e855c69d
AB
2563 if (cslr_p && sched_deps_info->start_rhs)
2564 sched_deps_info->start_rhs (x);
2565
16f6ece6
BS
2566 code = GET_CODE (x);
2567
2568 switch (code)
2569 {
d8116890 2570 CASE_CONST_ANY:
16f6ece6
BS
2571 case SYMBOL_REF:
2572 case CONST:
2573 case LABEL_REF:
bf520698 2574 /* Ignore constants. */
e855c69d
AB
2575 if (cslr_p && sched_deps_info->finish_rhs)
2576 sched_deps_info->finish_rhs ();
2577
16f6ece6
BS
2578 return;
2579
16f6ece6 2580 case CC0:
618f4073
TS
2581 if (!HAVE_cc0)
2582 gcc_unreachable ();
2583
16f6ece6 2584 /* User of CC0 depends on immediately preceding insn. */
4c2b2906 2585 SCHED_GROUP_P (insn) = 1;
caf819ae
VM
2586 /* Don't move CC0 setter to another block (it can set up the
2587 same flag for previous CC0 users which is safe). */
2588 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
e855c69d
AB
2589
2590 if (cslr_p && sched_deps_info->finish_rhs)
2591 sched_deps_info->finish_rhs ();
2592
16f6ece6 2593 return;
16f6ece6
BS
2594
2595 case REG:
2596 {
16f6ece6 2597 int regno = REGNO (x);
ef4bddc2 2598 machine_mode mode = GET_MODE (x);
2e9182fd
EB
2599
2600 sched_analyze_reg (deps, regno, mode, USE, insn);
c3bdeea8
RS
2601
2602#ifdef STACK_REGS
2603 /* Treat all reads of a stack register as modifying the TOS. */
2604 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2605 {
2e9182fd
EB
2606 /* Avoid analyzing the same register twice. */
2607 if (regno != FIRST_STACK_REG)
2608 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2609 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
c3bdeea8
RS
2610 }
2611#endif
e855c69d
AB
2612
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2615
16f6ece6
BS
2616 return;
2617 }
2618
2619 case MEM:
2620 {
2621 /* Reading memory. */
1476d1bd 2622 rtx_insn_list *u;
c838c402
DM
2623 rtx_insn_list *pending;
2624 rtx_expr_list *pending_mem;
4b6c5340 2625 rtx t = x;
16f6ece6 2626
e855c69d 2627 if (sched_deps_info->use_cselib)
4b6c5340 2628 {
ef4bddc2 2629 machine_mode address_mode = get_address_mode (t);
d4ebfa65 2630
4b6c5340 2631 t = shallow_copy_rtx (t);
4deef538
AO
2632 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2633 GET_MODE (t), insn);
9a76e83d
JJ
2634 XEXP (t, 0)
2635 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2636 insn);
4b6c5340 2637 }
16f6ece6 2638
f49b295a 2639 if (!DEBUG_INSN_P (insn))
16f6ece6 2640 {
f49b295a
AO
2641 t = canon_rtx (t);
2642 pending = deps->pending_read_insns;
2643 pending_mem = deps->pending_read_mems;
2644 while (pending)
2645 {
c838c402 2646 if (read_dependence (pending_mem->element (), t)
f49b295a 2647 && ! sched_insns_conditions_mutex_p (insn,
c838c402
DM
2648 pending->insn ()))
2649 note_mem_dep (t, pending_mem->element (),
2650 pending->insn (),
f49b295a
AO
2651 DEP_ANTI);
2652
c838c402
DM
2653 pending = pending->next ();
2654 pending_mem = pending_mem->next ();
f49b295a 2655 }
16f6ece6 2656
f49b295a
AO
2657 pending = deps->pending_write_insns;
2658 pending_mem = deps->pending_write_mems;
2659 while (pending)
e855c69d 2660 {
c838c402 2661 if (true_dependence (pending_mem->element (), VOIDmode, t)
f49b295a 2662 && ! sched_insns_conditions_mutex_p (insn,
c838c402
DM
2663 pending->insn ()))
2664 note_mem_dep (t, pending_mem->element (),
2665 pending->insn (),
f49b295a
AO
2666 sched_deps_info->generate_spec_deps
2667 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2668
c838c402
DM
2669 pending = pending->next ();
2670 pending_mem = pending_mem->next ();
f49b295a 2671 }
e855c69d 2672
1476d1bd
MM
2673 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2674 add_dependence (insn, u->insn (), REG_DEP_ANTI);
e2724e63 2675
1476d1bd 2676 for (u = deps->pending_jump_insns; u; u = u->next ())
e2724e63
BS
2677 if (deps_may_trap_p (x))
2678 {
2679 if ((sched_deps_info->generate_spec_deps)
2680 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2681 {
2682 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2683 MAX_DEP_WEAK);
2684
1476d1bd 2685 note_dep (u->insn (), ds);
e2724e63
BS
2686 }
2687 else
1476d1bd 2688 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
e2724e63 2689 }
e855c69d 2690 }
16f6ece6
BS
2691
2692 /* Always add these dependencies to pending_reads, since
2693 this insn may be followed by a write. */
1baf9159
JJ
2694 if (!deps->readonly)
2695 {
2696 if ((deps->pending_read_list_length
2697 + deps->pending_write_list_length)
43722f9f 2698 >= MAX_PENDING_LIST_LENGTH
1baf9159
JJ
2699 && !DEBUG_INSN_P (insn))
2700 flush_pending_lists (deps, insn, true, true);
2701 add_insn_mem_dependence (deps, true, insn, x);
2702 }
16f6ece6 2703
16f6ece6 2704 sched_analyze_2 (deps, XEXP (x, 0), insn);
e855c69d
AB
2705
2706 if (cslr_p && sched_deps_info->finish_rhs)
2707 sched_deps_info->finish_rhs ();
2708
16f6ece6
BS
2709 return;
2710 }
2711
2712 /* Force pending stores to memory in case a trap handler needs them. */
2713 case TRAP_IF:
37a0f8a5 2714 flush_pending_lists (deps, insn, true, false);
16f6ece6
BS
2715 break;
2716
3fce100b
AK
2717 case PREFETCH:
2718 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2719 reg_pending_barrier = TRUE_BARRIER;
0e80383f
VM
2720 /* A prefetch insn contains addresses only, so if the prefetch
2721 address has no registers, there will be no dependencies on
2722 the prefetch insn.  This is wrong from a code correctness
2723 point of view, as such a prefetch can be moved below a jump
2724 insn, which usually generates a MOVE_BARRIER that prevents
2725 insns containing registers or memories from moving through
2726 the barrier.  It is also wrong from a generated code
2727 performance point of view, as a prefetch without dependencies
2728 will tend to be issued later instead of earlier.  It is hard
2729 to generate accurate dependencies for prefetch insns, as a
2730 prefetch has only the start address, but it is better to have
2731 something than nothing. */
79f01c76 2732 if (!deps->readonly)
db1fb332
JJ
2733 {
2734 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2735 if (sched_deps_info->use_cselib)
2736 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2737 add_insn_mem_dependence (deps, true, insn, x);
2738 }
3fce100b
AK
2739 break;
2740
a11fe699
UB
2741 case UNSPEC_VOLATILE:
2742 flush_pending_lists (deps, insn, true, true);
2743 /* FALLTHRU */
2744
16f6ece6
BS
2745 case ASM_OPERANDS:
2746 case ASM_INPUT:
16f6ece6 2747 {
16f6ece6
BS
2748 /* Traditional and volatile asm instructions must be considered to use
2749 and clobber all hard registers, all pseudo-registers and all of
2750 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2751
2752 Consider for instance a volatile asm that changes the fpu rounding
2753 mode. An insn should not be moved across this even if it only uses
2754 pseudo-regs because it might give an incorrectly rounded result. */
bf95b629
RB
2755 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2756 && !DEBUG_INSN_P (insn))
d1c9693e 2757 reg_pending_barrier = TRUE_BARRIER;
16f6ece6
BS
2758
2759 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2760 We cannot just fall through here, since then we would be confused
2761 by the ASM_INPUT rtxes inside ASM_OPERANDS, which, unlike in their
2762 normal usage, do not indicate traditional asms. */
2763
2764 if (code == ASM_OPERANDS)
2765 {
2766 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2767 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
e855c69d
AB
2768
2769 if (cslr_p && sched_deps_info->finish_rhs)
2770 sched_deps_info->finish_rhs ();
2771
16f6ece6
BS
2772 return;
2773 }
2774 break;
2775 }
2776
2777 case PRE_DEC:
2778 case POST_DEC:
2779 case PRE_INC:
2780 case POST_INC:
2781 /* These both read and modify the result. We must handle them as writes
2782 to get proper dependencies for following instructions. We must handle
2783 them as reads to get proper dependencies from this to previous
2784 instructions. Thus we need to pass them to both sched_analyze_1
2785 and sched_analyze_2. We must call sched_analyze_2 first in order
2786 to get the proper antecedent for the read. */
2787 sched_analyze_2 (deps, XEXP (x, 0), insn);
2788 sched_analyze_1 (deps, x, insn);
e855c69d
AB
2789
2790 if (cslr_p && sched_deps_info->finish_rhs)
2791 sched_deps_info->finish_rhs ();
2792
16f6ece6
BS
2793 return;
2794
2795 case POST_MODIFY:
2796 case PRE_MODIFY:
2797 /* op0 = op0 + op1 */
2798 sched_analyze_2 (deps, XEXP (x, 0), insn);
2799 sched_analyze_2 (deps, XEXP (x, 1), insn);
2800 sched_analyze_1 (deps, x, insn);
e855c69d
AB
2801
2802 if (cslr_p && sched_deps_info->finish_rhs)
2803 sched_deps_info->finish_rhs ();
2804
16f6ece6
BS
2805 return;
2806
2807 default:
2808 break;
2809 }
2810
2811 /* Other cases: walk the insn. */
2812 fmt = GET_RTX_FORMAT (code);
2813 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2814 {
2815 if (fmt[i] == 'e')
2816 sched_analyze_2 (deps, XEXP (x, i), insn);
2817 else if (fmt[i] == 'E')
2818 for (j = 0; j < XVECLEN (x, i); j++)
2819 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2820 }
e855c69d
AB
2821
2822 if (cslr_p && sched_deps_info->finish_rhs)
2823 sched_deps_info->finish_rhs ();
16f6ece6
BS
2824}
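
/* For instance (a hypothetical push insn): analyzing
     (set (mem (pre_dec (reg sp))) (reg r1))
   reaches the PRE_DEC case above while walking the address of the
   destination; calling sched_analyze_2 first records the read of sp
   and sched_analyze_1 then records its modification, so dependencies
   in both directions are captured.  */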
2825
32d48af5 2826/* Try to group two fusible insns together to prevent the scheduler
dc549f34 2827 from scheduling them apart. */
ba4fa581
WM
2828
2829static void
ac44248e 2830sched_macro_fuse_insns (rtx_insn *insn)
ba4fa581 2831{
ac44248e 2832 rtx_insn *prev;
ba4fa581 2833
dc549f34
RR
2834 if (any_condjump_p (insn))
2835 {
2836 unsigned int condreg1, condreg2;
2837 rtx cc_reg_1;
2838 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2839 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2840 prev = prev_nonnote_nondebug_insn (insn);
2841 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2842 || !prev
2843 || !modified_in_p (cc_reg_1, prev))
2844 return;
2845 }
2846 else
2847 {
2848 rtx insn_set = single_set (insn);
ba4fa581 2849
dc549f34
RR
2850 prev = prev_nonnote_nondebug_insn (insn);
2851 if (!prev
2852 || !insn_set
892d9879 2853 || !single_set (prev))
dc549f34 2854 return;
ba4fa581 2855
dc549f34
RR
2856 }
2857
2858 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2859 SCHED_GROUP_P (insn) = 1;
ba4fa581 2860
ba4fa581
WM
2861}
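
/* Example (hypothetical insns and target hook behavior): for

     insn 1: (set (reg flags) (compare (reg r1) (const_int 0)))
     insn 2: conditional jump testing (reg flags)

   a target whose macro_fusion_pair_p hook accepts the pair gets
   SCHED_GROUP_P set on insn 2, keeping compare and branch adjacent
   so the hardware can fuse them into a single macro-op.  */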
2862
16f6ece6 2863/* Analyze an INSN with pattern X to find all dependencies. */
16f6ece6 2864static void
ce1ce33a 2865sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
16f6ece6 2866{
b3694847 2867 RTX_CODE code = GET_CODE (x);
16f6ece6 2868 rtx link;
3cd8c58a 2869 unsigned i;
a2041967 2870 reg_set_iterator rsi;
16f6ece6 2871
ce18efcb
VM
2872 if (! reload_completed)
2873 {
2874 HARD_REG_SET temp;
2875
2876 extract_insn (insn);
1145837d 2877 preprocess_constraints (insn);
8f3f5ac0
L
2878 alternative_mask preferred = get_preferred_alternatives (insn);
2879 ira_implicitly_set_insn_hard_regs (&temp, preferred);
6eeea7a7 2880 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
ce18efcb
VM
2881 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2882 }
2883
e855c69d
AB
2884 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2885 && code == SET);
2886
ba4fa581
WM
2887 /* Group compare and branch insns for macro-fusion. */
2888 if (targetm.sched.macro_fusion_p
2889 && targetm.sched.macro_fusion_p ())
dc549f34 2890 sched_macro_fuse_insns (insn);
ba4fa581 2891
1098d3a5 2892 if (may_trap_p (x))
073a8998 2893 /* Avoid moving trapping instructions across function calls that might
1098d3a5
JJ
2894 not always return. */
2895 add_dependence_list (insn, deps->last_function_call_may_noreturn,
1a83e602 2896 1, REG_DEP_ANTI, true);
1098d3a5 2897
7b4ef662
RH
2898 /* We must avoid creating a situation in which two successors of the
2899 current block have different unwind info after scheduling. If at any
2900 point the two paths re-join this leads to incorrect unwind info. */
2901 /* ??? There are certain situations involving a forced frame pointer in
2902 which, with extra effort, we could fix up the unwind info at a later
2903 CFG join. However, it seems better to notice these cases earlier
2904 during prologue generation and avoid marking the frame pointer setup
2905 as frame-related at all. */
2906 if (RTX_FRAME_RELATED_P (insn))
2db036a7
TV
2907 {
2908 /* Make sure prologue insn is scheduled before next jump. */
2909 deps->sched_before_next_jump
2910 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2911
2912 /* Make sure epilogue insn is scheduled after preceding jumps. */
1a83e602
BS
2913 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2914 true);
2db036a7 2915 }
7b4ef662 2916
16f6ece6
BS
2917 if (code == COND_EXEC)
2918 {
2919 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2920
2921 /* ??? Should be recording conditions so we reduce the number of
ff7cc307 2922 false dependencies. */
16f6ece6
BS
2923 x = COND_EXEC_CODE (x);
2924 code = GET_CODE (x);
2925 }
2926 if (code == SET || code == CLOBBER)
c2310f51
RH
2927 {
2928 sched_analyze_1 (deps, x, insn);
2929
2930 /* Bare clobber insns are used for letting life analysis, reg-stack
2931 and others know that a value is dead. Depend on the last call
2932 instruction so that reg-stack won't get confused. */
2933 if (code == CLOBBER)
ce18efcb 2934 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 2935 REG_DEP_OUTPUT, true);
c2310f51 2936 }
16f6ece6
BS
2937 else if (code == PARALLEL)
2938 {
3cd8c58a 2939 for (i = XVECLEN (x, 0); i--;)
16f6ece6
BS
2940 {
2941 rtx sub = XVECEXP (x, 0, i);
2942 code = GET_CODE (sub);
2943
2944 if (code == COND_EXEC)
2945 {
2946 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2947 sub = COND_EXEC_CODE (sub);
2948 code = GET_CODE (sub);
2949 }
2950 if (code == SET || code == CLOBBER)
2951 sched_analyze_1 (deps, sub, insn);
2952 else
2953 sched_analyze_2 (deps, sub, insn);
2954 }
2955 }
2956 else
2957 sched_analyze_2 (deps, x, insn);
2958
2959 /* Mark registers CLOBBERED or used by called function. */
4b4bf941 2960 if (CALL_P (insn))
570a98eb
JH
2961 {
2962 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2963 {
2964 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2965 sched_analyze_1 (deps, XEXP (link, 0), insn);
e384e6b5 2966 else if (GET_CODE (XEXP (link, 0)) != SET)
570a98eb
JH
2967 sched_analyze_2 (deps, XEXP (link, 0), insn);
2968 }
9790c19e
JJ
2969 /* Don't schedule anything after a tail call, tail call needs
2970 to use at least all call-saved registers. */
2971 if (SIBLING_CALL_P (insn))
2972 reg_pending_barrier = TRUE_BARRIER;
2973 else if (find_reg_note (insn, REG_SETJMP, NULL))
d1c9693e 2974 reg_pending_barrier = MOVE_BARRIER;
570a98eb 2975 }
16f6ece6 2976
4b4bf941 2977 if (JUMP_P (insn))
18e720b3 2978 {
e67d1102 2979 rtx_insn *next = next_nonnote_nondebug_insn (insn);
4b4bf941 2980 if (next && BARRIER_P (next))
0a1766b2 2981 reg_pending_barrier = MOVE_BARRIER;
18e720b3
BS
2982 else
2983 {
c838c402
DM
2984 rtx_insn_list *pending;
2985 rtx_expr_list *pending_mem;
5a257872 2986
e855c69d
AB
2987 if (sched_deps_info->compute_jump_reg_dependencies)
2988 {
e2724e63
BS
2989 (*sched_deps_info->compute_jump_reg_dependencies)
2990 (insn, reg_pending_control_uses);
e855c69d 2991
e855c69d 2992 /* Make latency of jump equal to 0 by using anti-dependence. */
e2724e63 2993 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
e855c69d
AB
2994 {
2995 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
2996 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
2997 false);
ce18efcb 2998 add_dependence_list (insn, reg_last->implicit_sets,
1a83e602 2999 0, REG_DEP_ANTI, false);
e855c69d 3000 add_dependence_list (insn, reg_last->clobbers, 0,
1a83e602 3001 REG_DEP_ANTI, false);
e855c69d 3002 }
e855c69d 3003 }
79ba58ef 3004
d9214e53
RH
3005 /* All memory writes and volatile reads must happen before the
3006 jump. Non-volatile reads must happen before the jump iff
3007 the result is needed by the above register used mask. */
3008
79ba58ef
BS
3009 pending = deps->pending_write_insns;
3010 pending_mem = deps->pending_write_mems;
3011 while (pending)
3012 {
c838c402
DM
3013 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3014 add_dependence (insn, pending->insn (),
ce1ce33a 3015 REG_DEP_OUTPUT);
c838c402
DM
3016 pending = pending->next ();
3017 pending_mem = pending_mem->next ();
d9214e53 3018 }
79ba58ef 3019
d9214e53
RH
3020 pending = deps->pending_read_insns;
3021 pending_mem = deps->pending_read_mems;
3022 while (pending)
3023 {
c838c402
DM
3024 if (MEM_VOLATILE_P (pending_mem->element ())
3025 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3026 add_dependence (insn, pending->insn (),
ce1ce33a 3027 REG_DEP_OUTPUT);
c838c402
DM
3028 pending = pending->next ();
3029 pending_mem = pending_mem->next ();
79ba58ef
BS
3030 }
3031
b27cedc6 3032 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
1a83e602 3033 REG_DEP_ANTI, true);
e2724e63 3034 add_dependence_list (insn, deps->pending_jump_insns, 1,
1a83e602 3035 REG_DEP_ANTI, true);
18e720b3 3036 }
18e720b3
BS
3037 }
3038
ca3c1028 3039 /* If this instruction can throw an exception, then moving it changes
786de7eb 3040 where block boundaries fall. This is mighty confusing elsewhere.
d47a8b83
EB
3041 Therefore, prevent such an instruction from being moved. Same for
3042 non-jump instructions that define block boundaries.
3043 ??? Unclear whether this is still necessary in EBB mode. If not,
3044 add_branch_dependences should be adjusted for RGN mode instead. */
3045 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3046 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
d1c9693e 3047 reg_pending_barrier = MOVE_BARRIER;
ca3c1028 3048
7a1aca9c 3049 if (sched_pressure != SCHED_PRESSURE_NONE)
ce18efcb
VM
3050 {
3051 setup_insn_reg_uses (deps, insn);
19ac7892 3052 init_insn_reg_pressure_info (insn);
ce18efcb
VM
3053 }
3054
b5b8b0ac
AO
3055 /* Add register dependencies for insn. */
3056 if (DEBUG_INSN_P (insn))
3057 {
ce1ce33a 3058 rtx_insn *prev = deps->last_debug_insn;
1476d1bd 3059 rtx_insn_list *u;
b5b8b0ac
AO
3060
3061 if (!deps->readonly)
3062 deps->last_debug_insn = insn;
3063
3064 if (prev)
3065 add_dependence (insn, prev, REG_DEP_ANTI);
3066
3067 add_dependence_list (insn, deps->last_function_call, 1,
1a83e602 3068 REG_DEP_ANTI, false);
b5b8b0ac 3069
1a83e602 3070 if (!sel_sched_p ())
1476d1bd
MM
3071 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3072 add_dependence (insn, u->insn (), REG_DEP_ANTI);
b5b8b0ac
AO
3073
3074 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3075 {
3076 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602 3077 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
e2724e63
BS
3078 /* There's no point in making REG_DEP_CONTROL dependencies for
3079 debug insns. */
1a83e602
BS
3080 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3081 false);
f49b295a
AO
3082
3083 if (!deps->readonly)
3084 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
b5b8b0ac
AO
3085 }
3086 CLEAR_REG_SET (reg_pending_uses);
3087
3088 /* Quite often, a debug insn will refer to stuff in the
3089 previous instruction, but the reason we want this
3090 dependency here is to make sure the scheduler doesn't
3091 gratuitously move a debug insn ahead. This could dirty
3092 DF flags and cause additional analysis that wouldn't have
3093 occurred in compilation without debug insns, and such
3094 additional analysis can modify the generated code. */
3095 prev = PREV_INSN (insn);
3096
3097 if (prev && NONDEBUG_INSN_P (prev))
3098 add_dependence (insn, prev, REG_DEP_ANTI);
3099 }
0a1766b2
AM
3100 else
3101 {
b0bd15f7
BS
3102 regset_head set_or_clobbered;
3103
0a1766b2 3104 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
ce18efcb
VM
3105 {
3106 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3107 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3108 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3109 false);
3110 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3111 false);
b8698a0f 3112
ce18efcb
VM
3113 if (!deps->readonly)
3114 {
3115 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3116 reg_last->uses_length++;
3117 }
3118 }
b8698a0f 3119
ce18efcb
VM
3120 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3121 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3122 {
3123 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602 3124 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
ce18efcb 3125 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3126 REG_DEP_ANTI, false);
3127 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3128 false);
b8698a0f 3129
ce18efcb
VM
3130 if (!deps->readonly)
3131 {
3132 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3133 reg_last->uses_length++;
3134 }
3135 }
e855c69d 3136
b0bd15f7
BS
3137 if (targetm.sched.exposed_pipeline)
3138 {
3139 INIT_REG_SET (&set_or_clobbered);
3140 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3141 reg_pending_sets);
3142 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3143 {
3144 struct deps_reg *reg_last = &deps->reg_last[i];
3145 rtx list;
3146 for (list = reg_last->uses; list; list = XEXP (list, 1))
3147 {
3148 rtx other = XEXP (list, 0);
f0898307 3149 if (INSN_CACHED_COND (other) != const_true_rtx
c9bd6bcd 3150 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
f0898307 3151 INSN_CACHED_COND (other) = const_true_rtx;
b0bd15f7
BS
3152 }
3153 }
3154 }
3155
ce18efcb
VM
3156 /* If the current insn is conditional, we can't free any
3157 of the lists. */
3158 if (sched_has_condition_p (insn))
3159 {
3160 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3161 {
3162 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3163 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3164 false);
ce18efcb 3165 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3166 REG_DEP_ANTI, false);
3167 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3168 false);
e2724e63 3169 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3170 REG_DEP_CONTROL, false);
b8698a0f 3171
ce18efcb
VM
3172 if (!deps->readonly)
3173 {
3174 reg_last->clobbers
3175 = alloc_INSN_LIST (insn, reg_last->clobbers);
3176 reg_last->clobbers_length++;
3177 }
3178 }
3179 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3180 {
3181 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3182 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3183 false);
ce18efcb 3184 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3185 REG_DEP_ANTI, false);
3186 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3187 false);
3188 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3189 false);
e2724e63 3190 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3191 REG_DEP_CONTROL, false);
b8698a0f 3192
ce18efcb 3193 if (!deps->readonly)
aef0e7a8 3194 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
ce18efcb
VM
3195 }
3196 }
3197 else
3198 {
3199 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3200 {
3201 struct deps_reg *reg_last = &deps->reg_last[i];
43722f9f
BC
3202 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3203 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
ce18efcb
VM
3204 {
3205 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
1a83e602 3206 REG_DEP_OUTPUT, false);
ce18efcb
VM
3207 add_dependence_list_and_free (deps, insn,
3208 &reg_last->implicit_sets, 0,
1a83e602 3209 REG_DEP_ANTI, false);
ce18efcb 3210 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3211 REG_DEP_ANTI, false);
e2724e63
BS
3212 add_dependence_list_and_free (deps, insn,
3213 &reg_last->control_uses, 0,
1a83e602
BS
3214 REG_DEP_ANTI, false);
3215 add_dependence_list_and_free (deps, insn,
3216 &reg_last->clobbers, 0,
3217 REG_DEP_OUTPUT, false);
b8698a0f 3218
ce18efcb
VM
3219 if (!deps->readonly)
3220 {
3221 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3222 reg_last->clobbers_length = 0;
3223 reg_last->uses_length = 0;
3224 }
3225 }
3226 else
3227 {
1a83e602
BS
3228 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3229 false);
ce18efcb 3230 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602
BS
3231 REG_DEP_ANTI, false);
3232 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3233 false);
e2724e63 3234 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3235 REG_DEP_CONTROL, false);
ce18efcb 3236 }
b8698a0f 3237
ce18efcb
VM
3238 if (!deps->readonly)
3239 {
3240 reg_last->clobbers_length++;
3241 reg_last->clobbers
3242 = alloc_INSN_LIST (insn, reg_last->clobbers);
3243 }
3244 }
3245 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3246 {
3247 struct deps_reg *reg_last = &deps->reg_last[i];
b8698a0f 3248
ce18efcb 3249 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
1a83e602 3250 REG_DEP_OUTPUT, false);
ce18efcb
VM
3251 add_dependence_list_and_free (deps, insn,
3252 &reg_last->implicit_sets,
1a83e602 3253 0, REG_DEP_ANTI, false);
ce18efcb 3254 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
1a83e602 3255 REG_DEP_OUTPUT, false);
ce18efcb 3256 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3257 REG_DEP_ANTI, false);
e2724e63 3258 add_dependence_list (insn, reg_last->control_uses, 0,
1a83e602 3259 REG_DEP_CONTROL, false);
b8698a0f 3260
ce18efcb
VM
3261 if (!deps->readonly)
3262 {
3263 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3264 reg_last->uses_length = 0;
3265 reg_last->clobbers_length = 0;
ce18efcb
VM
3266 }
3267 }
3268 }
e2724e63
BS
3269 if (!deps->readonly)
3270 {
3271 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3272 {
3273 struct deps_reg *reg_last = &deps->reg_last[i];
3274 reg_last->control_uses
3275 = alloc_INSN_LIST (insn, reg_last->control_uses);
3276 }
3277 }
0a1766b2
AM
3278 }
3279
ce18efcb
VM
3280 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3281 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3282 {
3283 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3284 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3285 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3286 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3287 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3288 false);
b8698a0f 3289
ce18efcb
VM
3290 if (!deps->readonly)
3291 reg_last->implicit_sets
3292 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3293 }
3294
e855c69d
AB
3295 if (!deps->readonly)
3296 {
3297 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3298 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3299 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
ce18efcb
VM
3300 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3301 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3302 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3303 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
e855c69d
AB
3304
3305 /* Set up the pending barrier found. */
3306 deps->last_reg_pending_barrier = reg_pending_barrier;
3307 }
0a1766b2
AM
3308
3309 CLEAR_REG_SET (reg_pending_uses);
3310 CLEAR_REG_SET (reg_pending_clobbers);
3311 CLEAR_REG_SET (reg_pending_sets);
e2724e63 3312 CLEAR_REG_SET (reg_pending_control_uses);
ce18efcb
VM
3313 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3314 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
0a1766b2 3315
ca3c1028 3316 /* Add dependencies if a scheduling barrier was found. */
37a0f8a5 3317 if (reg_pending_barrier)
ca3c1028 3318 {
30028c85
VM
3319 /* In the case of barrier the most added dependencies are not
3320 real, so we use anti-dependence here. */
e855c69d 3321 if (sched_has_condition_p (insn))
16f6ece6 3322 {
a2041967 3323 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
37a0f8a5
RH
3324 {
3325 struct deps_reg *reg_last = &deps->reg_last[i];
1a83e602
BS
3326 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3327 true);
ce18efcb
VM
3328 add_dependence_list (insn, reg_last->sets, 0,
3329 reg_pending_barrier == TRUE_BARRIER
1a83e602 3330 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
ce18efcb 3331 add_dependence_list (insn, reg_last->implicit_sets, 0,
1a83e602 3332 REG_DEP_ANTI, true);
ce18efcb
VM
3333 add_dependence_list (insn, reg_last->clobbers, 0,
3334 reg_pending_barrier == TRUE_BARRIER
1a83e602 3335 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
a2041967 3336 }
37a0f8a5
RH
3337 }
3338 else
3339 {
a2041967 3340 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
37a0f8a5
RH
3341 {
3342 struct deps_reg *reg_last = &deps->reg_last[i];
e855c69d 3343 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
1a83e602 3344 REG_DEP_ANTI, true);
e2724e63
BS
3345 add_dependence_list_and_free (deps, insn,
3346 &reg_last->control_uses, 0,
1a83e602 3347 REG_DEP_CONTROL, true);
ce18efcb
VM
3348 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3349 reg_pending_barrier == TRUE_BARRIER
1a83e602
BS
3350 ? REG_DEP_TRUE : REG_DEP_ANTI,
3351 true);
ce18efcb
VM
3352 add_dependence_list_and_free (deps, insn,
3353 &reg_last->implicit_sets, 0,
1a83e602 3354 REG_DEP_ANTI, true);
ce18efcb
VM
3355 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3356 reg_pending_barrier == TRUE_BARRIER
1a83e602
BS
3357 ? REG_DEP_TRUE : REG_DEP_ANTI,
3358 true);
16f6ece6 3359
e855c69d
AB
3360 if (!deps->readonly)
3361 {
3362 reg_last->uses_length = 0;
3363 reg_last->clobbers_length = 0;
3364 }
3365 }
0a1bb917 3366 }
37a0f8a5 3367
e855c69d
AB
3368 if (!deps->readonly)
3369 for (i = 0; i < (unsigned)deps->max_reg; i++)
3370 {
3371 struct deps_reg *reg_last = &deps->reg_last[i];
3372 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3373 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3374 }
3375
ed358aea 3376 /* Don't flush pending lists on speculative checks for
f45e9053
AB
3377 selective scheduling. */
3378 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
e855c69d 3379 flush_pending_lists (deps, insn, true, true);
b8698a0f 3380
d1c9693e 3381 reg_pending_barrier = NOT_A_BARRIER;
4ba478b8 3382 }
16f6ece6
BS
3383
3384 /* If a post-call group is still open, see if it should remain so.
3385 This insn must be a simple move of a hard reg to a pseudo or
3386 vice-versa.
3387
42db504c
SB
3388 We must avoid moving these insns for correctness on targets
3389 with small register classes, and for special registers like
16f6ece6
BS
3390 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3391 hard regs for all targets. */
3392
3393 if (deps->in_post_call_group_p)
3394 {
3395 rtx tmp, set = single_set (insn);
3396 int src_regno, dest_regno;
3397
3398 if (set == NULL)
b5b8b0ac
AO
3399 {
3400 if (DEBUG_INSN_P (insn))
3401 /* We don't want to mark debug insns as part of the same
3402 sched group. We know they really aren't, but if we use
3403 debug insns to tell that a call group is over, we'll
3404 get different code if debug insns are not there and
3405 instructions that follow seem like they should be part
3406 of the call group.
3407
fd59643c 3408 Also, if we did, chain_to_prev_insn would move the
b5b8b0ac
AO
3409 deps of the debug insn to the call insn, modifying
3410 non-debug post-dependency counts of the debug insn
3411 dependencies and otherwise messing with the scheduling
3412 order.
3413
3414 Instead, let such debug insns be scheduled freely, but
3415 keep the call group open in case there are insns that
3416 should be part of it afterwards. Since we grant debug
3417 insns higher priority than even sched group insns, it
3418 will all turn out all right. */
3419 goto debug_dont_end_call_group;
3420 else
3421 goto end_call_group;
3422 }
16f6ece6
BS
3423
3424 tmp = SET_DEST (set);
3425 if (GET_CODE (tmp) == SUBREG)
3426 tmp = SUBREG_REG (tmp);
f8cfc6aa 3427 if (REG_P (tmp))
16f6ece6
BS
3428 dest_regno = REGNO (tmp);
3429 else
3430 goto end_call_group;
3431
3432 tmp = SET_SRC (set);
3433 if (GET_CODE (tmp) == SUBREG)
3434 tmp = SUBREG_REG (tmp);
58f206d6
DD
3435 if ((GET_CODE (tmp) == PLUS
3436 || GET_CODE (tmp) == MINUS)
f8cfc6aa 3437 && REG_P (XEXP (tmp, 0))
58f206d6
DD
3438 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3439 && dest_regno == STACK_POINTER_REGNUM)
3440 src_regno = STACK_POINTER_REGNUM;
f8cfc6aa 3441 else if (REG_P (tmp))
16f6ece6
BS
3442 src_regno = REGNO (tmp);
3443 else
3444 goto end_call_group;
3445
3446 if (src_regno < FIRST_PSEUDO_REGISTER
3447 || dest_regno < FIRST_PSEUDO_REGISTER)
3448 {
e855c69d
AB
3449 if (!deps->readonly
3450 && deps->in_post_call_group_p == post_call_initial)
4c2b2906
EC
3451 deps->in_post_call_group_p = post_call;
3452
b8698a0f 3453 if (!sel_sched_p () || sched_emulate_haifa_p)
e855c69d
AB
3454 {
3455 SCHED_GROUP_P (insn) = 1;
3456 CANT_MOVE (insn) = 1;
3457 }
16f6ece6
BS
3458 }
3459 else
3460 {
3461 end_call_group:
e855c69d
AB
3462 if (!deps->readonly)
3463 deps->in_post_call_group_p = not_post_call;
16f6ece6
BS
3464 }
3465 }
4c2b2906 3466
b5b8b0ac 3467 debug_dont_end_call_group:
e2f6ff94
MK
3468 if ((current_sched_info->flags & DO_SPECULATION)
3469 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3470 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3471 be speculated. */
3472 {
e855c69d
AB
3473 if (sel_sched_p ())
3474 sel_mark_hard_insn (insn);
3475 else
3476 {
3477 sd_iterator_def sd_it;
3478 dep_t dep;
b8698a0f 3479
e855c69d
AB
3480 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3481 sd_iterator_cond (&sd_it, &dep);)
3482 change_spec_dep_to_hard (sd_it);
3483 }
e2f6ff94 3484 }
76df831f
RH
3485
3486 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3487 honor their original ordering. */
3488 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3489 {
3490 if (deps->last_args_size)
3491 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3492 deps->last_args_size = insn;
3493 }
16f6ece6
BS
3494}
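
/* Illustration of the REG_ARGS_SIZE chaining just above (hypothetical
   insn UIDs): if insns 10, 20 and 30 each carry a REG_ARGS_SIZE note,
   insn 20 gets an output dependence on insn 10 and insn 30 on insn 20,
   so the three form a chain that preserves their original order.  */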
3495
1098d3a5
JJ
3496/* Return TRUE if INSN might not always return normally (e.g. call exit,
3497 longjmp, loop forever, ...). */
532aafad
SB
3498/* FIXME: Why can't this function just use flags_from_decl_or_type and
3499 test for ECF_NORETURN? */
1098d3a5 3500static bool
90831096 3501call_may_noreturn_p (rtx_insn *insn)
1098d3a5
JJ
3502{
3503 rtx call;
3504
3505 /* const or pure calls that aren't looping will always return. */
3506 if (RTL_CONST_OR_PURE_CALL_P (insn)
3507 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3508 return false;
3509
da4fdf2d
SB
3510 call = get_call_rtx_from (insn);
3511 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1098d3a5
JJ
3512 {
3513 rtx symbol = XEXP (XEXP (call, 0), 0);
3514 if (SYMBOL_REF_DECL (symbol)
3515 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3516 {
3517 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3518 == BUILT_IN_NORMAL)
3519 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3520 {
3521 case BUILT_IN_BCMP:
3522 case BUILT_IN_BCOPY:
3523 case BUILT_IN_BZERO:
3524 case BUILT_IN_INDEX:
3525 case BUILT_IN_MEMCHR:
3526 case BUILT_IN_MEMCMP:
3527 case BUILT_IN_MEMCPY:
3528 case BUILT_IN_MEMMOVE:
3529 case BUILT_IN_MEMPCPY:
3530 case BUILT_IN_MEMSET:
3531 case BUILT_IN_RINDEX:
3532 case BUILT_IN_STPCPY:
3533 case BUILT_IN_STPNCPY:
3534 case BUILT_IN_STRCAT:
3535 case BUILT_IN_STRCHR:
3536 case BUILT_IN_STRCMP:
3537 case BUILT_IN_STRCPY:
3538 case BUILT_IN_STRCSPN:
3539 case BUILT_IN_STRLEN:
3540 case BUILT_IN_STRNCAT:
3541 case BUILT_IN_STRNCMP:
3542 case BUILT_IN_STRNCPY:
3543 case BUILT_IN_STRPBRK:
3544 case BUILT_IN_STRRCHR:
3545 case BUILT_IN_STRSPN:
3546 case BUILT_IN_STRSTR:
3547 /* Assume certain string/memory builtins always return. */
3548 return false;
3549 default:
3550 break;
3551 }
3552 }
3553 }
3554
3555 /* For all other calls assume that they might not always return. */
3556 return true;
3557}
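
/* A sketch of the direction the FIXME above suggests -- hypothetical,
   not what this function does: given the FUNCTION_DECL one could test

     flags_from_decl_or_type (SYMBOL_REF_DECL (symbol)) & ECF_NORETURN

   but ECF_NORETURN marks calls declared never to return, while this
   predicate answers the weaker question "might the call fail to return
   normally?", so the two tests are not interchangeable as-is.  */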
3558
fd59643c
RS
3559/* Return true if INSN should be made dependent on the previous instruction
3560 group, and if all INSN's dependencies should be moved to the first
3561 instruction of that group. */
3562
3563static bool
90831096 3564chain_to_prev_insn_p (rtx_insn *insn)
fd59643c 3565{
fd59643c
RS
3566 /* INSN forms a group with the previous instruction. */
3567 if (SCHED_GROUP_P (insn))
3568 return true;
3569
3570 /* If the previous instruction clobbers a register R and this one sets
3571 part of R, the clobber was added specifically to help us track the
3572 liveness of R. There's no point scheduling the clobber and leaving
3573 INSN behind, especially if we move the clobber to another block. */
e67d1102 3574 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
fd59643c
RS
3575 if (prev
3576 && INSN_P (prev)
3577 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3578 && GET_CODE (PATTERN (prev)) == CLOBBER)
3579 {
e67d1102 3580 rtx x = XEXP (PATTERN (prev), 0);
fd59643c
RS
3581 if (set_of (x, insn))
3582 return true;
3583 }
3584
3585 return false;
3586}
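
/* Illustrative RTL for the clobber case above (a hypothetical example):

     prev: (clobber (reg:DI 0))
     insn: (set (subreg:SI (reg:DI 0) 0) (reg:SI 1))

   Here set_of (reg 0, insn) is nonnull, so INSN is chained to the
   clobber rather than scheduled away from it.  */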
3587
e855c69d 3588/* Analyze INSN with DEPS as a context. */
16f6ece6 3589void
ce1ce33a 3590deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
16f6ece6 3591{
e855c69d
AB
3592 if (sched_deps_info->start_insn)
3593 sched_deps_info->start_insn (insn);
16f6ece6 3594
b0bd15f7
BS
3595 /* Record the condition for this insn. */
3596 if (NONDEBUG_INSN_P (insn))
e2724e63
BS
3597 {
3598 rtx t;
3599 sched_get_condition_with_rev (insn, NULL);
3600 t = INSN_CACHED_COND (insn);
3dc99c19 3601 INSN_COND_DEPS (insn) = NULL;
e2724e63
BS
3602 if (reload_completed
3603 && (current_sched_info->flags & DO_PREDICATION)
3604 && COMPARISON_P (t)
3605 && REG_P (XEXP (t, 0))
3606 && CONSTANT_P (XEXP (t, 1)))
3607 {
3608 unsigned int regno;
3609 int nregs;
3dc99c19 3610 rtx_insn_list *cond_deps = NULL;
e2724e63
BS
3611 t = XEXP (t, 0);
3612 regno = REGNO (t);
dc8afb70 3613 nregs = REG_NREGS (t);
e2724e63
BS
3614 while (nregs-- > 0)
3615 {
3616 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3dc99c19
DM
3617 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3618 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3619 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
e2724e63 3620 }
3dc99c19 3621 INSN_COND_DEPS (insn) = cond_deps;
e2724e63
BS
3622 }
3623 }
b0bd15f7 3624
7b4ef662 3625 if (JUMP_P (insn))
e855c69d 3626 {
b8698a0f 3627 /* Make each JUMP_INSN (but not a speculative check)
e855c69d
AB
3628 a scheduling barrier for memory references. */
3629 if (!deps->readonly
b8698a0f 3630 && !(sel_sched_p ()
e855c69d
AB
3631 && sel_insn_is_speculation_check (insn)))
3632 {
3633 /* Keep the list a reasonable size. */
43722f9f 3634 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
e855c69d
AB
3635 flush_pending_lists (deps, insn, true, true);
3636 else
e2724e63
BS
3637 deps->pending_jump_insns
3638 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
e855c69d
AB
3639 }
3640
7b4ef662
RH
3641 /* For each insn which shouldn't cross a jump, add a dependence. */
3642 add_dependence_list_and_free (deps, insn,
3643 &deps->sched_before_next_jump, 1,
1a83e602 3644 REG_DEP_ANTI, true);
7b4ef662
RH
3645
3646 sched_analyze_insn (deps, PATTERN (insn), insn);
3647 }
3648 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3649 {
e855c69d
AB
3650 sched_analyze_insn (deps, PATTERN (insn), insn);
3651 }
3652 else if (CALL_P (insn))
3653 {
3654 int i;
3655
3656 CANT_MOVE (insn) = 1;
3657
3658 if (find_reg_note (insn, REG_SETJMP, NULL))
3659 {
3660 /* This is setjmp. Assume that all registers, not just
3661 hard registers, may be clobbered by this call. */
3662 reg_pending_barrier = MOVE_BARRIER;
3663 }
3664 else
3665 {
3666 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3667 /* A call may read and modify global register variables. */
3668 if (global_regs[i])
3669 {
3670 SET_REGNO_REG_SET (reg_pending_sets, i);
ce18efcb 3671 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e855c69d
AB
3672 }
3673 /* Other call-clobbered hard regs may be clobbered.
3674 Since we only have a choice between 'might be clobbered'
3675 and 'definitely not clobbered', we must include all
3676 partly call-clobbered registers here. */
3677 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3678 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3679 SET_REGNO_REG_SET (reg_pending_clobbers, i);
 3680 /* We don't know exactly which fixed registers the function
 3681 uses, but the stack pointer is certainly among them, so be
 3682 conservative and record an implicit use of each. */
3683 else if (fixed_regs[i])
ce18efcb 3684 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e855c69d
AB
3685 /* The frame pointer is normally not used by the function
3686 itself, but by the debugger. */
3687 /* ??? MIPS o32 is an exception. It uses the frame pointer
3688 in the macro expansion of jal but does not represent this
3689 fact in the call_insn rtl. */
3690 else if (i == FRAME_POINTER_REGNUM
3691 || (i == HARD_FRAME_POINTER_REGNUM
3692 && (! reload_completed || frame_pointer_needed)))
ce18efcb 3693 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
e855c69d
AB
3694 }
3695
3696 /* For each insn which shouldn't cross a call, add a dependence
3697 between that insn and this call insn. */
b8698a0f 3698 add_dependence_list_and_free (deps, insn,
e855c69d 3699 &deps->sched_before_next_call, 1,
1a83e602 3700 REG_DEP_ANTI, true);
e855c69d
AB
3701
3702 sched_analyze_insn (deps, PATTERN (insn), insn);
3703
3704 /* If CALL would be in a sched group, then this will violate
3705 convention that sched group insns have dependencies only on the
3706 previous instruction.
3707
3708 Of course one can say: "Hey! What about head of the sched group?"
3709 And I will answer: "Basic principles (one dep per insn) are always
3710 the same." */
3711 gcc_assert (!SCHED_GROUP_P (insn));
3712
3713 /* In the absence of interprocedural alias analysis, we must flush
3714 all pending reads and writes, and start new dependencies starting
3715 from here. But only flush writes for constant calls (which may
3716 be passed a pointer to something we haven't written yet). */
3717 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3718
3719 if (!deps->readonly)
3720 {
3721 /* Remember the last function call for limiting lifetimes. */
3722 free_INSN_LIST_list (&deps->last_function_call);
3723 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1098d3a5
JJ
3724
3725 if (call_may_noreturn_p (insn))
3726 {
3727 /* Remember the last function call that might not always return
3728 normally for limiting moves of trapping insns. */
3729 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3730 deps->last_function_call_may_noreturn
3731 = alloc_INSN_LIST (insn, NULL_RTX);
3732 }
3733
e855c69d
AB
3734 /* Before reload, begin a post-call group, so as to keep the
3735 lifetimes of hard registers correct. */
3736 if (! reload_completed)
3737 deps->in_post_call_group_p = post_call;
3738 }
3739 }
3740
3741 if (sched_deps_info->use_cselib)
3742 cselib_process_insn (insn);
3743
e855c69d
AB
3744 if (sched_deps_info->finish_insn)
3745 sched_deps_info->finish_insn ();
3746
3747 /* Fixup the dependencies in the sched group. */
b8698a0f 3748 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
fd59643c
RS
3749 && chain_to_prev_insn_p (insn)
3750 && !sel_sched_p ())
3751 chain_to_prev_insn (insn);
e855c69d
AB
3752}
3753
3754/* Initialize DEPS for the new block beginning with HEAD. */
3755void
99de8612 3756deps_start_bb (struct deps_desc *deps, rtx_insn *head)
e855c69d
AB
3757{
3758 gcc_assert (!deps->readonly);
4b6c5340 3759
b40742a5
R
3760 /* Before reload, if the previous block ended in a call, show that
3761 we are inside a post-call group, so as to keep the lifetimes of
3762 hard registers correct. */
4b4bf941 3763 if (! reload_completed && !LABEL_P (head))
b40742a5 3764 {
99de8612 3765 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
e855c69d 3766
4b4bf941 3767 if (insn && CALL_P (insn))
b40742a5
R
3768 deps->in_post_call_group_p = post_call_initial;
3769 }
e855c69d
AB
3770}
3771
3772/* Analyze every insn between HEAD and TAIL inclusive, creating backward
3773 dependencies for each insn. */
3774void
ce1ce33a 3775sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
e855c69d 3776{
ce1ce33a 3777 rtx_insn *insn;
e855c69d
AB
3778
3779 if (sched_deps_info->use_cselib)
457eeaae 3780 cselib_init (CSELIB_RECORD_MEMORY);
e855c69d
AB
3781
3782 deps_start_bb (deps, head);
3783
16f6ece6
BS
3784 for (insn = head;; insn = NEXT_INSN (insn))
3785 {
e855c69d 3786
b198261f 3787 if (INSN_P (insn))
16f6ece6 3788 {
e2f6ff94
MK
3789 /* And initialize deps_lists. */
3790 sd_init_insn (insn);
ba4fa581
WM
3791 /* Clean up SCHED_GROUP_P which may be set by last
3792 scheduler pass. */
3793 if (SCHED_GROUP_P (insn))
3794 SCHED_GROUP_P (insn) = 0;
b198261f
MK
3795 }
3796
e855c69d 3797 deps_analyze_insn (deps, insn);
85d69216 3798
16f6ece6 3799 if (insn == tail)
4b6c5340 3800 {
e855c69d 3801 if (sched_deps_info->use_cselib)
4b6c5340
BS
3802 cselib_finish ();
3803 return;
3804 }
16f6ece6 3805 }
41374e13 3806 gcc_unreachable ();
16f6ece6 3807}
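
/* A minimal sketch of how a client pass drives the routines above.
   The function name is hypothetical and unused here; real callers such
   as the region scheduler add more bookkeeping around these calls.  */

static void ATTRIBUTE_UNUSED
analyze_one_block_sketch (rtx_insn *head, rtx_insn *tail)
{
  struct deps_desc tmp_deps;

  /* Per-block context: build backward deps, then release the lists.  */
  init_deps (&tmp_deps, false);
  sched_analyze (&tmp_deps, head, tail);
  free_deps (&tmp_deps);
}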
15aab9c0 3808
e2f6ff94
MK
3809/* Helper for sched_free_deps ().
3810 Delete INSN's (RESOLVED_P) backward dependencies. */
3811static void
90831096 3812delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
15aab9c0 3813{
e2f6ff94
MK
3814 sd_iterator_def sd_it;
3815 dep_t dep;
3816 sd_list_types_def types;
15aab9c0 3817
e2f6ff94
MK
3818 if (resolved_p)
3819 types = SD_LIST_RES_BACK;
3820 else
3821 types = SD_LIST_BACK;
3822
3823 for (sd_it = sd_iterator_start (insn, types);
3824 sd_iterator_cond (&sd_it, &dep);)
ddbd5439 3825 {
e2f6ff94
MK
3826 dep_link_t link = *sd_it.linkp;
3827 dep_node_t node = DEP_LINK_NODE (link);
3828 deps_list_t back_list;
3829 deps_list_t forw_list;
3830
3831 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3832 remove_from_deps_list (link, back_list);
3833 delete_dep_node (node);
ddbd5439 3834 }
15aab9c0
VM
3835}
3836
e2f6ff94
MK
3837/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3838 deps_lists. */
16f6ece6 3839void
dc01c3d1 3840sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
16f6ece6 3841{
dc01c3d1
DM
3842 rtx_insn *insn;
3843 rtx_insn *next_tail = NEXT_INSN (tail);
16f6ece6 3844
b953c2b8
BS
3845 /* We make two passes since some insns may be scheduled before their
3846 dependencies are resolved. */
16f6ece6 3847 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
e2f6ff94
MK
3848 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3849 {
e2f6ff94
MK
3850 /* Clear forward deps and leave the dep_nodes to the
3851 corresponding back_deps list. */
3852 if (resolved_p)
3853 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3854 else
3855 clear_deps_list (INSN_FORW_DEPS (insn));
b953c2b8
BS
3856 }
3857 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3858 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3859 {
3860 /* Clear resolved back deps together with its dep_nodes. */
3861 delete_dep_nodes_in_back_deps (insn, resolved_p);
16f6ece6 3862
e2f6ff94
MK
3863 sd_finish_insn (insn);
3864 }
16f6ece6
BS
3865}
3866\f
3867/* Initialize variables for region data dependence analysis.
b8698a0f 3868 When LAZY_REG_LAST is true, do not allocate reg_last array
88302d54 3869 of struct deps_desc immediately. */
16f6ece6
BS
3870
3871void
88302d54 3872init_deps (struct deps_desc *deps, bool lazy_reg_last)
16f6ece6 3873{
4ba478b8
RH
3874 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3875
3876 deps->max_reg = max_reg;
bcf33775
AB
3877 if (lazy_reg_last)
3878 deps->reg_last = NULL;
3879 else
3880 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
4ba478b8 3881 INIT_REG_SET (&deps->reg_last_in_use);
16f6ece6
BS
3882
3883 deps->pending_read_insns = 0;
3884 deps->pending_read_mems = 0;
3885 deps->pending_write_insns = 0;
3886 deps->pending_write_mems = 0;
e2724e63 3887 deps->pending_jump_insns = 0;
bdbf40a5
MK
3888 deps->pending_read_list_length = 0;
3889 deps->pending_write_list_length = 0;
4a121cc3 3890 deps->pending_flush_length = 0;
16f6ece6
BS
3891 deps->last_pending_memory_flush = 0;
3892 deps->last_function_call = 0;
1098d3a5 3893 deps->last_function_call_may_noreturn = 0;
37a0f8a5 3894 deps->sched_before_next_call = 0;
7b4ef662 3895 deps->sched_before_next_jump = 0;
b40742a5 3896 deps->in_post_call_group_p = not_post_call;
b5b8b0ac 3897 deps->last_debug_insn = 0;
76df831f 3898 deps->last_args_size = 0;
e855c69d
AB
3899 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3900 deps->readonly = 0;
16f6ece6
BS
3901}
3902
b8698a0f 3903/* Initialize only the reg_last field of DEPS, which was not allocated
bcf33775
AB
 3904 earlier because DEPS was initialized lazily. */
3905void
88302d54 3906init_deps_reg_last (struct deps_desc *deps)
bcf33775
AB
3907{
3908 gcc_assert (deps && deps->max_reg > 0);
3909 gcc_assert (deps->reg_last == NULL);
3910
3911 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3912}
3913
3914
16f6ece6
BS
3915/* Free insn lists found in DEPS. */
3916
3917void
88302d54 3918free_deps (struct deps_desc *deps)
16f6ece6 3919{
3cd8c58a 3920 unsigned i;
a2041967 3921 reg_set_iterator rsi;
16f6ece6 3922
bcf33775
AB
3923 /* We set max_reg to 0 when this context was already freed. */
3924 if (deps->max_reg == 0)
3925 {
3926 gcc_assert (deps->reg_last == NULL);
3927 return;
3928 }
3929 deps->max_reg = 0;
b8698a0f 3930
37a0f8a5
RH
3931 free_INSN_LIST_list (&deps->pending_read_insns);
3932 free_EXPR_LIST_list (&deps->pending_read_mems);
3933 free_INSN_LIST_list (&deps->pending_write_insns);
3934 free_EXPR_LIST_list (&deps->pending_write_mems);
3935 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3936
4ba478b8 3937 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
7ef0daad 3938 times. For a testcase with 42000 regs and 8000 small basic blocks,
4ba478b8 3939 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
a2041967 3940 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
16f6ece6 3941 {
4ba478b8 3942 struct deps_reg *reg_last = &deps->reg_last[i];
f2846820
JL
3943 if (reg_last->uses)
3944 free_INSN_LIST_list (&reg_last->uses);
3945 if (reg_last->sets)
3946 free_INSN_LIST_list (&reg_last->sets);
ce18efcb
VM
3947 if (reg_last->implicit_sets)
3948 free_INSN_LIST_list (&reg_last->implicit_sets);
e2724e63
BS
3949 if (reg_last->control_uses)
3950 free_INSN_LIST_list (&reg_last->control_uses);
f2846820
JL
3951 if (reg_last->clobbers)
3952 free_INSN_LIST_list (&reg_last->clobbers);
a2041967 3953 }
4ba478b8
RH
3954 CLEAR_REG_SET (&deps->reg_last_in_use);
3955
b8698a0f 3956 /* As we initialize reg_last lazily, it is possible that we didn't allocate
bcf33775 3957 it at all. */
04695783 3958 free (deps->reg_last);
e855c69d
AB
3959 deps->reg_last = NULL;
3960
3961 deps = NULL;
16f6ece6
BS
3962}
3963
aef0e7a8 3964/* Remove INSN from the dependence context DEPS. */
e855c69d 3965void
3dc99c19 3966remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
e855c69d
AB
3967{
3968 int removed;
3969 unsigned i;
3970 reg_set_iterator rsi;
b8698a0f 3971
e855c69d
AB
3972 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3973 &deps->pending_read_mems);
f49b295a
AO
3974 if (!DEBUG_INSN_P (insn))
3975 deps->pending_read_list_length -= removed;
e855c69d
AB
3976 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3977 &deps->pending_write_mems);
3978 deps->pending_write_list_length -= removed;
e2724e63
BS
3979
3980 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
3981 deps->pending_flush_length -= removed;
e855c69d
AB
3982 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3983 deps->pending_flush_length -= removed;
16f6ece6 3984
e855c69d
AB
3985 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3986 {
3987 struct deps_reg *reg_last = &deps->reg_last[i];
3988 if (reg_last->uses)
3989 remove_from_dependence_list (insn, &reg_last->uses);
3990 if (reg_last->sets)
3991 remove_from_dependence_list (insn, &reg_last->sets);
ce18efcb
VM
3992 if (reg_last->implicit_sets)
3993 remove_from_dependence_list (insn, &reg_last->implicit_sets);
e855c69d
AB
3994 if (reg_last->clobbers)
3995 remove_from_dependence_list (insn, &reg_last->clobbers);
ce18efcb
VM
3996 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3997 && !reg_last->clobbers)
e855c69d
AB
3998 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3999 }
4000
4001 if (CALL_P (insn))
1098d3a5
JJ
4002 {
4003 remove_from_dependence_list (insn, &deps->last_function_call);
b8698a0f 4004 remove_from_dependence_list (insn,
1098d3a5
JJ
4005 &deps->last_function_call_may_noreturn);
4006 }
e855c69d
AB
4007 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4008}
4009
4010/* Init deps data vector. */
4011static void
4012init_deps_data_vector (void)
4013{
9771b263
DN
4014 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4015 if (reserve > 0 && ! h_d_i_d.space (reserve))
4016 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
e855c69d
AB
4017}
4018
4019/* If it is profitable to use them, initialize or extend (depending on
4020 GLOBAL_P) dependency data. */
16f6ece6 4021void
e855c69d 4022sched_deps_init (bool global_p)
16f6ece6 4023{
e2f6ff94
MK
4024 /* Average number of insns in the basic block.
4025 '+ 1' is used to make it nonzero. */
0cae8d31 4026 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
e2f6ff94 4027
e855c69d 4028 init_deps_data_vector ();
b8698a0f
L
4029
4030 /* We use another caching mechanism for selective scheduling, so
e855c69d
AB
4031 we don't use this one. */
4032 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4033 {
4034 /* ?!? We could save some memory by computing a per-region luid mapping
4035 which could reduce both the number of vectors in the cache and the
4036 size of each vector. Instead we just avoid the cache entirely unless
4037 the average number of instructions in a basic block is very high. See
4038 the comment before the declaration of true_dependency_cache for
4039 what we consider "very high". */
496d7bb0 4040 cache_size = 0;
e855c69d 4041 extend_dependency_caches (sched_max_luid, true);
496d7bb0 4042 }
b198261f 4043
b8698a0f 4044 if (global_p)
e855c69d 4045 {
fcb87c50
MM
4046 dl_pool = new object_allocator<_deps_list> ("deps_list");
4047 /* Allocate lists for one block at a time. */
4048 dn_pool = new object_allocator<_dep_node> ("dep_node");
4049 /* Allocate nodes for one block at a time. */
e855c69d 4050 }
496d7bb0 4051}
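
/* Worked instance of the heuristic above (hypothetical numbers): with
   sched_max_luid == 10000 and 10 basic blocks, insns_in_block is
   10000 / 10 + 1 == 1001 > 100 * 5, so the dependency caches are
   allocated; with 1000 small blocks the average drops to 11 and the
   caches are skipped.  */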
ddbd5439 4052
e855c69d 4053
496d7bb0
MK
4054/* Create or extend (depending on CREATE_P) dependency caches to
4055 size N. */
4056void
4057extend_dependency_caches (int n, bool create_p)
4058{
4059 if (create_p || true_dependency_cache)
4060 {
4061 int i, luid = cache_size + n;
4062
4063 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4064 luid);
4065 output_dependency_cache = XRESIZEVEC (bitmap_head,
4066 output_dependency_cache, luid);
4067 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4068 luid);
e2724e63
BS
4069 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4070 luid);
e2f6ff94 4071
ddbd5439
MK
4072 if (current_sched_info->flags & DO_SPECULATION)
4073 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4074 luid);
4075
496d7bb0 4076 for (i = cache_size; i < luid; i++)
84568e14
JH
4077 {
4078 bitmap_initialize (&true_dependency_cache[i], 0);
84568e14 4079 bitmap_initialize (&output_dependency_cache[i], 0);
ddbd5439 4080 bitmap_initialize (&anti_dependency_cache[i], 0);
e2724e63 4081 bitmap_initialize (&control_dependency_cache[i], 0);
e2f6ff94 4082
ddbd5439
MK
4083 if (current_sched_info->flags & DO_SPECULATION)
4084 bitmap_initialize (&spec_dependency_cache[i], 0);
84568e14
JH
4085 }
4086 cache_size = luid;
16f6ece6
BS
4087 }
4088}
4089
e855c69d 4090/* Finalize dependency information for the whole function. */
16f6ece6 4091void
e855c69d 4092sched_deps_finish (void)
16f6ece6 4093{
e2f6ff94 4094 gcc_assert (deps_pools_are_empty_p ());
ff569744
RB
4095 delete dn_pool;
4096 delete dl_pool;
973004cb 4097 dn_pool = NULL;
973004cb 4098 dl_pool = NULL;
b198261f 4099
9771b263 4100 h_d_i_d.release ();
e855c69d 4101 cache_size = 0;
b8698a0f 4102
16f6ece6
BS
4103 if (true_dependency_cache)
4104 {
84568e14
JH
4105 int i;
4106
4107 for (i = 0; i < cache_size; i++)
4108 {
4109 bitmap_clear (&true_dependency_cache[i]);
84568e14 4110 bitmap_clear (&output_dependency_cache[i]);
ddbd5439 4111 bitmap_clear (&anti_dependency_cache[i]);
e2724e63 4112 bitmap_clear (&control_dependency_cache[i]);
e2f6ff94 4113
e855c69d 4114 if (sched_deps_info->generate_spec_deps)
ddbd5439 4115 bitmap_clear (&spec_dependency_cache[i]);
84568e14
JH
4116 }
4117 free (true_dependency_cache);
16f6ece6 4118 true_dependency_cache = NULL;
84568e14 4119 free (output_dependency_cache);
16f6ece6 4120 output_dependency_cache = NULL;
ddbd5439
MK
4121 free (anti_dependency_cache);
4122 anti_dependency_cache = NULL;
e2724e63
BS
4123 free (control_dependency_cache);
4124 control_dependency_cache = NULL;
e2f6ff94 4125
e855c69d 4126 if (sched_deps_info->generate_spec_deps)
ddbd5439
MK
4127 {
4128 free (spec_dependency_cache);
4129 spec_dependency_cache = NULL;
4130 }
e855c69d 4131
16f6ece6
BS
4132 }
4133}
4134
4135/* Initialize some global variables needed by the dependency analysis
4136 code. */
4137
4138void
46c5ad27 4139init_deps_global (void)
16f6ece6 4140{
ce18efcb
VM
4141 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4142 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
04389919
NS
4143 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4144 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4145 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
e2724e63 4146 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
d1c9693e 4147 reg_pending_barrier = NOT_A_BARRIER;
e855c69d
AB
4148
4149 if (!sel_sched_p () || sched_emulate_haifa_p)
4150 {
4151 sched_deps_info->start_insn = haifa_start_insn;
4152 sched_deps_info->finish_insn = haifa_finish_insn;
4153
4154 sched_deps_info->note_reg_set = haifa_note_reg_set;
4155 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4156 sched_deps_info->note_reg_use = haifa_note_reg_use;
4157
4158 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4159 sched_deps_info->note_dep = haifa_note_dep;
4160 }
16f6ece6
BS
4161}
4162
4163/* Free everything used by the dependency analysis code. */
4164
4165void
46c5ad27 4166finish_deps_global (void)
16f6ece6
BS
4167{
4168 FREE_REG_SET (reg_pending_sets);
4169 FREE_REG_SET (reg_pending_clobbers);
37a0f8a5 4170 FREE_REG_SET (reg_pending_uses);
e2724e63 4171 FREE_REG_SET (reg_pending_control_uses);
16f6ece6 4172}
ddbd5439 4173
ddbd5439 4174/* Estimate the weakness of dependence between MEM1 and MEM2. */
e855c69d 4175dw_t
ddbd5439
MK
4176estimate_dep_weak (rtx mem1, rtx mem2)
4177{
4178 rtx r1, r2;
4179
4180 if (mem1 == mem2)
4181 /* MEMs are the same - don't speculate. */
4182 return MIN_DEP_WEAK;
4183
4184 r1 = XEXP (mem1, 0);
4185 r2 = XEXP (mem2, 0);
4186
4187 if (r1 == r2
4188 || (REG_P (r1) && REG_P (r2)
4189 && REGNO (r1) == REGNO (r2)))
4190 /* Again, MEMs are the same. */
4191 return MIN_DEP_WEAK;
4192 else if ((REG_P (r1) && !REG_P (r2))
4193 || (!REG_P (r1) && REG_P (r2)))
 4194 /* Different addressing modes - a reason to be more speculative
 4195 than usual. */
4196 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4197 else
4198 /* We can't say anything about the dependence. */
4199 return UNCERTAIN_DEP_WEAK;
4200}
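
/* The mixed-addressing case above returns the midpoint between
   UNCERTAIN_DEP_WEAK and NO_DEP_WEAK.  With hypothetical scale values
   UNCERTAIN_DEP_WEAK == 192 and NO_DEP_WEAK == 256, the result is
   256 - (256 - 192) / 2 == 224: closer to "no dependence" than the
   uncertain default, but short of certainty.  */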
4201
4202/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
 4203 This function can handle INSN and ELEM being the same (INSN == ELEM).
4204 It is a convenience wrapper. */
e2724e63 4205static void
ce1ce33a 4206add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
ddbd5439 4207{
e855c69d
AB
4208 ds_t ds;
4209 bool internal;
ddbd5439 4210
e855c69d
AB
4211 if (dep_type == REG_DEP_TRUE)
4212 ds = DEP_TRUE;
4213 else if (dep_type == REG_DEP_OUTPUT)
4214 ds = DEP_OUTPUT;
e2724e63
BS
4215 else if (dep_type == REG_DEP_CONTROL)
4216 ds = DEP_CONTROL;
e855c69d
AB
4217 else
4218 {
4219 gcc_assert (dep_type == REG_DEP_ANTI);
4220 ds = DEP_ANTI;
4221 }
4222
4223 /* When add_dependence is called from inside sched-deps.c, we expect
4224 cur_insn to be non-null. */
4225 internal = cur_insn != NULL;
4226 if (internal)
4227 gcc_assert (insn == cur_insn);
4228 else
4229 cur_insn = insn;
b8698a0f 4230
e855c69d
AB
4231 note_dep (elem, ds);
4232 if (!internal)
4233 cur_insn = NULL;
ddbd5439
MK
4234}
4235
8e90de43
SB
4236/* Return weakness of speculative type TYPE in the dep_status DS,
4237 without checking to prevent ICEs on malformed input. */
4238static dw_t
e2f6ff94 4239get_dep_weak_1 (ds_t ds, ds_t type)
ddbd5439
MK
4240{
4241 ds = ds & type;
e855c69d 4242
ddbd5439
MK
4243 switch (type)
4244 {
4245 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4246 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4247 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4248 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4249 default: gcc_unreachable ();
4250 }
4251
ddbd5439
MK
4252 return (dw_t) ds;
4253}
4254
8e90de43 4255/* Return weakness of speculative type TYPE in the dep_status DS. */
e2f6ff94
MK
4256dw_t
4257get_dep_weak (ds_t ds, ds_t type)
4258{
4259 dw_t dw = get_dep_weak_1 (ds, type);
4260
4261 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
e2f6ff94
MK
4262 return dw;
4263}
4264
ddbd5439
MK
4265/* Return the dep_status, which has the same parameters as DS, except for
4266 speculative type TYPE, that will have weakness DW. */
4267ds_t
4268set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4269{
4270 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4271
4272 ds &= ~type;
4273 switch (type)
4274 {
4275 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4276 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4277 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4278 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4279 default: gcc_unreachable ();
4280 }
4281 return ds;
4282}
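
/* Round-trip example for the two accessors above (symbolic): the
   weakness for a type lives in its own bit-field, e.g. at
   BEGIN_DATA_BITS_OFFSET for BEGIN_DATA, so for any dw in
   [MIN_DEP_WEAK, MAX_DEP_WEAK]

     get_dep_weak (set_dep_weak (0, BEGIN_DATA, dw), BEGIN_DATA) == dw

   holds: set_dep_weak shifts DW into the field, and get_dep_weak masks
   and shifts it back out.  */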
4283
e855c69d
AB
4284/* Return the join of two dep_statuses DS1 and DS2.
4285 If MAX_P is true then choose the greater probability,
4286 otherwise multiply probabilities.
4287 This function assumes that both DS1 and DS2 contain speculative bits. */
4288static ds_t
4289ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
ddbd5439
MK
4290{
4291 ds_t ds, t;
4292
4293 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4294
4295 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4296
4297 t = FIRST_SPEC_TYPE;
4298 do
4299 {
4300 if ((ds1 & t) && !(ds2 & t))
4301 ds |= ds1 & t;
4302 else if (!(ds1 & t) && (ds2 & t))
4303 ds |= ds2 & t;
4304 else if ((ds1 & t) && (ds2 & t))
4305 {
e855c69d
AB
4306 dw_t dw1 = get_dep_weak (ds1, t);
4307 dw_t dw2 = get_dep_weak (ds2, t);
ddbd5439
MK
4308 ds_t dw;
4309
e855c69d
AB
4310 if (!max_p)
4311 {
4312 dw = ((ds_t) dw1) * ((ds_t) dw2);
4313 dw /= MAX_DEP_WEAK;
4314 if (dw < MIN_DEP_WEAK)
4315 dw = MIN_DEP_WEAK;
4316 }
4317 else
4318 {
4319 if (dw1 >= dw2)
4320 dw = dw1;
4321 else
4322 dw = dw2;
4323 }
ddbd5439
MK
4324
4325 ds = set_dep_weak (ds, t, (dw_t) dw);
4326 }
4327
4328 if (t == LAST_SPEC_TYPE)
4329 break;
4330 t <<= SPEC_TYPE_SHIFT;
4331 }
4332 while (1);
4333
4334 return ds;
4335}
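
/* Worked instance of the multiplicative merge above: weaknesses act as
   probabilities scaled by MAX_DEP_WEAK, so when a speculation type is
   present in both inputs with dw1 == dw2 == MAX_DEP_WEAK / 2, the merged
   weakness is (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
   == MAX_DEP_WEAK / 4, clamped below at MIN_DEP_WEAK.  With MAX_P set,
   the result would instead stay at MAX_DEP_WEAK / 2.  */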
4336
e855c69d
AB
4337/* Return the join of two dep_statuses DS1 and DS2.
4338 This function assumes that both DS1 and DS2 contain speculative bits. */
4339ds_t
4340ds_merge (ds_t ds1, ds_t ds2)
4341{
4342 return ds_merge_1 (ds1, ds2, false);
4343}
4344
4345/* Return the join of two dep_statuses DS1 and DS2. */
4346ds_t
4347ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4348{
4349 ds_t new_status = ds | ds2;
4350
4351 if (new_status & SPECULATIVE)
4352 {
4353 if ((ds && !(ds & SPECULATIVE))
4354 || (ds2 && !(ds2 & SPECULATIVE)))
4355 /* Then this dep can't be speculative. */
4356 new_status &= ~SPECULATIVE;
4357 else
4358 {
4359 /* Both are speculative. Merging probabilities. */
4360 if (mem1)
4361 {
4362 dw_t dw;
4363
4364 dw = estimate_dep_weak (mem1, mem2);
4365 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4366 }
4367
4368 if (!ds)
4369 new_status = ds2;
4370 else if (!ds2)
4371 new_status = ds;
4372 else
4373 new_status = ds_merge (ds2, ds);
4374 }
4375 }
4376
4377 return new_status;
4378}
4379
4380/* Return the join of DS1 and DS2. Use maximum instead of multiplying
4381 probabilities. */
4382ds_t
4383ds_max_merge (ds_t ds1, ds_t ds2)
4384{
4385 if (ds1 == 0 && ds2 == 0)
4386 return 0;
4387
4388 if (ds1 == 0 && ds2 != 0)
4389 return ds2;
4390
4391 if (ds1 != 0 && ds2 == 0)
4392 return ds1;
4393
4394 return ds_merge_1 (ds1, ds2, true);
4395}
4396
4397/* Return the probability of speculation success for the speculation
4398 status DS. */
4399dw_t
4400ds_weak (ds_t ds)
4401{
4402 ds_t res = 1, dt;
4403 int n = 0;
4404
4405 dt = FIRST_SPEC_TYPE;
4406 do
4407 {
4408 if (ds & dt)
4409 {
4410 res *= (ds_t) get_dep_weak (ds, dt);
4411 n++;
4412 }
4413
4414 if (dt == LAST_SPEC_TYPE)
4415 break;
4416 dt <<= SPEC_TYPE_SHIFT;
4417 }
4418 while (1);
4419
4420 gcc_assert (n);
4421 while (--n)
4422 res /= MAX_DEP_WEAK;
4423
4424 if (res < MIN_DEP_WEAK)
4425 res = MIN_DEP_WEAK;
4426
4427 gcc_assert (res <= MAX_DEP_WEAK);
4428
4429 return (dw_t) res;
4430}
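
/* Example for ds_weak: with two speculation types present, each at
   weakness W, the loop computes res == W * W and then divides by
   MAX_DEP_WEAK once (n - 1 times for n types), giving W * W /
   MAX_DEP_WEAK -- the probability that both speculations succeed, on
   the same fixed-point scale.  */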
4431
4432/* Return a dep status that contains all speculation types of DS. */
4433ds_t
4434ds_get_speculation_types (ds_t ds)
4435{
4436 if (ds & BEGIN_DATA)
4437 ds |= BEGIN_DATA;
4438 if (ds & BE_IN_DATA)
4439 ds |= BE_IN_DATA;
4440 if (ds & BEGIN_CONTROL)
4441 ds |= BEGIN_CONTROL;
4442 if (ds & BE_IN_CONTROL)
4443 ds |= BE_IN_CONTROL;
4444
4445 return ds & SPECULATIVE;
4446}
4447
4448/* Return a dep status that contains maximal weakness for each speculation
4449 type present in DS. */
4450ds_t
4451ds_get_max_dep_weak (ds_t ds)
4452{
4453 if (ds & BEGIN_DATA)
4454 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4455 if (ds & BE_IN_DATA)
4456 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4457 if (ds & BEGIN_CONTROL)
4458 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4459 if (ds & BE_IN_CONTROL)
4460 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4461
4462 return ds;
4463}
4464
e2f6ff94
MK
4465/* Dump information about the dependence status S. */
4466static void
4467dump_ds (FILE *f, ds_t s)
4468{
4469 fprintf (f, "{");
4470
4471 if (s & BEGIN_DATA)
4472 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4473 if (s & BE_IN_DATA)
4474 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4475 if (s & BEGIN_CONTROL)
4476 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4477 if (s & BE_IN_CONTROL)
4478 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4479
4480 if (s & HARD_DEP)
4481 fprintf (f, "HARD_DEP; ");
4482
4483 if (s & DEP_TRUE)
4484 fprintf (f, "DEP_TRUE; ");
e2f6ff94
MK
4485 if (s & DEP_OUTPUT)
4486 fprintf (f, "DEP_OUTPUT; ");
e2724e63
BS
4487 if (s & DEP_ANTI)
4488 fprintf (f, "DEP_ANTI; ");
4489 if (s & DEP_CONTROL)
4490 fprintf (f, "DEP_CONTROL; ");
e2f6ff94
MK
4491
4492 fprintf (f, "}");
4493}
4494
24e47c76 4495DEBUG_FUNCTION void
e2f6ff94
MK
4496debug_ds (ds_t s)
4497{
4498 dump_ds (stderr, s);
4499 fprintf (stderr, "\n");
4500}
4501
ddbd5439
MK
4502/* Verify that dependence type and status are consistent.
4503 If RELAXED_P is true, then skip dep_weakness checks. */
4504static void
e2f6ff94 4505check_dep (dep_t dep, bool relaxed_p)
ddbd5439 4506{
e2f6ff94
MK
4507 enum reg_note dt = DEP_TYPE (dep);
4508 ds_t ds = DEP_STATUS (dep);
4509
4510 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4511
4512 if (!(current_sched_info->flags & USE_DEPS_LIST))
4513 {
b953c2b8 4514 gcc_assert (ds == 0);
e2f6ff94
MK
4515 return;
4516 }
4517
ddbd5439
MK
4518 /* Check that dependence type contains the same bits as the status. */
4519 if (dt == REG_DEP_TRUE)
4520 gcc_assert (ds & DEP_TRUE);
4521 else if (dt == REG_DEP_OUTPUT)
4522 gcc_assert ((ds & DEP_OUTPUT)
b8698a0f 4523 && !(ds & DEP_TRUE));
e2724e63
BS
4524 else if (dt == REG_DEP_ANTI)
4525 gcc_assert ((ds & DEP_ANTI)
ddbd5439 4526 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
e2724e63
BS
4527 else
4528 gcc_assert (dt == REG_DEP_CONTROL
4529 && (ds & DEP_CONTROL)
4530 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
ddbd5439
MK
4531
 4532 /* HARD_DEP cannot appear in the dep_status of a link. */
b8698a0f 4533 gcc_assert (!(ds & HARD_DEP));
ddbd5439
MK
4534
4535 /* Check that dependence status is set correctly when speculation is not
4536 supported. */
e855c69d 4537 if (!sched_deps_info->generate_spec_deps)
ddbd5439
MK
4538 gcc_assert (!(ds & SPECULATIVE));
4539 else if (ds & SPECULATIVE)
4540 {
4541 if (!relaxed_p)
4542 {
4543 ds_t type = FIRST_SPEC_TYPE;
4544
4545 /* Check that dependence weakness is in proper range. */
4546 do
4547 {
4548 if (ds & type)
4549 get_dep_weak (ds, type);
4550
4551 if (type == LAST_SPEC_TYPE)
4552 break;
4553 type <<= SPEC_TYPE_SHIFT;
4554 }
4555 while (1);
4556 }
4557
4558 if (ds & BEGIN_SPEC)
4559 {
4560 /* Only true dependence can be data speculative. */
4561 if (ds & BEGIN_DATA)
4562 gcc_assert (ds & DEP_TRUE);
4563
4564 /* Control dependencies in the insn scheduler are represented by
4565 anti-dependencies, therefore only anti dependence can be
4566 control speculative. */
4567 if (ds & BEGIN_CONTROL)
4568 gcc_assert (ds & DEP_ANTI);
4569 }
4570 else
4571 {
4572 /* Subsequent speculations should resolve true dependencies. */
4573 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4574 }
b8698a0f
L
4575
4576 /* Check that true and anti dependencies can't have other speculative
ddbd5439
MK
4577 statuses. */
4578 if (ds & DEP_TRUE)
4579 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4580 /* An output dependence can't be speculative at all. */
4581 gcc_assert (!(ds & DEP_OUTPUT));
4582 if (ds & DEP_ANTI)
4583 gcc_assert (ds & BEGIN_CONTROL);
4584 }
4585}
a750daa2 4586
1a83e602
BS
 4587/* The following code discovers opportunities to switch a memory reference
 4588 and an increment by modifying the address.  We ensure that this is done
 4589 only for dependencies that express nothing more than a single register
 4590 dependence (tracked via DEP_NONREG and DEP_MULTIPLE), and only when every
 4591 memory instruction involved is subject to at most one dep that can cause
 4592 a pattern change.
4593
4594 When we discover a suitable dependency, we fill in the dep_replacement
4595 structure to show how to modify the memory reference. */
4596
4597/* Holds information about a pair of memory reference and register increment
4598 insns which depend on each other, but could possibly be interchanged. */
4599struct mem_inc_info
4600{
ce1ce33a
DM
4601 rtx_insn *inc_insn;
4602 rtx_insn *mem_insn;
1a83e602
BS
4603
4604 rtx *mem_loc;
4605 /* A register occurring in the memory address for which we wish to break
4606 the dependence. This must be identical to the destination register of
4607 the increment. */
4608 rtx mem_reg0;
4609 /* Any kind of index that is added to that register. */
4610 rtx mem_index;
4611 /* The constant offset used in the memory address. */
4612 HOST_WIDE_INT mem_constant;
4613 /* The constant added in the increment insn. Negated if the increment is
4614 after the memory address. */
4615 HOST_WIDE_INT inc_constant;
4616 /* The source register used in the increment. May be different from mem_reg0
4617 if the increment occurs before the memory address. */
4618 rtx inc_input;
4619};
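
/* Illustrative mem/inc pair (hypothetical RTL).  Suppose the increment
   originally follows the load:

     mem_insn: (set (reg 1) (mem (plus (reg 0) (const_int 4))))
     inc_insn: (set (reg 0) (plus (reg 0) (const_int 8)))

   Then mem_reg0 is (reg 0), mem_constant == 4 and inc_constant == -8
   (negated because the increment comes after the reference).  If the
   two are interchanged, the load must use the post-increment value of
   reg 0, so its address is rewritten to (plus (reg 0) (const_int -4)).  */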
4620
4621/* Verify that the memory location described in MII can be replaced with
4622 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4623 insn remains unchanged by this function. */
4624
4625static rtx
4626attempt_change (struct mem_inc_info *mii, rtx new_addr)
4627{
4628 rtx mem = *mii->mem_loc;
4629 rtx new_mem;
4630
1aa95df7 4631 /* Jump through a lot of hoops to keep the attributes up to date. We
1a83e602
BS
4632 do not want to call one of the change address variants that take
4633 an offset even though we know the offset in many cases. These
4634 assume you are changing where the address is pointing by the
4635 offset. */
4636 new_mem = replace_equiv_address_nv (mem, new_addr);
4637 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4638 {
4639 if (sched_verbose >= 5)
4640 fprintf (sched_dump, "validation failure\n");
4641 return NULL_RTX;
4642 }
4643
4644 /* Put back the old one. */
4645 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4646
4647 return new_mem;
4648}
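
/* The function above uses validate_change in its immediate (non-group)
   mode as a trial-and-rollback probe -- schematically:

     if (validate_change (insn, loc, new_rtx, 0))
       validate_change (insn, loc, old_rtx, 0);

   The first call applies NEW_RTX only if the result is recognizable;
   the second puts OLD_RTX back, so INSN is unchanged either way and
   the caller merely learns whether the replacement would be valid.  */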
4649
 4650/* Return true if INSN is of the form "a = b + c" where a and b are
 4651 regs and c is a CONST_INT (only a PLUS of a register and a constant
 4652 is accepted).  Fill in information in MII about what is found.
4653 BEFORE_MEM indicates whether the increment is found before or after
4654 a corresponding memory reference. */
4655
4656static bool
ce1ce33a 4657parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
1a83e602
BS
4658{
4659 rtx pat = single_set (insn);
4660 rtx src, cst;
4661 bool regs_equal;
4662
4663 if (RTX_FRAME_RELATED_P (insn) || !pat)
4664 return false;
4665
4666 /* Result must be single reg. */
4667 if (!REG_P (SET_DEST (pat)))
4668 return false;
4669
0380c51f 4670 if (GET_CODE (SET_SRC (pat)) != PLUS)
1a83e602
BS
4671 return false;
4672
4673 mii->inc_insn = insn;
4674 src = SET_SRC (pat);
4675 mii->inc_input = XEXP (src, 0);
4676
4677 if (!REG_P (XEXP (src, 0)))
4678 return false;
4679
4680 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4681 return false;
4682
4683 cst = XEXP (src, 1);
4684 if (!CONST_INT_P (cst))
4685 return false;
4686 mii->inc_constant = INTVAL (cst);
4687
4688 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4689
4690 if (!before_mem)
4691 {
4692 mii->inc_constant = -mii->inc_constant;
4693 if (!regs_equal)
4694 return false;
4695 }
4696
4697 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
0380c51f
BS
4698 {
4699 /* Note that the sign has already been reversed for !before_mem. */
581edfa3
TS
4700 if (STACK_GROWS_DOWNWARD)
4701 return mii->inc_constant > 0;
4702 else
4703 return mii->inc_constant < 0;
0380c51f 4704 }
1a83e602
BS
4705 return true;
4706}
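
/* Patterns accepted above (hypothetical RTL), assuming mem_reg0 is
   (reg 0):

     (set (reg 0) (plus (reg 0) (const_int 8)))   accepted before or
                                                  after the mem insn
     (set (reg 0) (plus (reg 1) (const_int 8)))   accepted only when the
                                                  add precedes the mem

   In the second form inc_input (reg 1) differs from mem_reg0, which the
   !before_mem path rejects.  */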
4707
4708/* Once a suitable mem reference has been found and the corresponding data
4709 in MII has been filled in, this function is called to find a suitable
4710 add or inc insn involving the register we found in the memory
4711 reference. */
4712
4713static bool
4714find_inc (struct mem_inc_info *mii, bool backwards)
4715{
4716 sd_iterator_def sd_it;
4717 dep_t dep;
4718
4719 sd_it = sd_iterator_start (mii->mem_insn,
4720 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4721 while (sd_iterator_cond (&sd_it, &dep))
4722 {
4723 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
974c43e9
DM
4724 rtx_insn *pro = DEP_PRO (dep);
4725 rtx_insn *con = DEP_CON (dep);
ce1ce33a 4726 rtx_insn *inc_cand = backwards ? pro : con;
1a83e602
BS
4727 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4728 goto next;
4729 if (parse_add_or_inc (mii, inc_cand, backwards))
4730 {
4731 struct dep_replacement *desc;
bfac633a 4732 df_ref def;
1a83e602
BS
4733 rtx newaddr, newmem;
4734
4735 if (sched_verbose >= 5)
4736 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4737 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4738
 4739 /* Need to ensure that none of the operands of the inc
4740 instruction are assigned to by the mem insn. */
bfac633a
RS
4741 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4742 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4743 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4744 {
4745 if (sched_verbose >= 5)
4746 fprintf (sched_dump,
4747 "inc conflicts with store failure.\n");
4748 goto next;
4749 }
c7cf969c 4750
1a83e602
BS
4751 newaddr = mii->inc_input;
4752 if (mii->mem_index != NULL_RTX)
4753 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4754 mii->mem_index);
4755 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4756 mii->mem_constant + mii->inc_constant);
4757 newmem = attempt_change (mii, newaddr);
4758 if (newmem == NULL_RTX)
4759 goto next;
4760 if (sched_verbose >= 5)
4761 fprintf (sched_dump, "successful address replacement\n");
4762 desc = XCNEW (struct dep_replacement);
4763 DEP_REPLACE (dep) = desc;
4764 desc->loc = mii->mem_loc;
4765 desc->newval = newmem;
4766 desc->orig = *desc->loc;
4767 desc->insn = mii->mem_insn;
4768 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4769 INSN_SPEC_BACK_DEPS (con));
4770 if (backwards)
4771 {
4772 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
28d1688d
BS
4773 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4774 REG_DEP_TRUE);
1a83e602
BS
4775 }
4776 else
4777 {
4778 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
28d1688d
BS
4779 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4780 REG_DEP_ANTI);
1a83e602
BS
4781 }
4782 return true;
4783 }
4784 next:
4785 sd_iterator_next (&sd_it);
4786 }
4787 return false;
4788}
4789
4790/* A recursive function that walks ADDRESS_OF_X to find memory references
4791 which could be modified during scheduling. We call find_inc for each
4792 one we find that has a recognizable form. MII holds information about
4793 the pair of memory/increment instructions.
4794 We ensure that every instruction with a memory reference (which will be
4795 the location of the replacement) is assigned at most one breakable
4796 dependency. */
4797
4798static bool
4799find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4800{
4801 rtx x = *address_of_x;
4802 enum rtx_code code = GET_CODE (x);
4803 const char *const fmt = GET_RTX_FORMAT (code);
4804 int i;
4805
4806 if (code == MEM)
4807 {
4808 rtx reg0 = XEXP (x, 0);
4809
4810 mii->mem_loc = address_of_x;
4811 mii->mem_index = NULL_RTX;
4812 mii->mem_constant = 0;
4813 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4814 {
4815 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4816 reg0 = XEXP (reg0, 0);
4817 }
4818 if (GET_CODE (reg0) == PLUS)
4819 {
4820 mii->mem_index = XEXP (reg0, 1);
4821 reg0 = XEXP (reg0, 0);
4822 }
4823 if (REG_P (reg0))
4824 {
bfac633a 4825 df_ref use;
1a83e602
BS
4826 int occurrences = 0;
4827
4828 /* Make sure this reg appears only once in this insn. Can't use
4829 count_occurrences since that only works for pseudos. */
bfac633a
RS
4830 FOR_EACH_INSN_USE (use, mii->mem_insn)
4831 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4832 if (++occurrences > 1)
4833 {
4834 if (sched_verbose >= 5)
4835 fprintf (sched_dump, "mem count failure\n");
4836 return false;
4837 }
1a83e602
BS
4838
4839 mii->mem_reg0 = reg0;
4840 return find_inc (mii, true) || find_inc (mii, false);
4841 }
4842 return false;
4843 }
4844
4845 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4846 {
4847 /* If REG occurs inside a MEM used in a bit-field reference,
4848 that is unacceptable. */
4849 return false;
4850 }
4851
4852 /* Time for some deep diving. */
4853 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4854 {
4855 if (fmt[i] == 'e')
4856 {
4857 if (find_mem (mii, &XEXP (x, i)))
4858 return true;
4859 }
4860 else if (fmt[i] == 'E')
4861 {
4862 int j;
4863 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4864 if (find_mem (mii, &XVECEXP (x, i, j)))
4865 return true;
4866 }
4867 }
4868 return false;
4869}
4870
4871
4872/* Examine the instructions between HEAD and TAIL and try to find
4873 dependencies that can be broken by modifying one of the patterns. */
4874
4875void
ce1ce33a 4876find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
1a83e602 4877{
ce1ce33a 4878 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
1a83e602
BS
4879 int success_in_block = 0;
4880
8b8de8b6 4881 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1a83e602
BS
4882 {
4883 struct mem_inc_info mii;
4884
4885 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4886 continue;
4887
4888 mii.mem_insn = insn;
4889 if (find_mem (&mii, &PATTERN (insn)))
4890 success_in_block++;
4891 }
4892 if (success_in_block && sched_verbose >= 5)
4893 fprintf (sched_dump, "%d candidates for address modification found.\n",
4894 success_in_block);
4895}
4896
a750daa2 4897#endif /* INSN_SCHEDULING */