/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) needs only the major
   type of the dependency, it is convenient to hide the full dep_status
   from it.  */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = -1;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

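/* Illustrative sketch (not used by the pass itself): deps are usually
   built on the stack and handed to the sd_* routines, as haifa_note_dep
   further below does.  A true dependence of insn CON on insn PRO could
   be constructed like this:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_TRUE);
     sd_add_or_update_dep (dep, false);
*/
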
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
                      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
        {
        case REG_DEP_TRUE:
          t = 't';
          break;

        case REG_DEP_OUTPUT:
          t = 'o';
          break;

        case REG_DEP_ANTI:
          t = 'a';
          break;

        default:
          gcc_unreachable ();
          break;
        }

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
        dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

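/* For example, with DUMP_DEP_ALL a true dependence of insn 20 on insn 10
   prints as "<10; 20; t; >" -- producer uid, consumer uid, type letter,
   then the status (the latter only when USE_DEPS_LIST is set).  The uids
   here are made up for illustration.  */
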
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

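/* The lists above are doubly linked through a pointer-to-pointer
   prev_nextp field rather than a plain "prev" pointer: each link stores
   the address of whatever pointer points at it (the list head or the
   previous link's next field).  That makes detaching O(1) without
   knowing which list owns the link.  Illustrative sketch of the same
   idea on a plain struct (not part of the pass):

     struct node { struct node *next, **prev_nextp; };

     static void
     unlink_node (struct node *n)
     {
       *n->prev_nextp = n->next;
       if (n->next)
         n->next->prev_nextp = n->prev_nextp;
     }
*/
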
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
        break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or which may be
   implicitly clobbered or used) by the currently analyzed insn.  For
   example, an insn may have a constraint that allows only a single
   register class.  Even if no hard register currently appears in the
   insn, a particular hard register will appear in it after the reload
   pass, because the constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  When the bitmap for the true-dependency cache is
   allocated, the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

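/* The caches are indexed by luid.  For instance, whether some dependence
   from PRO to CON is already recorded can be tested with (illustrative
   sketch, mirroring ask_dependency_caches and sd_find_dep_between below):

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                   INSN_LUID (pro))

   and likewise for the output and anti caches.  */
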
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps *, rtx,
                                          rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;

      if (rev)
        *rev = true;
      return cond;
    }

  return 0;
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const_rtx insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}

\f

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1)
         == (rev1 == rev2
             ? reversed_comparison_code (cond2, NULL)
             : GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

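/* For instance, on a predicated target the two insns

     (cond_exec (eq (reg 100) (const_int 0)) ...)
     (cond_exec (ne (reg 100) (const_int 0)) ...)

   carry conditions that conditions_mutex_p recognizes as mutually
   exclusive (same operands, reversed comparison codes), so no ordering
   dependence needs to be drawn between them.  The RTL above is an
   illustrative sketch, not taken from any particular target.  */
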
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}
\f

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot themselves be speculatively scheduled.  */
    {
      if (may_trap_p (PATTERN (insn)))
        /* If the instruction might trap, it cannot be speculatively
           scheduled.  For control speculation it's obvious why, and for
           data speculation it's because the insn might get wrong input
           if speculation wasn't successful.  */
        return false;

      if ((ds & BE_IN_DATA)
          && sched_has_condition_p (insn))
        /* If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  */
        return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
              deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
        size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  return sd_lists_size (insn, list_types) == 0;
}

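/* Illustrative sketch: list types can be OR'ed together, and
   FOR_EACH_DEP then walks all of the requested lists in one loop.
   E.g., to visit every backward dependence of INSN, hard and
   speculative alike:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK,
                   sd_it, dep)
       sd_debug_dep (dep);

   (SD_LIST_BACK is this very combination; see sched-int.h.)  */
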
/* Initialize data for INSN.  */
void
sd_init_insn (rtx insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  if (DEBUG_INSN_P (insn))
    DEBUG_INSN_SCHED_P (insn) = TRUE;

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  if (DEBUG_INSN_P (insn))
    {
      gcc_assert (DEBUG_INSN_SCHED_P (insn));
      DEBUG_INSN_SCHED_P (insn) = FALSE;
    }

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
                              sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either the back list of CON or the forw list of PRO,
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
        if (DEP_PRO (dep) == pro)
          {
            found_p = true;
            break;
          }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
        if (DEP_CON (dep) == con)
          {
            found_p = true;
            break;
          }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
        *sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether any
   dependency between the two is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      gcc_assert (output_dependency_cache != NULL
                  && anti_dependency_cache != NULL);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}

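/* For example, a client that wants to know whether CON already truly
   depends on PRO before drawing a new edge could write (illustrative
   sketch):

     dep_t dep = sd_find_dep_between (pro, con, false);

     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ... CON already truly depends on PRO ...
*/
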
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if a new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
              && anti_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
      else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
        /* DEP does not add anything to the existing dependence.  */
        return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;

      if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
        {
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
              == present_dep_types)
            /* DEP does not add anything to the existing dependence.  */
            return DEP_PRESENT;
        }
      else
        {
          /* Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  */
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                      == present_dep_types);

          /* if (DEP is SPECULATIVE) then
               ..we should update DEP_STATUS
             else
               ..we should reset existing dep to non-speculative.  */
        }
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
        {
        case REG_DEP_TRUE:
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_OUTPUT:
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
        {
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
        }
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if USE_DEPS_LIST is set.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
        {
        case REG_DEP_OUTPUT:
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
            rtx mem1 ATTRIBUTE_UNUSED,
            rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
        /* Either existing dep or a dep we're adding or both are
           speculative.  */
        {
          if (!(ds & SPECULATIVE)
              || !(dep_status & SPECULATIVE))
            /* The new dep can't be speculative.  */
            {
              new_status &= ~SPECULATIVE;

              if (dep_status & SPECULATIVE)
                /* The old dep was speculative, but now it
                   isn't.  */
                change_spec_dep_to_hard (sd_it);
            }
          else
            {
              /* Both are speculative.  Merge probabilities.  */
              if (mem1 != NULL)
                {
                  dw_t dw;

                  dw = estimate_dep_weak (mem1, mem2);
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
                }

              new_status = ds_merge (dep_status, ds);
            }
        }

      ds = new_status;

      if (dep_status != ds)
        {
          DEP_STATUS (dep) = ds;
          res = DEP_CHANGED;
        }
    }

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
              && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
        {
        case DEP_PRESENT:
          return DEP_PRESENT;

        case DEP_CHANGED:
          maybe_present_p = true;
          present_p = true;
          break;

        case DEP_CREATED:
          maybe_present_p = false;
          present_p = false;
          break;

        default:
          gcc_unreachable ();
          break;
        }
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                  DEP_CON (new_dep),
                                                  resolved_p, &sd_it);

      if (present_dep != NULL)
        /* We found an existing dependency between ELEM and INSN.  */
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
        /* We didn't find a dep, so it shouldn't be present in the cache.  */
        gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
                                           estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
                         deps_list_t *back_list_ptr,
                         deps_list_t *forw_list_ptr)
{
  rtx con = DEP_CON (dep);

  if (!resolved_p)
    {
      if ((current_sched_info->flags & DO_SPECULATION)
          && (DEP_STATUS (dep) & SPECULATIVE))
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx elem = DEP_PRO (dep);
  rtx insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
  gcc_assert (!DEBUG_INSN_P (elem) || DEBUG_INSN_P (insn));

  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

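/* Putting the pieces together, a new hard anti-dependence of CON on PRO
   could be created like this (illustrative sketch; the note_dep hooks
   below are how the analyzer itself does it):

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_ANTI);
     sd_add_dep (dep, false);

   sd_add_dep allocates the dep_node, links it into CON's backward and
   PRO's forward lists, and updates the bitmap caches.  */
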
/* Add or update a backward dependence described by DEP; treat it as a
   resolved one if RESOLVED_P.  This function is a convenience wrapper
   around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);

  if ((current_sched_info->flags & DO_SPECULATION)
      && (DEP_STATUS (dep) & SPECULATIVE))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
                 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx pro = DEP_PRO (dep);
  rtx con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
                           &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
        {
          dump_dep (dump, dep, dump_dep_flags | all);
          fprintf (dump, " ");
        }
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
    }
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  */

static void
add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
                              int uncond, enum reg_note dep_type)
{
  rtx list, next;

  if (deps->readonly)
    {
      add_dependence_list (insn, *listp, uncond, dep_type);
      return;
    }

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
        add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx insn, rtx *listp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = &XEXP (*listp, 1);
      exprp = &XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would duplicate code from sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx i = insn;
      rtx pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);

          if (pro == i)
            goto next_link;
        }
      while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  while (DEBUG_INSN_P (prev_nonnote))
    prev_nonnote = prev_nonnote_insn (prev_nonnote);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

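/* For instance, in the sequence (illustrative sketch)

     (set (mem:SI (reg A)) (reg B))   ;; store
     (set (reg C) (mem:SI (reg A)))   ;; load

   the load truly depends on the store (read follows write), while the
   reverse order would give an anti dependence.  Only kinds (1)-(3)
   force an ordering; read-after-read does not.  */
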
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps *deps, bool read_p,
                         rtx insn, rtx mem)
{
  rtx *insn_list;
  rtx *mem_list;
  rtx link;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
                     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
                                    1, REG_DEP_ANTI);
      if (!deps->readonly)
        {
          free_EXPR_LIST_list (&deps->pending_read_mems);
          deps->pending_read_list_length = 0;
        }
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);

  add_dependence_list_and_free (deps, insn,
                                &deps->last_pending_memory_flush, 1,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
}
\f
/* The instruction whose dependencies we are currently analyzing.  */
static rtx cur_insn = NULL_RTX;

/* Implement hooks for the Haifa scheduler.  */

static void
haifa_start_insn (rtx insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
                current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}

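/* The note_* wrappers above decouple the analysis from its clients: a
   scheduler plugs its callbacks into sched_deps_info, and the analyzer
   calls them without knowing who is listening.  Illustrative sketch of
   how the Haifa hooks could be wired up (field names as declared in
   sched-int.h; the real code uses a static initializer):

     sched_deps_info->note_reg_set = haifa_note_reg_set;
     sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
     sched_deps_info->note_reg_use = haifa_note_reg_use;
     sched_deps_info->note_mem_dep = haifa_note_mem_dep;
     sched_deps_info->note_dep = haifa_note_dep;
*/
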
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else
    {
      gcc_assert (ds & DEP_ANTI);
      return REG_DEP_ANTI;
    }
}

\f

/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate and return reg_set_data structure for REGNO and INSN.  */
static struct reg_set_data *
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
  return set;
}

/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps *deps, rtx insn)
{
  unsigned i;
  reg_set_iterator rsi;
  rtx list;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
        continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
        /* Ignore a use that is not dying.  */
        continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (list = reg_last->uses; list; list = XEXP (list, 1))
        {
          use2 = create_insn_reg_use (i, XEXP (list, 0));
          next = use->next_regno_use;
          use->next_regno_use = use2;
          use2->next_regno_use = next;
        }
    }
}

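/* The uses of one regno form a circular list through next_regno_use, so
   starting from any reg_use_data one can visit every insn that uses the
   same register.  Illustrative sketch of walking the cycle:

     struct reg_use_data *u = use;

     do
       {
         ... process u->insn ...
         u = u->next_regno_use;
       }
     while (u != use);
*/
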
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}

/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, that
   the register is being clobbered or is unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
        {
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
          reg_pressure_info[cl].clobber_increase = new_incr;
        }
      else if (unused_p)
        {
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
          reg_pressure_info[cl].unused_set_increase = new_incr;
        }
      else
        {
          new_incr = reg_pressure_info[cl].set_increase + incr;
          reg_pressure_info[cl].set_increase = new_incr;
          if (! insn_use_p (insn, regno))
            reg_pressure_info[cl].change += incr;
          create_insn_reg_set (regno, insn);
        }
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}

/* Like mark_insn_pseudo_birth, except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
                            bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_cover_class[regno];
          if (cl != NO_REGS)
            {
              if (clobber_p)
                {
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
                  reg_pressure_info[cl].clobber_increase = new_incr;
                }
              else if (unused_p)
                {
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
                  reg_pressure_info[cl].unused_set_increase = new_incr;
                }
              else
                {
                  new_incr = reg_pressure_info[cl].set_increase + 1;
                  reg_pressure_info[cl].set_increase = new_incr;
                  if (! insn_use_p (insn, regno))
                    reg_pressure_info[cl].change += 1;
                  create_insn_reg_set (regno, insn);
                }
              gcc_assert (new_incr < (1 << INCREASE_BITS));
            }
        }
      regno++;
    }
}

/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
   respectively, that the register is being clobbered or is unused
   after the insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno,
                                hard_regno_nregs[regno][GET_MODE (reg)],
                                clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}

/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_cover_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}

/* Like mark_pseudo_death, except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_cover_class[regno];
          if (cl != NO_REGS)
            reg_pressure_info[cl].change -= 1;
        }
      regno++;
    }
}

/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
  else
    mark_pseudo_death (regno);
}

/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}

/* Set up reg pressure info related to INSN.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure_p);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);

  note_stores (PATTERN (insn), mark_insn_reg_store, insn);

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
#endif

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  INSN_MAX_REG_PRESSURE (insn) = (int *) xmalloc (ira_reg_class_cover_size
                                                  * sizeof (int));
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      pressure_info[i].clobber_increase
        = reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
        = reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}

2024
2025 \f
2026
2027 /* Internal variable for the sched_analyze_[12] () functions.
2028 If it is true, sched_analyze_[12] is looking at the outermost
2029 (toplevel) SET of the insn. */
2030 static bool can_start_lhs_rhs_p;
2031
2032 /* Extend reg info for the deps context DEPS given that
2033 we have just generated a register numbered REGNO. */
2034 static void
2035 extend_deps_reg_info (struct deps *deps, int regno)
2036 {
2037 int max_regno = regno + 1;
2038
2039 gcc_assert (!reload_completed);
2040
2041 /* In a readonly context, it would not hurt to extend info,
2042 but it should not be needed. */
2043 if (reload_completed && deps->readonly)
2044 {
2045 deps->max_reg = max_regno;
2046 return;
2047 }
2048
2049 if (max_regno > deps->max_reg)
2050 {
2051 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2052 max_regno);
2053 memset (&deps->reg_last[deps->max_reg],
2054 0, (max_regno - deps->max_reg)
2055 * sizeof (struct deps_reg));
2056 deps->max_reg = max_regno;
2057 }
2058 }
2059
2060 /* Extends REG_INFO_P if needed. */
2061 void
2062 maybe_extend_reg_info_p (void)
2063 {
2064 /* Extend REG_INFO_P, if needed. */
2065 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2066 {
2067 size_t new_reg_info_p_size = max_regno + 128;
2068
2069 gcc_assert (!reload_completed && sel_sched_p ());
2070
2071 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2072 new_reg_info_p_size,
2073 reg_info_p_size,
2074 sizeof (*reg_info_p));
2075 reg_info_p_size = new_reg_info_p_size;
2076 }
2077 }
2078
2079 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2080 The type of the reference is specified by REF and can be SET,
2081 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
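/* For example, a SET of (reg:DI 0) on a target where DImode occupies
two word-sized hard registers is recorded as a set of both hard
registers 0 and 1. */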
2082
2083 static void
2084 sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
2085 enum rtx_code ref, rtx insn)
2086 {
2087 /* We could emit new pseudos in renaming. Extend the reg structures. */
2088 if (!reload_completed && sel_sched_p ()
2089 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2090 extend_deps_reg_info (deps, regno);
2091
2092 maybe_extend_reg_info_p ();
2093
2094 /* A hard reg in a wide mode may really be multiple registers.
2095 If so, mark all of them just like the first. */
2096 if (regno < FIRST_PSEUDO_REGISTER)
2097 {
2098 int i = hard_regno_nregs[regno][mode];
2099 if (ref == SET)
2100 {
2101 while (--i >= 0)
2102 note_reg_set (regno + i);
2103 }
2104 else if (ref == USE)
2105 {
2106 while (--i >= 0)
2107 note_reg_use (regno + i);
2108 }
2109 else
2110 {
2111 while (--i >= 0)
2112 note_reg_clobber (regno + i);
2113 }
2114 }
2115
2116 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2117 it does not reload. Ignore these as they have served their
2118 purpose already. */
2119 else if (regno >= deps->max_reg)
2120 {
2121 enum rtx_code code = GET_CODE (PATTERN (insn));
2122 gcc_assert (code == USE || code == CLOBBER);
2123 }
2124
2125 else
2126 {
2127 if (ref == SET)
2128 note_reg_set (regno);
2129 else if (ref == USE)
2130 note_reg_use (regno);
2131 else
2132 note_reg_clobber (regno);
2133
2134 /* Pseudos that are REG_EQUIV to something may be replaced
2135 by that during reloading. We need only add dependencies for
2136 the address in the REG_EQUIV note. */
2137 if (!reload_completed && get_reg_known_equiv_p (regno))
2138 {
2139 rtx t = get_reg_known_value (regno);
2140 if (MEM_P (t))
2141 sched_analyze_2 (deps, XEXP (t, 0), insn);
2142 }
2143
2144 /* Don't let it cross a call after scheduling if it doesn't
2145 already cross one. */
2146 if (REG_N_CALLS_CROSSED (regno) == 0)
2147 {
2148 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2149 deps->sched_before_next_call
2150 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2151 else
2152 add_dependence_list (insn, deps->last_function_call, 1,
2153 REG_DEP_ANTI);
2154 }
2155 }
2156 }
2157
2158 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2159 rtx, X, creating all dependencies generated by the write to the
2160 destination of X, and reads of everything mentioned. */
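/* E.g. for (set (mem) (reg)) this notes an anti-dependence against
each pending read that may overlap the stored location, an output
dependence against each overlapping pending write, and then analyzes
the address and the SET_SRC as reads via sched_analyze_2. */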
2161
2162 static void
2163 sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
2164 {
2165 rtx dest = XEXP (x, 0);
2166 enum rtx_code code = GET_CODE (x);
2167 bool cslr_p = can_start_lhs_rhs_p;
2168
2169 can_start_lhs_rhs_p = false;
2170
2171 gcc_assert (dest);
2172 if (dest == 0)
2173 return;
2174
2175 if (cslr_p && sched_deps_info->start_lhs)
2176 sched_deps_info->start_lhs (dest);
2177
2178 if (GET_CODE (dest) == PARALLEL)
2179 {
2180 int i;
2181
2182 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2183 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2184 sched_analyze_1 (deps,
2185 gen_rtx_CLOBBER (VOIDmode,
2186 XEXP (XVECEXP (dest, 0, i), 0)),
2187 insn);
2188
2189 if (cslr_p && sched_deps_info->finish_lhs)
2190 sched_deps_info->finish_lhs ();
2191
2192 if (code == SET)
2193 {
2194 can_start_lhs_rhs_p = cslr_p;
2195
2196 sched_analyze_2 (deps, SET_SRC (x), insn);
2197
2198 can_start_lhs_rhs_p = false;
2199 }
2200
2201 return;
2202 }
2203
2204 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2205 || GET_CODE (dest) == ZERO_EXTRACT)
2206 {
2207 if (GET_CODE (dest) == STRICT_LOW_PART
2208 || GET_CODE (dest) == ZERO_EXTRACT
2209 || df_read_modify_subreg_p (dest))
2210 {
2211 /* These both read and modify the result. We must handle
2212 them as writes to get proper dependencies for following
2213 instructions. We must handle them as reads to get proper
2214 dependencies from this to previous instructions.
2215 Thus we need to call sched_analyze_2. */
2216
2217 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2218 }
2219 if (GET_CODE (dest) == ZERO_EXTRACT)
2220 {
2221 /* The second and third arguments are values read by this insn. */
2222 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2223 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2224 }
2225 dest = XEXP (dest, 0);
2226 }
2227
2228 if (REG_P (dest))
2229 {
2230 int regno = REGNO (dest);
2231 enum machine_mode mode = GET_MODE (dest);
2232
2233 sched_analyze_reg (deps, regno, mode, code, insn);
2234
2235 #ifdef STACK_REGS
2236 /* Treat all writes to a stack register as modifying the TOS. */
2237 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2238 {
2239 int nregs;
2240
2241 /* Avoid analyzing the same register twice. */
2242 if (regno != FIRST_STACK_REG)
2243 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2244
2245 nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2246 while (--nregs >= 0)
2247 SET_HARD_REG_BIT (implicit_reg_pending_uses,
2248 FIRST_STACK_REG + nregs);
2249 }
2250 #endif
2251 }
2252 else if (MEM_P (dest))
2253 {
2254 /* Writing memory. */
2255 rtx t = dest;
2256
2257 if (sched_deps_info->use_cselib)
2258 {
2259 t = shallow_copy_rtx (dest);
2260 cselib_lookup (XEXP (t, 0), Pmode, 1);
2261 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2262 }
2263 t = canon_rtx (t);
2264
2265 /* Pending lists can't get larger with a readonly context. */
2266 if (!deps->readonly
2267 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2268 > MAX_PENDING_LIST_LENGTH))
2269 {
2270 /* Flush all pending reads and writes to prevent the pending lists
2271 from getting any larger. Insn scheduling runs too slowly when
2272 these lists get long. When compiling GCC with itself,
2273 this flush occurs 8 times for sparc, and 10 times for m88k using
2274 the default value of 32. */
2275 flush_pending_lists (deps, insn, false, true);
2276 }
2277 else
2278 {
2279 rtx pending, pending_mem;
2280
2281 pending = deps->pending_read_insns;
2282 pending_mem = deps->pending_read_mems;
2283 while (pending)
2284 {
2285 if (anti_dependence (XEXP (pending_mem, 0), t)
2286 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2287 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2288 DEP_ANTI);
2289
2290 pending = XEXP (pending, 1);
2291 pending_mem = XEXP (pending_mem, 1);
2292 }
2293
2294 pending = deps->pending_write_insns;
2295 pending_mem = deps->pending_write_mems;
2296 while (pending)
2297 {
2298 if (output_dependence (XEXP (pending_mem, 0), t)
2299 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2300 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2301 DEP_OUTPUT);
2302
2303 pending = XEXP (pending, 1);
2304 pending_mem = XEXP (pending_mem, 1);
2305 }
2306
2307 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2308 REG_DEP_ANTI);
2309
2310 if (!deps->readonly)
2311 add_insn_mem_dependence (deps, false, insn, dest);
2312 }
2313 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2314 }
2315
2316 if (cslr_p && sched_deps_info->finish_lhs)
2317 sched_deps_info->finish_lhs ();
2318
2319 /* Analyze reads. */
2320 if (GET_CODE (x) == SET)
2321 {
2322 can_start_lhs_rhs_p = cslr_p;
2323
2324 sched_analyze_2 (deps, SET_SRC (x), insn);
2325
2326 can_start_lhs_rhs_p = false;
2327 }
2328 }
2329
2330 /* Analyze the uses of memory and registers in rtx X in INSN. */
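/* A MEM read, for instance, gets a true dependence on every pending
write it may overlap, while constants are ignored entirely. */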
2331 static void
2332 sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
2333 {
2334 int i;
2335 int j;
2336 enum rtx_code code;
2337 const char *fmt;
2338 bool cslr_p = can_start_lhs_rhs_p;
2339
2340 can_start_lhs_rhs_p = false;
2341
2342 gcc_assert (x);
2343 if (x == 0)
2344 return;
2345
2346 if (cslr_p && sched_deps_info->start_rhs)
2347 sched_deps_info->start_rhs (x);
2348
2349 code = GET_CODE (x);
2350
2351 switch (code)
2352 {
2353 case CONST_INT:
2354 case CONST_DOUBLE:
2355 case CONST_FIXED:
2356 case CONST_VECTOR:
2357 case SYMBOL_REF:
2358 case CONST:
2359 case LABEL_REF:
2360 /* Ignore constants. */
2361 if (cslr_p && sched_deps_info->finish_rhs)
2362 sched_deps_info->finish_rhs ();
2363
2364 return;
2365
2366 #ifdef HAVE_cc0
2367 case CC0:
2368 /* User of CC0 depends on immediately preceding insn. */
2369 SCHED_GROUP_P (insn) = 1;
2370 /* Don't move CC0 setter to another block (it can set up the
2371 same flag for previous CC0 users which is safe). */
2372 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2373
2374 if (cslr_p && sched_deps_info->finish_rhs)
2375 sched_deps_info->finish_rhs ();
2376
2377 return;
2378 #endif
2379
2380 case REG:
2381 {
2382 int regno = REGNO (x);
2383 enum machine_mode mode = GET_MODE (x);
2384
2385 sched_analyze_reg (deps, regno, mode, USE, insn);
2386
2387 #ifdef STACK_REGS
2388 /* Treat all reads of a stack register as modifying the TOS. */
2389 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2390 {
2391 /* Avoid analyzing the same register twice. */
2392 if (regno != FIRST_STACK_REG)
2393 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2394 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2395 }
2396 #endif
2397
2398 if (cslr_p && sched_deps_info->finish_rhs)
2399 sched_deps_info->finish_rhs ();
2400
2401 return;
2402 }
2403
2404 case MEM:
2405 {
2406 /* Reading memory. */
2407 rtx u;
2408 rtx pending, pending_mem;
2409 rtx t = x;
2410
2411 if (DEBUG_INSN_P (insn))
2412 {
2413 sched_analyze_2 (deps, XEXP (x, 0), insn);
2414 return;
2415 }
2416
2417 if (sched_deps_info->use_cselib)
2418 {
2419 t = shallow_copy_rtx (t);
2420 cselib_lookup (XEXP (t, 0), Pmode, 1);
2421 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2422 }
2423 t = canon_rtx (t);
2424 pending = deps->pending_read_insns;
2425 pending_mem = deps->pending_read_mems;
2426 while (pending)
2427 {
2428 if (read_dependence (XEXP (pending_mem, 0), t)
2429 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2430 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2431 DEP_ANTI);
2432
2433 pending = XEXP (pending, 1);
2434 pending_mem = XEXP (pending_mem, 1);
2435 }
2436
2437 pending = deps->pending_write_insns;
2438 pending_mem = deps->pending_write_mems;
2439 while (pending)
2440 {
2441 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2442 t, rtx_varies_p)
2443 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2444 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2445 sched_deps_info->generate_spec_deps
2446 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2447
2448 pending = XEXP (pending, 1);
2449 pending_mem = XEXP (pending_mem, 1);
2450 }
2451
2452 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2453 {
2454 if (! JUMP_P (XEXP (u, 0)))
2455 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2456 else if (deps_may_trap_p (x))
2457 {
2458 if ((sched_deps_info->generate_spec_deps)
2459 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2460 {
2461 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2462 MAX_DEP_WEAK);
2463
2464 note_dep (XEXP (u, 0), ds);
2465 }
2466 else
2467 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2468 }
2469 }
2470
2471 /* Always add these dependencies to pending_reads, since
2472 this insn may be followed by a write. */
2473 if (!deps->readonly)
2474 add_insn_mem_dependence (deps, true, insn, x);
2475
2476 /* Take advantage of tail recursion here. */
2477 sched_analyze_2 (deps, XEXP (x, 0), insn);
2478
2479 if (cslr_p && sched_deps_info->finish_rhs)
2480 sched_deps_info->finish_rhs ();
2481
2482 return;
2483 }
2484
2485 /* Force pending stores to memory in case a trap handler needs them. */
2486 case TRAP_IF:
2487 flush_pending_lists (deps, insn, true, false);
2488 break;
2489
2490 case UNSPEC_VOLATILE:
2491 flush_pending_lists (deps, insn, true, true);
2492 /* FALLTHRU */
2493
2494 case ASM_OPERANDS:
2495 case ASM_INPUT:
2496 {
2497 /* Traditional and volatile asm instructions must be considered to use
2498 and clobber all hard registers, all pseudo-registers and all of
2499 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2500
2501 Consider for instance a volatile asm that changes the fpu rounding
2502 mode. An insn should not be moved across this even if it only uses
2503 pseudo-regs because it might give an incorrectly rounded result. */
2504 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2505 reg_pending_barrier = TRUE_BARRIER;
2506
2507 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2508 We cannot just fall through here since then we would be confused
2509 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2510 a traditional asm unlike its normal usage. */
2511
2512 if (code == ASM_OPERANDS)
2513 {
2514 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2515 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2516
2517 if (cslr_p && sched_deps_info->finish_rhs)
2518 sched_deps_info->finish_rhs ();
2519
2520 return;
2521 }
2522 break;
2523 }
2524
2525 case PRE_DEC:
2526 case POST_DEC:
2527 case PRE_INC:
2528 case POST_INC:
2529 /* These both read and modify the result. We must handle them as writes
2530 to get proper dependencies for following instructions. We must handle
2531 them as reads to get proper dependencies from this to previous
2532 instructions. Thus we need to pass them to both sched_analyze_1
2533 and sched_analyze_2. We must call sched_analyze_2 first in order
2534 to get the proper antecedent for the read. */
2535 sched_analyze_2 (deps, XEXP (x, 0), insn);
2536 sched_analyze_1 (deps, x, insn);
2537
2538 if (cslr_p && sched_deps_info->finish_rhs)
2539 sched_deps_info->finish_rhs ();
2540
2541 return;
2542
2543 case POST_MODIFY:
2544 case PRE_MODIFY:
2545 /* op0 = op0 + op1 */
2546 sched_analyze_2 (deps, XEXP (x, 0), insn);
2547 sched_analyze_2 (deps, XEXP (x, 1), insn);
2548 sched_analyze_1 (deps, x, insn);
2549
2550 if (cslr_p && sched_deps_info->finish_rhs)
2551 sched_deps_info->finish_rhs ();
2552
2553 return;
2554
2555 default:
2556 break;
2557 }
2558
2559 /* Other cases: walk the insn. */
2560 fmt = GET_RTX_FORMAT (code);
2561 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2562 {
2563 if (fmt[i] == 'e')
2564 sched_analyze_2 (deps, XEXP (x, i), insn);
2565 else if (fmt[i] == 'E')
2566 for (j = 0; j < XVECLEN (x, i); j++)
2567 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2568 }
2569
2570 if (cslr_p && sched_deps_info->finish_rhs)
2571 sched_deps_info->finish_rhs ();
2572 }
2573
2574 /* Analyze an INSN with pattern X to find all dependencies. */
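/* The pattern is dispatched on its code: SET and CLOBBER go through
sched_analyze_1, PARALLELs are walked element by element, anything
else is treated as a use by sched_analyze_2, and the extra
dependencies for calls, jumps and barriers are added afterwards. */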
2575 static void
2576 sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
2577 {
2578 RTX_CODE code = GET_CODE (x);
2579 rtx link;
2580 unsigned i;
2581 reg_set_iterator rsi;
2582
2583 if (! reload_completed)
2584 {
2585 HARD_REG_SET temp;
2586
2587 extract_insn (insn);
2588 preprocess_constraints ();
2589 ira_implicitly_set_insn_hard_regs (&temp);
2590 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2591 }
2592
2593 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2594 && code == SET);
2595
2596 if (code == COND_EXEC)
2597 {
2598 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2599
2600 /* ??? Should be recording conditions so we reduce the number of
2601 false dependencies. */
2602 x = COND_EXEC_CODE (x);
2603 code = GET_CODE (x);
2604 }
2605 if (code == SET || code == CLOBBER)
2606 {
2607 sched_analyze_1 (deps, x, insn);
2608
2609 /* Bare clobber insns are used for letting life analysis, reg-stack
2610 and others know that a value is dead. Depend on the last call
2611 instruction so that reg-stack won't get confused. */
2612 if (code == CLOBBER)
2613 add_dependence_list (insn, deps->last_function_call, 1,
2614 REG_DEP_OUTPUT);
2615 }
2616 else if (code == PARALLEL)
2617 {
2618 for (i = XVECLEN (x, 0); i--;)
2619 {
2620 rtx sub = XVECEXP (x, 0, i);
2621 code = GET_CODE (sub);
2622
2623 if (code == COND_EXEC)
2624 {
2625 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2626 sub = COND_EXEC_CODE (sub);
2627 code = GET_CODE (sub);
2628 }
2629 if (code == SET || code == CLOBBER)
2630 sched_analyze_1 (deps, sub, insn);
2631 else
2632 sched_analyze_2 (deps, sub, insn);
2633 }
2634 }
2635 else
2636 sched_analyze_2 (deps, x, insn);
2637
2638 /* Mark registers CLOBBERED or used by called function. */
2639 if (CALL_P (insn))
2640 {
2641 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2642 {
2643 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2644 sched_analyze_1 (deps, XEXP (link, 0), insn);
2645 else
2646 sched_analyze_2 (deps, XEXP (link, 0), insn);
2647 }
2648 if (find_reg_note (insn, REG_SETJMP, NULL))
2649 reg_pending_barrier = MOVE_BARRIER;
2650 }
2651
2652 if (JUMP_P (insn))
2653 {
2654 rtx next;
2655 next = next_nonnote_insn (insn);
2656 while (next && DEBUG_INSN_P (next))
2657 next = next_nonnote_insn (next);
2658 if (next && BARRIER_P (next))
2659 reg_pending_barrier = MOVE_BARRIER;
2660 else
2661 {
2662 rtx pending, pending_mem;
2663
2664 if (sched_deps_info->compute_jump_reg_dependencies)
2665 {
2666 regset_head tmp_uses, tmp_sets;
2667 INIT_REG_SET (&tmp_uses);
2668 INIT_REG_SET (&tmp_sets);
2669
2670 (*sched_deps_info->compute_jump_reg_dependencies)
2671 (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2672 /* Make latency of jump equal to 0 by using anti-dependence. */
2673 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2674 {
2675 struct deps_reg *reg_last = &deps->reg_last[i];
2676 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2677 add_dependence_list (insn, reg_last->implicit_sets,
2678 0, REG_DEP_ANTI);
2679 add_dependence_list (insn, reg_last->clobbers, 0,
2680 REG_DEP_ANTI);
2681
2682 if (!deps->readonly)
2683 {
2684 reg_last->uses_length++;
2685 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2686 }
2687 }
2688 IOR_REG_SET (reg_pending_sets, &tmp_sets);
2689
2690 CLEAR_REG_SET (&tmp_uses);
2691 CLEAR_REG_SET (&tmp_sets);
2692 }
2693
2694 /* All memory writes and volatile reads must happen before the
2695 jump. Non-volatile reads must happen before the jump only if the
2696 result is needed by a register in the use mask computed above. */
2697
2698 pending = deps->pending_write_insns;
2699 pending_mem = deps->pending_write_mems;
2700 while (pending)
2701 {
2702 if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2703 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2704 pending = XEXP (pending, 1);
2705 pending_mem = XEXP (pending_mem, 1);
2706 }
2707
2708 pending = deps->pending_read_insns;
2709 pending_mem = deps->pending_read_mems;
2710 while (pending)
2711 {
2712 if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2713 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2714 add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2715 pending = XEXP (pending, 1);
2716 pending_mem = XEXP (pending_mem, 1);
2717 }
2718
2719 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2720 REG_DEP_ANTI);
2721 }
2722 }
2723
2724 /* If this instruction can throw an exception, then moving it changes
2725 where block boundaries fall. This is mighty confusing elsewhere.
2726 Therefore, prevent such an instruction from being moved. Same for
2727 non-jump instructions that define block boundaries.
2728 ??? Unclear whether this is still necessary in EBB mode. If not,
2729 add_branch_dependences should be adjusted for RGN mode instead. */
2730 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2731 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2732 reg_pending_barrier = MOVE_BARRIER;
2733
2734 if (sched_pressure_p)
2735 {
2736 setup_insn_reg_uses (deps, insn);
2737 setup_insn_reg_pressure_info (insn);
2738 }
2739
2740 /* Add register dependencies for insn. */
2741 if (DEBUG_INSN_P (insn))
2742 {
2743 rtx prev = deps->last_debug_insn;
2744 rtx u;
2745
2746 if (!deps->readonly)
2747 deps->last_debug_insn = insn;
2748
2749 if (prev)
2750 add_dependence (insn, prev, REG_DEP_ANTI);
2751
2752 add_dependence_list (insn, deps->last_function_call, 1,
2753 REG_DEP_ANTI);
2754
2755 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2756 if (! JUMP_P (XEXP (u, 0))
2757 || !sel_sched_p ())
2758 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2759
2760 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2761 {
2762 struct deps_reg *reg_last = &deps->reg_last[i];
2763 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2764 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2765 }
2766 CLEAR_REG_SET (reg_pending_uses);
2767
2768 /* Quite often, a debug insn will refer to stuff in the
2769 previous instruction, but the reason we want this
2770 dependency here is to make sure the scheduler doesn't
2771 gratuitously move a debug insn ahead. This could dirty
2772 DF flags and cause additional analysis that wouldn't have
2773 occurred in compilation without debug insns, and such
2774 additional analysis can modify the generated code. */
2775 prev = PREV_INSN (insn);
2776
2777 if (prev && NONDEBUG_INSN_P (prev))
2778 add_dependence (insn, prev, REG_DEP_ANTI);
2779 }
2780 else
2781 {
2782 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2783 {
2784 struct deps_reg *reg_last = &deps->reg_last[i];
2785 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2786 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2787 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2788
2789 if (!deps->readonly)
2790 {
2791 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2792 reg_last->uses_length++;
2793 }
2794 }
2795
2796 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2797 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2798 {
2799 struct deps_reg *reg_last = &deps->reg_last[i];
2800 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2801 add_dependence_list (insn, reg_last->implicit_sets, 0,
2802 REG_DEP_ANTI);
2803 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2804
2805 if (!deps->readonly)
2806 {
2807 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2808 reg_last->uses_length++;
2809 }
2810 }
2811
2812 /* If the current insn is conditional, we can't free any
2813 of the lists. */
2814 if (sched_has_condition_p (insn))
2815 {
2816 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2817 {
2818 struct deps_reg *reg_last = &deps->reg_last[i];
2819 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2820 add_dependence_list (insn, reg_last->implicit_sets, 0,
2821 REG_DEP_ANTI);
2822 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2823
2824 if (!deps->readonly)
2825 {
2826 reg_last->clobbers
2827 = alloc_INSN_LIST (insn, reg_last->clobbers);
2828 reg_last->clobbers_length++;
2829 }
2830 }
2831 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2832 {
2833 struct deps_reg *reg_last = &deps->reg_last[i];
2834 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2835 add_dependence_list (insn, reg_last->implicit_sets, 0,
2836 REG_DEP_ANTI);
2837 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2838 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2839
2840 if (!deps->readonly)
2841 {
2842 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2843 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2844 }
2845 }
2846 }
2847 else
2848 {
2849 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2850 {
2851 struct deps_reg *reg_last = &deps->reg_last[i];
2852 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2853 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2854 {
2855 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2856 REG_DEP_OUTPUT);
2857 add_dependence_list_and_free (deps, insn,
2858 &reg_last->implicit_sets, 0,
2859 REG_DEP_ANTI);
2860 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2861 REG_DEP_ANTI);
2862 add_dependence_list_and_free
2863 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2864
2865 if (!deps->readonly)
2866 {
2867 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2868 reg_last->clobbers_length = 0;
2869 reg_last->uses_length = 0;
2870 }
2871 }
2872 else
2873 {
2874 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2875 add_dependence_list (insn, reg_last->implicit_sets, 0,
2876 REG_DEP_ANTI);
2877 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2878 }
2879
2880 if (!deps->readonly)
2881 {
2882 reg_last->clobbers_length++;
2883 reg_last->clobbers
2884 = alloc_INSN_LIST (insn, reg_last->clobbers);
2885 }
2886 }
2887 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2888 {
2889 struct deps_reg *reg_last = &deps->reg_last[i];
2890
2891 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2892 REG_DEP_OUTPUT);
2893 add_dependence_list_and_free (deps, insn,
2894 &reg_last->implicit_sets,
2895 0, REG_DEP_ANTI);
2896 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2897 REG_DEP_OUTPUT);
2898 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2899 REG_DEP_ANTI);
2900
2901 if (!deps->readonly)
2902 {
2903 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2904 reg_last->uses_length = 0;
2905 reg_last->clobbers_length = 0;
2906 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2907 }
2908 }
2909 }
2910 }
2911
2912 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2913 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2914 {
2915 struct deps_reg *reg_last = &deps->reg_last[i];
2916 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2917 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2918 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2919
2920 if (!deps->readonly)
2921 reg_last->implicit_sets
2922 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2923 }
2924
2925 if (!deps->readonly)
2926 {
2927 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2928 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2929 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2930 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2931 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2932 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2933 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2934
2935 /* Set up the pending barrier found. */
2936 deps->last_reg_pending_barrier = reg_pending_barrier;
2937 }
2938
2939 CLEAR_REG_SET (reg_pending_uses);
2940 CLEAR_REG_SET (reg_pending_clobbers);
2941 CLEAR_REG_SET (reg_pending_sets);
2942 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2943 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2944
2945 /* Add dependencies if a scheduling barrier was found. */
2946 if (reg_pending_barrier)
2947 {
2948 /* In the case of a barrier, most of the added dependencies are not
2949 real, so we use anti-dependences here. */
2950 if (sched_has_condition_p (insn))
2951 {
2952 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2953 {
2954 struct deps_reg *reg_last = &deps->reg_last[i];
2955 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2956 add_dependence_list (insn, reg_last->sets, 0,
2957 reg_pending_barrier == TRUE_BARRIER
2958 ? REG_DEP_TRUE : REG_DEP_ANTI);
2959 add_dependence_list (insn, reg_last->implicit_sets, 0,
2960 REG_DEP_ANTI);
2961 add_dependence_list (insn, reg_last->clobbers, 0,
2962 reg_pending_barrier == TRUE_BARRIER
2963 ? REG_DEP_TRUE : REG_DEP_ANTI);
2964 }
2965 }
2966 else
2967 {
2968 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2969 {
2970 struct deps_reg *reg_last = &deps->reg_last[i];
2971 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2972 REG_DEP_ANTI);
2973 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2974 reg_pending_barrier == TRUE_BARRIER
2975 ? REG_DEP_TRUE : REG_DEP_ANTI);
2976 add_dependence_list_and_free (deps, insn,
2977 &reg_last->implicit_sets, 0,
2978 REG_DEP_ANTI);
2979 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2980 reg_pending_barrier == TRUE_BARRIER
2981 ? REG_DEP_TRUE : REG_DEP_ANTI);
2982
2983 if (!deps->readonly)
2984 {
2985 reg_last->uses_length = 0;
2986 reg_last->clobbers_length = 0;
2987 }
2988 }
2989 }
2990
2991 if (!deps->readonly)
2992 for (i = 0; i < (unsigned)deps->max_reg; i++)
2993 {
2994 struct deps_reg *reg_last = &deps->reg_last[i];
2995 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2996 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2997 }
2998
2999 /* Flush pending lists on jumps, but not on speculative checks. */
3000 if (JUMP_P (insn) && !(sel_sched_p ()
3001 && sel_insn_is_speculation_check (insn)))
3002 flush_pending_lists (deps, insn, true, true);
3003
3004 if (!deps->readonly)
3005 CLEAR_REG_SET (&deps->reg_conditional_sets);
3006 reg_pending_barrier = NOT_A_BARRIER;
3007 }
3008
3009 /* If a post-call group is still open, see if it should remain so.
3010 This insn must be a simple move of a hard reg to a pseudo or
3011 vice-versa.
3012
3013 We must avoid moving these insns for correctness on
3014 SMALL_REGISTER_CLASS machines, and for special registers like
3015 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3016 hard regs for all targets. */
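/* For example, on a target that returns values in hard register 0,
the copy (set (reg:SI <pseudo>) (reg:SI 0)) emitted right after a
call stays glued to that call via SCHED_GROUP_P. */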
3017
3018 if (deps->in_post_call_group_p)
3019 {
3020 rtx tmp, set = single_set (insn);
3021 int src_regno, dest_regno;
3022
3023 if (set == NULL)
3024 {
3025 if (DEBUG_INSN_P (insn))
3026 /* We don't want to mark debug insns as part of the same
3027 sched group. We know they really aren't, but if we use
3028 debug insns to tell that a call group is over, we'll
3029 get different code if debug insns are not there and
3030 instructions that follow seem like they should be part
3031 of the call group.
3032
3033 Also, if we did, fixup_sched_groups() would move the
3034 deps of the debug insn to the call insn, modifying
3035 non-debug post-dependency counts of the debug insn
3036 dependencies and otherwise messing with the scheduling
3037 order.
3038
3039 Instead, let such debug insns be scheduled freely, but
3040 keep the call group open in case there are insns that
3041 should be part of it afterwards. Since we grant debug
3042 insns higher priority than even sched group insns, it
3043 will all turn out all right. */
3044 goto debug_dont_end_call_group;
3045 else
3046 goto end_call_group;
3047 }
3048
3049 tmp = SET_DEST (set);
3050 if (GET_CODE (tmp) == SUBREG)
3051 tmp = SUBREG_REG (tmp);
3052 if (REG_P (tmp))
3053 dest_regno = REGNO (tmp);
3054 else
3055 goto end_call_group;
3056
3057 tmp = SET_SRC (set);
3058 if (GET_CODE (tmp) == SUBREG)
3059 tmp = SUBREG_REG (tmp);
3060 if ((GET_CODE (tmp) == PLUS
3061 || GET_CODE (tmp) == MINUS)
3062 && REG_P (XEXP (tmp, 0))
3063 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3064 && dest_regno == STACK_POINTER_REGNUM)
3065 src_regno = STACK_POINTER_REGNUM;
3066 else if (REG_P (tmp))
3067 src_regno = REGNO (tmp);
3068 else
3069 goto end_call_group;
3070
3071 if (src_regno < FIRST_PSEUDO_REGISTER
3072 || dest_regno < FIRST_PSEUDO_REGISTER)
3073 {
3074 if (!deps->readonly
3075 && deps->in_post_call_group_p == post_call_initial)
3076 deps->in_post_call_group_p = post_call;
3077
3078 if (!sel_sched_p () || sched_emulate_haifa_p)
3079 {
3080 SCHED_GROUP_P (insn) = 1;
3081 CANT_MOVE (insn) = 1;
3082 }
3083 }
3084 else
3085 {
3086 end_call_group:
3087 if (!deps->readonly)
3088 deps->in_post_call_group_p = not_post_call;
3089 }
3090 }
3091
3092 debug_dont_end_call_group:
3093 if ((current_sched_info->flags & DO_SPECULATION)
3094 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3095 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3096 be speculated. */
3097 {
3098 if (sel_sched_p ())
3099 sel_mark_hard_insn (insn);
3100 else
3101 {
3102 sd_iterator_def sd_it;
3103 dep_t dep;
3104
3105 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3106 sd_iterator_cond (&sd_it, &dep);)
3107 change_spec_dep_to_hard (sd_it);
3108 }
3109 }
3110 }
3111
3112 /* Analyze INSN with DEPS as a context. */
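/* Besides running sched_analyze_insn on the pattern, this makes each
jump a flush point for pending memory accesses and records the hard
registers implicitly set, clobbered or used by calls. */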
3113 void
3114 deps_analyze_insn (struct deps *deps, rtx insn)
3115 {
3116 if (sched_deps_info->start_insn)
3117 sched_deps_info->start_insn (insn);
3118
3119 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3120 {
3121 /* Make each JUMP_INSN (but not a speculative check)
3122 a scheduling barrier for memory references. */
3123 if (!deps->readonly
3124 && JUMP_P (insn)
3125 && !(sel_sched_p ()
3126 && sel_insn_is_speculation_check (insn)))
3127 {
3128 /* Keep the list a reasonable size. */
3129 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3130 flush_pending_lists (deps, insn, true, true);
3131 else
3132 deps->last_pending_memory_flush
3133 = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3134 }
3135
3136 sched_analyze_insn (deps, PATTERN (insn), insn);
3137 }
3138 else if (CALL_P (insn))
3139 {
3140 int i;
3141
3142 CANT_MOVE (insn) = 1;
3143
3144 if (find_reg_note (insn, REG_SETJMP, NULL))
3145 {
3146 /* This is setjmp. Assume that all registers, not just
3147 hard registers, may be clobbered by this call. */
3148 reg_pending_barrier = MOVE_BARRIER;
3149 }
3150 else
3151 {
3152 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3153 /* A call may read and modify global register variables. */
3154 if (global_regs[i])
3155 {
3156 SET_REGNO_REG_SET (reg_pending_sets, i);
3157 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3158 }
3159 /* Other call-clobbered hard regs may be clobbered.
3160 Since we only have a choice between 'might be clobbered'
3161 and 'definitely not clobbered', we must include all
3162 partly call-clobbered registers here. */
3163 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3164 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3165 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3166 /* We don't know which set of fixed registers might be used
3167 by the function, but the stack pointer is certainly among
3168 them, so be conservative. */
3169 else if (fixed_regs[i])
3170 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3171 /* The frame pointer is normally not used by the function
3172 itself, but by the debugger. */
3173 /* ??? MIPS o32 is an exception. It uses the frame pointer
3174 in the macro expansion of jal but does not represent this
3175 fact in the call_insn rtl. */
3176 else if (i == FRAME_POINTER_REGNUM
3177 || (i == HARD_FRAME_POINTER_REGNUM
3178 && (! reload_completed || frame_pointer_needed)))
3179 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3180 }
3181
3182 /* For each insn which shouldn't cross a call, add a dependence
3183 between that insn and this call insn. */
3184 add_dependence_list_and_free (deps, insn,
3185 &deps->sched_before_next_call, 1,
3186 REG_DEP_ANTI);
3187
3188 sched_analyze_insn (deps, PATTERN (insn), insn);
3189
3190 /* If CALL would be in a sched group, then this will violate
3191 convention that sched group insns have dependencies only on the
3192 previous instruction.
3193
3194 Of course one can say: "Hey! What about head of the sched group?"
3195 And I will answer: "Basic principles (one dep per insn) are always
3196 the same." */
3197 gcc_assert (!SCHED_GROUP_P (insn));
3198
3199 /* In the absence of interprocedural alias analysis, we must flush
3200 all pending reads and writes, and start new dependencies from
3201 here. But only flush writes for constant calls (which may
3202 be passed a pointer to something we haven't written yet). */
3203 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3204
3205 if (!deps->readonly)
3206 {
3207 /* Remember the last function call for limiting lifetimes. */
3208 free_INSN_LIST_list (&deps->last_function_call);
3209 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3210
3211 /* Before reload, begin a post-call group, so as to keep the
3212 lifetimes of hard registers correct. */
3213 if (! reload_completed)
3214 deps->in_post_call_group_p = post_call;
3215 }
3216 }
3217
3218 if (sched_deps_info->use_cselib)
3219 cselib_process_insn (insn);
3220
3221 /* EH_REGION insn notes cannot appear until well after we complete
3222 scheduling. */
3223 if (NOTE_P (insn))
3224 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3225 && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3226
3227 if (sched_deps_info->finish_insn)
3228 sched_deps_info->finish_insn ();
3229
3230 /* Fixup the dependencies in the sched group. */
3231 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3232 && SCHED_GROUP_P (insn) && !sel_sched_p ())
3233 fixup_sched_groups (insn);
3234 }
3235
3236 /* Initialize DEPS for the new block beginning with HEAD. */
3237 void
3238 deps_start_bb (struct deps *deps, rtx head)
3239 {
3240 gcc_assert (!deps->readonly);
3241
3242 /* Before reload, if the previous block ended in a call, show that
3243 we are inside a post-call group, so as to keep the lifetimes of
3244 hard registers correct. */
3245 if (! reload_completed && !LABEL_P (head))
3246 {
3247 rtx insn = prev_nonnote_insn (head);
3248
3249 while (insn && DEBUG_INSN_P (insn))
3250 insn = prev_nonnote_insn (insn);
3251 if (insn && CALL_P (insn))
3252 deps->in_post_call_group_p = post_call_initial;
3253 }
3254 }
3255
3256 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3257 dependencies for each insn. */
3258 void
3259 sched_analyze (struct deps *deps, rtx head, rtx tail)
3260 {
3261 rtx insn;
3262
3263 if (sched_deps_info->use_cselib)
3264 cselib_init (true);
3265
3266 deps_start_bb (deps, head);
3267
3268 for (insn = head;; insn = NEXT_INSN (insn))
3269 {
3270
3271 if (INSN_P (insn))
3272 {
3273 /* Initialize deps_lists. */
3274 sd_init_insn (insn);
3275 }
3276
3277 deps_analyze_insn (deps, insn);
3278
3279 if (insn == tail)
3280 {
3281 if (sched_deps_info->use_cselib)
3282 cselib_finish ();
3283 return;
3284 }
3285 }
3286 gcc_unreachable ();
3287 }
3288
3289 /* Helper for sched_free_deps ().
3290 Delete INSN's (RESOLVED_P) backward dependencies. */
3291 static void
3292 delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3293 {
3294 sd_iterator_def sd_it;
3295 dep_t dep;
3296 sd_list_types_def types;
3297
3298 if (resolved_p)
3299 types = SD_LIST_RES_BACK;
3300 else
3301 types = SD_LIST_BACK;
3302
3303 for (sd_it = sd_iterator_start (insn, types);
3304 sd_iterator_cond (&sd_it, &dep);)
3305 {
3306 dep_link_t link = *sd_it.linkp;
3307 dep_node_t node = DEP_LINK_NODE (link);
3308 deps_list_t back_list;
3309 deps_list_t forw_list;
3310
3311 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3312 remove_from_deps_list (link, back_list);
3313 delete_dep_node (node);
3314 }
3315 }
3316
3317 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3318 deps_lists. */
3319 void
3320 sched_free_deps (rtx head, rtx tail, bool resolved_p)
3321 {
3322 rtx insn;
3323 rtx next_tail = NEXT_INSN (tail);
3324
3325 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3326 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3327 {
3328 /* Clear resolved back deps together with their dep_nodes. */
3329 delete_dep_nodes_in_back_deps (insn, resolved_p);
3330
3331 /* Clear forward deps and leave the dep_nodes on the
3332 corresponding back_deps list. */
3333 if (resolved_p)
3334 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3335 else
3336 clear_deps_list (INSN_FORW_DEPS (insn));
3337
3338 sd_finish_insn (insn);
3339 }
3340 }
3341 \f
3342 /* Initialize the dependence context DEPS for (region) data
3343 dependence analysis. */
3344
3345 void
3346 init_deps (struct deps *deps)
3347 {
3348 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3349
3350 deps->max_reg = max_reg;
3351 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3352 INIT_REG_SET (&deps->reg_last_in_use);
3353 INIT_REG_SET (&deps->reg_conditional_sets);
3354
3355 deps->pending_read_insns = 0;
3356 deps->pending_read_mems = 0;
3357 deps->pending_write_insns = 0;
3358 deps->pending_write_mems = 0;
3359 deps->pending_read_list_length = 0;
3360 deps->pending_write_list_length = 0;
3361 deps->pending_flush_length = 0;
3362 deps->last_pending_memory_flush = 0;
3363 deps->last_function_call = 0;
3364 deps->sched_before_next_call = 0;
3365 deps->in_post_call_group_p = not_post_call;
3366 deps->last_debug_insn = 0;
3367 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3368 deps->readonly = 0;
3369 }
3370
3371 /* Free insn lists found in DEPS. */
3372
3373 void
3374 free_deps (struct deps *deps)
3375 {
3376 unsigned i;
3377 reg_set_iterator rsi;
3378
3379 free_INSN_LIST_list (&deps->pending_read_insns);
3380 free_EXPR_LIST_list (&deps->pending_read_mems);
3381 free_INSN_LIST_list (&deps->pending_write_insns);
3382 free_EXPR_LIST_list (&deps->pending_write_mems);
3383 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3384
3385 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3386 times. For a testcase with 42000 regs and 8000 small basic blocks,
3387 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3388 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3389 {
3390 struct deps_reg *reg_last = &deps->reg_last[i];
3391 if (reg_last->uses)
3392 free_INSN_LIST_list (&reg_last->uses);
3393 if (reg_last->sets)
3394 free_INSN_LIST_list (&reg_last->sets);
3395 if (reg_last->implicit_sets)
3396 free_INSN_LIST_list (&reg_last->implicit_sets);
3397 if (reg_last->clobbers)
3398 free_INSN_LIST_list (&reg_last->clobbers);
3399 }
3400 CLEAR_REG_SET (&deps->reg_last_in_use);
3401 CLEAR_REG_SET (&deps->reg_conditional_sets);
3402
3403 free (deps->reg_last);
3404 deps->reg_last = NULL;
3405
3407 }
3408
3409 /* Remove INSN from the dependence context DEPS. Caution: reg_conditional_sets
3410 is not handled. */
3411 void
3412 remove_from_deps (struct deps *deps, rtx insn)
3413 {
3414 int removed;
3415 unsigned i;
3416 reg_set_iterator rsi;
3417
3418 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3419 &deps->pending_read_mems);
3420 deps->pending_read_list_length -= removed;
3421 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3422 &deps->pending_write_mems);
3423 deps->pending_write_list_length -= removed;
3424 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3425 deps->pending_flush_length -= removed;
3426
3427 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3428 {
3429 struct deps_reg *reg_last = &deps->reg_last[i];
3430 if (reg_last->uses)
3431 remove_from_dependence_list (insn, &reg_last->uses);
3432 if (reg_last->sets)
3433 remove_from_dependence_list (insn, &reg_last->sets);
3434 if (reg_last->implicit_sets)
3435 remove_from_dependence_list (insn, &reg_last->implicit_sets);
3436 if (reg_last->clobbers)
3437 remove_from_dependence_list (insn, &reg_last->clobbers);
3438 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3439 && !reg_last->clobbers)
3440 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3441 }
3442
3443 if (CALL_P (insn))
3444 remove_from_dependence_list (insn, &deps->last_function_call);
3445 remove_from_dependence_list (insn, &deps->sched_before_next_call);
3446 }
3447
3448 /* Init deps data vector. */
3449 static void
3450 init_deps_data_vector (void)
3451 {
3452 int reserve = (sched_max_luid + 1
3453 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3454 if (reserve > 0
3455 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3456 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3457 3 * sched_max_luid / 2);
3458 }
3459
3460 /* Initialize or extend (depending on GLOBAL_P) dependency data and,
3461 when it is profitable to use them, the dependency caches. */
3462 void
3463 sched_deps_init (bool global_p)
3464 {
3465 /* Average number of insns per basic block.
3466 '+ 1' is used to make it nonzero. */
3467 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3468
3469 init_deps_data_vector ();
3470
3471 /* We use another caching mechanism for selective scheduling, so
3472 we don't use this one. */
3473 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3474 {
3475 /* ?!? We could save some memory by computing a per-region luid mapping
3476 which could reduce both the number of vectors in the cache and the
3477 size of each vector. Instead we just avoid the cache entirely unless
3478 the average number of instructions in a basic block is very high. See
3479 the comment before the declaration of true_dependency_cache for
3480 what we consider "very high". */
3481 cache_size = 0;
3482 extend_dependency_caches (sched_max_luid, true);
3483 }
3484
3485 if (global_p)
3486 {
3487 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3488 /* Allocate lists for one block at a time. */
3489 insns_in_block);
3490 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3491 /* Allocate nodes for one block at a time.
3492 We assume that average insn has
3493 5 producers. */
3494 5 * insns_in_block);
3495 }
3496 }
3497
3498
3499 /* Create or extend (depending on CREATE_P) dependency caches to
3500 size N. */
3501 void
3502 extend_dependency_caches (int n, bool create_p)
3503 {
3504 if (create_p || true_dependency_cache)
3505 {
3506 int i, luid = cache_size + n;
3507
3508 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3509 luid);
3510 output_dependency_cache = XRESIZEVEC (bitmap_head,
3511 output_dependency_cache, luid);
3512 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3513 luid);
3514
3515 if (current_sched_info->flags & DO_SPECULATION)
3516 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3517 luid);
3518
3519 for (i = cache_size; i < luid; i++)
3520 {
3521 bitmap_initialize (&true_dependency_cache[i], 0);
3522 bitmap_initialize (&output_dependency_cache[i], 0);
3523 bitmap_initialize (&anti_dependency_cache[i], 0);
3524
3525 if (current_sched_info->flags & DO_SPECULATION)
3526 bitmap_initialize (&spec_dependency_cache[i], 0);
3527 }
3528 cache_size = luid;
3529 }
3530 }
3531
3532 /* Finalize dependency information for the whole function. */
3533 void
3534 sched_deps_finish (void)
3535 {
3536 gcc_assert (deps_pools_are_empty_p ());
3537 free_alloc_pool_if_empty (&dn_pool);
3538 free_alloc_pool_if_empty (&dl_pool);
3539 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3540
3541 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3543
3544 if (true_dependency_cache)
3545 {
3546 int i;
3547
3548 for (i = 0; i < cache_size; i++)
3549 {
3550 bitmap_clear (&true_dependency_cache[i]);
3551 bitmap_clear (&output_dependency_cache[i]);
3552 bitmap_clear (&anti_dependency_cache[i]);
3553
3554 if (sched_deps_info->generate_spec_deps)
3555 bitmap_clear (&spec_dependency_cache[i]);
3556 }
3557 free (true_dependency_cache);
3558 true_dependency_cache = NULL;
3559 free (output_dependency_cache);
3560 output_dependency_cache = NULL;
3561 free (anti_dependency_cache);
3562 anti_dependency_cache = NULL;
3563
3564 if (sched_deps_info->generate_spec_deps)
3565 {
3566 free (spec_dependency_cache);
3567 spec_dependency_cache = NULL;
3568 }
3569 cache_size = 0;
3570 }
3571 }
3572
3573 /* Initialize some global variables needed by the dependency analysis
3574 code. */
3575
3576 void
3577 init_deps_global (void)
3578 {
3579 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3580 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3581 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3582 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3583 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3584 reg_pending_barrier = NOT_A_BARRIER;
3585
3586 if (!sel_sched_p () || sched_emulate_haifa_p)
3587 {
3588 sched_deps_info->start_insn = haifa_start_insn;
3589 sched_deps_info->finish_insn = haifa_finish_insn;
3590
3591 sched_deps_info->note_reg_set = haifa_note_reg_set;
3592 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3593 sched_deps_info->note_reg_use = haifa_note_reg_use;
3594
3595 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3596 sched_deps_info->note_dep = haifa_note_dep;
3597 }
3598 }
3599
3600 /* Free everything used by the dependency analysis code. */
3601
3602 void
3603 finish_deps_global (void)
3604 {
3605 FREE_REG_SET (reg_pending_sets);
3606 FREE_REG_SET (reg_pending_clobbers);
3607 FREE_REG_SET (reg_pending_uses);
3608 }
3609
3610 /* Estimate the weakness of dependence between MEM1 and MEM2. */
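/* A larger return value means a weaker dependence and thus a better
speculation candidate: identical addresses yield MIN_DEP_WEAK (do
not speculate), different addressing modes land halfway between
UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, and anything else is
UNCERTAIN_DEP_WEAK. */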
3611 dw_t
3612 estimate_dep_weak (rtx mem1, rtx mem2)
3613 {
3614 rtx r1, r2;
3615
3616 if (mem1 == mem2)
3617 /* MEMs are the same - don't speculate. */
3618 return MIN_DEP_WEAK;
3619
3620 r1 = XEXP (mem1, 0);
3621 r2 = XEXP (mem2, 0);
3622
3623 if (r1 == r2
3624 || (REG_P (r1) && REG_P (r2)
3625 && REGNO (r1) == REGNO (r2)))
3626 /* Again, MEMs are the same. */
3627 return MIN_DEP_WEAK;
3628 else if ((REG_P (r1) && !REG_P (r2))
3629 || (!REG_P (r1) && REG_P (r2)))
3630 /* Different addressing modes - a reason to be more speculative
3631 than usual. */
3632 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3633 else
3634 /* We can't say anything about the dependence. */
3635 return UNCERTAIN_DEP_WEAK;
3636 }
3637
3638 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3639 This function can handle INSN and ELEM being the same (INSN == ELEM).
3640 It is a convenience wrapper. */
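/* E.g. add_dependence (insn, prev, REG_DEP_ANTI) orders INSN after
PREV with zero latency, without claiming that INSN uses PREV's
result. */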
3641 void
3642 add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3643 {
3644 ds_t ds;
3645 bool internal;
3646
3647 if (dep_type == REG_DEP_TRUE)
3648 ds = DEP_TRUE;
3649 else if (dep_type == REG_DEP_OUTPUT)
3650 ds = DEP_OUTPUT;
3651 else
3652 {
3653 gcc_assert (dep_type == REG_DEP_ANTI);
3654 ds = DEP_ANTI;
3655 }
3656
3657 /* When add_dependence is called from inside sched-deps.c, we expect
3658 cur_insn to be non-null. */
3659 internal = cur_insn != NULL;
3660 if (internal)
3661 gcc_assert (insn == cur_insn);
3662 else
3663 cur_insn = insn;
3664
3665 note_dep (elem, ds);
3666 if (!internal)
3667 cur_insn = NULL;
3668 }
3669
3670 /* Return weakness of speculative type TYPE in the dep_status DS. */
3671 dw_t
3672 get_dep_weak_1 (ds_t ds, ds_t type)
3673 {
3674 ds = ds & type;
3675
3676 switch (type)
3677 {
3678 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3679 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3680 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3681 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3682 default: gcc_unreachable ();
3683 }
3684
3685 return (dw_t) ds;
3686 }
3687
3688 dw_t
3689 get_dep_weak (ds_t ds, ds_t type)
3690 {
3691 dw_t dw = get_dep_weak_1 (ds, type);
3692
3693 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3694 return dw;
3695 }
3696
3697 /* Return a dep_status that has the same parameters as DS, except that
3698 speculative type TYPE will have weakness DW. */
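/* E.g. set_dep_weak (DEP_ANTI, BEGIN_CONTROL, MAX_DEP_WEAK) builds
the fully weak control-speculative status used above for memory
reads that may trap. */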
3699 ds_t
3700 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3701 {
3702 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3703
3704 ds &= ~type;
3705 switch (type)
3706 {
3707 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3708 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3709 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3710 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3711 default: gcc_unreachable ();
3712 }
3713 return ds;
3714 }
3715
3716 /* Return the join of two dep_statuses DS1 and DS2.
3717 If MAX_P is true then choose the greater probability,
3718 otherwise multiply probabilities.
3719 This function assumes that both DS1 and DS2 contain speculative bits. */
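/* When probabilities are multiplied, two weaknesses W1 and W2 of the
same speculative type combine to W1 * W2 / MAX_DEP_WEAK, clipped
below at MIN_DEP_WEAK. */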
3720 static ds_t
3721 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3722 {
3723 ds_t ds, t;
3724
3725 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3726
3727 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3728
3729 t = FIRST_SPEC_TYPE;
3730 do
3731 {
3732 if ((ds1 & t) && !(ds2 & t))
3733 ds |= ds1 & t;
3734 else if (!(ds1 & t) && (ds2 & t))
3735 ds |= ds2 & t;
3736 else if ((ds1 & t) && (ds2 & t))
3737 {
3738 dw_t dw1 = get_dep_weak (ds1, t);
3739 dw_t dw2 = get_dep_weak (ds2, t);
3740 ds_t dw;
3741
3742 if (!max_p)
3743 {
3744 dw = ((ds_t) dw1) * ((ds_t) dw2);
3745 dw /= MAX_DEP_WEAK;
3746 if (dw < MIN_DEP_WEAK)
3747 dw = MIN_DEP_WEAK;
3748 }
3749 else
3750 {
3751 if (dw1 >= dw2)
3752 dw = dw1;
3753 else
3754 dw = dw2;
3755 }
3756
3757 ds = set_dep_weak (ds, t, (dw_t) dw);
3758 }
3759
3760 if (t == LAST_SPEC_TYPE)
3761 break;
3762 t <<= SPEC_TYPE_SHIFT;
3763 }
3764 while (1);
3765
3766 return ds;
3767 }
3768
3769 /* Return the join of two dep_statuses DS1 and DS2.
3770 This function assumes that both DS1 and DS2 contain speculative bits. */
3771 ds_t
3772 ds_merge (ds_t ds1, ds_t ds2)
3773 {
3774 return ds_merge_1 (ds1, ds2, false);
3775 }
3776
3777 /* Return the join of the two dep_statuses DS and DS2; MEM1 and MEM2, if given, re-estimate the data-speculation weakness. */
3778 ds_t
3779 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3780 {
3781 ds_t new_status = ds | ds2;
3782
3783 if (new_status & SPECULATIVE)
3784 {
3785 if ((ds && !(ds & SPECULATIVE))
3786 || (ds2 && !(ds2 & SPECULATIVE)))
3787 /* Then this dep can't be speculative. */
3788 new_status &= ~SPECULATIVE;
3789 else
3790 {
3791 /* Both are speculative. Merge the probabilities. */
3792 if (mem1)
3793 {
3794 dw_t dw;
3795
3796 dw = estimate_dep_weak (mem1, mem2);
3797 ds = set_dep_weak (ds, BEGIN_DATA, dw);
3798 }
3799
3800 if (!ds)
3801 new_status = ds2;
3802 else if (!ds2)
3803 new_status = ds;
3804 else
3805 new_status = ds_merge (ds2, ds);
3806 }
3807 }
3808
3809 return new_status;
3810 }
3811
3812 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
3813 probabilities. */
3814 ds_t
3815 ds_max_merge (ds_t ds1, ds_t ds2)
3816 {
3817 if (ds1 == 0 && ds2 == 0)
3818 return 0;
3819
3820 if (ds1 == 0 && ds2 != 0)
3821 return ds2;
3822
3823 if (ds1 != 0 && ds2 == 0)
3824 return ds1;
3825
3826 return ds_merge_1 (ds1, ds2, true);
3827 }
3828
3829 /* Return the probability of speculation success for the speculation
3830 status DS. */
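/* For two speculative types with weaknesses W1 and W2 this evaluates
to W1 * W2 / MAX_DEP_WEAK, again clipped below at MIN_DEP_WEAK. */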
3831 dw_t
3832 ds_weak (ds_t ds)
3833 {
3834 ds_t res = 1, dt;
3835 int n = 0;
3836
3837 dt = FIRST_SPEC_TYPE;
3838 do
3839 {
3840 if (ds & dt)
3841 {
3842 res *= (ds_t) get_dep_weak (ds, dt);
3843 n++;
3844 }
3845
3846 if (dt == LAST_SPEC_TYPE)
3847 break;
3848 dt <<= SPEC_TYPE_SHIFT;
3849 }
3850 while (1);
3851
3852 gcc_assert (n);
3853 while (--n)
3854 res /= MAX_DEP_WEAK;
3855
3856 if (res < MIN_DEP_WEAK)
3857 res = MIN_DEP_WEAK;
3858
3859 gcc_assert (res <= MAX_DEP_WEAK);
3860
3861 return (dw_t) res;
3862 }

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  /* Each speculation type is a multi-bit weakness field; OR-ing the full
     mask back in saturates the field for every type that has at least
     one bit set.  */
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}
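
/* For example, a DS whose only speculative bits lie in the BEGIN_DATA
   weakness field (whatever their value) maps to the full BEGIN_DATA
   mask.  */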

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "}");
}

/* Dump the dependence status S to stderr, followed by a newline.  */
void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}
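
/* Example: for a data-speculative true dependence with a hypothetical
   weakness of 42, the output reads "{BEGIN_DATA: 42; DEP_TRUE; }".  */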

#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
                && (ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only a true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only an anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */