/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
<http://www.gnu.org/licenses/>.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
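
/* A dep_status (ds_t) can carry several dependence bits at once; ds_to_dk
   reports only the strongest one, in the fixed order true > output >
   control > anti.  The standalone sketch below (illustrative only, with
   hypothetical bit values -- it is not part of GCC) shows the same
   priority-picking pattern on a plain unsigned bitmask.  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdio.h>

enum toy_dep { TOY_TRUE = 1, TOY_OUTPUT = 2, TOY_CONTROL = 4, TOY_ANTI = 8 };

/* Pick the strongest kind present, mirroring ds_to_dk's if-chain.  */
static enum toy_dep
toy_major (unsigned ds)
{
  if (ds & TOY_TRUE)
    return TOY_TRUE;
  if (ds & TOY_OUTPUT)
    return TOY_OUTPUT;
  if (ds & TOY_CONTROL)
    return TOY_CONTROL;
  return TOY_ANTI;
}

int
main (void)
{
  /* A status carrying both an output and an anti bit reports "output".  */
  printf ("%d\n", toy_major (TOY_OUTPUT | TOY_ANTI)); /* prints 2 */
  return 0;
}
#endif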

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only needs the major type
   of the dependency, it is convenient to hide the full dep_status from it.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
		      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}
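
/* attach_dep_link and detach_dep_link implement the classic "pointer to
   the previous node's next field" doubly-linked list: instead of a prev
   pointer, each link stores the address of whichever pointer points at
   it, so detaching the head needs no special case.  A minimal standalone
   sketch of the same technique (hypothetical toy types, not part of GCC)
   follows.  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <assert.h>
#include <stddef.h>

struct toy_link
{
  struct toy_link *next;
  struct toy_link **prev_nextp; /* address of the pointer that points here */
};

static void
toy_attach (struct toy_link *l, struct toy_link **prev_nextp)
{
  l->prev_nextp = prev_nextp;
  l->next = *prev_nextp;
  if (l->next)
    l->next->prev_nextp = &l->next;
  *prev_nextp = l;
}

static void
toy_detach (struct toy_link *l)
{
  *l->prev_nextp = l->next;
  if (l->next)
    l->next->prev_nextp = l->prev_nextp;
  l->next = NULL;
  l->prev_nextp = NULL;
}

int
main (void)
{
  struct toy_link a = { NULL, NULL }, b = { NULL, NULL }, *head = NULL;

  toy_attach (&a, &head);	/* list: a */
  toy_attach (&b, &head);	/* list: b, a */
  toy_detach (&b);		/* head detaches with no special case */
  assert (head == &a && a.next == NULL);
  return 0;
}
#endif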

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pools.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn's constraint may allow only a single register class.  Even if no
   hard register currently appears in the insn, the particular hard
   register will appear in the insn after the reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions
   between branches for typical C code.  So we can make a guess that the
   average basic block is approximately 5 instructions long; we will
   choose 100X the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the cache for true dependencies is allocated,
   then the remaining caches are allocated as well.  */
static bitmap true_dependency_cache = NULL;
static bitmap output_dependency_cache = NULL;
static bitmap anti_dependency_cache = NULL;
static bitmap control_dependency_cache = NULL;
static bitmap spec_dependency_cache = NULL;
static int cache_size;
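
/* The cache is a per-consumer bitmap indexed by the producer's luid
   (local insn id), so "does CON already depend on PRO?" is a single bit
   test instead of a list walk.  The standalone sketch below shows the
   same idea with plain uint64_t bitsets (hypothetical sizes and names,
   not part of GCC).  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_INSNS 64 /* one 64-bit word per consumer, for brevity */

static uint64_t toy_dep_cache[TOY_MAX_INSNS];

static void
toy_record_dep (int con_luid, int pro_luid)
{
  toy_dep_cache[con_luid] |= UINT64_C (1) << pro_luid;
}

static bool
toy_dep_present_p (int con_luid, int pro_luid)
{
  return (toy_dep_cache[con_luid] >> pro_luid) & 1;
}

int
main (void)
{
  toy_record_dep (7, 3);
  printf ("%d %d\n", toy_dep_present_p (7, 3), toy_dep_present_p (7, 4));
  /* prints "1 0": the duplicate test is O(1), no list walk needed.  */
  return 0;
}
#endif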
/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);

\f
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
\f

/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
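
/* The cache above distinguishes three states with one field: NULL means
   "not computed yet", the distinguished value const_true_rtx means
   "computed, no condition", and anything else is the condition itself.
   A minimal standalone sketch of this sentinel-based memoization pattern
   (hypothetical compute function, not part of GCC):  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdio.h>

static const char toy_none[] = "<none>"; /* sentinel: "computed, absent" */
static const char *toy_cache; /* NULL: not computed yet */

static const char *
toy_compute (void)
{
  return NULL; /* pretend the expensive analysis found nothing */
}

static const char *
toy_get (void)
{
  if (toy_cache == toy_none)
    return NULL;		/* cached negative result */
  if (toy_cache != NULL)
    return toy_cache;		/* cached positive result */
  toy_cache = toy_compute ();	/* first call: do the real work */
  if (toy_cache == NULL)
    {
      toy_cache = toy_none;	/* remember the absence too */
      return NULL;
    }
  return toy_cache;
}

int
main (void)
{
  toy_get ();	/* computes */
  toy_get ();	/* hits the negative-result cache */
  puts (toy_get () ? "cond" : "no cond");
  return 0;
}
#endif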

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !!sched_get_condition_with_rev (insn, NULL);
}

\f

/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == (rev1 == rev2
			      ? reversed_comparison_code (cond2, NULL)
			      : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
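
/* Two comparisons of the same operands exclude each other exactly when,
   after folding in the REV flags, one code is the reverse of the other
   (e.g. EQ vs NE): if both or neither condition is reversed, the codes
   themselves must be reverses; if exactly one is reversed, the codes
   must be equal.  A standalone truth-table sketch for integer EQ/NE
   (hypothetical toy codes, not part of GCC):  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdbool.h>
#include <stdio.h>

enum toy_code { TOY_EQ, TOY_NE };

static enum toy_code
toy_reverse (enum toy_code c)
{
  return c == TOY_EQ ? TOY_NE : TOY_EQ;
}

/* Mirror of conditions_mutex_p's code comparison, operands assumed equal.  */
static bool
toy_mutex_p (enum toy_code c1, enum toy_code c2, bool rev1, bool rev2)
{
  return c1 == (rev1 == rev2 ? toy_reverse (c2) : c2);
}

int
main (void)
{
  /* "x == y" and "x != y" can never both hold.  */
  printf ("%d\n", toy_mutex_p (TOY_EQ, TOY_NE, false, false)); /* 1 */
  /* "x == y" and a *reversed* "x != y" are the same condition.  */
  printf ("%d\n", toy_mutex_p (TOY_EQ, TOY_NE, false, true));  /* 0 */
  return 0;
}
#endif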

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
\f

/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* An instruction that depends on a speculatively scheduled instruction
       cannot itself be speculatively scheduled in the following cases.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}

/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either the back list of CON or the forw list of PRO,
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether any dependency
   between PRO and CON is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
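
/* Because every dependence edge appears on both endpoints' lists, the
   lookup above is free to scan whichever adjacency list is shorter.
   A standalone sketch of that symmetric-edge trick on plain arrays
   (hypothetical toy graph, not part of GCC):  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdbool.h>
#include <stdio.h>

struct toy_node
{
  int n_edges;
  int edges[8]; /* ids of the neighbors on this side of each edge */
};

/* Scan the shorter of the two endpoint lists for the edge PRO->CON.  */
static bool
toy_edge_p (const struct toy_node *pro, int pro_id,
	    const struct toy_node *con, int con_id)
{
  const struct toy_node *scan = pro;
  int target = con_id;

  if (con->n_edges < pro->n_edges)
    {
      scan = con;
      target = pro_id;
    }
  for (int i = 0; i < scan->n_edges; i++)
    if (scan->edges[i] == target)
      return true;
  return false;
}

int
main (void)
{
  struct toy_node pro = { 3, { 1, 2, 3 } }; /* producer feeds insns 1..3 */
  struct toy_node con = { 1, { 0 } };	    /* consumer depends on insn 0 */
  printf ("%d\n", toy_edge_p (&pro, 0, &con, 2)); /* scans con's 1 entry */
  return 0;
}
#endif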

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	       ..we should update DEP_STATUS
	     else
	       ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating whether an old entry has been
   changed, a new entry has been added to insn's backward deps, or nothing
   has been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

/* Add or update a backward dependence described by DEP, searching the
   resolved lists if RESOLVED_P is true.  This function is a convenience
   wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all of FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **) &XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **) &XEXP (*listp, 1);
      exprp = (rtx_expr_list **) &XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would duplicate code from sd_delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	}
      while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
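
/* After a flush, every pending memory reference has been made dependent
   on INSN, and INSN alone is remembered as the last flush point, so
   later memory insns need only one dependence edge to it instead of one
   per flushed reference.  A standalone sketch of that "collapse the list
   to a single barrier" bookkeeping (hypothetical toy state, not part of
   GCC):  */
#if 0 /* Illustrative sketch, not compiled as part of GCC.  */
#include <stdio.h>

static int toy_pending[32];	/* insn ids with pending memory refs */
static int toy_n_pending;
static int toy_last_flush = -1; /* single barrier after a flush */

static void
toy_add_edge (int con, int pro)
{
  printf ("dep %d -> %d\n", con, pro);
}

static void
toy_flush (int barrier)
{
  /* Make the barrier depend on everything still pending...  */
  for (int i = 0; i < toy_n_pending; i++)
    toy_add_edge (barrier, toy_pending[i]);
  /* ...then collapse the state to just the barrier itself.  */
  toy_n_pending = 0;
  toy_last_flush = barrier;
}

static void
toy_mem_insn (int insn)
{
  if (toy_last_flush >= 0)
    toy_add_edge (insn, toy_last_flush); /* one edge, not many */
  toy_pending[toy_n_pending++] = insn;
}

int
main (void)
{
  toy_mem_insn (1);
  toy_mem_insn (2);
  toy_flush (3);	/* deps 3->1, 3->2 */
  toy_mem_insn (4);	/* single dep 4->3 */
  return 0;
}
#endif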
\f
/* The instruction whose dependencies we are currently analyzing.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}

/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
1920
1921 \f
1922
1923 /* Functions for computation of info needed for register pressure
1924 sensitive insn scheduling. */
1925
1926
1927 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1928 static struct reg_use_data *
1929 create_insn_reg_use (int regno, rtx_insn *insn)
1930 {
1931 struct reg_use_data *use;
1932
1933 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1934 use->regno = regno;
1935 use->insn = insn;
1936 use->next_insn_use = INSN_REG_USE_LIST (insn);
1937 INSN_REG_USE_LIST (insn) = use;
1938 return use;
1939 }
1940
1941 /* Allocate reg_set_data structure for REGNO and INSN. */
1942 static void
1943 create_insn_reg_set (int regno, rtx insn)
1944 {
1945 struct reg_set_data *set;
1946
1947 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1948 set->regno = regno;
1949 set->insn = insn;
1950 set->next_insn_set = INSN_REG_SET_LIST (insn);
1951 INSN_REG_SET_LIST (insn) = set;
1952 }
1953
1954 /* Set up insn register uses for INSN and dependency context DEPS. */
1955 static void
1956 setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
1957 {
1958 unsigned i;
1959 reg_set_iterator rsi;
1960 struct reg_use_data *use, *use2, *next;
1961 struct deps_reg *reg_last;
1962
1963 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1964 {
1965 if (i < FIRST_PSEUDO_REGISTER
1966 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1967 continue;
1968
1969 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1970 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1971 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1972 /* Ignore a use that is not dying. */
1973 continue;
1974
1975 use = create_insn_reg_use (i, insn);
1976 use->next_regno_use = use;
1977 reg_last = &deps->reg_last[i];
1978
1979 /* Create the cycle list of uses. */
1980 for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1981 {
1982 use2 = create_insn_reg_use (i, list->insn ());
1983 next = use->next_regno_use;
1984 use->next_regno_use = use2;
1985 use2->next_regno_use = next;
1986 }
1987 }
1988 }
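
/* A standalone sketch of the cyclic next_regno_use chain built in
   setup_insn_reg_uses above: each new node is spliced in right after an
   existing one, so every use of the same regno stays reachable from any
   other. The struct below is invented for the example; it is not GCC's
   reg_use_data. */

#include <stdio.h>

struct use_node { int insn_uid; struct use_node *next; };

/* Splice NEW_NODE into the cycle just after HEAD, mirroring the
   next_regno_use updates in the loop above. */
static void
splice_after (struct use_node *head, struct use_node *new_node)
{
  new_node->next = head->next;
  head->next = new_node;
}

int
main (void)
{
  struct use_node a = { 1, &a };            /* One-node cycle: a -> a. */
  struct use_node b = { 2, NULL }, c = { 3, NULL };
  splice_after (&a, &b);                    /* a -> b -> a */
  splice_after (&a, &c);                    /* a -> c -> b -> a */
  struct use_node *p = &b;                  /* Start anywhere in the cycle. */
  do
    {
      printf ("use in insn %d\n", p->insn_uid);
      p = p->next;
    }
  while (p != &b);
  return 0;
}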
1989
1990 /* Register pressure info for the currently processed insn. */
1991 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1992
1993 /* Return TRUE if INSN has the use structure for REGNO. */
1994 static bool
1995 insn_use_p (rtx insn, int regno)
1996 {
1997 struct reg_use_data *use;
1998
1999 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2000 if (use->regno == regno)
2001 return true;
2002 return false;
2003 }
2004
2005 /* Update the register pressure info after birth of pseudo register REGNO
2006 in INSN. Arguments CLOBBER_P and UNUSED_P say, respectively, that
2007 the register is clobbered or unused after the insn. */
2008 static void
2009 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2010 {
2011 int incr, new_incr;
2012 enum reg_class cl;
2013
2014 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2015 cl = sched_regno_pressure_class[regno];
2016 if (cl != NO_REGS)
2017 {
2018 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2019 if (clobber_p)
2020 {
2021 new_incr = reg_pressure_info[cl].clobber_increase + incr;
2022 reg_pressure_info[cl].clobber_increase = new_incr;
2023 }
2024 else if (unused_p)
2025 {
2026 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2027 reg_pressure_info[cl].unused_set_increase = new_incr;
2028 }
2029 else
2030 {
2031 new_incr = reg_pressure_info[cl].set_increase + incr;
2032 reg_pressure_info[cl].set_increase = new_incr;
2033 if (! insn_use_p (insn, regno))
2034 reg_pressure_info[cl].change += incr;
2035 create_insn_reg_set (regno, insn);
2036 }
2037 gcc_assert (new_incr < (1 << INCREASE_BITS));
2038 }
2039 }
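
/* A simplified, self-contained model of the bookkeeping above: a register
   birth bumps exactly one of clobber_increase / unused_set_increase /
   set_increase for its pressure class, and only a genuine set raises the
   net pressure change (the real code additionally skips the change bump
   when the insn itself uses the register; that insn_use_p refinement is
   omitted here). The struct and numbers are invented for illustration. */

#include <stdio.h>

struct pressure
{
  int clobber_increase, unused_set_increase, set_increase, change;
};

static void
mark_birth (struct pressure *p, int nregs, int clobber_p, int unused_p)
{
  if (clobber_p)
    p->clobber_increase += nregs;      /* Value dies within the insn. */
  else if (unused_p)
    p->unused_set_increase += nregs;   /* Set but never read afterwards. */
  else
    {
      p->set_increase += nregs;
      p->change += nregs;              /* Net pressure goes up. */
    }
}

int
main (void)
{
  struct pressure p = { 0, 0, 0, 0 };
  mark_birth (&p, 2, 0, 0);   /* Ordinary set of a two-register value. */
  mark_birth (&p, 1, 1, 0);   /* Clobber. */
  printf ("set %d clobber %d change %d\n",
          p.set_increase, p.clobber_increase, p.change);
  return 0;
}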
2040
2041 /* Like mark_insn_pseudo_birth except that NREGS says how many
2042 hard registers are involved in the birth. */
2043 static void
2044 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2045 bool clobber_p, bool unused_p)
2046 {
2047 enum reg_class cl;
2048 int new_incr, last = regno + nregs;
2049
2050 while (regno < last)
2051 {
2052 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2053 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2054 {
2055 cl = sched_regno_pressure_class[regno];
2056 if (cl != NO_REGS)
2057 {
2058 if (clobber_p)
2059 {
2060 new_incr = reg_pressure_info[cl].clobber_increase + 1;
2061 reg_pressure_info[cl].clobber_increase = new_incr;
2062 }
2063 else if (unused_p)
2064 {
2065 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2066 reg_pressure_info[cl].unused_set_increase = new_incr;
2067 }
2068 else
2069 {
2070 new_incr = reg_pressure_info[cl].set_increase + 1;
2071 reg_pressure_info[cl].set_increase = new_incr;
2072 if (! insn_use_p (insn, regno))
2073 reg_pressure_info[cl].change += 1;
2074 create_insn_reg_set (regno, insn);
2075 }
2076 gcc_assert (new_incr < (1 << INCREASE_BITS));
2077 }
2078 }
2079 regno++;
2080 }
2081 }
2082
2083 /* Update the register pressure info after birth of pseudo or hard
2084 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say
2085 respectively that the register is clobbered or unused after the
2086 insn. */
2087 static void
2088 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2089 {
2090 int regno;
2091
2092 if (GET_CODE (reg) == SUBREG)
2093 reg = SUBREG_REG (reg);
2094
2095 if (! REG_P (reg))
2096 return;
2097
2098 regno = REGNO (reg);
2099 if (regno < FIRST_PSEUDO_REGISTER)
2100 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2101 clobber_p, unused_p);
2102 else
2103 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2104 }
2105
2106 /* Update the register pressure info after death of pseudo register
2107 REGNO. */
2108 static void
2109 mark_pseudo_death (int regno)
2110 {
2111 int incr;
2112 enum reg_class cl;
2113
2114 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2115 cl = sched_regno_pressure_class[regno];
2116 if (cl != NO_REGS)
2117 {
2118 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2119 reg_pressure_info[cl].change -= incr;
2120 }
2121 }
2122
2123 /* Like mark_pseudo_death except that NREGS says how many hard
2124 registers are involved in the death. */
2125 static void
2126 mark_hard_regno_death (int regno, int nregs)
2127 {
2128 enum reg_class cl;
2129 int last = regno + nregs;
2130
2131 while (regno < last)
2132 {
2133 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2134 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2135 {
2136 cl = sched_regno_pressure_class[regno];
2137 if (cl != NO_REGS)
2138 reg_pressure_info[cl].change -= 1;
2139 }
2140 regno++;
2141 }
2142 }
2143
2144 /* Update the register pressure info after death of pseudo or hard
2145 register REG. */
2146 static void
2147 mark_reg_death (rtx reg)
2148 {
2149 int regno;
2150
2151 if (GET_CODE (reg) == SUBREG)
2152 reg = SUBREG_REG (reg);
2153
2154 if (! REG_P (reg))
2155 return;
2156
2157 regno = REGNO (reg);
2158 if (regno < FIRST_PSEUDO_REGISTER)
2159 mark_hard_regno_death (regno, REG_NREGS (reg));
2160 else
2161 mark_pseudo_death (regno);
2162 }
2163
2164 /* Process SETTER of REG. DATA is an insn containing the setter. */
2165 static void
2166 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2167 {
2168 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2169 return;
2170 mark_insn_reg_birth
2171 ((rtx) data, reg, false,
2172 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2173 }
2174
2175 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2176 static void
2177 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2178 {
2179 if (GET_CODE (setter) == CLOBBER)
2180 mark_insn_reg_birth ((rtx) data, reg, true, false);
2181 }
2182
2183 /* Set up reg pressure info related to INSN. */
2184 void
2185 init_insn_reg_pressure_info (rtx_insn *insn)
2186 {
2187 int i, len;
2188 enum reg_class cl;
2189 static struct reg_pressure_data *pressure_info;
2190 rtx link;
2191
2192 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2193
2194 if (! INSN_P (insn))
2195 return;
2196
2197 for (i = 0; i < ira_pressure_classes_num; i++)
2198 {
2199 cl = ira_pressure_classes[i];
2200 reg_pressure_info[cl].clobber_increase = 0;
2201 reg_pressure_info[cl].set_increase = 0;
2202 reg_pressure_info[cl].unused_set_increase = 0;
2203 reg_pressure_info[cl].change = 0;
2204 }
2205
2206 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2207
2208 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2209
2210 if (AUTO_INC_DEC)
2211 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2212 if (REG_NOTE_KIND (link) == REG_INC)
2213 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2214
2215 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2216 if (REG_NOTE_KIND (link) == REG_DEAD)
2217 mark_reg_death (XEXP (link, 0));
2218
2219 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2220 pressure_info
2221 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2222 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2223 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2224 * sizeof (int), 1);
2225 for (i = 0; i < ira_pressure_classes_num; i++)
2226 {
2227 cl = ira_pressure_classes[i];
2228 pressure_info[i].clobber_increase
2229 = reg_pressure_info[cl].clobber_increase;
2230 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2231 pressure_info[i].unused_set_increase
2232 = reg_pressure_info[cl].unused_set_increase;
2233 pressure_info[i].change = reg_pressure_info[cl].change;
2234 }
2235 }
2236
2237
2238 \f
2239
2240 /* Internal variable for sched_analyze_[12] () functions.
2241 If it is nonzero, this means that sched_analyze_[12] looks
2242 at the outermost SET. */
2243 static bool can_start_lhs_rhs_p;
2244
2245 /* Extend reg info for the deps context DEPS given that
2246 we have just generated a register numbered REGNO. */
2247 static void
2248 extend_deps_reg_info (struct deps_desc *deps, int regno)
2249 {
2250 int max_regno = regno + 1;
2251
2252 gcc_assert (!reload_completed);
2253
2254 /* In a readonly context, it would not hurt to extend info,
2255 but it should not be needed. */
2256 if (reload_completed && deps->readonly)
2257 {
2258 deps->max_reg = max_regno;
2259 return;
2260 }
2261
2262 if (max_regno > deps->max_reg)
2263 {
2264 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2265 max_regno);
2266 memset (&deps->reg_last[deps->max_reg],
2267 0, (max_regno - deps->max_reg)
2268 * sizeof (struct deps_reg));
2269 deps->max_reg = max_regno;
2270 }
2271 }
2272
2273 /* Extends REG_INFO_P if needed. */
2274 void
2275 maybe_extend_reg_info_p (void)
2276 {
2277 /* Extend REG_INFO_P, if needed. */
2278 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2279 {
2280 size_t new_reg_info_p_size = max_regno + 128;
2281
2282 gcc_assert (!reload_completed && sel_sched_p ());
2283
2284 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2285 new_reg_info_p_size,
2286 reg_info_p_size,
2287 sizeof (*reg_info_p));
2288 reg_info_p_size = new_reg_info_p_size;
2289 }
2290 }
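
/* A plain-C sketch of the grow-with-headroom pattern behind
   maybe_extend_reg_info_p and extend_deps_reg_info: resize only when an
   index falls outside the table, over-allocate (here by the same 128
   used above) to amortize the cost, and zero the fresh tail, much as
   xrecalloc does. The table below is invented for the example. */

#include <stdlib.h>
#include <string.h>

struct reg_info { int refs; };

static struct reg_info *table;
static size_t table_size;

static void
ensure_capacity (size_t regno)
{
  if (regno < table_size)
    return;
  size_t new_size = regno + 128;                 /* Headroom. */
  table = (struct reg_info *) realloc (table, new_size * sizeof *table);
  if (table == NULL)
    abort ();
  memset (table + table_size, 0,
          (new_size - table_size) * sizeof *table);
  table_size = new_size;
}

int
main (void)
{
  ensure_capacity (1000);    /* Allocates 1128 zeroed entries. */
  table[1000].refs = 1;
  free (table);
  return 0;
}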
2291
2292 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2293 The type of the reference is specified by REF and can be SET,
2294 CLOBBER, CLOBBER_HIGH, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2295
2296 static void
2297 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2298 enum rtx_code ref, rtx_insn *insn)
2299 {
2300 /* We could emit new pseudos in renaming. Extend the reg structures. */
2301 if (!reload_completed && sel_sched_p ()
2302 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2303 extend_deps_reg_info (deps, regno);
2304
2305 maybe_extend_reg_info_p ();
2306
2307 /* A hard reg in a wide mode may really be multiple registers.
2308 If so, mark all of them just like the first. */
2309 if (regno < FIRST_PSEUDO_REGISTER)
2310 {
2311 int i = hard_regno_nregs (regno, mode);
2312 if (ref == SET)
2313 {
2314 while (--i >= 0)
2315 note_reg_set (regno + i);
2316 }
2317 else if (ref == USE)
2318 {
2319 while (--i >= 0)
2320 note_reg_use (regno + i);
2321 }
2322 else if (ref == CLOBBER_HIGH)
2323 {
2324 gcc_assert (i == 1);
2325 /* We don't know the current state of the register, so have to treat
2326 the clobber high as a full clobber. */
2327 note_reg_clobber (regno);
2328 }
2329 else
2330 {
2331 while (--i >= 0)
2332 note_reg_clobber (regno + i);
2333 }
2334 }
2335
2336 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2337 it does not reload. Ignore these as they have served their
2338 purpose already. */
2339 else if (regno >= deps->max_reg)
2340 {
2341 enum rtx_code code = GET_CODE (PATTERN (insn));
2342 gcc_assert (code == USE || code == CLOBBER);
2343 }
2344
2345 else
2346 {
2347 if (ref == SET)
2348 note_reg_set (regno);
2349 else if (ref == USE)
2350 note_reg_use (regno);
2351 else
2352 /* This is a CLOBBER or CLOBBER_HIGH; for the latter we don't know
2353 the current state of the register, so must treat it as a full clobber. */
2354 note_reg_clobber (regno);
2355
2356 /* Pseudos that are REG_EQUIV to something may be replaced
2357 by that during reloading. We need only add dependencies for
2358 the address in the REG_EQUIV note. */
2359 if (!reload_completed && get_reg_known_equiv_p (regno))
2360 {
2361 rtx t = get_reg_known_value (regno);
2362 if (MEM_P (t))
2363 sched_analyze_2 (deps, XEXP (t, 0), insn);
2364 }
2365
2366 /* Don't let it cross a call after scheduling if it doesn't
2367 already cross one. */
2368 if (REG_N_CALLS_CROSSED (regno) == 0)
2369 {
2370 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2371 deps->sched_before_next_call
2372 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2373 else
2374 add_dependence_list (insn, deps->last_function_call, 1,
2375 REG_DEP_ANTI, false);
2376 }
2377 }
2378 }
2379
2380 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2381 rtx, X, creating all dependencies generated by the write to the
2382 destination of X, and reads of everything mentioned. */
2383
2384 static void
2385 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2386 {
2387 rtx dest = XEXP (x, 0);
2388 enum rtx_code code = GET_CODE (x);
2389 bool cslr_p = can_start_lhs_rhs_p;
2390
2391 can_start_lhs_rhs_p = false;
2392
2393 gcc_assert (dest);
2394 if (dest == 0)
2395 return;
2396
2397 if (cslr_p && sched_deps_info->start_lhs)
2398 sched_deps_info->start_lhs (dest);
2399
2400 if (GET_CODE (dest) == PARALLEL)
2401 {
2402 int i;
2403
2404 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2405 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2406 sched_analyze_1 (deps,
2407 gen_rtx_CLOBBER (VOIDmode,
2408 XEXP (XVECEXP (dest, 0, i), 0)),
2409 insn);
2410
2411 if (cslr_p && sched_deps_info->finish_lhs)
2412 sched_deps_info->finish_lhs ();
2413
2414 if (code == SET)
2415 {
2416 can_start_lhs_rhs_p = cslr_p;
2417
2418 sched_analyze_2 (deps, SET_SRC (x), insn);
2419
2420 can_start_lhs_rhs_p = false;
2421 }
2422
2423 return;
2424 }
2425
2426 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2427 || GET_CODE (dest) == ZERO_EXTRACT)
2428 {
2429 if (GET_CODE (dest) == STRICT_LOW_PART
2430 || GET_CODE (dest) == ZERO_EXTRACT
2431 || read_modify_subreg_p (dest))
2432 {
2433 /* These both read and modify the result. We must handle
2434 them as writes to get proper dependencies for following
2435 instructions. We must handle them as reads to get proper
2436 dependencies from this to previous instructions.
2437 Thus we need to call sched_analyze_2. */
2438
2439 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2440 }
2441 if (GET_CODE (dest) == ZERO_EXTRACT)
2442 {
2443 /* The second and third arguments are values read by this insn. */
2444 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2445 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2446 }
2447 dest = XEXP (dest, 0);
2448 }
2449
2450 if (REG_P (dest))
2451 {
2452 int regno = REGNO (dest);
2453 machine_mode mode = GET_MODE (dest);
2454
2455 sched_analyze_reg (deps, regno, mode, code, insn);
2456
2457 #ifdef STACK_REGS
2458 /* Treat all writes to a stack register as modifying the TOS. */
2459 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2460 {
2461 /* Avoid analyzing the same register twice. */
2462 if (regno != FIRST_STACK_REG)
2463 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2464
2465 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2466 FIRST_STACK_REG);
2467 }
2468 #endif
2469 }
2470 else if (MEM_P (dest))
2471 {
2472 /* Writing memory. */
2473 rtx t = dest;
2474
2475 if (sched_deps_info->use_cselib)
2476 {
2477 machine_mode address_mode = get_address_mode (dest);
2478
2479 t = shallow_copy_rtx (dest);
2480 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2481 GET_MODE (t), insn);
2482 XEXP (t, 0)
2483 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2484 insn);
2485 }
2486 t = canon_rtx (t);
2487
2488 /* Pending lists can't get larger in a readonly context. */
2489 if (!deps->readonly
2490 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2491 >= MAX_PENDING_LIST_LENGTH))
2492 {
2493 /* Flush all pending reads and writes to prevent the pending lists
2494 from getting any larger. Insn scheduling runs too slowly when
2495 these lists get long. When compiling GCC with itself,
2496 this flush occurs 8 times for sparc, and 10 times for m88k using
2497 the default value of 32. */
2498 flush_pending_lists (deps, insn, false, true);
2499 }
2500 else
2501 {
2502 rtx_insn_list *pending;
2503 rtx_expr_list *pending_mem;
2504
2505 pending = deps->pending_read_insns;
2506 pending_mem = deps->pending_read_mems;
2507 while (pending)
2508 {
2509 if (anti_dependence (pending_mem->element (), t)
2510 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2511 note_mem_dep (t, pending_mem->element (), pending->insn (),
2512 DEP_ANTI);
2513
2514 pending = pending->next ();
2515 pending_mem = pending_mem->next ();
2516 }
2517
2518 pending = deps->pending_write_insns;
2519 pending_mem = deps->pending_write_mems;
2520 while (pending)
2521 {
2522 if (output_dependence (pending_mem->element (), t)
2523 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2524 note_mem_dep (t, pending_mem->element (),
2525 pending->insn (),
2526 DEP_OUTPUT);
2527
2528 pending = pending->next ();
2529 pending_mem = pending_mem-> next ();
2530 }
2531
2532 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2533 REG_DEP_ANTI, true);
2534 add_dependence_list (insn, deps->pending_jump_insns, 1,
2535 REG_DEP_CONTROL, true);
2536
2537 if (!deps->readonly)
2538 add_insn_mem_dependence (deps, false, insn, dest);
2539 }
2540 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2541 }
2542
2543 if (cslr_p && sched_deps_info->finish_lhs)
2544 sched_deps_info->finish_lhs ();
2545
2546 /* Analyze reads. */
2547 if (GET_CODE (x) == SET)
2548 {
2549 can_start_lhs_rhs_p = cslr_p;
2550
2551 sched_analyze_2 (deps, SET_SRC (x), insn);
2552
2553 can_start_lhs_rhs_p = false;
2554 }
2555 }
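
/* A toy model of the MAX_PENDING_LIST_LENGTH policy applied in
   sched_analyze_1 above: memory references accumulate on pending lists,
   and once reads plus writes reach the cap the lists are flushed, so
   every later insn depends on the flush point rather than on each
   individual reference. All names below are invented; only the
   threshold idea is taken from the code above. */

#include <stdio.h>

#define MAX_PENDING 32

struct ctx { int pending_reads, pending_writes; };

static void
flush (struct ctx *c)
{
  printf ("flush at %d reads + %d writes\n",
          c->pending_reads, c->pending_writes);
  c->pending_reads = c->pending_writes = 0;
}

static void
add_write (struct ctx *c)
{
  if (c->pending_reads + c->pending_writes >= MAX_PENDING)
    flush (c);               /* Later insns depend on the flush point. */
  else
    c->pending_writes++;
}

int
main (void)
{
  struct ctx c = { 0, 0 };
  for (int i = 0; i < 100; i++)
    add_write (&c);
  printf ("left pending: %d writes\n", c.pending_writes);
  return 0;
}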
2556
2557 /* Analyze the uses of memory and registers in rtx X in INSN. */
2558 static void
2559 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2560 {
2561 int i;
2562 int j;
2563 enum rtx_code code;
2564 const char *fmt;
2565 bool cslr_p = can_start_lhs_rhs_p;
2566
2567 can_start_lhs_rhs_p = false;
2568
2569 gcc_assert (x);
2570 if (x == 0)
2571 return;
2572
2573 if (cslr_p && sched_deps_info->start_rhs)
2574 sched_deps_info->start_rhs (x);
2575
2576 code = GET_CODE (x);
2577
2578 switch (code)
2579 {
2580 CASE_CONST_ANY:
2581 case SYMBOL_REF:
2582 case CONST:
2583 case LABEL_REF:
2584 /* Ignore constants. */
2585 if (cslr_p && sched_deps_info->finish_rhs)
2586 sched_deps_info->finish_rhs ();
2587
2588 return;
2589
2590 case CC0:
2591 if (!HAVE_cc0)
2592 gcc_unreachable ();
2593
2594 /* User of CC0 depends on immediately preceding insn. */
2595 SCHED_GROUP_P (insn) = 1;
2596 /* Don't move CC0 setter to another block (it can set up the
2597 same flag for previous CC0 users which is safe). */
2598 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2599
2600 if (cslr_p && sched_deps_info->finish_rhs)
2601 sched_deps_info->finish_rhs ();
2602
2603 return;
2604
2605 case REG:
2606 {
2607 int regno = REGNO (x);
2608 machine_mode mode = GET_MODE (x);
2609
2610 sched_analyze_reg (deps, regno, mode, USE, insn);
2611
2612 #ifdef STACK_REGS
2613 /* Treat all reads of a stack register as modifying the TOS. */
2614 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2615 {
2616 /* Avoid analyzing the same register twice. */
2617 if (regno != FIRST_STACK_REG)
2618 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2619 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2620 }
2621 #endif
2622
2623 if (cslr_p && sched_deps_info->finish_rhs)
2624 sched_deps_info->finish_rhs ();
2625
2626 return;
2627 }
2628
2629 case MEM:
2630 {
2631 /* Reading memory. */
2632 rtx_insn_list *u;
2633 rtx_insn_list *pending;
2634 rtx_expr_list *pending_mem;
2635 rtx t = x;
2636
2637 if (sched_deps_info->use_cselib)
2638 {
2639 machine_mode address_mode = get_address_mode (t);
2640
2641 t = shallow_copy_rtx (t);
2642 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2643 GET_MODE (t), insn);
2644 XEXP (t, 0)
2645 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2646 insn);
2647 }
2648
2649 if (!DEBUG_INSN_P (insn))
2650 {
2651 t = canon_rtx (t);
2652 pending = deps->pending_read_insns;
2653 pending_mem = deps->pending_read_mems;
2654 while (pending)
2655 {
2656 if (read_dependence (pending_mem->element (), t)
2657 && ! sched_insns_conditions_mutex_p (insn,
2658 pending->insn ()))
2659 note_mem_dep (t, pending_mem->element (),
2660 pending->insn (),
2661 DEP_ANTI);
2662
2663 pending = pending->next ();
2664 pending_mem = pending_mem->next ();
2665 }
2666
2667 pending = deps->pending_write_insns;
2668 pending_mem = deps->pending_write_mems;
2669 while (pending)
2670 {
2671 if (true_dependence (pending_mem->element (), VOIDmode, t)
2672 && ! sched_insns_conditions_mutex_p (insn,
2673 pending->insn ()))
2674 note_mem_dep (t, pending_mem->element (),
2675 pending->insn (),
2676 sched_deps_info->generate_spec_deps
2677 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2678
2679 pending = pending->next ();
2680 pending_mem = pending_mem->next ();
2681 }
2682
2683 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2684 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2685
2686 for (u = deps->pending_jump_insns; u; u = u->next ())
2687 if (deps_may_trap_p (x))
2688 {
2689 if ((sched_deps_info->generate_spec_deps)
2690 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2691 {
2692 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2693 MAX_DEP_WEAK);
2694
2695 note_dep (u->insn (), ds);
2696 }
2697 else
2698 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2699 }
2700 }
2701
2702 /* Always add these dependencies to pending_reads, since
2703 this insn may be followed by a write. */
2704 if (!deps->readonly)
2705 {
2706 if ((deps->pending_read_list_length
2707 + deps->pending_write_list_length)
2708 >= MAX_PENDING_LIST_LENGTH
2709 && !DEBUG_INSN_P (insn))
2710 flush_pending_lists (deps, insn, true, true);
2711 add_insn_mem_dependence (deps, true, insn, x);
2712 }
2713
2714 sched_analyze_2 (deps, XEXP (x, 0), insn);
2715
2716 if (cslr_p && sched_deps_info->finish_rhs)
2717 sched_deps_info->finish_rhs ();
2718
2719 return;
2720 }
2721
2722 /* Force pending stores to memory in case a trap handler needs them.
2723 Also force pending loads from memory; loads and stores can segfault
2724 and the signal handler won't be triggered if the trap insn was moved
2725 above the load or store insn. */
2726 case TRAP_IF:
2727 flush_pending_lists (deps, insn, true, true);
2728 break;
2729
2730 case PREFETCH:
2731 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2732 reg_pending_barrier = TRUE_BARRIER;
2733 /* A prefetch insn contains addresses only. So if the prefetch
2734 address has no registers, there will be no dependencies on
2735 the prefetch insn. This is wrong from the correctness point
2736 of view, as such a prefetch can be moved below a jump insn,
2737 which usually generates a MOVE_BARRIER that prevents insns
2738 containing registers or memories from moving through the
2739 barrier. It is also wrong from the performance point of
2740 view, as a prefetch without dependencies will tend to be
2741 issued later instead of earlier. It is hard to generate
2742 accurate dependencies for prefetch insns, as a prefetch
2743 carries only the start address, but it is better to have
2744 something than nothing. */
2745 if (!deps->readonly)
2746 {
2747 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2748 if (sched_deps_info->use_cselib)
2749 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2750 add_insn_mem_dependence (deps, true, insn, x);
2751 }
2752 break;
2753
2754 case UNSPEC_VOLATILE:
2755 flush_pending_lists (deps, insn, true, true);
2756 /* FALLTHRU */
2757
2758 case ASM_OPERANDS:
2759 case ASM_INPUT:
2760 {
2761 /* Traditional and volatile asm instructions must be considered to use
2762 and clobber all hard registers, all pseudo-registers and all of
2763 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2764
2765 Consider for instance a volatile asm that changes the fpu rounding
2766 mode. An insn should not be moved across this even if it only uses
2767 pseudo-regs because it might give an incorrectly rounded result. */
2768 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2769 && !DEBUG_INSN_P (insn))
2770 reg_pending_barrier = TRUE_BARRIER;
2771
2772 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2773 We cannot just fall through here since then we would be confused
2774 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2775 a traditional asm, unlike its normal usage. */
2776
2777 if (code == ASM_OPERANDS)
2778 {
2779 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2780 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2781
2782 if (cslr_p && sched_deps_info->finish_rhs)
2783 sched_deps_info->finish_rhs ();
2784
2785 return;
2786 }
2787 break;
2788 }
2789
2790 case PRE_DEC:
2791 case POST_DEC:
2792 case PRE_INC:
2793 case POST_INC:
2794 /* These both read and modify the result. We must handle them as writes
2795 to get proper dependencies for following instructions. We must handle
2796 them as reads to get proper dependencies from this to previous
2797 instructions. Thus we need to pass them to both sched_analyze_1
2798 and sched_analyze_2. We must call sched_analyze_2 first in order
2799 to get the proper antecedent for the read. */
2800 sched_analyze_2 (deps, XEXP (x, 0), insn);
2801 sched_analyze_1 (deps, x, insn);
2802
2803 if (cslr_p && sched_deps_info->finish_rhs)
2804 sched_deps_info->finish_rhs ();
2805
2806 return;
2807
2808 case POST_MODIFY:
2809 case PRE_MODIFY:
2810 /* op0 = op0 + op1 */
2811 sched_analyze_2 (deps, XEXP (x, 0), insn);
2812 sched_analyze_2 (deps, XEXP (x, 1), insn);
2813 sched_analyze_1 (deps, x, insn);
2814
2815 if (cslr_p && sched_deps_info->finish_rhs)
2816 sched_deps_info->finish_rhs ();
2817
2818 return;
2819
2820 default:
2821 break;
2822 }
2823
2824 /* Other cases: walk the insn. */
2825 fmt = GET_RTX_FORMAT (code);
2826 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2827 {
2828 if (fmt[i] == 'e')
2829 sched_analyze_2 (deps, XEXP (x, i), insn);
2830 else if (fmt[i] == 'E')
2831 for (j = 0; j < XVECLEN (x, i); j++)
2832 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2833 }
2834
2835 if (cslr_p && sched_deps_info->finish_rhs)
2836 sched_deps_info->finish_rhs ();
2837 }
2838
2839 /* Try to group two fusible insns together to prevent scheduler
2840 from scheduling them apart. */
2841
2842 static void
2843 sched_macro_fuse_insns (rtx_insn *insn)
2844 {
2845 rtx_insn *prev;
2846 /* No target hook would return true with a debug insn as either of
2847 the hook operands, and with very large sequences consisting only
2848 of debug insns, calling sched_macro_fuse_insns on each of them
2849 has quadratic compile time complexity. */
2850 if (DEBUG_INSN_P (insn))
2851 return;
2852 prev = prev_nonnote_nondebug_insn (insn);
2853 if (!prev)
2854 return;
2855
2856 if (any_condjump_p (insn))
2857 {
2858 unsigned int condreg1, condreg2;
2859 rtx cc_reg_1;
2860 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2861 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2862 if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2863 && modified_in_p (cc_reg_1, prev))
2864 {
2865 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2866 SCHED_GROUP_P (insn) = 1;
2867 return;
2868 }
2869 }
2870
2871 if (single_set (insn) && single_set (prev))
2872 {
2873 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2874 SCHED_GROUP_P (insn) = 1;
2875 }
2876 }
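
/* A minimal sketch of the grouping effect of sched_macro_fuse_insns:
   when a target-specific predicate accepts a pair, the second insn is
   flagged so the scheduler keeps it glued to its predecessor. The
   fusible_pair_p predicate below is a made-up stand-in for
   targetm.sched.macro_fusion_pair_p. */

#include <stdio.h>

struct insn { int uid; int sched_group_p; };

/* Hypothetical target hook: pretend insns with consecutive uids fuse. */
static int
fusible_pair_p (const struct insn *prev, const struct insn *curr)
{
  return curr->uid == prev->uid + 1;
}

static void
try_fuse (struct insn *prev, struct insn *curr)
{
  if (prev && fusible_pair_p (prev, curr))
    curr->sched_group_p = 1;   /* Keep CURR adjacent to PREV. */
}

int
main (void)
{
  struct insn a = { 10, 0 }, b = { 11, 0 };
  try_fuse (&a, &b);
  printf ("b grouped: %d\n", b.sched_group_p);
  return 0;
}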
2877
2878 /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
2879 void
2880 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2881 {
2882 extract_insn (insn);
2883 preprocess_constraints (insn);
2884 alternative_mask preferred = get_preferred_alternatives (insn);
2885 ira_implicitly_set_insn_hard_regs (temp, preferred);
2886 AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
2887 }
2888
2889 /* Analyze an INSN with pattern X to find all dependencies. */
2890 static void
2891 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2892 {
2893 RTX_CODE code = GET_CODE (x);
2894 rtx link;
2895 unsigned i;
2896 reg_set_iterator rsi;
2897
2898 if (! reload_completed)
2899 {
2900 HARD_REG_SET temp;
2901 get_implicit_reg_pending_clobbers (&temp, insn);
2902 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2903 }
2904
2905 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2906 && code == SET);
2907
2908 /* Group compare and branch insns for macro-fusion. */
2909 if (!deps->readonly
2910 && targetm.sched.macro_fusion_p
2911 && targetm.sched.macro_fusion_p ())
2912 sched_macro_fuse_insns (insn);
2913
2914 if (may_trap_p (x))
2915 /* Avoid moving trapping instructions across function calls that might
2916 not always return. */
2917 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2918 1, REG_DEP_ANTI, true);
2919
2920 /* We must avoid creating a situation in which two successors of the
2921 current block have different unwind info after scheduling. If at any
2922 point the two paths re-join this leads to incorrect unwind info. */
2923 /* ??? There are certain situations involving a forced frame pointer in
2924 which, with extra effort, we could fix up the unwind info at a later
2925 CFG join. However, it seems better to notice these cases earlier
2926 during prologue generation and avoid marking the frame pointer setup
2927 as frame-related at all. */
2928 if (RTX_FRAME_RELATED_P (insn))
2929 {
2930 /* Make sure prologue insn is scheduled before next jump. */
2931 deps->sched_before_next_jump
2932 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2933
2934 /* Make sure epilogue insn is scheduled after preceding jumps. */
2935 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2936 REG_DEP_ANTI, true);
2937 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2938 true);
2939 }
2940
2941 if (code == COND_EXEC)
2942 {
2943 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2944
2945 /* ??? Should be recording conditions so we reduce the number of
2946 false dependencies. */
2947 x = COND_EXEC_CODE (x);
2948 code = GET_CODE (x);
2949 }
2950 if (code == SET || code == CLOBBER)
2951 {
2952 sched_analyze_1 (deps, x, insn);
2953
2954 /* Bare clobber insns are used for letting life analysis, reg-stack
2955 and others know that a value is dead. Depend on the last call
2956 instruction so that reg-stack won't get confused. */
2957 if (code == CLOBBER)
2958 add_dependence_list (insn, deps->last_function_call, 1,
2959 REG_DEP_OUTPUT, true);
2960 }
2961 else if (code == PARALLEL)
2962 {
2963 for (i = XVECLEN (x, 0); i--;)
2964 {
2965 rtx sub = XVECEXP (x, 0, i);
2966 code = GET_CODE (sub);
2967
2968 if (code == COND_EXEC)
2969 {
2970 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2971 sub = COND_EXEC_CODE (sub);
2972 code = GET_CODE (sub);
2973 }
2974 else if (code == SET || code == CLOBBER || code == CLOBBER_HIGH)
2975 sched_analyze_1 (deps, sub, insn);
2976 else
2977 sched_analyze_2 (deps, sub, insn);
2978 }
2979 }
2980 else
2981 sched_analyze_2 (deps, x, insn);
2982
2983 /* Mark registers clobbered or used by the called function. */
2984 if (CALL_P (insn))
2985 {
2986 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2987 {
2988 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2989 sched_analyze_1 (deps, XEXP (link, 0), insn);
2990 else if (GET_CODE (XEXP (link, 0)) == CLOBBER_HIGH)
2991 /* We could support CLOBBER_HIGH and treat it in the same way as
2992 HARD_REGNO_CALL_PART_CLOBBERED, but no port needs that yet. */
2993 gcc_unreachable ();
2994 else if (GET_CODE (XEXP (link, 0)) != SET)
2995 sched_analyze_2 (deps, XEXP (link, 0), insn);
2996 }
2997 /* Don't schedule anything after a tail call; a tail call needs
2998 to use at least all call-saved registers. */
2999 if (SIBLING_CALL_P (insn))
3000 reg_pending_barrier = TRUE_BARRIER;
3001 else if (find_reg_note (insn, REG_SETJMP, NULL))
3002 reg_pending_barrier = MOVE_BARRIER;
3003 }
3004
3005 if (JUMP_P (insn))
3006 {
3007 rtx_insn *next = next_nonnote_nondebug_insn (insn);
3008 /* ??? For tablejumps, the barrier may appear not immediately after
3009 the jump, but after a label and a jump_table_data insn. */
3010 if (next && LABEL_P (next) && NEXT_INSN (next)
3011 && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
3012 next = NEXT_INSN (NEXT_INSN (next));
3013 if (next && BARRIER_P (next))
3014 reg_pending_barrier = MOVE_BARRIER;
3015 else
3016 {
3017 rtx_insn_list *pending;
3018 rtx_expr_list *pending_mem;
3019
3020 if (sched_deps_info->compute_jump_reg_dependencies)
3021 {
3022 (*sched_deps_info->compute_jump_reg_dependencies)
3023 (insn, reg_pending_control_uses);
3024
3025 /* Make latency of jump equal to 0 by using anti-dependence. */
3026 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3027 {
3028 struct deps_reg *reg_last = &deps->reg_last[i];
3029 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3030 false);
3031 add_dependence_list (insn, reg_last->implicit_sets,
3032 0, REG_DEP_ANTI, false);
3033 add_dependence_list (insn, reg_last->clobbers, 0,
3034 REG_DEP_ANTI, false);
3035 }
3036 }
3037
3038 /* All memory writes and volatile reads must happen before the
3039 jump. Non-volatile reads must happen before the jump iff
3040 the result is needed by the register-use mask computed above. */
3041
3042 pending = deps->pending_write_insns;
3043 pending_mem = deps->pending_write_mems;
3044 while (pending)
3045 {
3046 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3047 add_dependence (insn, pending->insn (),
3048 REG_DEP_OUTPUT);
3049 pending = pending->next ();
3050 pending_mem = pending_mem->next ();
3051 }
3052
3053 pending = deps->pending_read_insns;
3054 pending_mem = deps->pending_read_mems;
3055 while (pending)
3056 {
3057 if (MEM_VOLATILE_P (pending_mem->element ())
3058 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3059 add_dependence (insn, pending->insn (),
3060 REG_DEP_OUTPUT);
3061 pending = pending->next ();
3062 pending_mem = pending_mem->next ();
3063 }
3064
3065 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3066 REG_DEP_ANTI, true);
3067 add_dependence_list (insn, deps->pending_jump_insns, 1,
3068 REG_DEP_ANTI, true);
3069 }
3070 }
3071
3072 /* If this instruction can throw an exception, then moving it changes
3073 where block boundaries fall. This is mighty confusing elsewhere.
3074 Therefore, prevent such an instruction from being moved. Same for
3075 non-jump instructions that define block boundaries.
3076 ??? Unclear whether this is still necessary in EBB mode. If not,
3077 add_branch_dependences should be adjusted for RGN mode instead. */
3078 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3079 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3080 reg_pending_barrier = MOVE_BARRIER;
3081
3082 if (sched_pressure != SCHED_PRESSURE_NONE)
3083 {
3084 setup_insn_reg_uses (deps, insn);
3085 init_insn_reg_pressure_info (insn);
3086 }
3087
3088 /* Add register dependencies for insn. */
3089 if (DEBUG_INSN_P (insn))
3090 {
3091 rtx_insn *prev = deps->last_debug_insn;
3092 rtx_insn_list *u;
3093
3094 if (!deps->readonly)
3095 deps->last_debug_insn = insn;
3096
3097 if (prev)
3098 add_dependence (insn, prev, REG_DEP_ANTI);
3099
3100 add_dependence_list (insn, deps->last_function_call, 1,
3101 REG_DEP_ANTI, false);
3102
3103 if (!sel_sched_p ())
3104 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3105 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3106
3107 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3108 {
3109 struct deps_reg *reg_last = &deps->reg_last[i];
3110 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3111 /* There's no point in making REG_DEP_CONTROL dependencies for
3112 debug insns. */
3113 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3114 false);
3115
3116 if (!deps->readonly)
3117 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3118 }
3119 CLEAR_REG_SET (reg_pending_uses);
3120
3121 /* Quite often, a debug insn will refer to stuff in the
3122 previous instruction, but the reason we want this
3123 dependency here is to make sure the scheduler doesn't
3124 gratuitously move a debug insn ahead. This could dirty
3125 DF flags and cause additional analysis that wouldn't have
3126 occurred in compilation without debug insns, and such
3127 additional analysis can modify the generated code. */
3128 prev = PREV_INSN (insn);
3129
3130 if (prev && NONDEBUG_INSN_P (prev))
3131 add_dependence (insn, prev, REG_DEP_ANTI);
3132 }
3133 else
3134 {
3135 regset_head set_or_clobbered;
3136
3137 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3138 {
3139 struct deps_reg *reg_last = &deps->reg_last[i];
3140 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3141 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3142 false);
3143 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3144 false);
3145
3146 if (!deps->readonly)
3147 {
3148 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3149 reg_last->uses_length++;
3150 }
3151 }
3152
3153 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3154 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3155 {
3156 struct deps_reg *reg_last = &deps->reg_last[i];
3157 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3158 add_dependence_list (insn, reg_last->implicit_sets, 0,
3159 REG_DEP_ANTI, false);
3160 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3161 false);
3162
3163 if (!deps->readonly)
3164 {
3165 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3166 reg_last->uses_length++;
3167 }
3168 }
3169
3170 if (targetm.sched.exposed_pipeline)
3171 {
3172 INIT_REG_SET (&set_or_clobbered);
3173 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3174 reg_pending_sets);
3175 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3176 {
3177 struct deps_reg *reg_last = &deps->reg_last[i];
3178 rtx list;
3179 for (list = reg_last->uses; list; list = XEXP (list, 1))
3180 {
3181 rtx other = XEXP (list, 0);
3182 if (INSN_CACHED_COND (other) != const_true_rtx
3183 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3184 INSN_CACHED_COND (other) = const_true_rtx;
3185 }
3186 }
3187 }
3188
3189 /* If the current insn is conditional, we can't free any
3190 of the lists. */
3191 if (sched_has_condition_p (insn))
3192 {
3193 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3194 {
3195 struct deps_reg *reg_last = &deps->reg_last[i];
3196 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3197 false);
3198 add_dependence_list (insn, reg_last->implicit_sets, 0,
3199 REG_DEP_ANTI, false);
3200 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3201 false);
3202 add_dependence_list (insn, reg_last->control_uses, 0,
3203 REG_DEP_CONTROL, false);
3204
3205 if (!deps->readonly)
3206 {
3207 reg_last->clobbers
3208 = alloc_INSN_LIST (insn, reg_last->clobbers);
3209 reg_last->clobbers_length++;
3210 }
3211 }
3212 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3213 {
3214 struct deps_reg *reg_last = &deps->reg_last[i];
3215 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3216 false);
3217 add_dependence_list (insn, reg_last->implicit_sets, 0,
3218 REG_DEP_ANTI, false);
3219 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3220 false);
3221 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3222 false);
3223 add_dependence_list (insn, reg_last->control_uses, 0,
3224 REG_DEP_CONTROL, false);
3225
3226 if (!deps->readonly)
3227 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3228 }
3229 }
3230 else
3231 {
3232 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3233 {
3234 struct deps_reg *reg_last = &deps->reg_last[i];
3235 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3236 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3237 {
3238 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3239 REG_DEP_OUTPUT, false);
3240 add_dependence_list_and_free (deps, insn,
3241 &reg_last->implicit_sets, 0,
3242 REG_DEP_ANTI, false);
3243 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3244 REG_DEP_ANTI, false);
3245 add_dependence_list_and_free (deps, insn,
3246 &reg_last->control_uses, 0,
3247 REG_DEP_ANTI, false);
3248 add_dependence_list_and_free (deps, insn,
3249 &reg_last->clobbers, 0,
3250 REG_DEP_OUTPUT, false);
3251
3252 if (!deps->readonly)
3253 {
3254 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3255 reg_last->clobbers_length = 0;
3256 reg_last->uses_length = 0;
3257 }
3258 }
3259 else
3260 {
3261 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3262 false);
3263 add_dependence_list (insn, reg_last->implicit_sets, 0,
3264 REG_DEP_ANTI, false);
3265 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3266 false);
3267 add_dependence_list (insn, reg_last->control_uses, 0,
3268 REG_DEP_CONTROL, false);
3269 }
3270
3271 if (!deps->readonly)
3272 {
3273 reg_last->clobbers_length++;
3274 reg_last->clobbers
3275 = alloc_INSN_LIST (insn, reg_last->clobbers);
3276 }
3277 }
3278 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3279 {
3280 struct deps_reg *reg_last = &deps->reg_last[i];
3281
3282 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3283 REG_DEP_OUTPUT, false);
3284 add_dependence_list_and_free (deps, insn,
3285 &reg_last->implicit_sets,
3286 0, REG_DEP_ANTI, false);
3287 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3288 REG_DEP_OUTPUT, false);
3289 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3290 REG_DEP_ANTI, false);
3291 add_dependence_list (insn, reg_last->control_uses, 0,
3292 REG_DEP_CONTROL, false);
3293
3294 if (!deps->readonly)
3295 {
3296 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3297 reg_last->uses_length = 0;
3298 reg_last->clobbers_length = 0;
3299 }
3300 }
3301 }
3302 if (!deps->readonly)
3303 {
3304 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3305 {
3306 struct deps_reg *reg_last = &deps->reg_last[i];
3307 reg_last->control_uses
3308 = alloc_INSN_LIST (insn, reg_last->control_uses);
3309 }
3310 }
3311 }
3312
3313 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3314 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3315 {
3316 struct deps_reg *reg_last = &deps->reg_last[i];
3317 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3318 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3319 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3320 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3321 false);
3322
3323 if (!deps->readonly)
3324 reg_last->implicit_sets
3325 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3326 }
3327
3328 if (!deps->readonly)
3329 {
3330 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3331 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3332 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3333 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3334 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3335 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3336 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3337
3338 /* Set up the pending barrier found. */
3339 deps->last_reg_pending_barrier = reg_pending_barrier;
3340 }
3341
3342 CLEAR_REG_SET (reg_pending_uses);
3343 CLEAR_REG_SET (reg_pending_clobbers);
3344 CLEAR_REG_SET (reg_pending_sets);
3345 CLEAR_REG_SET (reg_pending_control_uses);
3346 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3347 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3348
3349 /* Add dependencies if a scheduling barrier was found. */
3350 if (reg_pending_barrier)
3351 {
3352 /* In the case of a barrier, most of the added dependencies are
3353 not real, so we use anti-dependences here. */
3354 if (sched_has_condition_p (insn))
3355 {
3356 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3357 {
3358 struct deps_reg *reg_last = &deps->reg_last[i];
3359 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3360 true);
3361 add_dependence_list (insn, reg_last->sets, 0,
3362 reg_pending_barrier == TRUE_BARRIER
3363 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3364 add_dependence_list (insn, reg_last->implicit_sets, 0,
3365 REG_DEP_ANTI, true);
3366 add_dependence_list (insn, reg_last->clobbers, 0,
3367 reg_pending_barrier == TRUE_BARRIER
3368 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3369 }
3370 }
3371 else
3372 {
3373 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3374 {
3375 struct deps_reg *reg_last = &deps->reg_last[i];
3376 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3377 REG_DEP_ANTI, true);
3378 add_dependence_list_and_free (deps, insn,
3379 &reg_last->control_uses, 0,
3380 REG_DEP_CONTROL, true);
3381 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3382 reg_pending_barrier == TRUE_BARRIER
3383 ? REG_DEP_TRUE : REG_DEP_ANTI,
3384 true);
3385 add_dependence_list_and_free (deps, insn,
3386 &reg_last->implicit_sets, 0,
3387 REG_DEP_ANTI, true);
3388 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3389 reg_pending_barrier == TRUE_BARRIER
3390 ? REG_DEP_TRUE : REG_DEP_ANTI,
3391 true);
3392
3393 if (!deps->readonly)
3394 {
3395 reg_last->uses_length = 0;
3396 reg_last->clobbers_length = 0;
3397 }
3398 }
3399 }
3400
3401 if (!deps->readonly)
3402 for (i = 0; i < (unsigned)deps->max_reg; i++)
3403 {
3404 struct deps_reg *reg_last = &deps->reg_last[i];
3405 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3406 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3407 }
3408
3409 /* Don't flush pending lists on speculative checks for
3410 selective scheduling. */
3411 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3412 flush_pending_lists (deps, insn, true, true);
3413
3414 reg_pending_barrier = NOT_A_BARRIER;
3415 }
3416
3417 /* If a post-call group is still open, see if it should remain so.
3418 This insn must be a simple move of a hard reg to a pseudo or
3419 vice-versa.
3420
3421 We must avoid moving these insns for correctness on targets
3422 with small register classes, and for special registers like
3423 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3424 hard regs for all targets. */
3425
3426 if (deps->in_post_call_group_p)
3427 {
3428 rtx tmp, set = single_set (insn);
3429 int src_regno, dest_regno;
3430
3431 if (set == NULL)
3432 {
3433 if (DEBUG_INSN_P (insn))
3434 /* We don't want to mark debug insns as part of the same
3435 sched group. We know they really aren't, but if we use
3436 debug insns to tell that a call group is over, we'll
3437 get different code if debug insns are not there and
3438 instructions that follow seem like they should be part
3439 of the call group.
3440
3441 Also, if we did, chain_to_prev_insn would move the
3442 deps of the debug insn to the call insn, modifying
3443 non-debug post-dependency counts of the debug insn
3444 dependencies and otherwise messing with the scheduling
3445 order.
3446
3447 Instead, let such debug insns be scheduled freely, but
3448 keep the call group open in case there are insns that
3449 should be part of it afterwards. Since we grant debug
3450 insns higher priority than even sched group insns, it
3451 will all turn out all right. */
3452 goto debug_dont_end_call_group;
3453 else
3454 goto end_call_group;
3455 }
3456
3457 tmp = SET_DEST (set);
3458 if (GET_CODE (tmp) == SUBREG)
3459 tmp = SUBREG_REG (tmp);
3460 if (REG_P (tmp))
3461 dest_regno = REGNO (tmp);
3462 else
3463 goto end_call_group;
3464
3465 tmp = SET_SRC (set);
3466 if (GET_CODE (tmp) == SUBREG)
3467 tmp = SUBREG_REG (tmp);
3468 if ((GET_CODE (tmp) == PLUS
3469 || GET_CODE (tmp) == MINUS)
3470 && REG_P (XEXP (tmp, 0))
3471 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3472 && dest_regno == STACK_POINTER_REGNUM)
3473 src_regno = STACK_POINTER_REGNUM;
3474 else if (REG_P (tmp))
3475 src_regno = REGNO (tmp);
3476 else
3477 goto end_call_group;
3478
3479 if (src_regno < FIRST_PSEUDO_REGISTER
3480 || dest_regno < FIRST_PSEUDO_REGISTER)
3481 {
3482 if (!deps->readonly
3483 && deps->in_post_call_group_p == post_call_initial)
3484 deps->in_post_call_group_p = post_call;
3485
3486 if (!sel_sched_p () || sched_emulate_haifa_p)
3487 {
3488 SCHED_GROUP_P (insn) = 1;
3489 CANT_MOVE (insn) = 1;
3490 }
3491 }
3492 else
3493 {
3494 end_call_group:
3495 if (!deps->readonly)
3496 deps->in_post_call_group_p = not_post_call;
3497 }
3498 }
3499
3500 debug_dont_end_call_group:
3501 if ((current_sched_info->flags & DO_SPECULATION)
3502 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3503 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3504 be speculated. */
3505 {
3506 if (sel_sched_p ())
3507 sel_mark_hard_insn (insn);
3508 else
3509 {
3510 sd_iterator_def sd_it;
3511 dep_t dep;
3512
3513 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3514 sd_iterator_cond (&sd_it, &dep);)
3515 change_spec_dep_to_hard (sd_it);
3516 }
3517 }
3518
3519 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3520 honor the original ordering of such insns. */
3521 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3522 {
3523 if (deps->last_args_size)
3524 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3525 if (!deps->readonly)
3526 deps->last_args_size = insn;
3527 }
3528
3529 /* We must not mix prologue and epilogue insns. See PR78029. */
3530 if (prologue_contains (insn))
3531 {
3532 add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
3533 if (!deps->readonly)
3534 {
3535 if (deps->last_logue_was_epilogue)
3536 free_INSN_LIST_list (&deps->last_prologue);
3537 deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
3538 deps->last_logue_was_epilogue = false;
3539 }
3540 }
3541
3542 if (epilogue_contains (insn))
3543 {
3544 add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
3545 if (!deps->readonly)
3546 {
3547 if (!deps->last_logue_was_epilogue)
3548 free_INSN_LIST_list (&deps->last_epilogue);
3549 deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
3550 deps->last_logue_was_epilogue = true;
3551 }
3552 }
3553 }
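
/* A compressed sketch of the post-call group state machine handled near
   the end of sched_analyze_insn above. State names follow the enum used
   for in_post_call_group_p; the hard_reg_move_p classifier is invented
   for the example (the real test inspects single_set, SUBREGs and
   register numbers). */

#include <stdio.h>

enum post_call_group { not_post_call, post_call, post_call_initial };

/* Stand-in classifier: is this insn a simple hard-reg <-> pseudo move? */
static int
hard_reg_move_p (int insn_kind)
{
  return insn_kind == 0;
}

static enum post_call_group
update_group (enum post_call_group state, int insn_kind)
{
  if (state == not_post_call)
    return state;                  /* No group is open. */
  if (hard_reg_move_p (insn_kind))
    return post_call;              /* Insn joins the group; keep it open. */
  return not_post_call;            /* Anything else ends the group. */
}

int
main (void)
{
  enum post_call_group s = post_call_initial;
  s = update_group (s, 0);         /* Move right after the call. */
  s = update_group (s, 1);         /* Unrelated insn closes the group. */
  printf ("final state: %d\n", (int) s);
  return 0;
}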
3554
3555 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3556 longjmp, loop forever, ...). */
3557 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3558 test for ECF_NORETURN? */
3559 static bool
3560 call_may_noreturn_p (rtx_insn *insn)
3561 {
3562 rtx call;
3563
3564 /* const or pure calls that aren't looping will always return. */
3565 if (RTL_CONST_OR_PURE_CALL_P (insn)
3566 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3567 return false;
3568
3569 call = get_call_rtx_from (insn);
3570 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3571 {
3572 rtx symbol = XEXP (XEXP (call, 0), 0);
3573 if (SYMBOL_REF_DECL (symbol)
3574 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3575 {
3576 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3577 == BUILT_IN_NORMAL)
3578 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3579 {
3580 case BUILT_IN_BCMP:
3581 case BUILT_IN_BCOPY:
3582 case BUILT_IN_BZERO:
3583 case BUILT_IN_INDEX:
3584 case BUILT_IN_MEMCHR:
3585 case BUILT_IN_MEMCMP:
3586 case BUILT_IN_MEMCPY:
3587 case BUILT_IN_MEMMOVE:
3588 case BUILT_IN_MEMPCPY:
3589 case BUILT_IN_MEMSET:
3590 case BUILT_IN_RINDEX:
3591 case BUILT_IN_STPCPY:
3592 case BUILT_IN_STPNCPY:
3593 case BUILT_IN_STRCAT:
3594 case BUILT_IN_STRCHR:
3595 case BUILT_IN_STRCMP:
3596 case BUILT_IN_STRCPY:
3597 case BUILT_IN_STRCSPN:
3598 case BUILT_IN_STRLEN:
3599 case BUILT_IN_STRNCAT:
3600 case BUILT_IN_STRNCMP:
3601 case BUILT_IN_STRNCPY:
3602 case BUILT_IN_STRPBRK:
3603 case BUILT_IN_STRRCHR:
3604 case BUILT_IN_STRSPN:
3605 case BUILT_IN_STRSTR:
3606 /* Assume certain string/memory builtins always return. */
3607 return false;
3608 default:
3609 break;
3610 }
3611 }
3612 }
3613
3614 /* For all other calls assume that they might not always return. */
3615 return true;
3616 }
3617
3618 /* Return true if INSN should be made dependent on the previous instruction
3619 group, and if all INSN's dependencies should be moved to the first
3620 instruction of that group. */
3621
3622 static bool
3623 chain_to_prev_insn_p (rtx_insn *insn)
3624 {
3625 /* INSN forms a group with the previous instruction. */
3626 if (SCHED_GROUP_P (insn))
3627 return true;
3628
3629 /* If the previous instruction clobbers a register R and this one sets
3630 part of R, the clobber was added specifically to help us track the
3631 liveness of R. There's no point scheduling the clobber and leaving
3632 INSN behind, especially if we move the clobber to another block. */
3633 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3634 if (prev
3635 && INSN_P (prev)
3636 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3637 && GET_CODE (PATTERN (prev)) == CLOBBER)
3638 {
3639 rtx x = XEXP (PATTERN (prev), 0);
3640 if (set_of (x, insn))
3641 return true;
3642 }
3643
3644 return false;
3645 }
3646
3647 /* Analyze INSN with DEPS as a context. */
3648 void
3649 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3650 {
3651 if (sched_deps_info->start_insn)
3652 sched_deps_info->start_insn (insn);
3653
3654 /* Record the condition for this insn. */
3655 if (NONDEBUG_INSN_P (insn))
3656 {
3657 rtx t;
3658 sched_get_condition_with_rev (insn, NULL);
3659 t = INSN_CACHED_COND (insn);
3660 INSN_COND_DEPS (insn) = NULL;
3661 if (reload_completed
3662 && (current_sched_info->flags & DO_PREDICATION)
3663 && COMPARISON_P (t)
3664 && REG_P (XEXP (t, 0))
3665 && CONSTANT_P (XEXP (t, 1)))
3666 {
3667 unsigned int regno;
3668 int nregs;
3669 rtx_insn_list *cond_deps = NULL;
3670 t = XEXP (t, 0);
3671 regno = REGNO (t);
3672 nregs = REG_NREGS (t);
3673 while (nregs-- > 0)
3674 {
3675 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3676 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3677 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3678 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3679 }
3680 INSN_COND_DEPS (insn) = cond_deps;
3681 }
3682 }
3683
3684 if (JUMP_P (insn))
3685 {
3686 /* Make each JUMP_INSN (but not a speculative check)
3687 a scheduling barrier for memory references. */
3688 if (!deps->readonly
3689 && !(sel_sched_p ()
3690 && sel_insn_is_speculation_check (insn)))
3691 {
3692 /* Keep the list a reasonable size. */
3693 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3694 flush_pending_lists (deps, insn, true, true);
3695 else
3696 deps->pending_jump_insns
3697 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3698 }
3699
3700 /* For each insn which shouldn't cross a jump, add a dependence. */
3701 add_dependence_list_and_free (deps, insn,
3702 &deps->sched_before_next_jump, 1,
3703 REG_DEP_ANTI, true);
3704
3705 sched_analyze_insn (deps, PATTERN (insn), insn);
3706 }
3707 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3708 {
3709 sched_analyze_insn (deps, PATTERN (insn), insn);
3710 }
3711 else if (CALL_P (insn))
3712 {
3713 int i;
3714
3715 CANT_MOVE (insn) = 1;
3716
3717 if (find_reg_note (insn, REG_SETJMP, NULL))
3718 {
3719 /* This is setjmp. Assume that all registers, not just
3720 hard registers, may be clobbered by this call. */
3721 reg_pending_barrier = MOVE_BARRIER;
3722 }
3723 else
3724 {
3725 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3726 /* A call may read and modify global register variables. */
3727 if (global_regs[i])
3728 {
3729 SET_REGNO_REG_SET (reg_pending_sets, i);
3730 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3731 }
3732 /* Other call-clobbered hard regs may be clobbered.
3733 Since we only have a choice between 'might be clobbered'
3734 and 'definitely not clobbered', we must include all
3735 partly call-clobbered registers here. */
3736 else if (targetm.hard_regno_call_part_clobbered (insn, i,
3737 reg_raw_mode[i])
3738 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3739 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3740 /* We don't know what set of fixed registers might be used
3741 by the function, but it is certain that the stack pointer
3742 is among them, so be conservative. */
3743 else if (fixed_regs[i])
3744 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3745 /* The frame pointer is normally not used by the function
3746 itself, but by the debugger. */
3747 /* ??? MIPS o32 is an exception. It uses the frame pointer
3748 in the macro expansion of jal but does not represent this
3749 fact in the call_insn rtl. */
3750 else if (i == FRAME_POINTER_REGNUM
3751 || (i == HARD_FRAME_POINTER_REGNUM
3752 && (! reload_completed || frame_pointer_needed)))
3753 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3754 }
3755
3756 /* For each insn which shouldn't cross a call, add a dependence
3757 between that insn and this call insn. */
3758 add_dependence_list_and_free (deps, insn,
3759 &deps->sched_before_next_call, 1,
3760 REG_DEP_ANTI, true);
3761
3762 sched_analyze_insn (deps, PATTERN (insn), insn);
3763
3764       /* If CALL were in a sched group, then this would violate the
3765 	 convention that sched group insns have dependencies only on the
3766 	 previous instruction.
3767 
3768 	 Of course one can say: "Hey!  What about the head of the sched
3769 	 group?"  And I will answer: "Basic principles (one dep per insn)
3770 	 are always the same."  */
3771 gcc_assert (!SCHED_GROUP_P (insn));
3772
3773       /* In the absence of interprocedural alias analysis, we must flush
3774 	 all pending reads and writes, and start new dependencies from
3775 	 here.  But only flush writes for constant calls (which may
3776 	 be passed a pointer to something we haven't written yet).  */
3777 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3778
3779 if (!deps->readonly)
3780 {
3781 /* Remember the last function call for limiting lifetimes. */
3782 free_INSN_LIST_list (&deps->last_function_call);
3783 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3784
3785 if (call_may_noreturn_p (insn))
3786 {
3787 /* Remember the last function call that might not always return
3788 normally for limiting moves of trapping insns. */
3789 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3790 deps->last_function_call_may_noreturn
3791 = alloc_INSN_LIST (insn, NULL_RTX);
3792 }
3793
3794 /* Before reload, begin a post-call group, so as to keep the
3795 lifetimes of hard registers correct. */
3796 if (! reload_completed)
3797 deps->in_post_call_group_p = post_call;
3798 }
3799 }
3800
3801 if (sched_deps_info->use_cselib)
3802 cselib_process_insn (insn);
3803
3804 if (sched_deps_info->finish_insn)
3805 sched_deps_info->finish_insn ();
3806
3807   /* Fix up the dependencies in the sched group.  */
3808 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3809 && chain_to_prev_insn_p (insn)
3810 && !sel_sched_p ())
3811 chain_to_prev_insn (insn);
3812 }
3813
3814 /* Initialize DEPS for the new block beginning with HEAD. */
3815 void
3816 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3817 {
3818 gcc_assert (!deps->readonly);
3819
3820 /* Before reload, if the previous block ended in a call, show that
3821 we are inside a post-call group, so as to keep the lifetimes of
3822 hard registers correct. */
3823 if (! reload_completed && !LABEL_P (head))
3824 {
3825 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3826
3827 if (insn && CALL_P (insn))
3828 deps->in_post_call_group_p = post_call_initial;
3829 }
3830 }
3831
3832 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3833 dependencies for each insn. */
3834 void
3835 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3836 {
3837 rtx_insn *insn;
3838
3839 if (sched_deps_info->use_cselib)
3840 cselib_init (CSELIB_RECORD_MEMORY);
3841
3842 deps_start_bb (deps, head);
3843
3844 for (insn = head;; insn = NEXT_INSN (insn))
3845 {
3847 if (INSN_P (insn))
3848 {
3849 	  /* Initialize the insn's deps_lists.  */
3850 	  sd_init_insn (insn);
3851 	  /* Clean up SCHED_GROUP_P, which may have been set by the
3852 	     previous scheduler pass.  */
3853 if (SCHED_GROUP_P (insn))
3854 SCHED_GROUP_P (insn) = 0;
3855 }
3856
3857 deps_analyze_insn (deps, insn);
3858
3859 if (insn == tail)
3860 {
3861 if (sched_deps_info->use_cselib)
3862 cselib_finish ();
3863 return;
3864 }
3865 }
3866 gcc_unreachable ();
3867 }
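
/* A minimal usage sketch of the entry points above and below; the real
   callers are the schedulers themselves, which interleave more setup:

       struct deps_desc deps;

       init_deps (&deps, false);
       sched_analyze (&deps, head, tail);
       ... consume the recorded dependencies ...
       free_deps (&deps);
       sched_free_deps (head, tail, true);
*/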
3868
3869 /* Helper for sched_free_deps ().
3870 Delete INSN's (RESOLVED_P) backward dependencies. */
3871 static void
3872 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3873 {
3874 sd_iterator_def sd_it;
3875 dep_t dep;
3876 sd_list_types_def types;
3877
3878 if (resolved_p)
3879 types = SD_LIST_RES_BACK;
3880 else
3881 types = SD_LIST_BACK;
3882
3883 for (sd_it = sd_iterator_start (insn, types);
3884 sd_iterator_cond (&sd_it, &dep);)
3885 {
3886 dep_link_t link = *sd_it.linkp;
3887 dep_node_t node = DEP_LINK_NODE (link);
3888 deps_list_t back_list;
3889 deps_list_t forw_list;
3890
3891 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3892 remove_from_deps_list (link, back_list);
3893 delete_dep_node (node);
3894 }
3895 }
3896
3897 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3898 deps_lists. */
3899 void
3900 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3901 {
3902 rtx_insn *insn;
3903 rtx_insn *next_tail = NEXT_INSN (tail);
3904
3905 /* We make two passes since some insns may be scheduled before their
3906 dependencies are resolved. */
3907 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3908 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3909 {
3910 	/* Clear forward deps and leave the dep_nodes on the
3911 	   corresponding back_deps list.  */
3912 if (resolved_p)
3913 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3914 else
3915 clear_deps_list (INSN_FORW_DEPS (insn));
3916 }
3917 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3918 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3919 {
3920 	/* Clear (RESOLVED_P) back deps together with their dep_nodes.  */
3921 delete_dep_nodes_in_back_deps (insn, resolved_p);
3922
3923 sd_finish_insn (insn);
3924 }
3925 }
3926 \f
3927 /* Initialize variables for region data dependence analysis.
3928    When LAZY_REG_LAST is true, do not allocate the reg_last array
3929 of struct deps_desc immediately. */
3930
3931 void
3932 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3933 {
3934 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3935
3936 deps->max_reg = max_reg;
3937 if (lazy_reg_last)
3938 deps->reg_last = NULL;
3939 else
3940 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3941 INIT_REG_SET (&deps->reg_last_in_use);
3942
3943 deps->pending_read_insns = 0;
3944 deps->pending_read_mems = 0;
3945 deps->pending_write_insns = 0;
3946 deps->pending_write_mems = 0;
3947 deps->pending_jump_insns = 0;
3948 deps->pending_read_list_length = 0;
3949 deps->pending_write_list_length = 0;
3950 deps->pending_flush_length = 0;
3951 deps->last_pending_memory_flush = 0;
3952 deps->last_function_call = 0;
3953 deps->last_function_call_may_noreturn = 0;
3954 deps->sched_before_next_call = 0;
3955 deps->sched_before_next_jump = 0;
3956 deps->in_post_call_group_p = not_post_call;
3957 deps->last_debug_insn = 0;
3958 deps->last_args_size = 0;
3959 deps->last_prologue = 0;
3960 deps->last_epilogue = 0;
3961 deps->last_logue_was_epilogue = false;
3962 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3963 deps->readonly = 0;
3964 }
3965
3966 /* Initialize only the reg_last field of DEPS, which was not allocated
3967    before because DEPS was initialized lazily.  */
3968 void
3969 init_deps_reg_last (struct deps_desc *deps)
3970 {
3971 gcc_assert (deps && deps->max_reg > 0);
3972 gcc_assert (deps->reg_last == NULL);
3973
3974 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3975 }
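
/* The lazy protocol assumed by the two functions above, as a sketch:

       struct deps_desc deps;

       init_deps (&deps, true);        reg_last stays NULL here
       ...
       if (deps.reg_last == NULL)      allocate on first real use
	 init_deps_reg_last (&deps);
*/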
3976
3977
3978 /* Free insn lists found in DEPS. */
3979
3980 void
3981 free_deps (struct deps_desc *deps)
3982 {
3983 unsigned i;
3984 reg_set_iterator rsi;
3985
3986   /* max_reg == 0 means this context has already been freed.  */
3987 if (deps->max_reg == 0)
3988 {
3989 gcc_assert (deps->reg_last == NULL);
3990 return;
3991 }
3992 deps->max_reg = 0;
3993
3994 free_INSN_LIST_list (&deps->pending_read_insns);
3995 free_EXPR_LIST_list (&deps->pending_read_mems);
3996 free_INSN_LIST_list (&deps->pending_write_insns);
3997 free_EXPR_LIST_list (&deps->pending_write_mems);
3998 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3999
4000 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
4001 times. For a testcase with 42000 regs and 8000 small basic blocks,
4002 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
4003 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4004 {
4005 struct deps_reg *reg_last = &deps->reg_last[i];
4006 if (reg_last->uses)
4007 free_INSN_LIST_list (&reg_last->uses);
4008 if (reg_last->sets)
4009 free_INSN_LIST_list (&reg_last->sets);
4010 if (reg_last->implicit_sets)
4011 free_INSN_LIST_list (&reg_last->implicit_sets);
4012 if (reg_last->control_uses)
4013 free_INSN_LIST_list (&reg_last->control_uses);
4014 if (reg_last->clobbers)
4015 free_INSN_LIST_list (&reg_last->clobbers);
4016 }
4017 CLEAR_REG_SET (&deps->reg_last_in_use);
4018
4019 /* As we initialize reg_last lazily, it is possible that we didn't allocate
4020 it at all. */
4021 free (deps->reg_last);
4022 deps->reg_last = NULL;
4025 }
4026
4027 /* Remove INSN from the dependence context DEPS.  */
4028 void
4029 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
4030 {
4031 int removed;
4032 unsigned i;
4033 reg_set_iterator rsi;
4034
4035 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4036 &deps->pending_read_mems);
4037 if (!DEBUG_INSN_P (insn))
4038 deps->pending_read_list_length -= removed;
4039 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4040 &deps->pending_write_mems);
4041 deps->pending_write_list_length -= removed;
4042
4043 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4044 deps->pending_flush_length -= removed;
4045 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4046 deps->pending_flush_length -= removed;
4047
4048 unsigned to_clear = -1U;
4049 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4050 {
4051 if (to_clear != -1U)
4052 {
4053 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
4054 to_clear = -1U;
4055 }
4056 struct deps_reg *reg_last = &deps->reg_last[i];
4057 if (reg_last->uses)
4058 remove_from_dependence_list (insn, &reg_last->uses);
4059 if (reg_last->sets)
4060 remove_from_dependence_list (insn, &reg_last->sets);
4061 if (reg_last->implicit_sets)
4062 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4063 if (reg_last->clobbers)
4064 remove_from_dependence_list (insn, &reg_last->clobbers);
4065 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4066 && !reg_last->clobbers)
4067 to_clear = i;
4068 }
4069 if (to_clear != -1U)
4070 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
4071
4072 if (CALL_P (insn))
4073 {
4074 remove_from_dependence_list (insn, &deps->last_function_call);
4075 remove_from_dependence_list (insn,
4076 &deps->last_function_call_may_noreturn);
4077 }
4078 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4079 }
4080
4081 /* Init deps data vector. */
4082 static void
4083 init_deps_data_vector (void)
4084 {
4085 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4086 if (reserve > 0 && ! h_d_i_d.space (reserve))
4087 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
4088 }
4089
4090 /* If it is profitable to use them, initialize or extend (depending on
4091 GLOBAL_P) dependency data. */
4092 void
4093 sched_deps_init (bool global_p)
4094 {
4095   /* Average number of insns per basic block.
4096      '+ 1' is used to make it nonzero.  */
4097 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4098
4099 init_deps_data_vector ();
4100
4101 /* We use another caching mechanism for selective scheduling, so
4102 we don't use this one. */
4103 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4104 {
4105 /* ?!? We could save some memory by computing a per-region luid mapping
4106 which could reduce both the number of vectors in the cache and the
4107 size of each vector. Instead we just avoid the cache entirely unless
4108 the average number of instructions in a basic block is very high. See
4109 the comment before the declaration of true_dependency_cache for
4110 what we consider "very high". */
4111 cache_size = 0;
4112 extend_dependency_caches (sched_max_luid, true);
4113 }
4114
4115 if (global_p)
4116 {
4117 dl_pool = new object_allocator<_deps_list> ("deps_list");
4118 /* Allocate lists for one block at a time. */
4119 dn_pool = new object_allocator<_dep_node> ("dep_node");
4120 /* Allocate nodes for one block at a time. */
4121 }
4122 }
4123
4124
4125 /* Create or extend (depending on CREATE_P) dependency caches to
4126 size N. */
4127 void
4128 extend_dependency_caches (int n, bool create_p)
4129 {
4130 if (create_p || true_dependency_cache)
4131 {
4132 int i, luid = cache_size + n;
4133
4134 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4135 luid);
4136 output_dependency_cache = XRESIZEVEC (bitmap_head,
4137 output_dependency_cache, luid);
4138 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4139 luid);
4140 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4141 luid);
4142
4143 if (current_sched_info->flags & DO_SPECULATION)
4144 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4145 luid);
4146
4147 for (i = cache_size; i < luid; i++)
4148 {
4149 bitmap_initialize (&true_dependency_cache[i], 0);
4150 bitmap_initialize (&output_dependency_cache[i], 0);
4151 bitmap_initialize (&anti_dependency_cache[i], 0);
4152 bitmap_initialize (&control_dependency_cache[i], 0);
4153
4154 if (current_sched_info->flags & DO_SPECULATION)
4155 bitmap_initialize (&spec_dependency_cache[i], 0);
4156 }
4157 cache_size = luid;
4158 }
4159 }
4160
4161 /* Finalize dependency information for the whole function. */
4162 void
4163 sched_deps_finish (void)
4164 {
4165 gcc_assert (deps_pools_are_empty_p ());
4166 delete dn_pool;
4167 delete dl_pool;
4168 dn_pool = NULL;
4169 dl_pool = NULL;
4170
4171 h_d_i_d.release ();
4172 cache_size = 0;
4173
4174 if (true_dependency_cache)
4175 {
4176 int i;
4177
4178 for (i = 0; i < cache_size; i++)
4179 {
4180 bitmap_clear (&true_dependency_cache[i]);
4181 bitmap_clear (&output_dependency_cache[i]);
4182 bitmap_clear (&anti_dependency_cache[i]);
4183 bitmap_clear (&control_dependency_cache[i]);
4184
4185 if (sched_deps_info->generate_spec_deps)
4186 bitmap_clear (&spec_dependency_cache[i]);
4187 }
4188 free (true_dependency_cache);
4189 true_dependency_cache = NULL;
4190 free (output_dependency_cache);
4191 output_dependency_cache = NULL;
4192 free (anti_dependency_cache);
4193 anti_dependency_cache = NULL;
4194 free (control_dependency_cache);
4195 control_dependency_cache = NULL;
4196
4197 if (sched_deps_info->generate_spec_deps)
4198 {
4199 free (spec_dependency_cache);
4200 spec_dependency_cache = NULL;
4201 }
4202
4203     }

  /* Reset the cache size only after the loop above has used it to clear
     the bitmaps.  */
  cache_size = 0;
4204 }
4205
4206 /* Initialize some global variables needed by the dependency analysis
4207 code. */
4208
4209 void
4210 init_deps_global (void)
4211 {
4212 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4213 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4214 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4215 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4216 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4217 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4218 reg_pending_barrier = NOT_A_BARRIER;
4219
4220 if (!sel_sched_p () || sched_emulate_haifa_p)
4221 {
4222 sched_deps_info->start_insn = haifa_start_insn;
4223 sched_deps_info->finish_insn = haifa_finish_insn;
4224
4225 sched_deps_info->note_reg_set = haifa_note_reg_set;
4226 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4227 sched_deps_info->note_reg_use = haifa_note_reg_use;
4228
4229 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4230 sched_deps_info->note_dep = haifa_note_dep;
4231 }
4232 }
4233
4234 /* Free everything used by the dependency analysis code. */
4235
4236 void
4237 finish_deps_global (void)
4238 {
4239 FREE_REG_SET (reg_pending_sets);
4240 FREE_REG_SET (reg_pending_clobbers);
4241 FREE_REG_SET (reg_pending_uses);
4242 FREE_REG_SET (reg_pending_control_uses);
4243 }
4244
4245 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4246 dw_t
4247 estimate_dep_weak (rtx mem1, rtx mem2)
4248 {
4249 if (mem1 == mem2)
4250 /* MEMs are the same - don't speculate. */
4251 return MIN_DEP_WEAK;
4252
4253 rtx r1 = XEXP (mem1, 0);
4254 rtx r2 = XEXP (mem2, 0);
4255
4256 if (sched_deps_info->use_cselib)
4257 {
4258 /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
4259 dangling at this point, since we never preserve them. Instead we
4260 canonicalize manually to get stable VALUEs out of hashing. */
4261 if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
4262 r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
4263 if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
4264 r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
4265 }
4266
4267 if (r1 == r2
4268 || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
4269 /* Again, MEMs are the same. */
4270 return MIN_DEP_WEAK;
4271 else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
4272     /* Different addressing modes: a reason to be more speculative
4273        than usual.  */
4274 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4275 else
4276 /* We can't say anything about the dependence. */
4277 return UNCERTAIN_DEP_WEAK;
4278 }
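
/* For illustration, the mixed-addressing case above returns the midpoint

       NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2

   which sits halfway between "uncertain" and "no dependence": more
   optimistic than UNCERTAIN_DEP_WEAK, but short of ruling the dependence
   out entirely.  */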
4279
4280 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4281    This function can handle INSN and ELEM being the same (INSN == ELEM).
4282 It is a convenience wrapper. */
4283 static void
4284 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4285 {
4286 ds_t ds;
4287 bool internal;
4288
4289 if (dep_type == REG_DEP_TRUE)
4290 ds = DEP_TRUE;
4291 else if (dep_type == REG_DEP_OUTPUT)
4292 ds = DEP_OUTPUT;
4293 else if (dep_type == REG_DEP_CONTROL)
4294 ds = DEP_CONTROL;
4295 else
4296 {
4297 gcc_assert (dep_type == REG_DEP_ANTI);
4298 ds = DEP_ANTI;
4299 }
4300
4301 /* When add_dependence is called from inside sched-deps.c, we expect
4302 cur_insn to be non-null. */
4303 internal = cur_insn != NULL;
4304 if (internal)
4305 gcc_assert (insn == cur_insn);
4306 else
4307 cur_insn = insn;
4308
4309 note_dep (elem, ds);
4310 if (!internal)
4311 cur_insn = NULL;
4312 }
4313
4314 /* Return the weakness of speculative type TYPE in the dep_status DS.
4315    No range checking is done here, so malformed input will not ICE.  */
4316 static dw_t
4317 get_dep_weak_1 (ds_t ds, ds_t type)
4318 {
4319 ds = ds & type;
4320
4321 switch (type)
4322 {
4323 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4324 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4325 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4326 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4327 default: gcc_unreachable ();
4328 }
4329
4330 return (dw_t) ds;
4331 }
4332
4333 /* Return weakness of speculative type TYPE in the dep_status DS. */
4334 dw_t
4335 get_dep_weak (ds_t ds, ds_t type)
4336 {
4337 dw_t dw = get_dep_weak_1 (ds, type);
4338
4339 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4340 return dw;
4341 }
4342
4343 /* Return a dep_status that has the same parameters as DS, except that
4344    speculative type TYPE will have weakness DW.  */
4345 ds_t
4346 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4347 {
4348 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4349
4350 ds &= ~type;
4351 switch (type)
4352 {
4353 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4354 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4355 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4356 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4357 default: gcc_unreachable ();
4358 }
4359 return ds;
4360 }
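
/* A sketch of the encoding maintained by the two functions above: a
   weakness round-trips through the status word unchanged,

       ds_t ds = set_dep_weak (0, BEGIN_DATA, dw);
       gcc_assert (get_dep_weak (ds, BEGIN_DATA) == dw);

   for any DW in [MIN_DEP_WEAK, MAX_DEP_WEAK]; each speculative type keeps
   its weakness in its own bit-field at the corresponding *_BITS_OFFSET.  */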
4361
4362 /* Return the join of two dep_statuses DS1 and DS2.
4363 If MAX_P is true then choose the greater probability,
4364 otherwise multiply probabilities.
4365 This function assumes that both DS1 and DS2 contain speculative bits. */
4366 static ds_t
4367 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4368 {
4369 ds_t ds, t;
4370
4371 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4372
4373 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4374
4375 t = FIRST_SPEC_TYPE;
4376 do
4377 {
4378 if ((ds1 & t) && !(ds2 & t))
4379 ds |= ds1 & t;
4380 else if (!(ds1 & t) && (ds2 & t))
4381 ds |= ds2 & t;
4382 else if ((ds1 & t) && (ds2 & t))
4383 {
4384 dw_t dw1 = get_dep_weak (ds1, t);
4385 dw_t dw2 = get_dep_weak (ds2, t);
4386 ds_t dw;
4387
4388 if (!max_p)
4389 {
4390 dw = ((ds_t) dw1) * ((ds_t) dw2);
4391 dw /= MAX_DEP_WEAK;
4392 if (dw < MIN_DEP_WEAK)
4393 dw = MIN_DEP_WEAK;
4394 }
4395 else
4396 {
4397 if (dw1 >= dw2)
4398 dw = dw1;
4399 else
4400 dw = dw2;
4401 }
4402
4403 ds = set_dep_weak (ds, t, (dw_t) dw);
4404 }
4405
4406 if (t == LAST_SPEC_TYPE)
4407 break;
4408 t <<= SPEC_TYPE_SHIFT;
4409 }
4410 while (1);
4411
4412 return ds;
4413 }
4414
4415 /* Return the join of two dep_statuses DS1 and DS2.
4416 This function assumes that both DS1 and DS2 contain speculative bits. */
4417 ds_t
4418 ds_merge (ds_t ds1, ds_t ds2)
4419 {
4420 return ds_merge_1 (ds1, ds2, false);
4421 }
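
/* A worked example of the merge rule above (weakness values hypothetical):
   when a speculative type is present in both statuses and MAX_P is false,
   the weaknesses multiply on the MAX_DEP_WEAK scale,

       dw = (ds_t) dw1 * (ds_t) dw2 / MAX_DEP_WEAK;

   so two weaknesses of MAX_DEP_WEAK / 2 merge to about MAX_DEP_WEAK / 4,
   clamped below at MIN_DEP_WEAK.  */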
4422
4423 /* Return the join of two dep_statuses DS1 and DS2. */
4424 ds_t
4425 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4426 {
4427 ds_t new_status = ds | ds2;
4428
4429 if (new_status & SPECULATIVE)
4430 {
4431 if ((ds && !(ds & SPECULATIVE))
4432 || (ds2 && !(ds2 & SPECULATIVE)))
4433 /* Then this dep can't be speculative. */
4434 new_status &= ~SPECULATIVE;
4435 else
4436 {
4437 	  /* Both are speculative.  Merge the probabilities.  */
4438 if (mem1)
4439 {
4440 dw_t dw;
4441
4442 dw = estimate_dep_weak (mem1, mem2);
4443 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4444 }
4445
4446 if (!ds)
4447 new_status = ds2;
4448 else if (!ds2)
4449 new_status = ds;
4450 else
4451 new_status = ds_merge (ds2, ds);
4452 }
4453 }
4454
4455 return new_status;
4456 }
4457
4458 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4459 probabilities. */
4460 ds_t
4461 ds_max_merge (ds_t ds1, ds_t ds2)
4462 {
4463 if (ds1 == 0 && ds2 == 0)
4464 return 0;
4465
4466 if (ds1 == 0 && ds2 != 0)
4467 return ds2;
4468
4469 if (ds1 != 0 && ds2 == 0)
4470 return ds1;
4471
4472 return ds_merge_1 (ds1, ds2, true);
4473 }
4474
4475 /* Return the probability of speculation success for the speculation
4476 status DS. */
4477 dw_t
4478 ds_weak (ds_t ds)
4479 {
4480 ds_t res = 1, dt;
4481 int n = 0;
4482
4483 dt = FIRST_SPEC_TYPE;
4484 do
4485 {
4486 if (ds & dt)
4487 {
4488 res *= (ds_t) get_dep_weak (ds, dt);
4489 n++;
4490 }
4491
4492 if (dt == LAST_SPEC_TYPE)
4493 break;
4494 dt <<= SPEC_TYPE_SHIFT;
4495 }
4496 while (1);
4497
4498 gcc_assert (n);
4499 while (--n)
4500 res /= MAX_DEP_WEAK;
4501
4502 if (res < MIN_DEP_WEAK)
4503 res = MIN_DEP_WEAK;
4504
4505 gcc_assert (res <= MAX_DEP_WEAK);
4506
4507 return (dw_t) res;
4508 }
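
/* For example, if DS carries exactly two speculative types with weaknesses
   W1 and W2, the function above returns

       W1 * W2 / MAX_DEP_WEAK

   clamped to [MIN_DEP_WEAK, MAX_DEP_WEAK], i.e. the joint success
   probability on the same fixed-point scale.  */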
4509
4510 /* Return a dep status that contains all speculation types of DS. */
4511 ds_t
4512 ds_get_speculation_types (ds_t ds)
4513 {
4514 if (ds & BEGIN_DATA)
4515 ds |= BEGIN_DATA;
4516 if (ds & BE_IN_DATA)
4517 ds |= BE_IN_DATA;
4518 if (ds & BEGIN_CONTROL)
4519 ds |= BEGIN_CONTROL;
4520 if (ds & BE_IN_CONTROL)
4521 ds |= BE_IN_CONTROL;
4522
4523 return ds & SPECULATIVE;
4524 }
4525
4526 /* Return a dep status that contains maximal weakness for each speculation
4527 type present in DS. */
4528 ds_t
4529 ds_get_max_dep_weak (ds_t ds)
4530 {
4531 if (ds & BEGIN_DATA)
4532 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4533 if (ds & BE_IN_DATA)
4534 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4535 if (ds & BEGIN_CONTROL)
4536 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4537 if (ds & BE_IN_CONTROL)
4538 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4539
4540 return ds;
4541 }
4542
4543 /* Dump information about the dependence status S. */
4544 static void
4545 dump_ds (FILE *f, ds_t s)
4546 {
4547 fprintf (f, "{");
4548
4549 if (s & BEGIN_DATA)
4550 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4551 if (s & BE_IN_DATA)
4552 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4553 if (s & BEGIN_CONTROL)
4554 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4555 if (s & BE_IN_CONTROL)
4556 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4557
4558 if (s & HARD_DEP)
4559 fprintf (f, "HARD_DEP; ");
4560
4561 if (s & DEP_TRUE)
4562 fprintf (f, "DEP_TRUE; ");
4563 if (s & DEP_OUTPUT)
4564 fprintf (f, "DEP_OUTPUT; ");
4565 if (s & DEP_ANTI)
4566 fprintf (f, "DEP_ANTI; ");
4567 if (s & DEP_CONTROL)
4568 fprintf (f, "DEP_CONTROL; ");
4569
4570 fprintf (f, "}");
4571 }
4572
4573 DEBUG_FUNCTION void
4574 debug_ds (ds_t s)
4575 {
4576 dump_ds (stderr, s);
4577 fprintf (stderr, "\n");
4578 }
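
/* A status combining data speculation with a true dependence prints along
   the lines of (weakness value hypothetical):

       {BEGIN_DATA: 3; DEP_TRUE; }
*/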
4579
4580 /* Verify that dependence type and status are consistent.
4581 If RELAXED_P is true, then skip dep_weakness checks. */
4582 static void
4583 check_dep (dep_t dep, bool relaxed_p)
4584 {
4585 enum reg_note dt = DEP_TYPE (dep);
4586 ds_t ds = DEP_STATUS (dep);
4587
4588 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4589
4590 if (!(current_sched_info->flags & USE_DEPS_LIST))
4591 {
4592 gcc_assert (ds == 0);
4593 return;
4594 }
4595
4596 /* Check that dependence type contains the same bits as the status. */
4597 if (dt == REG_DEP_TRUE)
4598 gcc_assert (ds & DEP_TRUE);
4599 else if (dt == REG_DEP_OUTPUT)
4600 gcc_assert ((ds & DEP_OUTPUT)
4601 && !(ds & DEP_TRUE));
4602 else if (dt == REG_DEP_ANTI)
4603 gcc_assert ((ds & DEP_ANTI)
4604 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4605 else
4606 gcc_assert (dt == REG_DEP_CONTROL
4607 && (ds & DEP_CONTROL)
4608 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4609
4610 /* HARD_DEP cannot appear in dep_status of a link. */
4611 gcc_assert (!(ds & HARD_DEP));
4612
4613 /* Check that dependence status is set correctly when speculation is not
4614 supported. */
4615 if (!sched_deps_info->generate_spec_deps)
4616 gcc_assert (!(ds & SPECULATIVE));
4617 else if (ds & SPECULATIVE)
4618 {
4619 if (!relaxed_p)
4620 {
4621 ds_t type = FIRST_SPEC_TYPE;
4622
4623 /* Check that dependence weakness is in proper range. */
4624 do
4625 {
4626 if (ds & type)
4627 get_dep_weak (ds, type);
4628
4629 if (type == LAST_SPEC_TYPE)
4630 break;
4631 type <<= SPEC_TYPE_SHIFT;
4632 }
4633 while (1);
4634 }
4635
4636 if (ds & BEGIN_SPEC)
4637 {
4638 /* Only true dependence can be data speculative. */
4639 if (ds & BEGIN_DATA)
4640 gcc_assert (ds & DEP_TRUE);
4641
4642 /* Control dependencies in the insn scheduler are represented by
4643 anti-dependencies, therefore only anti dependence can be
4644 control speculative. */
4645 if (ds & BEGIN_CONTROL)
4646 gcc_assert (ds & DEP_ANTI);
4647 }
4648 else
4649 {
4650 /* Subsequent speculations should resolve true dependencies. */
4651 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4652 }
4653
4654 /* Check that true and anti dependencies can't have other speculative
4655 statuses. */
4656 if (ds & DEP_TRUE)
4657 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4658 /* An output dependence can't be speculative at all. */
4659 gcc_assert (!(ds & DEP_OUTPUT));
4660 if (ds & DEP_ANTI)
4661 gcc_assert (ds & BEGIN_CONTROL);
4662 }
4663 }
4664
4665 /* The following code discovers opportunities to switch a memory reference
4666    and an increment by modifying the address.  We ensure that this is done
4667    only for dependencies whose sole purpose is to show a single register
4668    dependence (tracked with DEP_NONREG and DEP_MULTIPLE), and that every
4669    memory instruction involved is subject to at most one dep that can cause
4670    a pattern change.
4671 
4672    When we discover a suitable dependency, we fill in the dep_replacement
4673    structure to show how to modify the memory reference.  */
4674
4675 /* Holds information about a pair of memory reference and register increment
4676 insns which depend on each other, but could possibly be interchanged. */
4677 struct mem_inc_info
4678 {
4679 rtx_insn *inc_insn;
4680 rtx_insn *mem_insn;
4681
4682 rtx *mem_loc;
4683 /* A register occurring in the memory address for which we wish to break
4684 the dependence. This must be identical to the destination register of
4685 the increment. */
4686 rtx mem_reg0;
4687 /* Any kind of index that is added to that register. */
4688 rtx mem_index;
4689 /* The constant offset used in the memory address. */
4690 HOST_WIDE_INT mem_constant;
4691 /* The constant added in the increment insn. Negated if the increment is
4692 after the memory address. */
4693 HOST_WIDE_INT inc_constant;
4694 /* The source register used in the increment. May be different from mem_reg0
4695 if the increment occurs before the memory address. */
4696 rtx inc_input;
4697 };
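
/* For example, given a reference of the form

       (mem (plus (plus (reg R) (reg I)) (const_int 16)))

   find_mem below fills in mem_reg0 = (reg R), mem_index = (reg I) and
   mem_constant = 16.  (Register names hypothetical.)  */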
4698
4699 /* Verify that the memory location described in MII can be replaced with
4700 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4701 insn remains unchanged by this function. */
4702
4703 static rtx
4704 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4705 {
4706 rtx mem = *mii->mem_loc;
4707 rtx new_mem;
4708
4709   /* Jump through a lot of hoops to keep the attributes up to date.  We
4710      do not want to call one of the change-address variants that take
4711      an offset, even though we know the offset in many cases.  Those
4712      variants assume you are changing where the address points by that
4713      offset.  */
4714 new_mem = replace_equiv_address_nv (mem, new_addr);
4715 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4716 {
4717 if (sched_verbose >= 5)
4718 fprintf (sched_dump, "validation failure\n");
4719 return NULL_RTX;
4720 }
4721
4722 /* Put back the old one. */
4723 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4724
4725 return new_mem;
4726 }
4727
4728 /* Return true if INSN is of the form "a = b + c" where a and b are
4729    regs and c is a constant (possibly negative).  Fill in
4730    information in MII about what is found.
4731 BEFORE_MEM indicates whether the increment is found before or after
4732 a corresponding memory reference. */
4733
4734 static bool
4735 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4736 {
4737 rtx pat = single_set (insn);
4738 rtx src, cst;
4739 bool regs_equal;
4740
4741 if (RTX_FRAME_RELATED_P (insn) || !pat)
4742 return false;
4743
4744 /* Do not allow breaking data dependencies for insns that are marked
4745 with REG_STACK_CHECK. */
4746 if (find_reg_note (insn, REG_STACK_CHECK, NULL))
4747 return false;
4748
4749 /* Result must be single reg. */
4750 if (!REG_P (SET_DEST (pat)))
4751 return false;
4752
4753 if (GET_CODE (SET_SRC (pat)) != PLUS)
4754 return false;
4755
4756 mii->inc_insn = insn;
4757 src = SET_SRC (pat);
4758 mii->inc_input = XEXP (src, 0);
4759
4760 if (!REG_P (XEXP (src, 0)))
4761 return false;
4762
4763 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4764 return false;
4765
4766 cst = XEXP (src, 1);
4767 if (!CONST_INT_P (cst))
4768 return false;
4769 mii->inc_constant = INTVAL (cst);
4770
4771 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4772
4773 if (!before_mem)
4774 {
4775 mii->inc_constant = -mii->inc_constant;
4776 if (!regs_equal)
4777 return false;
4778 }
4779
4780 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4781 {
4782 /* Note that the sign has already been reversed for !before_mem. */
4783 if (STACK_GROWS_DOWNWARD)
4784 return mii->inc_constant > 0;
4785 else
4786 return mii->inc_constant < 0;
4787 }
4788 return true;
4789 }
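
/* For instance, with mem_reg0 == (reg R), an increment

       (set (reg R) (plus (reg R) (const_int 4)))

   found after the memory reference (BEFORE_MEM false) is recorded with
   inc_constant == -4, the sign having been reversed as described above.  */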
4790
4791 /* Once a suitable mem reference has been found and the corresponding data
4792 in MII has been filled in, this function is called to find a suitable
4793 add or inc insn involving the register we found in the memory
4794 reference. */
4795
4796 static bool
4797 find_inc (struct mem_inc_info *mii, bool backwards)
4798 {
4799 sd_iterator_def sd_it;
4800 dep_t dep;
4801
4802 sd_it = sd_iterator_start (mii->mem_insn,
4803 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4804 while (sd_iterator_cond (&sd_it, &dep))
4805 {
4806 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4807 rtx_insn *pro = DEP_PRO (dep);
4808 rtx_insn *con = DEP_CON (dep);
4809 rtx_insn *inc_cand = backwards ? pro : con;
4810 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4811 goto next;
4812 if (parse_add_or_inc (mii, inc_cand, backwards))
4813 {
4814 struct dep_replacement *desc;
4815 df_ref def;
4816 rtx newaddr, newmem;
4817
4818 if (sched_verbose >= 5)
4819 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4820 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4821
4822 	  /* Need to ensure that none of the operands of the inc
4823 instruction are assigned to by the mem insn. */
4824 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4825 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4826 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4827 {
4828 if (sched_verbose >= 5)
4829 fprintf (sched_dump,
4830 "inc conflicts with store failure.\n");
4831 goto next;
4832 }
4833
4834 newaddr = mii->inc_input;
4835 if (mii->mem_index != NULL_RTX)
4836 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4837 mii->mem_index);
4838 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4839 mii->mem_constant + mii->inc_constant);
4840 newmem = attempt_change (mii, newaddr);
4841 if (newmem == NULL_RTX)
4842 goto next;
4843 if (sched_verbose >= 5)
4844 fprintf (sched_dump, "successful address replacement\n");
4845 desc = XCNEW (struct dep_replacement);
4846 DEP_REPLACE (dep) = desc;
4847 desc->loc = mii->mem_loc;
4848 desc->newval = newmem;
4849 desc->orig = *desc->loc;
4850 desc->insn = mii->mem_insn;
4851 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4852 INSN_SPEC_BACK_DEPS (con));
4853 if (backwards)
4854 {
4855 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4856 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4857 REG_DEP_TRUE);
4858 }
4859 else
4860 {
4861 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4862 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4863 REG_DEP_ANTI);
4864 }
4865 return true;
4866 }
4867 next:
4868 sd_iterator_next (&sd_it);
4869 }
4870 return false;
4871 }
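
/* The net effect of a successful replacement, sketched for the forward
   case: given

       mem:  ... (mem (reg R)) ...
       inc:  (set (reg R) (plus (reg R) (const_int 4)))

   with the increment after the reference, the reference is rewritten as

       (mem (plus (reg R) (const_int -4)))

   so the two insns may be interchanged without changing the address that
   is actually accessed.  */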
4872
4873 /* A recursive function that walks ADDRESS_OF_X to find memory references
4874 which could be modified during scheduling. We call find_inc for each
4875 one we find that has a recognizable form. MII holds information about
4876 the pair of memory/increment instructions.
4877 We ensure that every instruction with a memory reference (which will be
4878 the location of the replacement) is assigned at most one breakable
4879 dependency. */
4880
4881 static bool
4882 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4883 {
4884 rtx x = *address_of_x;
4885 enum rtx_code code = GET_CODE (x);
4886 const char *const fmt = GET_RTX_FORMAT (code);
4887 int i;
4888
4889 if (code == MEM)
4890 {
4891 rtx reg0 = XEXP (x, 0);
4892
4893 mii->mem_loc = address_of_x;
4894 mii->mem_index = NULL_RTX;
4895 mii->mem_constant = 0;
4896 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4897 {
4898 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4899 reg0 = XEXP (reg0, 0);
4900 }
4901 if (GET_CODE (reg0) == PLUS)
4902 {
4903 mii->mem_index = XEXP (reg0, 1);
4904 reg0 = XEXP (reg0, 0);
4905 }
4906 if (REG_P (reg0))
4907 {
4908 df_ref use;
4909 int occurrences = 0;
4910
4911 /* Make sure this reg appears only once in this insn. Can't use
4912 count_occurrences since that only works for pseudos. */
4913 FOR_EACH_INSN_USE (use, mii->mem_insn)
4914 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4915 if (++occurrences > 1)
4916 {
4917 if (sched_verbose >= 5)
4918 fprintf (sched_dump, "mem count failure\n");
4919 return false;
4920 }
4921
4922 mii->mem_reg0 = reg0;
4923 return find_inc (mii, true) || find_inc (mii, false);
4924 }
4925 return false;
4926 }
4927
4928 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4929 {
4930 /* If REG occurs inside a MEM used in a bit-field reference,
4931 that is unacceptable. */
4932 return false;
4933 }
4934
4935 /* Time for some deep diving. */
4936 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4937 {
4938 if (fmt[i] == 'e')
4939 {
4940 if (find_mem (mii, &XEXP (x, i)))
4941 return true;
4942 }
4943 else if (fmt[i] == 'E')
4944 {
4945 int j;
4946 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4947 if (find_mem (mii, &XVECEXP (x, i, j)))
4948 return true;
4949 }
4950 }
4951 return false;
4952 }
4953
4954
4955 /* Examine the instructions between HEAD and TAIL and try to find
4956 dependencies that can be broken by modifying one of the patterns. */
4957
4958 void
4959 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4960 {
4961 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4962 int success_in_block = 0;
4963
4964 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4965 {
4966 struct mem_inc_info mii;
4967
4968 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4969 continue;
4970
4971 mii.mem_insn = insn;
4972 if (find_mem (&mii, &PATTERN (insn)))
4973 success_in_block++;
4974 }
4975 if (success_in_block && sched_verbose >= 5)
4976 fprintf (sched_dump, "%d candidates for address modification found.\n",
4977 success_in_block);
4978 }
4979
4980 #endif /* INSN_SCHEDULING */