/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "tm.h"
718f9c0f 24#include "diagnostic-core.h"
e855c69d
AB
25#include "rtl.h"
26#include "tm_p.h"
27#include "hard-reg-set.h"
28#include "regs.h"
29#include "function.h"
30#include "flags.h"
31#include "insn-config.h"
32#include "insn-attr.h"
33#include "except.h"
e855c69d
AB
34#include "recog.h"
35#include "params.h"
36#include "target.h"
37#include "timevar.h"
38#include "tree-pass.h"
39#include "sched-int.h"
40#include "ggc.h"
41#include "tree.h"
42#include "vec.h"
43#include "langhooks.h"
44#include "rtlhooks-def.h"
5936d944 45#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
e855c69d
AB
46
47#ifdef INSN_SCHEDULING
48#include "sel-sched-ir.h"
49/* We don't have to use it except for sel_print_insn. */
50#include "sel-sched-dump.h"

/* A vector holding bb info for whole scheduling pass.  */
VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;

/* A vector holding bb info.  */
VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;

/* A pool for allocating all lists.  */
alloc_pool sched_lists_pool;

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static VEC(loop_p, heap) *loop_nests = NULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;
\f

/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to pool.  */
  regset *vv;

  /* The pointer of VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
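
/* An illustrative sketch of the pool discipline above (assumed usage, not
   from the original sources).  Every regset ever handed out is recorded
   once in VV, and DIFF counts regsets currently checked out:

     regset rs = get_clear_regset_from_pool ();   -- diff becomes 1
     ... compute into RS ...
     return_regset_to_pool (rs);                  -- diff back to 0

   free_regset_pool () checks that DIFF is zero and, with ENABLE_CHECKING,
   compares V against VV to pinpoint regsets that were never returned.  */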

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static rtx_vec_t bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is successor of the insns that lead to EXIT_BLOCK.  */
rtx exit_insn = NULL_RTX;

/* True if, while scheduling the current region, which is a loop, its
   preheader was removed.  */
bool preheader_removed = false;
\f

/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (VEC (expr_history_def, heap) **);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
static void finish_insns (void);
\f
/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}
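
/* A worked example (not from the original sources): ilist_invert relies on
   ilist_add prepending to RES, so a front-to-back walk of L reverses it.
   For L = [i1, i2, i3]:

     res = []                before the loop
     res = [i1]              after visiting i1
     res = [i2, i1]          after visiting i2
     res = [i3, i2, i1]      after visiting i3, returned  */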

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list note pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find a fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add a new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}
\f

/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then make TC as if at the beginning of the scheduler.
   Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  Meaning of CLEAN_P is the same
   as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}
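
/* An illustrative trace (not from the original sources) of the round-trip
   above: there is no direct target copy hook, so the backend's single live
   context is used as scratch space:

     tmp <- snapshot of the current backend context  (create_target_context)
     backend <- FROM                                 (set_target_context)
     TO <- backend                                   (init_target_context)
     backend <- tmp, then free the snapshot

   On exit the backend context is exactly as it was before the call.  */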

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
\f
/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns to
   decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
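
/* An illustrative sketch (assumed usage, not from the original sources):
   a deps context is built up by advancing it over already-processed insns,
   then consulted when analyzing a candidate insn:

     deps_t dc = create_deps_context ();
     -- for each INSN already below the fence:
     --   advance_deps_context (dc, insn);
     -- ... analyze a candidate against DC ...
     delete_deps_context (dc);  */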
\f

/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
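
/* An illustrative note (not from the original sources): a state_t is an
   opaque buffer of dfa_state_size bytes owned by the generated DFA, which
   is why state_copy is a flat memcpy and needs no per-field treatment.  */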
\f

/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  if (s != NULL)
    free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL_RTX /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL_RTX /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 issue_rate, /* issue_more */
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle is propagated from the more probable edge;
   3) all other fields are set to corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to) - reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around of inner loop and the other -
         right through it; in such case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      VEC_free (rtx, gc, executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                          VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
          FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* Would be weird if same insn is successor of several fallthrough
               edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
            FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find edge of second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }

      /* Check if we can choose most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          VEC_free (rtx, gc, executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                              VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            VEC_free (rtx, gc, executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
               VEC(rtx, gc) *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
               bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL_RTX, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int * new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}
\f

/* Functions to work with regset and nop pools.  */

/* Returns the new regset from pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}

#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  return *((const regset *) x) - *((const regset *) xx);
}
#endif

/* Free the regset pool possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
#ifdef ENABLE_CHECKING
  {
    regset *v = regset_pool.v;
    int i = 0;
    int n = regset_pool.n;

    regset *vv = regset_pool.vv;
    int ii = 0;
    int nn = regset_pool.nn;

    int diff = 0;

    gcc_assert (n <= nn);

    /* Sort both vectors so it will be possible to compare them.  */
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

    while (ii < nn)
      {
        if (v[i] == vv[ii])
          i++;
        else
          /* VV[II] was lost.  */
          diff++;

        ii++;
      }

    gcc_assert (diff == regset_pool.diff);
  }
#endif

  /* If this is not true, we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}
1020
b8698a0f
L
1021/* Functions to work with nop pools. NOP insns are used as temporary
1022 placeholders of the insns being scheduled to allow correct update of
e855c69d
AB
1023 the data sets. When update is finished, NOPs are deleted. */
1024
1025/* A vinsn that is used to represent a nop. This vinsn is shared among all
1026 nops sel-sched generates. */
1027static vinsn_t nop_vinsn = NULL;
1028
1029/* Emit a nop before INSN, taking it from pool. */
1030insn_t
1031get_nop_from_pool (insn_t insn)
1032{
1033 insn_t nop;
1034 bool old_p = nop_pool.n != 0;
1035 int flags;
1036
1037 if (old_p)
1038 nop = nop_pool.v[--nop_pool.n];
1039 else
1040 nop = nop_pattern;
1041
1042 nop = emit_insn_before (nop, insn);
1043
1044 if (old_p)
1045 flags = INSN_INIT_TODO_SSID;
1046 else
1047 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1048
1049 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1050 sel_init_new_insn (nop, flags);
1051
1052 return nop;
1053}
1054
1055/* Remove NOP from the instruction stream and return it to the pool. */
1056void
b5b8b0ac 1057return_nop_to_pool (insn_t nop, bool full_tidying)
e855c69d
AB
1058{
1059 gcc_assert (INSN_IN_STREAM_P (nop));
b5b8b0ac 1060 sel_remove_insn (nop, false, full_tidying);
e855c69d
AB
1061
1062 if (nop_pool.n == nop_pool.s)
b8698a0f 1063 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
e855c69d
AB
1064 (nop_pool.s = 2 * nop_pool.s + 1));
1065 nop_pool.v[nop_pool.n++] = nop;
1066}
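
/* An illustrative sketch (not from the original sources) of the nop
   lifecycle during data set updates:

     insn_t nop = get_nop_from_pool (insn);   -- placeholder before INSN
     ... move the original insn, recompute the data sets ...
     return_nop_to_pool (nop, true);          -- placeholder leaves the stream

   A pooled nop keeps its luid, so re-emitting it only needs
   INSN_INIT_TODO_SSID; a freshly created one needs INSN_INIT_TODO_LUID
   as well, as get_nop_from_pool sets up above.  */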

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
\f

/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case some needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, new rtx X and new
   mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, enum machine_mode* nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are ok to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: there is
     no point in using a reg if a const can be used.  Moreover, scheduling
     a const as rhs may lead to mode mismatches, because consts don't have
     modes but they could be merged from branches where the same const is
     used in different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}
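
/* Illustrative examples (not from the original sources) of the filters
   above:

     (set (reg r1) (plus (reg r2) (reg r3)))  -- separable: REG lhs and a
                                                 renamable rhs expression
     (set (reg r1) (const_int 4))             -- rejected: constant rhs
     (set (reg r1) (reg r2))                  -- rejected: bare REG rhs
     (set (mem ...) (reg r2))                 -- rejected: MEM lhs

   A separable insn may have its rhs computed into another register early
   and connected to the original target with a copy later.  */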

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from the INSN.  Use UNIQUE_P for determining the correct
   VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}
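
/* An illustrative trace (not from the original sources): vinsns are
   reference counted via the attach/detach pair, with each expr that holds
   the vinsn counting as one owner:

     vinsn_t vi = vinsn_create (insn, false);  -- VINSN_COUNT == 0
     vinsn_attach (vi);                        -- first owner, count 1
     vinsn_attach (vi);                        -- second owner, count 2
     vinsn_detach (vi);                        -- count 1
     vinsn_detach (vi);                        -- count 0, vinsn_delete runs  */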

/* Returns TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}
\f

/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  PREV_INSN (insn) = after;
  NEXT_INSN (insn) = next;

  NEXT_INSN (after) = insn;
  PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}

\f
/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = VEC_length (expr_history_def, vect);

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = VEC_address (expr_history_def, vect);
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}
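
/* A worked example (not from the original sources): for VECT holding
   entries with uids [2, 5, 9], looking up uid 5 stops at index 1 and
   returns true; looking up uid 7 walks past 2 and 5, stops at 9 (9 > 7)
   and returns false with *INDP == 2, the slot where inserting a uid-7
   entry keeps the vector sorted by uid.  */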

/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found or -1, if no such value is in vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  VEC(expr_history_def, heap) *vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = VEC_index (expr_history_def, vect, ind);

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
  *pvect = vect;
}

/* Free history vector PVECT.  */
static void
free_history_vect (VEC (expr_history_def, heap) **pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! *pvect)
    return;

  for (i = 0;
       VEC_iterate (expr_history_def, *pvect, i, phist);
       i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  VEC_free (expr_history_def, heap, *pvect);
  *pvect = NULL;
}


/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}
\f

/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
           ds_t spec_to_check_ds, int orig_sched_cycle,
           VEC(expr_history_def, heap) *history, signed char target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history)
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr) = NULL;

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  VEC(expr_history_def, heap) *temp = NULL;

  if (EXPR_HISTORY_OF_CHANGES (from))
    {
      unsigned i;
      expr_history_def *phist;

      temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
      for (i = 0;
           VEC_iterate (expr_history_def, temp, i, phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
             EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}

/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
         can only be reached through another one.  In this case,
         we can do better.  */
      if (split_point == NULL)
        {
          int toind, fromind;

          toind = EXPR_ORIG_BB_INDEX (to);
          fromind = EXPR_ORIG_BB_INDEX (from);

          if (toind && toind == fromind)
            /* Do nothing -- everything is done in
               merge_with_other_exprs.  */
            ;
          else
            EXPR_TARGET_AVAILABLE (to) = -1;
        }
      else
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}
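
/* An illustrative note (not from the original sources):
   EXPR_TARGET_AVAILABLE is effectively three-valued: 1 (target register
   known available), 0 (known unavailable) and -1 (unknown).  At a split
   point the bitwise AND above conservatively combines the two known
   values, while away from a split point differing origin blocks force
   the unknown -1.  */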

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
        {
          ds_t record_ds;

          /* When both expressions are speculative, we need to change
             the vinsn first.  */
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
            {
              int res;

              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
              gcc_assert (res >= 0);
            }

          if (split_point != NULL)
            {
              /* Record the change with proper status.  */
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
              record_ds &= ~(old_to_ds & SPECULATIVE);
              record_ds &= ~(old_from_ds & SPECULATIVE);

              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                                      INSN_UID (split_point), TRANS_SPECULATION,
                                      EXPR_VINSN (from), EXPR_VINSN (to),
                                      record_ds);
            }
        }
    }
}


/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
   this is done along different paths.  */
void
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
  int i;
  expr_history_def *phist;

  /* For now, we just set the spec of resulting expr to be minimum of the specs
     of merged exprs.  */
  if (EXPR_SPEC (to) > EXPR_SPEC (from))
    EXPR_SPEC (to) = EXPR_SPEC (from);

  if (split_point)
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
  else
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                EXPR_USEFULNESS (from));

  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);

  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);

  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
    EXPR_ORIG_BB_INDEX (to) = 0;

  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                    EXPR_ORIG_SCHED_CYCLE (from));

  /* We keep this vector sorted.  */
  for (i = 0;
       VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
                    i, phist);
       i++)
    insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                            phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);

  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);

  update_target_availability (to, from, split_point);
  update_speculative_bits (to, from, split_point);
}
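
/* A worked example (not from the original sources) of the rules above at
   a split point: merging FROM = {spec 2, usefulness 40, priority 3} into
   TO = {spec 1, usefulness 60, priority 5} leaves TO = {spec 1 (minimum),
   usefulness 100 (summed across paths), priority 5 (maximum)}.  */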

/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
   are merged from different successors at a split point.  */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
{
  vinsn_t to_vi = EXPR_VINSN (to);
  vinsn_t from_vi = EXPR_VINSN (from);

  gcc_assert (vinsn_equal_p (to_vi, from_vi));

  /* Make sure that speculative pattern is propagated into exprs that
     have non-speculative one.  This will provide us with consistent
     speculative bits and speculative patterns inside expr.  */
  if (EXPR_SPEC_DONE_DS (to) == 0
      && EXPR_SPEC_DONE_DS (from) != 0)
    change_vinsn_in_expr (to, EXPR_VINSN (from));

  merge_expr_data (to, from, split_point);
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
}

/* Clear the information of this EXPR.  */
void
clear_expr (expr_t expr)
{
  vinsn_detach (EXPR_VINSN (expr));
  EXPR_VINSN (expr) = NULL;

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}
1874
1875/* For a given LV_SET, mark EXPR having unavailable target register. */
1876static void
1877set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1878{
1879 if (EXPR_SEPARABLE_P (expr))
1880 {
1881 if (REG_P (EXPR_LHS (expr))
1882 && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
1883 {
b8698a0f
L
1884 /* If it's an insn like r1 = use (r1, ...), and it exists in
1885 different forms in each of the av_sets being merged, we can't say
1886 whether original destination register is available or not.
1887 However, this still works if destination register is not used
e855c69d
AB
1888 in the original expression: if the branch at which LV_SET we're
1889 looking here is not actually 'other branch' in sense that same
1890 expression is available through it (but it can't be determined
1891 at computation stage because of transformations on one of the
1892 branches), it still won't affect the availability.
1893 Liveness of a register somewhere on a code motion path means
1894 it's either read somewhere on a code motion path, live on
1895 'other' branch, live at the point immediately following
1896 the original operation, or is read by the original operation.
1897 The latter case is filtered out in the condition below.
1898 It still doesn't cover the case when register is defined and used
1899 somewhere within the code motion path, and in this case we could
1900 miss a unifying code motion along both branches using a renamed
1901 register, but it won't affect code correctness since upon
1902 an actual code motion a bookkeeping code would be generated. */
1903 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1904 REGNO (EXPR_LHS (expr))))
1905 EXPR_TARGET_AVAILABLE (expr) = -1;
1906 else
1907 EXPR_TARGET_AVAILABLE (expr) = false;
1908 }
1909 }
1910 else
1911 {
1912 unsigned regno;
1913 reg_set_iterator rsi;
1914
1915 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1916 0, regno, rsi)
1917 if (bitmap_bit_p (lv_set, regno))
1918 {
1919 EXPR_TARGET_AVAILABLE (expr) = false;
1920 break;
1921 }
1922
1923 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1924 0, regno, rsi)
1925 if (bitmap_bit_p (lv_set, regno))
1926 {
1927 EXPR_TARGET_AVAILABLE (expr) = false;
1928 break;
1929 }
1930 }
1931}
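
/* An illustrative restatement (a sketch only; the helper below is
   hypothetical) of the tri-state decision above for a separable expr:
   when the LHS register is live, availability becomes -1 ("unknown")
   if the original expression also reads that register, and false
   otherwise.  */
#if 0
static int
target_availability_example (int lhs_live_p, int lhs_also_read_p)
{
  if (!lhs_live_p)
    return 1;                       /* Availability is left intact.  */
  return lhs_also_read_p ? -1 : 0;  /* r1 = use (r1, ...) -> unknown.  */
}
#endif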
1932
1933/* Try to make EXPR speculative. Return 1 when EXPR's pattern
1934 or dependence status has changed, 2 when also the target register
1935 became unavailable, 0 if nothing had to be changed. */
1936int
1937speculate_expr (expr_t expr, ds_t ds)
1938{
1939 int res;
1940 rtx orig_insn_rtx;
1941 rtx spec_pat;
1942 ds_t target_ds, current_ds;
1943
1944 /* Obtain the status we need to put on EXPR. */
1945 target_ds = (ds & SPECULATIVE);
1946 current_ds = EXPR_SPEC_DONE_DS (expr);
1947 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1948
1949 orig_insn_rtx = EXPR_INSN_RTX (expr);
1950
1951 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1952
1953 switch (res)
1954 {
1955 case 0:
1956 EXPR_SPEC_DONE_DS (expr) = ds;
1957 return current_ds != ds ? 1 : 0;
1958
1959 case 1:
1960 {
1961 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1962 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1963
1964 change_vinsn_in_expr (expr, spec_vinsn);
1965 EXPR_SPEC_DONE_DS (expr) = ds;
1966 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1967
1968 /* Do not allow clobbering the address register of speculative
1969 insns. */
1970 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1971 expr_dest_regno (expr)))
1972 {
1973 EXPR_TARGET_AVAILABLE (expr) = false;
1974 return 2;
1975 }
1976
1977 return 1;
1978 }
1979
1980 case -1:
1981 return -1;
1982
1983 default:
1984 gcc_unreachable ();
1985 return -1;
1986 }
1987}
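
/* A hypothetical caller sketch showing how the return codes of
   speculate_expr are meant to be consumed; the cases below merely
   restate the comment above and are not taken from real callers.  */
#if 0
switch (speculate_expr (expr, ds))
  {
  case -1: /* Speculation failed; the expr is unusable here.  */
    break;
  case 0:  /* Nothing had to be changed.  */
    break;
  case 1:  /* Pattern or dependence status changed.  */
    break;
  case 2:  /* Additionally, the target register became unavailable.  */
    break;
  }
#endif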
1988
1989/* Return a destination register, if any, of EXPR. */
1990rtx
1991expr_dest_reg (expr_t expr)
1992{
1993 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
1994
1995 if (dest != NULL_RTX && REG_P (dest))
1996 return dest;
1997
1998 return NULL_RTX;
1999}
2000
2001/* Returns the REGNO of EXPR's destination. */
2002unsigned
2003expr_dest_regno (expr_t expr)
2004{
2005 rtx dest = expr_dest_reg (expr);
2006
2007 gcc_assert (dest != NULL_RTX);
2008 return REGNO (dest);
2009}
2010
2011/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
2012 AV_SET, as having an unavailable target register. */
2013void
2014mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2015{
2016 expr_t expr;
2017 av_set_iterator avi;
2018
2019 FOR_EACH_EXPR (expr, avi, join_set)
2020 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2021 set_unavailable_target_for_expr (expr, lv_set);
2022}
2023\f
2024
2025/* Av set functions. */
2026
2027/* Add a new element to av set SETP.
2028 Return the element added. */
2029static av_set_t
2030av_set_add_element (av_set_t *setp)
2031{
2032 /* Insert at the beginning of the list. */
2033 _list_add (setp);
2034 return *setp;
2035}
2036
2037/* Add EXPR to SETP. */
2038void
2039av_set_add (av_set_t *setp, expr_t expr)
2040{
2041 av_set_t elem;
2042
2043 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2044 elem = av_set_add_element (setp);
2045 copy_expr (_AV_SET_EXPR (elem), expr);
2046}
2047
2048/* Same, but do not copy EXPR. */
2049static void
2050av_set_add_nocopy (av_set_t *setp, expr_t expr)
2051{
2052 av_set_t elem;
2053
2054 elem = av_set_add_element (setp);
2055 *_AV_SET_EXPR (elem) = *expr;
2056}
2057
2058/* Remove expr pointed to by IP from the av_set. */
2059void
2060av_set_iter_remove (av_set_iterator *ip)
2061{
2062 clear_expr (_AV_SET_EXPR (*ip->lp));
2063 _list_iter_remove (ip);
2064}
2065
2066/* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the
2067 sense of vinsn_equal_p function. Return NULL if no such expr
2068 was found in SET. */
2069expr_t
2070av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2071{
2072 expr_t expr;
2073 av_set_iterator i;
2074
2075 FOR_EACH_EXPR (expr, i, set)
2076 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2077 return expr;
2078 return NULL;
2079}
2080
2081/* Same, but also remove the EXPR found. */
2082static expr_t
2083av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2084{
2085 expr_t expr;
2086 av_set_iterator i;
2087
2088 FOR_EACH_EXPR_1 (expr, i, setp)
2089 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2090 {
2091 _list_iter_remove_nofree (&i);
2092 return expr;
2093 }
2094 return NULL;
2095}
2096
2097/* Search for an expr in SET, such that it's equivalent to EXPR in the
2098 sense of vinsn_equal_p function of their vinsns, but not EXPR itself.
2099 Returns NULL if no such expr was found in SET. */
2100static expr_t
2101av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2102{
2103 expr_t cur_expr;
2104 av_set_iterator i;
2105
2106 FOR_EACH_EXPR (cur_expr, i, set)
2107 {
2108 if (cur_expr == expr)
2109 continue;
2110 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2111 return cur_expr;
2112 }
2113
2114 return NULL;
2115}
2116
2117/* If an equivalent expression is already in AVP, remove one of them. */
2118expr_t
2119merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2120{
2121 expr_t expr2;
2122
2123 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2124 if (expr2 != NULL)
2125 {
2126 /* Reset target availability on merge, since taking it only from one
2127 of the exprs would be controversial for different code. */
2128 EXPR_TARGET_AVAILABLE (expr2) = -1;
2129 EXPR_USEFULNESS (expr2) = 0;
2130
2131 merge_expr (expr2, expr, NULL);
2132
2133 /* Fix usefulness, as it should now be REG_BR_PROB_BASE. */
2134 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2135
2136 av_set_iter_remove (ip);
2137 return expr2;
2138 }
2139
2140 return expr;
2141}
2142
2143/* Return true if there is an expr that correlates to VI in SET. */
2144bool
2145av_set_is_in_p (av_set_t set, vinsn_t vi)
2146{
2147 return av_set_lookup (set, vi) != NULL;
2148}
2149
2150/* Return a copy of SET. */
2151av_set_t
2152av_set_copy (av_set_t set)
2153{
2154 expr_t expr;
2155 av_set_iterator i;
2156 av_set_t res = NULL;
2157
2158 FOR_EACH_EXPR (expr, i, set)
2159 av_set_add (&res, expr);
2160
2161 return res;
2162}
2163
2164/* Join two av sets that do not have common elements by attaching second set
2165 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2166 _AV_SET_NEXT of first set's last element). */
2167static void
2168join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2169{
2170 gcc_assert (*to_tailp == NULL);
2171 *to_tailp = *fromp;
2172 *fromp = NULL;
2173}
2174
2175/* Make the set pointed to by TOP be the union of TOP and FROMP. Clear av_set
2176 pointed to by FROMP afterwards. */
2177void
2178av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2179{
2180 expr_t expr1;
2181 av_set_iterator i;
2182
2183 /* Delete from TOP all exprs that are present in FROMP. */
2184 FOR_EACH_EXPR_1 (expr1, i, top)
2185 {
2186 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2187
2188 if (expr2)
2189 {
2190 merge_expr (expr2, expr1, insn);
2191 av_set_iter_remove (&i);
2192 }
2193 }
2194
2195 join_distinct_sets (i.lp, fromp);
2196}
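
/* A small worked example of the union above, assuming vinsn_equal_p
   identifies exprs b and b' as equivalent: the common element is
   merged in place, and the rest of FROMP is spliced onto TOP's tail.  */
#if 0
/* top = {a, b}, fromp = {b', c}  */
av_set_union_and_clear (&top, &fromp, insn);
/* top = {a, merge (b, b'), c}, fromp = NULL  */
#endif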
2197
2198/* Same as above, but also update availability of target register in
2199 TOP judging by TO_LV_SET and FROM_LV_SET. */
2200void
2201av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2202 regset from_lv_set, insn_t insn)
2203{
2204 expr_t expr1;
2205 av_set_iterator i;
2206 av_set_t *to_tailp, in_both_set = NULL;
2207
2208 /* Delete from TOP all exprs that are present in FROMP. */
2209 FOR_EACH_EXPR_1 (expr1, i, top)
2210 {
2211 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2212
2213 if (expr2)
2214 {
2215 /* It may be that the expressions have different destination
2216 registers, in which case we need to check liveness here. */
2217 if (EXPR_SEPARABLE_P (expr1))
2218 {
2219 int regno1 = (REG_P (EXPR_LHS (expr1))
2220 ? (int) expr_dest_regno (expr1) : -1);
2221 int regno2 = (REG_P (EXPR_LHS (expr2))
2222 ? (int) expr_dest_regno (expr2) : -1);
2223
2224 /* ??? We don't have a way to check restrictions for
2225 *other* register on the current path, we did it only
2226 for the current target register. Give up. */
2227 if (regno1 != regno2)
2228 EXPR_TARGET_AVAILABLE (expr2) = -1;
2229 }
2230 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2231 EXPR_TARGET_AVAILABLE (expr2) = -1;
2232
2233 merge_expr (expr2, expr1, insn);
2234 av_set_add_nocopy (&in_both_set, expr2);
2235 av_set_iter_remove (&i);
2236 }
2237 else
2238 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2239 FROM_LV_SET. */
2240 set_unavailable_target_for_expr (expr1, from_lv_set);
2241 }
2242 to_tailp = i.lp;
2243
2244 /* These expressions are not present in TOP. Check liveness
2245 restrictions on TO_LV_SET. */
2246 FOR_EACH_EXPR (expr1, i, *fromp)
2247 set_unavailable_target_for_expr (expr1, to_lv_set);
2248
2249 join_distinct_sets (i.lp, &in_both_set);
2250 join_distinct_sets (to_tailp, fromp);
2251}
2252
2253/* Clear av_set pointed to by SETP. */
2254void
2255av_set_clear (av_set_t *setp)
2256{
2257 expr_t expr;
2258 av_set_iterator i;
2259
2260 FOR_EACH_EXPR_1 (expr, i, setp)
2261 av_set_iter_remove (&i);
2262
2263 gcc_assert (*setp == NULL);
2264}
2265
2266/* Leave only one non-speculative element in SETP. */
2267void
2268av_set_leave_one_nonspec (av_set_t *setp)
2269{
2270 expr_t expr;
2271 av_set_iterator i;
2272 bool has_one_nonspec = false;
2273
2274 /* Keep all speculative exprs, and leave one non-speculative
2275 (the first one). */
2276 FOR_EACH_EXPR_1 (expr, i, setp)
2277 {
2278 if (!EXPR_SPEC_DONE_DS (expr))
2279 {
2280 if (has_one_nonspec)
2281 av_set_iter_remove (&i);
2282 else
2283 has_one_nonspec = true;
2284 }
2285 }
2286}
2287
2288/* Return the N'th element of the SET. */
2289expr_t
2290av_set_element (av_set_t set, int n)
2291{
2292 expr_t expr;
2293 av_set_iterator i;
2294
2295 FOR_EACH_EXPR (expr, i, set)
2296 if (n-- == 0)
2297 return expr;
2298
2299 gcc_unreachable ();
2300 return NULL;
2301}
2302
2303/* Deletes all expressions from AVP that are conditional branches (IFs). */
2304void
2305av_set_substract_cond_branches (av_set_t *avp)
2306{
2307 av_set_iterator i;
2308 expr_t expr;
2309
2310 FOR_EACH_EXPR_1 (expr, i, avp)
2311 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2312 av_set_iter_remove (&i);
2313}
2314
2315/* Multiplies usefulness attribute of each member of av-set *AVP by
2316 value PROB / ALL_PROB. */
2317void
2318av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2319{
2320 av_set_iterator i;
2321 expr_t expr;
2322
2323 FOR_EACH_EXPR (expr, i, av)
2324 EXPR_USEFULNESS (expr) = (all_prob
2325 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2326 : 0);
2327}
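
/* A minimal numeric sketch of the scaling above, assuming
   REG_BR_PROB_BASE == 10000: an expr merged over an edge taken with
   probability 0.3 keeps 30% of its usefulness.  The helper name and
   numbers are made up for illustration.  */
#if 0
static int
split_usefulness_example (int usefulness, int prob, int all_prob)
{
  /* E.g. usefulness = 10000, prob = 3000, all_prob = 10000 -> 3000.  */
  return all_prob ? (usefulness * prob) / all_prob : 0;
}
#endif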
2328
2329/* Leave in AVP only those expressions that are also present
2330 in AV. */
2331void
2332av_set_intersect (av_set_t *avp, av_set_t av)
2333{
2334 av_set_iterator i;
2335 expr_t expr;
2336
2337 FOR_EACH_EXPR_1 (expr, i, avp)
2338 if (av_set_lookup (av, EXPR_VINSN (expr)) == NULL)
2339 av_set_iter_remove (&i);
2340}
2341
2342\f
2343
2344/* Dependence hooks to initialize insn data. */
2345
2346/* This is used in hooks callable from dependence analysis when initializing
2347 instruction's data. */
2348static struct
2349{
2350 /* Where the dependence was found (lhs/rhs). */
2351 deps_where_t where;
2352
2353 /* The actual data object to initialize. */
2354 idata_t id;
2355
2356 /* True when the insn should not be made clonable. */
2357 bool force_unique_p;
2358
2359 /* True when insn should be treated as of type USE, i.e. never renamed. */
2360 bool force_use_p;
2361} deps_init_id_data;
2362
2363
2364/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2365 clonable. */
2366static void
2367setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2368{
2369 int type;
2370
2371 /* Determine whether INSN could be cloned and return appropriate vinsn type.
2372 Those clonable insns which can be separated into lhs and rhs have type SET.
2373 Other clonable insns have type USE. */
2374 type = GET_CODE (insn);
2375
2376 /* Only regular insns could be cloned. */
2377 if (type == INSN && !force_unique_p)
2378 type = SET;
2379 else if (type == JUMP_INSN && simplejump_p (insn))
2380 type = PC;
2381 else if (type == DEBUG_INSN)
2382 type = !force_unique_p ? USE : INSN;
2383
2384 IDATA_TYPE (id) = type;
2385 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2386 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2387 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2388}
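
/* The classification above, restated as a stand-alone sketch; the
   parameter names are made up, and the real code derives the flags
   from the insn itself.  */
#if 0
static int
vinsn_type_example (int code, int force_unique_p, int simplejump_p)
{
  if (code == INSN && !force_unique_p)
    return SET;                          /* Clonable and separable.  */
  if (code == JUMP_INSN && simplejump_p)
    return PC;
  if (code == DEBUG_INSN)
    return force_unique_p ? INSN : USE;
  return code;                           /* Everything else keeps its code.  */
}
#endif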
2389
2390/* Start initializing insn data. */
2391static void
2392deps_init_id_start_insn (insn_t insn)
2393{
2394 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2395
2396 setup_id_for_insn (deps_init_id_data.id, insn,
2397 deps_init_id_data.force_unique_p);
2398 deps_init_id_data.where = DEPS_IN_INSN;
2399}
2400
2401/* Start initializing lhs data. */
2402static void
2403deps_init_id_start_lhs (rtx lhs)
2404{
2405 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2406 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2407
2408 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2409 {
2410 IDATA_LHS (deps_init_id_data.id) = lhs;
2411 deps_init_id_data.where = DEPS_IN_LHS;
2412 }
2413}
2414
2415/* Finish initializing lhs data. */
2416static void
2417deps_init_id_finish_lhs (void)
2418{
2419 deps_init_id_data.where = DEPS_IN_INSN;
2420}
2421
2422/* Note a set of REGNO. */
2423static void
2424deps_init_id_note_reg_set (int regno)
2425{
2426 haifa_note_reg_set (regno);
2427
2428 if (deps_init_id_data.where == DEPS_IN_RHS)
2429 deps_init_id_data.force_use_p = true;
2430
2431 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2432 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2433
2434#ifdef STACK_REGS
2435 /* Make instructions that set stack registers ineligible for
2436 renaming to avoid issues with find_used_regs. */
2437 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2438 deps_init_id_data.force_use_p = true;
2439#endif
2440}
2441
2442/* Note a clobber of REGNO. */
2443static void
2444deps_init_id_note_reg_clobber (int regno)
2445{
2446 haifa_note_reg_clobber (regno);
2447
2448 if (deps_init_id_data.where == DEPS_IN_RHS)
2449 deps_init_id_data.force_use_p = true;
2450
2451 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2452 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2453}
2454
2455/* Note a use of REGNO. */
2456static void
2457deps_init_id_note_reg_use (int regno)
2458{
2459 haifa_note_reg_use (regno);
2460
2461 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2462 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2463}
2464
2465/* Start initializing rhs data. */
2466static void
2467deps_init_id_start_rhs (rtx rhs)
2468{
2469 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2470
2471 /* And there was no sel_deps_reset_to_insn (). */
2472 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2473 {
2474 IDATA_RHS (deps_init_id_data.id) = rhs;
2475 deps_init_id_data.where = DEPS_IN_RHS;
2476 }
2477}
2478
2479/* Finish initializing rhs data. */
2480static void
2481deps_init_id_finish_rhs (void)
2482{
2483 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2484 || deps_init_id_data.where == DEPS_IN_INSN);
2485 deps_init_id_data.where = DEPS_IN_INSN;
2486}
2487
2488/* Finish initializing insn data. */
2489static void
2490deps_init_id_finish_insn (void)
2491{
2492 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2493
2494 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2495 {
2496 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2497 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2498
2499 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2500 || deps_init_id_data.force_use_p)
2501 {
2502 /* This should be a USE, as we don't want to schedule its RHS
2503 separately. However, we still want to have them recorded
2504 for the purposes of substitution. That's why we don't
2505 simply call downgrade_to_use () here. */
2506 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2507 gcc_assert (!lhs == !rhs);
2508
2509 IDATA_TYPE (deps_init_id_data.id) = USE;
2510 }
2511 }
2512
2513 deps_init_id_data.where = DEPS_IN_NOWHERE;
2514}
2515
2516/* This is dependence info used for initializing insn's data. */
2517static struct sched_deps_info_def deps_init_id_sched_deps_info;
2518
2519/* This initializes most of the static part of the above structure. */
2520static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2521 {
2522 NULL,
2523
2524 deps_init_id_start_insn,
2525 deps_init_id_finish_insn,
2526 deps_init_id_start_lhs,
2527 deps_init_id_finish_lhs,
2528 deps_init_id_start_rhs,
2529 deps_init_id_finish_rhs,
2530 deps_init_id_note_reg_set,
2531 deps_init_id_note_reg_clobber,
2532 deps_init_id_note_reg_use,
2533 NULL, /* note_mem_dep */
2534 NULL, /* note_dep */
2535
2536 0, /* use_cselib */
2537 0, /* use_deps_list */
2538 0 /* generate_spec_deps */
2539 };
2540
2541/* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2542 we don't actually need information about lhs and rhs. */
2543static void
2544setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2545{
2546 rtx pat = PATTERN (insn);
2547
2548 if (NONJUMP_INSN_P (insn)
2549 && GET_CODE (pat) == SET
2550 && !force_unique_p)
2551 {
2552 IDATA_RHS (id) = SET_SRC (pat);
2553 IDATA_LHS (id) = SET_DEST (pat);
2554 }
2555 else
2556 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2557}
2558
2559/* Possibly downgrade INSN to USE. */
2560static void
2561maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2562{
2563 bool must_be_use = false;
2564 unsigned uid = INSN_UID (insn);
2565 df_ref *rec;
2566 rtx lhs = IDATA_LHS (id);
2567 rtx rhs = IDATA_RHS (id);
2568
2569 /* We downgrade only SETs. */
2570 if (IDATA_TYPE (id) != SET)
2571 return;
2572
2573 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2574 {
2575 IDATA_TYPE (id) = USE;
2576 return;
2577 }
2578
2579 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2580 {
2581 df_ref def = *rec;
2582
2583 if (DF_REF_INSN (def)
2584 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2585 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2586 {
2587 must_be_use = true;
2588 break;
2589 }
2590
2591#ifdef STACK_REGS
2592 /* Make instructions that set stack registers ineligible for
2593 renaming to avoid issues with find_used_regs. */
2594 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2595 {
2596 must_be_use = true;
2597 break;
2598 }
2599#endif
2600 }
2601
2602 if (must_be_use)
2603 IDATA_TYPE (id) = USE;
2604}
2605
2606/* Setup register sets describing INSN in ID. */
2607static void
2608setup_id_reg_sets (idata_t id, insn_t insn)
2609{
2610 unsigned uid = INSN_UID (insn);
2611 df_ref *rec;
2612 regset tmp = get_clear_regset_from_pool ();
2613
2614 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2615 {
2616 df_ref def = *rec;
2617 unsigned int regno = DF_REF_REGNO (def);
2618
2619 /* Post modifies are treated like clobbers by sched-deps.c. */
2620 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2621 | DF_REF_PRE_POST_MODIFY)))
2622 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2623 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2624 {
2625 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2626
2627#ifdef STACK_REGS
2628 /* For stack registers, treat writes to them as writes
2629 to the first one to be consistent with sched-deps.c. */
2630 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2631 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2632#endif
2633 }
2634 /* Mark special refs that generate read/write def pair. */
2635 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2636 || regno == STACK_POINTER_REGNUM)
2637 bitmap_set_bit (tmp, regno);
2638 }
2639
2640 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2641 {
2642 df_ref use = *rec;
2643 unsigned int regno = DF_REF_REGNO (use);
2644
2645 /* When these refs are met for the first time, skip them, as
2646 these uses are just counterparts of some defs. */
2647 if (bitmap_bit_p (tmp, regno))
2648 bitmap_clear_bit (tmp, regno);
2649 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2650 {
2651 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2652
2653#ifdef STACK_REGS
2654 /* For stack registers, treat reads from them as reads from
2655 the first one to be consistent with sched-deps.c. */
2656 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2657 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2658#endif
2659 }
2660 }
2661
2662 return_regset_to_pool (tmp);
2663}
2664
2665/* Initialize instruction data for INSN in ID using DF's data. */
2666static void
2667init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2668{
2669 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2670
2671 setup_id_for_insn (id, insn, force_unique_p);
2672 setup_id_lhs_rhs (id, insn, force_unique_p);
2673
2674 if (INSN_NOP_P (insn))
2675 return;
2676
2677 maybe_downgrade_id_to_use (id, insn);
2678 setup_id_reg_sets (id, insn);
2679}
2680
2681/* Initialize instruction data for INSN in ID. */
2682static void
2683deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2684{
2685 struct deps_desc _dc, *dc = &_dc;
2686
2687 deps_init_id_data.where = DEPS_IN_NOWHERE;
2688 deps_init_id_data.id = id;
2689 deps_init_id_data.force_unique_p = force_unique_p;
2690 deps_init_id_data.force_use_p = false;
2691
2692 init_deps (dc, false);
2693
2694 memcpy (&deps_init_id_sched_deps_info,
2695 &const_deps_init_id_sched_deps_info,
2696 sizeof (deps_init_id_sched_deps_info));
2697
2698 if (spec_info != NULL)
2699 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2700
2701 sched_deps_info = &deps_init_id_sched_deps_info;
2702
2703 deps_analyze_insn (dc, insn);
2704
2705 free_deps (dc);
2706
2707 deps_init_id_data.id = NULL;
2708}
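
/* The hook-swap pattern used above (and again by has_dependence_p
   further down), outlined as a sketch: install a local hook table,
   run the generic dependence analyzer, and let the hooks fill ID.  */
#if 0
sched_deps_info = &deps_init_id_sched_deps_info; /* Install the hooks.  */
deps_analyze_insn (dc, insn);                    /* Hooks fire from here.  */
#endif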
2709
2710\f
2711
2712/* Implement hooks for collecting fundamental insn properties, such as whether
2713 an insn is an ASM or is within a SCHED_GROUP. */
2714
2715/* True when the "one-time init" data for INSN was already inited. */
2716static bool
2717first_time_insn_init (insn_t insn)
2718{
2719 return INSN_LIVE (insn) == NULL;
2720}
2721
2722/* Hash an entry in a transformed_insns hashtable. */
2723static hashval_t
2724hash_transformed_insns (const void *p)
2725{
2726 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2727}
2728
2729/* Compare the entries in a transformed_insns hashtable. */
2730static int
2731eq_transformed_insns (const void *p, const void *q)
2732{
2733 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2734 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2735
2736 if (INSN_UID (i1) == INSN_UID (i2))
2737 return 1;
2738 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2739}
2740
2741/* Free an entry in a transformed_insns hashtable. */
2742static void
2743free_transformed_insns (void *p)
2744{
2745 struct transformed_insns *pti = (struct transformed_insns *) p;
2746
2747 vinsn_detach (pti->vinsn_old);
2748 vinsn_detach (pti->vinsn_new);
2749 free (pti);
2750}
2751
2752/* Init the s_i_d data for INSN which should be inited just once, when
2753 we first see the insn. */
2754static void
2755init_first_time_insn_data (insn_t insn)
2756{
2757 /* This should not be set if this is the first time we init data for
2758 insn. */
2759 gcc_assert (first_time_insn_init (insn));
2760
2761 /* These are needed for nops too. */
2762 INSN_LIVE (insn) = get_regset_from_pool ();
2763 INSN_LIVE_VALID_P (insn) = false;
2764
2765 if (!INSN_NOP_P (insn))
2766 {
2767 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2768 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2769 INSN_TRANSFORMED_INSNS (insn)
2770 = htab_create (16, hash_transformed_insns,
2771 eq_transformed_insns, free_transformed_insns);
2772 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2773 }
2774}
2775
2776/* Free almost all above data for INSN that is scheduled already.
2777 Used for extra-large basic blocks. */
2778void
2779free_data_for_scheduled_insn (insn_t insn)
2780{
2781 gcc_assert (! first_time_insn_init (insn));
2782
2783 if (! INSN_ANALYZED_DEPS (insn))
2784 return;
2785
2786 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2787 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2788 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2789
2790 /* This is allocated only for bookkeeping insns. */
2791 if (INSN_ORIGINATORS (insn))
2792 BITMAP_FREE (INSN_ORIGINATORS (insn));
2793 free_deps (&INSN_DEPS_CONTEXT (insn));
2794
2795 INSN_ANALYZED_DEPS (insn) = NULL;
2796
2797 /* Clear the readonly flag so we would ICE when trying to recalculate
2798 the deps context (as we believe that it should not happen). */
2799 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2800}
2801
2802/* Free the same data as above for INSN. */
2803static void
2804free_first_time_insn_data (insn_t insn)
2805{
2806 gcc_assert (! first_time_insn_init (insn));
2807
2808 free_data_for_scheduled_insn (insn);
2809 return_regset_to_pool (INSN_LIVE (insn));
2810 INSN_LIVE (insn) = NULL;
2811 INSN_LIVE_VALID_P (insn) = false;
2812}
2813
2814/* Initialize region-scope data structures for basic blocks. */
2815static void
2816init_global_and_expr_for_bb (basic_block bb)
2817{
2818 if (sel_bb_empty_p (bb))
2819 return;
2820
2821 invalidate_av_set (bb);
2822}
2823
2824/* Data for global dependency analysis (to initialize CANT_MOVE and
2825 SCHED_GROUP_P). */
2826static struct
2827{
2828 /* Previous insn. */
2829 insn_t prev_insn;
2830} init_global_data;
2831
2832/* Determine if INSN is in the sched_group, is an asm or should not be
2833 cloned. After that initialize its expr. */
2834static void
2835init_global_and_expr_for_insn (insn_t insn)
2836{
2837 if (LABEL_P (insn))
2838 return;
2839
2840 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2841 {
2842 init_global_data.prev_insn = NULL_RTX;
2843 return;
2844 }
2845
2846 gcc_assert (INSN_P (insn));
2847
2848 if (SCHED_GROUP_P (insn))
2849 /* Setup a sched_group. */
2850 {
2851 insn_t prev_insn = init_global_data.prev_insn;
2852
2853 if (prev_insn)
2854 INSN_SCHED_NEXT (prev_insn) = insn;
2855
2856 init_global_data.prev_insn = insn;
2857 }
2858 else
2859 init_global_data.prev_insn = NULL_RTX;
2860
2861 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2862 || asm_noperands (PATTERN (insn)) >= 0)
2863 /* Mark INSN as an asm. */
2864 INSN_ASM_P (insn) = true;
2865
2866 {
2867 bool force_unique_p;
2868 ds_t spec_done_ds;
2869
2870 /* Certain instructions cannot be cloned, and frame related insns and
2871 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2872 their block. */
2873 if (prologue_epilogue_contains (insn))
2874 {
2875 if (RTX_FRAME_RELATED_P (insn))
2876 CANT_MOVE (insn) = 1;
2877 else
2878 {
2879 rtx note;
2880 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2881 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2882 && ((enum insn_note) INTVAL (XEXP (note, 0))
2883 == NOTE_INSN_EPILOGUE_BEG))
2884 {
2885 CANT_MOVE (insn) = 1;
2886 break;
2887 }
2888 }
2889 force_unique_p = true;
2890 }
2891 else
2892 if (CANT_MOVE (insn)
2893 || INSN_ASM_P (insn)
2894 || SCHED_GROUP_P (insn)
2895 /* Exception handling insns are always unique. */
2896 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2897 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
2898 || control_flow_insn_p (insn))
2899 force_unique_p = true;
2900 else
2901 force_unique_p = false;
2902
2903 if (targetm.sched.get_insn_spec_ds)
2904 {
2905 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2906 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2907 }
2908 else
2909 spec_done_ds = 0;
2910
2911 /* Initialize INSN's expr. */
2912 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2913 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2914 spec_done_ds, 0, 0, NULL, true, false, false, false,
2915 CANT_MOVE (insn));
2916 }
2917
2918 init_first_time_insn_data (insn);
2919}
2920
2921/* Scan the region and initialize instruction data for basic blocks BBS. */
2922void
2923sel_init_global_and_expr (bb_vec_t bbs)
2924{
2925 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
2926 const struct sched_scan_info_def ssi =
2927 {
2928 NULL, /* extend_bb */
2929 init_global_and_expr_for_bb, /* init_bb */
2930 extend_insn_data, /* extend_insn */
2931 init_global_and_expr_for_insn /* init_insn */
2932 };
2933
2934 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2935}
2936
2937/* Finalize region-scope data structures for basic blocks. */
2938static void
2939finish_global_and_expr_for_bb (basic_block bb)
2940{
2941 av_set_clear (&BB_AV_SET (bb));
2942 BB_AV_LEVEL (bb) = 0;
2943}
2944
2945/* Finalize INSN's data. */
2946static void
2947finish_global_and_expr_insn (insn_t insn)
2948{
2949 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
2950 return;
2951
2952 gcc_assert (INSN_P (insn));
2953
2954 if (INSN_LUID (insn) > 0)
2955 {
2956 free_first_time_insn_data (insn);
2957 INSN_WS_LEVEL (insn) = 0;
2958 CANT_MOVE (insn) = 0;
2959
2960 /* We can no longer assert this, as vinsns of this insn could be
2961 easily live in other insn's caches. This should be changed to
2962 a counter-like approach among all vinsns. */
2963 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
2964 clear_expr (INSN_EXPR (insn));
2965 }
2966}
2967
2968/* Finalize per instruction data for the whole region. */
2969void
2970sel_finish_global_and_expr (void)
2971{
2972 {
2973 bb_vec_t bbs;
2974 int i;
2975
2976 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
2977
2978 for (i = 0; i < current_nr_blocks; i++)
2979 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
2980
2981 /* Clear AV_SETs and INSN_EXPRs. */
2982 {
2983 const struct sched_scan_info_def ssi =
2984 {
2985 NULL, /* extend_bb */
2986 finish_global_and_expr_for_bb, /* init_bb */
2987 NULL, /* extend_insn */
2988 finish_global_and_expr_insn /* init_insn */
2989 };
2990
2991 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2992 }
2993
2994 VEC_free (basic_block, heap, bbs);
2995 }
2996
2997 finish_insns ();
2998}
2999\f
3000
3001/* In the below hooks, we merely calculate whether or not a dependence
3002 exists, and in what part of insn. However, we will need more data
3003 when we'll start caching dependence requests. */
3004
3005/* Container to hold information for dependency analysis. */
3006static struct
3007{
3008 deps_t dc;
3009
3010 /* A variable to track which part of rtx we are scanning in
3011 sched-deps.c: sched_analyze_insn (). */
3012 deps_where_t where;
3013
3014 /* Current producer. */
3015 insn_t pro;
3016
3017 /* Current consumer. */
3018 vinsn_t con;
3019
3020 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3021 X is from { INSN, LHS, RHS }. */
3022 ds_t has_dep_p[DEPS_IN_NOWHERE];
3023} has_dependence_data;
3024
3025/* Start analyzing dependencies of INSN. */
3026static void
3027has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3028{
3029 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3030
3031 has_dependence_data.where = DEPS_IN_INSN;
3032}
3033
3034/* Finish analyzing dependencies of an insn. */
3035static void
3036has_dependence_finish_insn (void)
3037{
3038 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3039
3040 has_dependence_data.where = DEPS_IN_NOWHERE;
3041}
3042
3043/* Start analyzing dependencies of LHS. */
3044static void
3045has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3046{
3047 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3048
3049 if (VINSN_LHS (has_dependence_data.con) != NULL)
3050 has_dependence_data.where = DEPS_IN_LHS;
3051}
3052
3053/* Finish analyzing dependencies of an lhs. */
3054static void
3055has_dependence_finish_lhs (void)
3056{
3057 has_dependence_data.where = DEPS_IN_INSN;
3058}
3059
3060/* Start analyzing dependencies of RHS. */
3061static void
3062has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3063{
3064 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3065
3066 if (VINSN_RHS (has_dependence_data.con) != NULL)
3067 has_dependence_data.where = DEPS_IN_RHS;
3068}
3069
3070/* Finish analyzing dependencies of an rhs. */
3071static void
3072has_dependence_finish_rhs (void)
3073{
3074 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3075 || has_dependence_data.where == DEPS_IN_INSN);
3076
3077 has_dependence_data.where = DEPS_IN_INSN;
3078}
3079
3080/* Note a set of REGNO. */
3081static void
3082has_dependence_note_reg_set (int regno)
3083{
3084 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3085
3086 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3087 VINSN_INSN_RTX
3088 (has_dependence_data.con)))
3089 {
3090 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3091
3092 if (reg_last->sets != NULL
3093 || reg_last->clobbers != NULL)
3094 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3095
3096 if (reg_last->uses)
3097 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3098 }
3099}
3100
3101/* Note a clobber of REGNO. */
3102static void
3103has_dependence_note_reg_clobber (int regno)
3104{
3105 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3106
3107 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3108 VINSN_INSN_RTX
3109 (has_dependence_data.con)))
3110 {
3111 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3112
3113 if (reg_last->sets)
3114 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3115
3116 if (reg_last->uses)
3117 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3118 }
3119}
3120
3121/* Note a use of REGNO. */
3122static void
3123has_dependence_note_reg_use (int regno)
3124{
3125 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3126
3127 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3128 VINSN_INSN_RTX
3129 (has_dependence_data.con)))
3130 {
3131 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3132
3133 if (reg_last->sets)
3134 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3135
3136 if (reg_last->clobbers)
3137 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3138
3139 /* Handle BE_IN_SPEC. */
3140 if (reg_last->uses)
3141 {
3142 ds_t pro_spec_checked_ds;
3143
3144 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3145 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3146
3147 if (pro_spec_checked_ds != 0)
3148 /* Merge BE_IN_SPEC bits into *DSP. */
3149 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3150 NULL_RTX, NULL_RTX);
3151 }
3152 }
3153}
3154
3155/* Note a memory dependence. */
3156static void
3157has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3158 rtx pending_mem ATTRIBUTE_UNUSED,
3159 insn_t pending_insn ATTRIBUTE_UNUSED,
3160 ds_t ds ATTRIBUTE_UNUSED)
3161{
3162 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3163 VINSN_INSN_RTX (has_dependence_data.con)))
3164 {
3165 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3166
3167 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3168 }
3169}
3170
3171/* Note a dependence. */
3172static void
3173has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3174 ds_t ds ATTRIBUTE_UNUSED)
3175{
3176 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3177 VINSN_INSN_RTX (has_dependence_data.con)))
3178 {
3179 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3180
3181 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3182 }
3183}
3184
3185/* Mark the insn as having a hard dependence that prevents speculation. */
3186void
3187sel_mark_hard_insn (rtx insn)
3188{
3189 int i;
3190
3191 /* Only work when we're in has_dependence_p mode.
3192 ??? This is a hack, this should actually be a hook. */
3193 if (!has_dependence_data.dc || !has_dependence_data.pro)
3194 return;
3195
3196 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3197 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3198
3199 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3200 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3201}
3202
3203/* This structure holds the hooks for the dependency analysis used when
3204 actually processing dependencies in the scheduler. */
3205static struct sched_deps_info_def has_dependence_sched_deps_info;
3206
3207/* This initializes most of the fields of the above structure. */
3208static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3209 {
3210 NULL,
3211
3212 has_dependence_start_insn,
3213 has_dependence_finish_insn,
3214 has_dependence_start_lhs,
3215 has_dependence_finish_lhs,
3216 has_dependence_start_rhs,
3217 has_dependence_finish_rhs,
3218 has_dependence_note_reg_set,
3219 has_dependence_note_reg_clobber,
3220 has_dependence_note_reg_use,
3221 has_dependence_note_mem_dep,
3222 has_dependence_note_dep,
3223
3224 0, /* use_cselib */
3225 0, /* use_deps_list */
3226 0 /* generate_spec_deps */
3227 };
3228
3229/* Initialize has_dependence_sched_deps_info with extra spec field. */
3230static void
3231setup_has_dependence_sched_deps_info (void)
3232{
3233 memcpy (&has_dependence_sched_deps_info,
3234 &const_has_dependence_sched_deps_info,
3235 sizeof (has_dependence_sched_deps_info));
3236
3237 if (spec_info != NULL)
3238 has_dependence_sched_deps_info.generate_spec_deps = 1;
3239
3240 sched_deps_info = &has_dependence_sched_deps_info;
3241}
3242
3243/* Remove all dependences found and recorded in has_dependence_data array. */
3244void
3245sel_clear_has_dependence (void)
3246{
3247 int i;
3248
3249 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3250 has_dependence_data.has_dep_p[i] = 0;
3251}
3252
3253/* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3254 to the dependence information array in HAS_DEP_PP. */
3255ds_t
3256has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3257{
3258 int i;
3259 ds_t ds;
3260 struct deps_desc *dc;
3261
3262 if (INSN_SIMPLEJUMP_P (pred))
3263 /* Unconditional jump is just a transfer of control flow.
3264 Ignore it. */
3265 return false;
3266
3267 dc = &INSN_DEPS_CONTEXT (pred);
3268
3269 /* We init this field lazily. */
3270 if (dc->reg_last == NULL)
3271 init_deps_reg_last (dc);
3272
3273 if (!dc->readonly)
3274 {
3275 has_dependence_data.pro = NULL;
3276 /* Initialize empty dep context with information about PRED. */
3277 advance_deps_context (dc, pred);
3278 dc->readonly = 1;
3279 }
3280
3281 has_dependence_data.where = DEPS_IN_NOWHERE;
3282 has_dependence_data.pro = pred;
3283 has_dependence_data.con = EXPR_VINSN (expr);
3284 has_dependence_data.dc = dc;
3285
3286 sel_clear_has_dependence ();
3287
3288 /* Now catch all dependencies that would be generated between PRED and
3289 INSN. */
3290 setup_has_dependence_sched_deps_info ();
3291 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3292 has_dependence_data.dc = NULL;
3293
3294 /* When a barrier was found, set DEPS_IN_INSN bits. */
3295 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3296 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3297 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3298 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3299
3300 /* Do not allow stores to memory to move through checks. Currently
3301 we don't move this to sched-deps.c as the check doesn't have
3302 obvious places to which this dependence can be attached.
3303 FIXME: this should go to a hook. */
3304 if (EXPR_LHS (expr)
3305 && MEM_P (EXPR_LHS (expr))
3306 && sel_insn_is_speculation_check (pred))
3307 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3308
3309 *has_dep_pp = has_dependence_data.has_dep_p;
3310 ds = 0;
3311 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3312 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3313 NULL_RTX, NULL_RTX);
3314
3315 return ds;
3316}
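
/* A hypothetical usage sketch: a zero result means PRED does not block
   EXPR at all, while the per-part array says whether a conflict sits in
   the whole insn, its lhs, or its rhs (the rhs-only case is where a
   transformation could still help).  */
#if 0
ds_t *has_dep_p;
ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);
if (full_ds == 0)
  ; /* EXPR can be moved up through PRED as is.  */
else if (!has_dep_p[DEPS_IN_INSN] && has_dep_p[DEPS_IN_RHS])
  ; /* Only the rhs conflicts: substitution may still apply.  */
#endif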
3317\f
3318
3319/* Dependence hooks implementation that checks dependence latency constraints
3320 on the insns being scheduled. The entry point for these routines is
3321 tick_check_p predicate. */
3322
3323static struct
3324{
3325 /* An expr we are currently checking. */
3326 expr_t expr;
3327
3328 /* A minimal cycle for its scheduling. */
3329 int cycle;
3330
3331 /* Whether we have seen a true dependence while checking. */
3332 bool seen_true_dep_p;
3333} tick_check_data;
3334
3335/* Update minimal scheduling cycle for tick_check_insn given that it depends
3336 on PRO with status DS and weight DW. */
3337static void
3338tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3339{
3340 expr_t con_expr = tick_check_data.expr;
3341 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3342
3343 if (con_insn != pro_insn)
3344 {
3345 enum reg_note dt;
3346 int tick;
3347
3348 if (/* PROducer was removed from above due to pipelining. */
3349 !INSN_IN_STREAM_P (pro_insn)
3350 /* Or PROducer was originally on the next iteration regarding the
3351 CONsumer. */
3352 || (INSN_SCHED_TIMES (pro_insn)
3353 - EXPR_SCHED_TIMES (con_expr)) > 1)
3354 /* Don't count this dependence. */
3355 return;
3356
3357 dt = ds_to_dt (ds);
3358 if (dt == REG_DEP_TRUE)
3359 tick_check_data.seen_true_dep_p = true;
3360
3361 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3362
3363 {
3364 dep_def _dep, *dep = &_dep;
3365
3366 init_dep (dep, pro_insn, con_insn, dt);
3367
3368 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3369 }
3370
3371 /* When there are several kinds of dependencies between pro and con,
3372 only REG_DEP_TRUE should be taken into account. */
3373 if (tick > tick_check_data.cycle
3374 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3375 tick_check_data.cycle = tick;
3376 }
3377}
3378
3379/* An implementation of note_dep hook. */
3380static void
3381tick_check_note_dep (insn_t pro, ds_t ds)
3382{
3383 tick_check_dep_with_dw (pro, ds, 0);
3384}
3385
3386/* An implementation of note_mem_dep hook. */
3387static void
3388tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3389{
3390 dw_t dw;
3391
3392 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3393 ? estimate_dep_weak (mem1, mem2)
3394 : 0);
3395
3396 tick_check_dep_with_dw (pro, ds, dw);
3397}
3398
3399/* This structure contains hooks for dependence analysis used when determining
3400 whether an insn is ready for scheduling. */
3401static struct sched_deps_info_def tick_check_sched_deps_info =
3402 {
3403 NULL,
3404
3405 NULL,
3406 NULL,
3407 NULL,
3408 NULL,
3409 NULL,
3410 NULL,
3411 haifa_note_reg_set,
3412 haifa_note_reg_clobber,
3413 haifa_note_reg_use,
3414 tick_check_note_mem_dep,
3415 tick_check_note_dep,
3416
3417 0, 0, 0
3418 };
3419
3420/* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3421 scheduled. Return 0 if all data from producers in DC is ready. */
3422int
3423tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3424{
3425 int cycles_left;
3426 /* Initialize variables. */
3427 tick_check_data.expr = expr;
3428 tick_check_data.cycle = 0;
3429 tick_check_data.seen_true_dep_p = false;
3430 sched_deps_info = &tick_check_sched_deps_info;
3431
3432 gcc_assert (!dc->readonly);
3433 dc->readonly = 1;
3434 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3435 dc->readonly = 0;
3436
3437 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3438
3439 return cycles_left >= 0 ? cycles_left : 0;
3440}
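
/* A numeric sketch of the result above, with hypothetical numbers: if
   the latest true-dependence producer was issued on cycle 5 with cost 2,
   tick becomes 5 + 2 = 7; with the fence at cycle 6 the function
   returns 7 - 6 = 1, i.e. EXPR must wait one more cycle.  */
#if 0
int tick = 5 + 2;         /* INSN_SCHED_CYCLE (pro) + dep_cost.  */
int cycles_left = 7 - 6;  /* tick - FENCE_CYCLE (fence) -> 1.  */
#endif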
3441\f
3442
3443/* Functions to work with insns. */
3444
3445/* Returns true if LHS of INSN is the same as DEST of an insn
3446 being moved. */
3447bool
3448lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3449{
3450 rtx lhs = INSN_LHS (insn);
3451
3452 if (lhs == NULL || dest == NULL)
3453 return false;
3454
3455 return rtx_equal_p (lhs, dest);
3456}
3457
3458/* Return s_i_d entry of INSN. Callable from debugger. */
3459sel_insn_data_def
3460insn_sid (insn_t insn)
3461{
3462 return *SID (insn);
3463}
3464
3465/* True when INSN is a speculative check. We can tell this by looking
3466 at the data structures of the selective scheduler, not by examining
3467 the pattern. */
3468bool
3469sel_insn_is_speculation_check (rtx insn)
3470{
3471 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3472}
3473
3474/* Extracts machine mode MODE and destination location DST_LOC
3475 for given INSN. */
3476void
3477get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3478{
3479 rtx pat = PATTERN (insn);
3480
3481 gcc_assert (dst_loc);
3482 gcc_assert (GET_CODE (pat) == SET);
3483
3484 *dst_loc = SET_DEST (pat);
3485
3486 gcc_assert (*dst_loc);
3487 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3488
3489 if (mode)
3490 *mode = GET_MODE (*dst_loc);
3491}
3492
3493/* Returns true when moving through JUMP will result in bookkeeping
3494 creation. */
3495bool
3496bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3497{
3498 insn_t succ;
3499 succ_iterator si;
3500
3501 FOR_EACH_SUCC (succ, si, jump)
3502 if (sel_num_cfg_preds_gt_1 (succ))
3503 return true;
3504
3505 return false;
3506}
3507
3508/* Return 'true' if INSN is the only one in its basic block. */
3509static bool
3510insn_is_the_only_one_in_bb_p (insn_t insn)
3511{
3512 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3513}
3514
3515#ifdef ENABLE_CHECKING
3516/* Check that the region we're scheduling still has at most one
3517 backedge. */
3518static void
3519verify_backedges (void)
3520{
3521 if (pipelining_p)
3522 {
3523 int i, n = 0;
3524 edge e;
3525 edge_iterator ei;
3526
3527 for (i = 0; i < current_nr_blocks; i++)
3528 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3529 if (in_current_region_p (e->dest)
3530 && BLOCK_TO_BB (e->dest->index) < i)
3531 n++;
3532
3533 gcc_assert (n <= 1);
3534 }
3535}
3536#endif
3537\f
3538
3539/* Functions to work with control flow. */
3540
3541/* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
3542 are sorted in topological order (it might have been invalidated by
3543 redirecting an edge). */
3544static void
3545sel_recompute_toporder (void)
3546{
3547 int i, n, rgn;
3548 int *postorder, n_blocks;
3549
3550 postorder = XALLOCAVEC (int, n_basic_blocks);
3551 n_blocks = post_order_compute (postorder, false, false);
3552
3553 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3554 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3555 if (CONTAINING_RGN (postorder[i]) == rgn)
3556 {
3557 BLOCK_TO_BB (postorder[i]) = n;
3558 BB_TO_BLOCK (n) = postorder[i];
3559 n++;
3560 }
3561
3562 /* Assert that we updated info for all blocks. We may miss some blocks if
3563 this function is called when redirecting an edge made a block
3564 unreachable, but that block is not deleted yet. */
3565 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3566}
3567
3568/* Tidy the possibly empty block BB. */
3569static bool
3570maybe_tidy_empty_bb (basic_block bb)
3571{
3572 basic_block succ_bb, pred_bb;
3573 edge e;
3574 edge_iterator ei;
3575 bool rescan_p;
3576
3577 /* Keep empty bb only if this block immediately precedes EXIT and
3578 has an incoming non-fallthrough edge, or it has no predecessors or
3579 successors. Otherwise remove it. */
3580 if (!sel_bb_empty_p (bb)
3581 || (single_succ_p (bb)
3582 && single_succ (bb) == EXIT_BLOCK_PTR
3583 && (!single_pred_p (bb)
3584 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3585 || EDGE_COUNT (bb->preds) == 0
3586 || EDGE_COUNT (bb->succs) == 0)
3587 return false;
3588
3589 /* Do not attempt to redirect complex edges. */
3590 FOR_EACH_EDGE (e, ei, bb->preds)
3591 if (e->flags & EDGE_COMPLEX)
3592 return false;
3593
3594 free_data_sets (bb);
3595
3596 /* Do not delete BB if it has more than one successor.
3597 That can occur when we are moving a jump. */
3598 if (!single_succ_p (bb))
3599 {
3600 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3601 sel_merge_blocks (bb->prev_bb, bb);
3602 return true;
3603 }
3604
3605 succ_bb = single_succ (bb);
3606 rescan_p = true;
3607 pred_bb = NULL;
3608
3609 /* Redirect all non-fallthru edges to the next bb. */
3610 while (rescan_p)
3611 {
3612 rescan_p = false;
3613
3614 FOR_EACH_EDGE (e, ei, bb->preds)
3615 {
3616 pred_bb = e->src;
3617
3618 if (!(e->flags & EDGE_FALLTHRU))
3619 {
3620 /* We cannot invalidate computed topological order by moving
3621 the edge destination block (E->SUCC) along a fallthru edge. */
3622 sel_redirect_edge_and_branch (e, succ_bb);
3623 rescan_p = true;
3624 break;
3625 }
3626 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3627 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3628 still have to adjust it. */
3629 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3630 {
3631 /* If possible, try to remove the unneeded conditional jump. */
3632 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3633 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3634 {
3635 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3636 tidy_fallthru_edge (e);
3637 }
3638 else
3639 sel_redirect_edge_and_branch (e, succ_bb);
3640 rescan_p = true;
3641 break;
3642 }
3643 }
3644 }
3645
3646 if (can_merge_blocks_p (bb->prev_bb, bb))
3647 sel_merge_blocks (bb->prev_bb, bb);
3648 else
3649 {
3650 /* This is a block without a fallthru predecessor. Just delete it. */
3651 gcc_assert (pred_bb != NULL);
3652
3653 if (in_current_region_p (pred_bb))
3654 move_bb_info (pred_bb, bb);
3655 remove_empty_bb (bb, true);
3656 }
3657
3658 return true;
3659}
3660
3661/* Tidy the control flow after we have removed original insn from
3662 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3663 is true, also try to optimize control flow on non-empty blocks. */
3664bool
3665tidy_control_flow (basic_block xbb, bool full_tidying)
3666{
3667 bool changed = true;
3668 insn_t first, last;
3669
3670 /* First check whether XBB is empty. */
3671 changed = maybe_tidy_empty_bb (xbb);
3672 if (changed || !full_tidying)
3673 return changed;
3674
3675 /* Check if there is an unnecessary jump left after the insn. */
3676 if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
3677 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3678 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3679 {
3680 if (sel_remove_insn (BB_END (xbb), false, false))
3681 return true;
3682 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3683 }
3684
3685 first = sel_bb_head (xbb);
3686 last = sel_bb_end (xbb);
3687 if (MAY_HAVE_DEBUG_INSNS)
3688 {
3689 if (first != last && DEBUG_INSN_P (first))
3690 do
3691 first = NEXT_INSN (first);
3692 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3693
3694 if (first != last && DEBUG_INSN_P (last))
3695 do
3696 last = PREV_INSN (last);
3697 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3698 }
3699 /* Check if there is an unnecessary jump in the previous basic block, leading
3700 to the next basic block, that is left after removing INSN from the stream.
3701 If so, remove that jump and redirect the edge to the current
3702 basic block (where INSN was before deletion). This way,
3703 when the NOP is deleted several instructions later together with its
3704 basic block, we will not get a jump to the next instruction, which
3705 can be harmful. */
3706 if (first == last
3707 && !sel_bb_empty_p (xbb)
3708 && INSN_NOP_P (last)
3709 /* Flow goes fallthru from current block to the next. */
3710 && EDGE_COUNT (xbb->succs) == 1
3711 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3712 /* When successor is an EXIT block, it may not be the next block. */
3713 && single_succ (xbb) != EXIT_BLOCK_PTR
3714 /* And unconditional jump in previous basic block leads to
3715 next basic block of XBB and this jump can be safely removed. */
3716 && in_current_region_p (xbb->prev_bb)
3717 && jump_leads_only_to_bb_p (BB_END (xbb->prev_bb), xbb->next_bb)
3718 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3719 /* Also this jump is not at the scheduling boundary. */
3720 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3721 {
3722 bool recompute_toporder_p;
3723 /* Clear data structures of jump - jump itself will be removed
3724 by sel_redirect_edge_and_branch. */
3725 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3726 recompute_toporder_p
3727 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3728
3729 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3730
3731 /* It can turn out that after removing unused jump, basic block
3732 that contained that jump, becomes empty too. In such case
3733 remove it too. */
3734 if (sel_bb_empty_p (xbb->prev_bb))
3735 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3736 if (recompute_toporder_p)
3737 sel_recompute_toporder ();
e855c69d 3738 }
3739
3740#ifdef ENABLE_CHECKING
3741 verify_backedges ();
3742#endif
3743
3744 return changed;
3745}
3746
3747/* Purge meaningless empty blocks in the middle of a region. */
3748void
3749purge_empty_blocks (void)
3750{
3751 /* Do not attempt to delete preheader. */
3752 int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
3753
3754 while (i < current_nr_blocks)
3755 {
3756 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3757
3758 if (maybe_tidy_empty_bb (b))
3759 continue;
3760
3761 i++;
3762 }
3763}
3764
3765/* Rip off INSN from the insn stream. When ONLY_DISCONNECT is true,
3766 do not delete insn's data, because it will be later re-emitted.
3767 Return true if we have removed some blocks afterwards. */
3768bool
3769sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3770{
3771 basic_block bb = BLOCK_FOR_INSN (insn);
3772
3773 gcc_assert (INSN_IN_STREAM_P (insn));
3774
3775 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3776 {
3777 expr_t expr;
3778 av_set_iterator i;
3779
3780 /* When we remove a debug insn that is the head of a BB, it remains
3781 in the AV_SET of the block, but it shouldn't. */
3782 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3783 if (EXPR_INSN_RTX (expr) == insn)
3784 {
3785 av_set_iter_remove (&i);
3786 break;
3787 }
3788 }
3789
3790 if (only_disconnect)
3791 {
3792 insn_t prev = PREV_INSN (insn);
3793 insn_t next = NEXT_INSN (insn);
3794 basic_block bb = BLOCK_FOR_INSN (insn);
3795
3796 NEXT_INSN (prev) = next;
3797 PREV_INSN (next) = prev;
3798
3799 if (BB_HEAD (bb) == insn)
3800 {
3801 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3802 BB_HEAD (bb) = prev;
3803 }
3804 if (BB_END (bb) == insn)
3805 BB_END (bb) = prev;
3806 }
3807 else
3808 {
3809 remove_insn (insn);
3810 clear_expr (INSN_EXPR (insn));
3811 }
3812
3813 /* It is necessary to null these fields before calling add_insn (). */
3814 PREV_INSN (insn) = NULL_RTX;
3815 NEXT_INSN (insn) = NULL_RTX;
3816
3817 return tidy_control_flow (bb, full_tidying);
3818}
3819
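The ONLY_DISCONNECT branch above is a standard doubly-linked-list unlink
that keeps the node alive so it can be re-emitted later. A hedged sketch
with a hypothetical node type (the real insn chain additionally maintains
BB_HEAD and BB_END, as the code above shows):

#include <stddef.h>

struct node
{
  struct node *prev, *next;
};

/* Unlink N without freeing it, mirroring the PREV_INSN/NEXT_INSN
   surgery in sel_remove_insn.  */
static void
disconnect_node (struct node *n)
{
  n->prev->next = n->next;
  n->next->prev = n->prev;

  /* Null the links so a later re-insertion starts clean.  */
  n->prev = NULL;
  n->next = NULL;
}
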
3820/* Estimate number of the insns in BB. */
3821static int
3822sel_estimate_number_of_insns (basic_block bb)
3823{
3824 int res = 0;
3825 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3826
3827 for (; insn != next_tail; insn = NEXT_INSN (insn))
3828 if (NONDEBUG_INSN_P (insn))
3829 res++;
3830
3831 return res;
3832}
3833
3834/* We don't need separate luids for notes or labels. */
3835static int
3836sel_luid_for_non_insn (rtx x)
3837{
3838 gcc_assert (NOTE_P (x) || LABEL_P (x));
3839
3840 return -1;
3841}
3842
3843/* Return seqno of the only predecessor of INSN. */
3844static int
3845get_seqno_of_a_pred (insn_t insn)
3846{
3847 int seqno;
3848
3849 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3850
3851 if (!sel_bb_head_p (insn))
3852 seqno = INSN_SEQNO (PREV_INSN (insn));
3853 else
3854 {
3855 basic_block bb = BLOCK_FOR_INSN (insn);
3856
3857 if (single_pred_p (bb)
3858 && !in_current_region_p (single_pred (bb)))
3859 {
3860 /* We can have preds outside a region when splitting edges
3861 for pipelining of an outer loop. Use succ instead.
3862 There should be only one of them. */
3863 insn_t succ = NULL;
3864 succ_iterator si;
3865 bool first = true;
3866
3867 gcc_assert (flag_sel_sched_pipelining_outer_loops
3868 && current_loop_nest);
3869 FOR_EACH_SUCC_1 (succ, si, insn,
3870 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
3871 {
3872 gcc_assert (first);
3873 first = false;
3874 }
3875
3876 gcc_assert (succ != NULL);
3877 seqno = INSN_SEQNO (succ);
3878 }
3879 else
3880 {
3881 insn_t *preds;
3882 int n;
3883
3884 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
3885 gcc_assert (n == 1);
3886
3887 seqno = INSN_SEQNO (preds[0]);
3888
3889 free (preds);
3890 }
3891 }
3892
3893 return seqno;
3894}
3895
3896/* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
3897 with positive seqno exist. */
3898int
3899get_seqno_by_preds (rtx insn)
3900{
3901 basic_block bb = BLOCK_FOR_INSN (insn);
3902 rtx tmp = insn, head = BB_HEAD (bb);
3903 insn_t *preds;
3904 int n, i, seqno;
3905
3906 while (tmp != head)
3907 if (INSN_P (tmp))
3908 return INSN_SEQNO (tmp);
3909 else
3910 tmp = PREV_INSN (tmp);
3911
3912 cfg_preds (bb, &preds, &n);
3913 for (i = 0, seqno = -1; i < n; i++)
3914 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
3915
3916 return seqno;
3917}
3918
3919\f
3920
3921/* Extend pass-scope data structures for basic blocks. */
3922void
3923sel_extend_global_bb_info (void)
3924{
3925 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
3926 last_basic_block);
3927}
3928
3929/* Extend region-scope data structures for basic blocks. */
3930static void
3931extend_region_bb_info (void)
3932{
3933 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
3934 last_basic_block);
3935}
3936
3937/* Extend all data structures to fit for all basic blocks. */
3938static void
3939extend_bb_info (void)
3940{
3941 sel_extend_global_bb_info ();
3942 extend_region_bb_info ();
3943}
3944
3945/* Finalize pass-scope data structures for basic blocks. */
3946void
3947sel_finish_global_bb_info (void)
3948{
3949 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
3950}
3951
3952/* Finalize region-scope data structures for basic blocks. */
3953static void
3954finish_region_bb_info (void)
3955{
3956 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
3957}
3958\f
3959
3960/* Data for each insn in current region. */
3961VEC (sel_insn_data_def, heap) *s_i_d = NULL;
3962
3963/* A vector for the insns we've emitted. */
3964static insn_vec_t new_insns = NULL;
3965
3966/* Extend data structures for insns from current region. */
3967static void
3968extend_insn_data (void)
3969{
3970 int reserve;
3971
3972 sched_extend_target ();
3973 sched_deps_init (false);
3974
3975 /* Extend data structures for insns from current region. */
3976 reserve = (sched_max_luid + 1
3977 - VEC_length (sel_insn_data_def, s_i_d));
3978 if (reserve > 0
3979 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
3980 {
3981 int size;
3982
3983 if (sched_max_luid / 2 > 1024)
3984 size = sched_max_luid + 1024;
3985 else
3986 size = 3 * sched_max_luid / 2;
3987
3988
3989 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
3990 }
3991}
3992
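The reallocation above grows the luid-indexed vector geometrically (by
half) while it is small, and by a fixed 1024-element chunk once it is
large, trading reallocation count against over-allocation. The same policy
in isolation, as a sketch rather than the VEC API:

/* Return the new capacity for a luid-indexed vector.  */
static int
grown_size (int max_luid)
{
  if (max_luid / 2 > 1024)
    return max_luid + 1024;     /* Large: grow by a fixed chunk.  */
  else
    return 3 * max_luid / 2;    /* Small: grow geometrically.  */
}
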
3993/* Finalize data structures for insns from current region. */
3994static void
3995finish_insns (void)
3996{
3997 unsigned i;
3998
3999 /* Clear all dependence contexts that may have been left over from
4000 insns that were removed during the scheduling. */
4001 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
4002 {
4003 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
4004
4005 if (sid_entry->live)
4006 return_regset_to_pool (sid_entry->live);
4007 if (sid_entry->analyzed_deps)
4008 {
4009 BITMAP_FREE (sid_entry->analyzed_deps);
4010 BITMAP_FREE (sid_entry->found_deps);
4011 htab_delete (sid_entry->transformed_insns);
4012 free_deps (&sid_entry->deps_context);
4013 }
4014 if (EXPR_VINSN (&sid_entry->expr))
4015 {
4016 clear_expr (&sid_entry->expr);
4017
4018 /* Also, clear CANT_MOVE bit here, because we really don't want it
4019 to be passed to the next region. */
4020 CANT_MOVE_BY_LUID (i) = 0;
4021 }
4022 }
4023
4024 VEC_free (sel_insn_data_def, heap, s_i_d);
4025}
4026
4027/* A proxy to pass initialization data to init_insn (). */
4028static sel_insn_data_def _insn_init_ssid;
4029static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4030
4031/* If true create a new vinsn. Otherwise use the one from EXPR. */
4032static bool insn_init_create_new_vinsn_p;
4033
4034/* Set all necessary data for initialization of the new insn[s]. */
4035static expr_t
4036set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4037{
4038 expr_t x = &insn_init_ssid->expr;
4039
4040 copy_expr_onside (x, expr);
4041 if (vi != NULL)
4042 {
4043 insn_init_create_new_vinsn_p = false;
4044 change_vinsn_in_expr (x, vi);
4045 }
4046 else
4047 insn_init_create_new_vinsn_p = true;
4048
4049 insn_init_ssid->seqno = seqno;
4050 return x;
4051}
4052
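set_insn_init and init_insn_data above use a parameter-parking pattern:
the initialization callback takes no extra arguments, so its inputs are
stashed in the static proxy _insn_init_ssid first. A compact sketch of the
same pattern under hypothetical names:

/* The proxy parks parameters for a callback that cannot take them
   directly (compare _insn_init_ssid above).  */
static struct
{
  int seqno;
} init_proxy;

static int current_seqno;

/* Phase 1: park the data.  */
static void
set_init (int seqno)
{
  init_proxy.seqno = seqno;
}

/* Phase 2: the argument-less hook consumes it.  */
static void
init_cb (void)
{
  current_seqno = init_proxy.seqno;
}
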
4053/* Init data for INSN. */
4054static void
4055init_insn_data (insn_t insn)
4056{
4057 expr_t expr;
4058 sel_insn_data_t ssid = insn_init_ssid;
4059
4060 /* The fields mentioned below are special and hence are not being
4061 propagated to the new insns. */
4062 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4063 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4064 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4065
4066 expr = INSN_EXPR (insn);
4067 copy_expr (expr, &ssid->expr);
4068 prepare_insn_expr (insn, ssid->seqno);
4069
4070 if (insn_init_create_new_vinsn_p)
4071 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4072
4073 if (first_time_insn_init (insn))
4074 init_first_time_insn_data (insn);
4075}
4076
4077/* This is used to initialize spurious jumps generated by
4078 sel_redirect_edge (). */
4079static void
4080init_simplejump_data (insn_t insn)
4081{
4082 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4083 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4084 false, true);
4085 INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
4086 init_first_time_insn_data (insn);
4087}
4088
4089/* Perform deferred initialization of insns. This is used to process
4090 a new jump that may be created by redirect_edge. */
4091void
4092sel_init_new_insn (insn_t insn, int flags)
4093{
4094 /* We create data structures for bb when the first insn is emitted in it. */
4095 if (INSN_P (insn)
4096 && INSN_IN_STREAM_P (insn)
4097 && insn_is_the_only_one_in_bb_p (insn))
4098 {
4099 extend_bb_info ();
4100 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4101 }
4102
4103 if (flags & INSN_INIT_TODO_LUID)
4104 sched_init_luids (NULL, NULL, NULL, insn);
4105
4106 if (flags & INSN_INIT_TODO_SSID)
4107 {
4108 extend_insn_data ();
4109 init_insn_data (insn);
4110 clear_expr (&insn_init_ssid->expr);
4111 }
4112
4113 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4114 {
4115 extend_insn_data ();
4116 init_simplejump_data (insn);
4117 }
4118
4119 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4120 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4121}
4122\f
4123
4124/* Functions to init/finish work with lv sets. */
4125
4126/* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4127static void
4128init_lv_set (basic_block bb)
4129{
4130 gcc_assert (!BB_LV_SET_VALID_P (bb));
4131
4132 BB_LV_SET (bb) = get_regset_from_pool ();
4133 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4134 BB_LV_SET_VALID_P (bb) = true;
4135}
4136
4137/* Copy liveness information to BB from FROM_BB. */
4138static void
4139copy_lv_set_from (basic_block bb, basic_block from_bb)
4140{
4141 gcc_assert (!BB_LV_SET_VALID_P (bb));
4142
4143 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4144 BB_LV_SET_VALID_P (bb) = true;
4145}
4146
4147/* Initialize lv set of all bb headers. */
4148void
4149init_lv_sets (void)
4150{
4151 basic_block bb;
4152
4153 /* Initialize of LV sets. */
4154 FOR_EACH_BB (bb)
4155 init_lv_set (bb);
4156
4157 /* Don't forget EXIT_BLOCK. */
4158 init_lv_set (EXIT_BLOCK_PTR);
4159}
4160
4161/* Release lv set of HEAD. */
4162static void
4163free_lv_set (basic_block bb)
4164{
4165 gcc_assert (BB_LV_SET (bb) != NULL);
4166
4167 return_regset_to_pool (BB_LV_SET (bb));
4168 BB_LV_SET (bb) = NULL;
4169 BB_LV_SET_VALID_P (bb) = false;
4170}
4171
4172/* Finalize lv sets of all bb headers. */
4173void
4174free_lv_sets (void)
4175{
4176 basic_block bb;
4177
4178 /* Don't forget EXIT_BLOCK. */
4179 free_lv_set (EXIT_BLOCK_PTR);
4180
4181 /* Free LV sets. */
4182 FOR_EACH_BB (bb)
4183 if (BB_LV_SET (bb))
4184 free_lv_set (bb);
4185}
4186
4187/* Initialize an invalid AV_SET for BB.
4188 This set will be updated next time compute_av () processes BB. */
4189static void
4190invalidate_av_set (basic_block bb)
4191{
4192 gcc_assert (BB_AV_LEVEL (bb) <= 0
4193 && BB_AV_SET (bb) == NULL);
4194
4195 BB_AV_LEVEL (bb) = -1;
4196}
4197
4198/* Create initial data sets for BB (they will be invalid). */
4199static void
4200create_initial_data_sets (basic_block bb)
4201{
4202 if (BB_LV_SET (bb))
4203 BB_LV_SET_VALID_P (bb) = false;
4204 else
4205 BB_LV_SET (bb) = get_regset_from_pool ();
4206 invalidate_av_set (bb);
4207}
4208
4209/* Free av set of BB. */
4210static void
4211free_av_set (basic_block bb)
4212{
4213 av_set_clear (&BB_AV_SET (bb));
4214 BB_AV_LEVEL (bb) = 0;
4215}
4216
4217/* Free data sets of BB. */
4218void
4219free_data_sets (basic_block bb)
4220{
4221 free_lv_set (bb);
4222 free_av_set (bb);
4223}
4224
4225/* Exchange lv sets of TO and FROM. */
4226static void
4227exchange_lv_sets (basic_block to, basic_block from)
4228{
4229 {
4230 regset to_lv_set = BB_LV_SET (to);
4231
4232 BB_LV_SET (to) = BB_LV_SET (from);
4233 BB_LV_SET (from) = to_lv_set;
4234 }
4235
4236 {
4237 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4238
4239 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4240 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4241 }
4242}
4243
4244
4245/* Exchange av sets of TO and FROM. */
4246static void
4247exchange_av_sets (basic_block to, basic_block from)
4248{
4249 {
4250 av_set_t to_av_set = BB_AV_SET (to);
4251
4252 BB_AV_SET (to) = BB_AV_SET (from);
4253 BB_AV_SET (from) = to_av_set;
4254 }
4255
4256 {
4257 int to_av_level = BB_AV_LEVEL (to);
4258
4259 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4260 BB_AV_LEVEL (from) = to_av_level;
4261 }
4262}
4263
4264/* Exchange data sets of TO and FROM. */
4265void
4266exchange_data_sets (basic_block to, basic_block from)
4267{
4268 exchange_lv_sets (to, from);
4269 exchange_av_sets (to, from);
4270}
4271
4272/* Copy data sets of FROM to TO. */
4273void
4274copy_data_sets (basic_block to, basic_block from)
4275{
4276 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4277 gcc_assert (BB_AV_SET (to) == NULL);
4278
4279 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4280 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4281
4282 if (BB_AV_SET_VALID_P (from))
4283 {
4284 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4285 }
4286 if (BB_LV_SET_VALID_P (from))
4287 {
4288 gcc_assert (BB_LV_SET (to) != NULL);
4289 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4290 }
4291}
4292
4293/* Return an av set for INSN, if any. */
4294av_set_t
4295get_av_set (insn_t insn)
4296{
4297 av_set_t av_set;
4298
4299 gcc_assert (AV_SET_VALID_P (insn));
4300
4301 if (sel_bb_head_p (insn))
4302 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4303 else
4304 av_set = NULL;
4305
4306 return av_set;
4307}
4308
4309/* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4310int
4311get_av_level (insn_t insn)
4312{
4313 int av_level;
4314
4315 gcc_assert (INSN_P (insn));
4316
4317 if (sel_bb_head_p (insn))
4318 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4319 else
4320 av_level = INSN_WS_LEVEL (insn);
4321
4322 return av_level;
4323}
4324
4325\f
4326
4327/* Variables to work with control-flow graph. */
4328
4329/* Basic blocks that have already been processed by sched_data_update (),
4330 but haven't yet been passed to sel_add_bb (). */
4331static VEC (basic_block, heap) *last_added_blocks = NULL;
4332
4333/* A pool for allocating successor infos. */
4334static struct
4335{
4336 /* A stack for saving succs_info structures. */
4337 struct succs_info *stack;
4338
4339 /* Its size. */
4340 int size;
4341
4342 /* Top of the stack. */
4343 int top;
4344
4345 /* Maximal value of the top. */
4346 int max_top;
4347} succs_info_pool;
4348
4349/* Functions to work with control-flow graph. */
4350
4351/* Return the first real insn of BB, or NULL_RTX when BB is empty. */
4352insn_t
4353sel_bb_head (basic_block bb)
4354{
4355 insn_t head;
4356
4357 if (bb == EXIT_BLOCK_PTR)
4358 {
4359 gcc_assert (exit_insn != NULL_RTX);
4360 head = exit_insn;
4361 }
4362 else
4363 {
4364 insn_t note;
4365
4366 note = bb_note (bb);
4367 head = next_nonnote_insn (note);
4368
4369 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4370 head = NULL_RTX;
4371 }
4372
4373 return head;
4374}
4375
4376/* Return true if INSN is a basic block header. */
4377bool
4378sel_bb_head_p (insn_t insn)
4379{
4380 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4381}
4382
4383/* Return last insn of BB. */
4384insn_t
4385sel_bb_end (basic_block bb)
4386{
4387 if (sel_bb_empty_p (bb))
4388 return NULL_RTX;
4389
4390 gcc_assert (bb != EXIT_BLOCK_PTR);
4391
4392 return BB_END (bb);
4393}
4394
4395/* Return true if INSN is the last insn in its basic block. */
4396bool
4397sel_bb_end_p (insn_t insn)
4398{
4399 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4400}
4401
4402/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4403bool
4404sel_bb_empty_p (basic_block bb)
4405{
4406 return sel_bb_head (bb) == NULL;
4407}
4408
4409/* True when BB belongs to the current scheduling region. */
4410bool
4411in_current_region_p (basic_block bb)
4412{
4413 if (bb->index < NUM_FIXED_BLOCKS)
4414 return false;
4415
4416 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4417}
4418
4419/* Return the block which is a fallthru bb of a conditional jump JUMP. */
4420basic_block
4421fallthru_bb_of_jump (rtx jump)
4422{
4423 if (!JUMP_P (jump))
4424 return NULL;
4425
4426 if (any_uncondjump_p (jump))
4427 return single_succ (BLOCK_FOR_INSN (jump));
4428
4429 if (!any_condjump_p (jump))
4430 return NULL;
4431
4432 /* A basic block that ends with a conditional jump may still have one
4433 successor (and be followed by a barrier); we are not interested. */
4434 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4435 return NULL;
4436
4437 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4438}
4439
4440/* Remove all notes from BB. */
4441static void
4442init_bb (basic_block bb)
4443{
4444 remove_notes (bb_note (bb), BB_END (bb));
4445 BB_NOTE_LIST (bb) = note_list;
4446}
4447
4448void
4449sel_init_bbs (bb_vec_t bbs, basic_block bb)
4450{
4451 const struct sched_scan_info_def ssi =
4452 {
4453 extend_bb_info, /* extend_bb */
4454 init_bb, /* init_bb */
4455 NULL, /* extend_insn */
4456 NULL /* init_insn */
4457 };
4458
4459 sched_scan (&ssi, bbs, bb, new_insns, NULL);
4460}
4461
4462/* Restore notes for the whole region. */
4463static void
4464sel_restore_notes (void)
4465{
4466 int bb;
4467 insn_t insn;
4468
4469 for (bb = 0; bb < current_nr_blocks; bb++)
4470 {
4471 basic_block first, last;
4472
4473 first = EBB_FIRST_BB (bb);
4474 last = EBB_LAST_BB (bb)->next_bb;
4475
4476 do
4477 {
4478 note_list = BB_NOTE_LIST (first);
4479 restore_other_notes (NULL, first);
4480 BB_NOTE_LIST (first) = NULL_RTX;
4481
4482 FOR_BB_INSNS (first, insn)
4483 if (NONDEBUG_INSN_P (insn))
4484 reemit_notes (insn);
4485
4486 first = first->next_bb;
4487 }
4488 while (first != last);
4489 }
4490}
4491
4492/* Free per-bb data structures. */
4493void
4494sel_finish_bbs (void)
4495{
4496 sel_restore_notes ();
4497
4498 /* Remove current loop preheader from this loop. */
4499 if (current_loop_nest)
4500 sel_remove_loop_preheader ();
4501
4502 finish_region_bb_info ();
4503}
4504
4505/* Return true if INSN has a single successor of type FLAGS. */
4506bool
4507sel_insn_has_single_succ_p (insn_t insn, int flags)
4508{
4509 insn_t succ;
4510 succ_iterator si;
4511 bool first_p = true;
4512
4513 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4514 {
4515 if (first_p)
4516 first_p = false;
4517 else
4518 return false;
4519 }
4520
4521 return true;
4522}
4523
4524/* Allocate successor's info. */
4525static struct succs_info *
4526alloc_succs_info (void)
4527{
4528 if (succs_info_pool.top == succs_info_pool.max_top)
4529 {
4530 int i;
4531
4532 if (++succs_info_pool.max_top >= succs_info_pool.size)
4533 gcc_unreachable ();
4534
4535 i = ++succs_info_pool.top;
4536 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4537 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4538 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4539 }
4540 else
4541 succs_info_pool.top++;
4542
4543 return &succs_info_pool.stack[succs_info_pool.top];
4544}
4545
4546/* Free successor's info. */
4547void
4548free_succs_info (struct succs_info * sinfo)
4549{
4550 gcc_assert (succs_info_pool.top >= 0
4551 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4552 succs_info_pool.top--;
4553
4554 /* Clear stale info. */
4555 VEC_block_remove (rtx, sinfo->succs_ok,
4556 0, VEC_length (rtx, sinfo->succs_ok));
4557 VEC_block_remove (rtx, sinfo->succs_other,
4558 0, VEC_length (rtx, sinfo->succs_other));
4559 VEC_block_remove (int, sinfo->probs_ok,
4560 0, VEC_length (int, sinfo->probs_ok));
4561 sinfo->all_prob = 0;
4562 sinfo->succs_ok_n = 0;
4563 sinfo->all_succs_n = 0;
4564}
4565
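alloc_succs_info and free_succs_info above implement a strict LIFO pool:
allocation bumps TOP (lazily initializing fresh slots up to MAX_TOP), and
the assert in free enforces that the most recently allocated entry is
released first. A self-contained sketch of the same discipline with a
plain int payload and hypothetical names:

#include <assert.h>

#define POOL_SIZE 16

static int pool_data[POOL_SIZE];
static int pool_top = -1;

static int *
pool_alloc (void)
{
  assert (pool_top + 1 < POOL_SIZE);
  return &pool_data[++pool_top];
}

static void
pool_free (int *p)
{
  /* LIFO discipline: only the top entry may be freed.  */
  assert (pool_top >= 0 && p == &pool_data[pool_top]);
  pool_top--;
}
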
4566/* Compute successor info for INSN. FLAGS are the flags passed
4567 to the FOR_EACH_SUCC_1 iterator. */
4568struct succs_info *
4569compute_succs_info (insn_t insn, short flags)
4570{
4571 succ_iterator si;
4572 insn_t succ;
4573 struct succs_info *sinfo = alloc_succs_info ();
4574
4575 /* Traverse *all* successors and decide what to do with each. */
4576 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4577 {
4578 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4579 perform code motion through inner loops. */
4580 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4581
4582 if (current_flags & flags)
4583 {
4584 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4585 VEC_safe_push (int, heap, sinfo->probs_ok,
4586 /* FIXME: Improve calculation when skipping
4587 inner loop to exits. */
4588 (si.bb_end
4589 ? si.e1->probability
4590 : REG_BR_PROB_BASE));
4591 sinfo->succs_ok_n++;
4592 }
4593 else
4594 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4595
4596 /* Compute all_prob. */
4597 if (!si.bb_end)
4598 sinfo->all_prob = REG_BR_PROB_BASE;
4599 else
4600 sinfo->all_prob += si.e1->probability;
4601
4602 sinfo->all_succs_n++;
4603 }
4604
4605 return sinfo;
4606}
4607
4608/* Return the predecessors of BB in PREDS and their number in N.
4609 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4610static void
4611cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4612{
4613 edge e;
4614 edge_iterator ei;
4615
4616 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4617
4618 FOR_EACH_EDGE (e, ei, bb->preds)
4619 {
4620 basic_block pred_bb = e->src;
4621 insn_t bb_end = BB_END (pred_bb);
4622
4623 if (!in_current_region_p (pred_bb))
4624 {
4625 gcc_assert (flag_sel_sched_pipelining_outer_loops
4626 && current_loop_nest);
4627 continue;
4628 }
4629
4630 if (sel_bb_empty_p (pred_bb))
4631 cfg_preds_1 (pred_bb, preds, n, size);
4632 else
4633 {
4634 if (*n == *size)
4635 *preds = XRESIZEVEC (insn_t, *preds,
4636 (*size = 2 * *size + 1));
4637 (*preds)[(*n)++] = bb_end;
4638 }
4639 }
4640
4641 gcc_assert (*n != 0
4642 || (flag_sel_sched_pipelining_outer_loops
4643 && current_loop_nest));
4644}
4645
4646/* Find all predecessors of BB and record them in PREDS and their number
4647 in N. Empty blocks are skipped, and only normal (forward in-region)
4648 edges are processed. */
4649static void
4650cfg_preds (basic_block bb, insn_t **preds, int *n)
4651{
4652 int size = 0;
4653
4654 *preds = NULL;
4655 *n = 0;
4656 cfg_preds_1 (bb, preds, n, &size);
4657}
4658
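cfg_preds_1 grows PREDS with the amortized-doubling rule
size = 2 * size + 1, which also covers the initial NULL/zero state (the
first push yields capacity 1). A standalone sketch using realloc in place
of XRESIZEVEC (error handling omitted for brevity):

#include <stdlib.h>

static void
push_value (int **v, int *n, int *size, int value)
{
  if (*n == *size)
    {
      *size = 2 * *size + 1;    /* 0 -> 1 -> 3 -> 7 -> ...  */
      *v = (int *) realloc (*v, *size * sizeof (int));
    }
  (*v)[(*n)++] = value;
}
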
4659/* Returns true if we are moving INSN through a join point. */
4660bool
4661sel_num_cfg_preds_gt_1 (insn_t insn)
4662{
4663 basic_block bb;
4664
4665 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4666 return false;
4667
4668 bb = BLOCK_FOR_INSN (insn);
4669
4670 while (1)
4671 {
4672 if (EDGE_COUNT (bb->preds) > 1)
4673 return true;
4674
4675 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4676 bb = EDGE_PRED (bb, 0)->src;
4677
4678 if (!sel_bb_empty_p (bb))
4679 break;
4680 }
4681
4682 return false;
4683}
4684
4685/* Returns true when BB should be the end of an ebb. Adapted from the
4686 code in sched-ebb.c. */
4687bool
4688bb_ends_ebb_p (basic_block bb)
4689{
4690 basic_block next_bb = bb_next_bb (bb);
4691 edge e;
4692
4693 if (next_bb == EXIT_BLOCK_PTR
4694 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4695 || (LABEL_P (BB_HEAD (next_bb))
4696 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4697 Work around that. */
4698 && !single_pred_p (next_bb)))
4699 return true;
4700
4701 if (!in_current_region_p (next_bb))
4702 return true;
4703
4704 e = find_fallthru_edge (bb->succs);
4705 if (e)
4706 {
4707 gcc_assert (e->dest == next_bb);
4708
4709 return false;
4710 }
4711
4712 return true;
4713}
4714
4715/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4716 successor of INSN. */
4717bool
4718in_same_ebb_p (insn_t insn, insn_t succ)
4719{
4720 basic_block ptr = BLOCK_FOR_INSN (insn);
4721
4722 for(;;)
4723 {
4724 if (ptr == BLOCK_FOR_INSN (succ))
4725 return true;
4726
4727 if (bb_ends_ebb_p (ptr))
4728 return false;
4729
4730 ptr = bb_next_bb (ptr);
4731 }
4732
4733 gcc_unreachable ();
4734 return false;
4735}
4736
4737/* Recomputes the reverse topological order for the function and
4738 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4739 modified appropriately. */
4740static void
4741recompute_rev_top_order (void)
4742{
4743 int *postorder;
4744 int n_blocks, i;
4745
4746 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4747 {
b8698a0f 4748 rev_top_order_index_len = last_basic_block;
e855c69d
AB
4749 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4750 rev_top_order_index_len);
4751 }
4752
4753 postorder = XNEWVEC (int, n_basic_blocks);
4754
4755 n_blocks = post_order_compute (postorder, true, false);
4756 gcc_assert (n_basic_blocks == n_blocks);
4757
4758 /* Build reverse function: for each basic block with BB->INDEX == K
4759 rev_top_order_index[K] is its reverse topological sort number. */
4760 for (i = 0; i < n_blocks; i++)
4761 {
4762 gcc_assert (postorder[i] < rev_top_order_index_len);
4763 rev_top_order_index[postorder[i]] = i;
4764 }
4765
4766 free (postorder);
4767}
4768
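recompute_rev_top_order builds the inverse of the permutation returned by
post_order_compute: if postorder[i] == block, then
rev_top_order_index[block] == i. The inversion step in isolation:

/* INV becomes the inverse of permutation PERM of 0..N-1.  */
static void
invert_permutation (const int *perm, int *inv, int n)
{
  int i;

  for (i = 0; i < n; i++)
    inv[perm[i]] = i;
}
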
4769/* Clear all flags from insns in BB that could spoil its rescheduling. */
4770void
4771clear_outdated_rtx_info (basic_block bb)
4772{
4773 rtx insn;
4774
4775 FOR_BB_INSNS (bb, insn)
4776 if (INSN_P (insn))
4777 {
4778 SCHED_GROUP_P (insn) = 0;
4779 INSN_AFTER_STALL_P (insn) = 0;
4780 INSN_SCHED_TIMES (insn) = 0;
4781 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4782
4783 /* We cannot use the changed caches, as previously we could ignore
4784 the LHS dependence due to enabled renaming and transform
4785 the expression, and currently we'll be unable to do this. */
4786 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4787 }
4788}
4789
4790/* Add BB_NOTE to the pool of available basic block notes. */
4791static void
4792return_bb_to_pool (basic_block bb)
4793{
4794 rtx note = bb_note (bb);
4795
4796 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4797 && bb->aux == NULL);
4798
4799 /* It turns out that current cfg infrastructure does not support
4800 reuse of basic blocks. Don't bother for now. */
4801 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4802}
4803
4804/* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4805static rtx
4806get_bb_note_from_pool (void)
4807{
4808 if (VEC_empty (rtx, bb_note_pool))
4809 return NULL_RTX;
4810 else
4811 {
4812 rtx note = VEC_pop (rtx, bb_note_pool);
4813
4814 PREV_INSN (note) = NULL_RTX;
4815 NEXT_INSN (note) = NULL_RTX;
4816
4817 return note;
4818 }
4819}
4820
4821/* Free bb_note_pool. */
4822void
4823free_bb_note_pool (void)
4824{
4825 VEC_free (rtx, heap, bb_note_pool);
4826}
4827
4828/* Setup scheduler pool and successor structure. */
4829void
4830alloc_sched_pools (void)
4831{
4832 int succs_size;
4833
4834 succs_size = MAX_WS + 1;
4835 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4836 succs_info_pool.size = succs_size;
4837 succs_info_pool.top = -1;
4838 succs_info_pool.max_top = -1;
4839
4840 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4841 sizeof (struct _list_node), 500);
4842}
4843
4844/* Free the pools. */
4845void
4846free_sched_pools (void)
4847{
4848 int i;
4849
4850 free_alloc_pool (sched_lists_pool);
4851 gcc_assert (succs_info_pool.top == -1);
4852 for (i = 0; i < succs_info_pool.max_top; i++)
4853 {
4854 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4855 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4856 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4857 }
4858 free (succs_info_pool.stack);
4859}
4860\f
4861
4862/* Returns a position in RGN where BB can be inserted retaining
4863 topological order. */
4864static int
4865find_place_to_insert_bb (basic_block bb, int rgn)
4866{
4867 bool has_preds_outside_rgn = false;
4868 edge e;
4869 edge_iterator ei;
4870
4871 /* Find whether we have preds outside the region. */
4872 FOR_EACH_EDGE (e, ei, bb->preds)
4873 if (!in_current_region_p (e->src))
4874 {
4875 has_preds_outside_rgn = true;
4876 break;
4877 }
4878
4879 /* Recompute the top order -- needed when we have > 1 pred
4880 and in case we don't have preds outside. */
4881 if (flag_sel_sched_pipelining_outer_loops
4882 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
4883 {
4884 int i, bbi = bb->index, cur_bbi;
4885
4886 recompute_rev_top_order ();
4887 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
4888 {
4889 cur_bbi = BB_TO_BLOCK (i);
4890 if (rev_top_order_index[bbi]
4891 < rev_top_order_index[cur_bbi])
4892 break;
4893 }
4894
4895 /* We skipped past the right block, so we increase i; since the
4896 caller will increase it by one step later, we decrease i back. */
4897 return (i + 1) - 1;
4898 }
4899 else if (has_preds_outside_rgn)
4900 {
4901 /* This is the case when we generate an extra empty block
4902 to serve as region head during pipelining. */
4903 e = EDGE_SUCC (bb, 0);
4904 gcc_assert (EDGE_COUNT (bb->succs) == 1
4905 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
4906 && (BLOCK_TO_BB (e->dest->index) == 0));
4907 return -1;
4908 }
4909
4910 /* We don't have preds outside the region. We should have
4911 the only pred, because the multiple preds case comes from
4912 the pipelining of outer loops, and that is handled above.
4913 Just take the bbi of this single pred. */
4914 if (EDGE_COUNT (bb->succs) > 0)
4915 {
4916 int pred_bbi;
b8698a0f 4917
e855c69d 4918 gcc_assert (EDGE_COUNT (bb->preds) == 1);
b8698a0f 4919
e855c69d
AB
4920 pred_bbi = EDGE_PRED (bb, 0)->src->index;
4921 return BLOCK_TO_BB (pred_bbi);
4922 }
4923 else
4924 /* BB has no successors. It is safe to put it in the end. */
4925 return current_nr_blocks - 1;
4926}
4927
4928/* Deletes an empty basic block freeing its data. */
4929static void
4930delete_and_free_basic_block (basic_block bb)
4931{
4932 gcc_assert (sel_bb_empty_p (bb));
4933
4934 if (BB_LV_SET (bb))
4935 free_lv_set (bb);
4936
4937 bitmap_clear_bit (blocks_to_reschedule, bb->index);
4938
4939 /* Can't assert av_set properties because we use sel_aremove_bb
4940 when removing loop preheader from the region. At the point of
e855c69d
AB
4941 removing the preheader we already have deallocated sel_region_bb_info. */
4942 gcc_assert (BB_LV_SET (bb) == NULL
4943 && !BB_LV_SET_VALID_P (bb)
4944 && BB_AV_LEVEL (bb) == 0
4945 && BB_AV_SET (bb) == NULL);
4946
4947 delete_basic_block (bb);
4948}
4949
4950/* Add BB to the current region and update the region data. */
4951static void
4952add_block_to_current_region (basic_block bb)
4953{
4954 int i, pos, bbi = -2, rgn;
4955
4956 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4957 bbi = find_place_to_insert_bb (bb, rgn);
4958 bbi += 1;
4959 pos = RGN_BLOCKS (rgn) + bbi;
4960
4961 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4962 && ebb_head[bbi] == pos);
4963
4964 /* Make a place for the new block. */
4965 extend_regions ();
4966
4967 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4968 BLOCK_TO_BB (rgn_bb_table[i])++;
4969
4970 memmove (rgn_bb_table + pos + 1,
4971 rgn_bb_table + pos,
4972 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4973
4974 /* Initialize data for BB. */
4975 rgn_bb_table[pos] = bb->index;
4976 BLOCK_TO_BB (bb->index) = bbi;
4977 CONTAINING_RGN (bb->index) = rgn;
4978
4979 RGN_NR_BLOCKS (rgn)++;
4980
4981 for (i = rgn + 1; i <= nr_regions; i++)
4982 RGN_BLOCKS (i)++;
4983}
4984
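add_block_to_current_region keeps rgn_bb_table densely packed: memmove
opens a slot at POS, and every later region's start offset is bumped so
RGN_BLOCKS keeps working as a running prefix of block counts. The
array-insertion step as a standalone sketch (TABLE is assumed to have room
for one more element):

#include <string.h>

static void
insert_at (int *table, int used, int pos, int value)
{
  /* Shift the tail right by one element to open the slot.  */
  memmove (table + pos + 1, table + pos,
           (used - pos) * sizeof (*table));
  table[pos] = value;
}
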
4985/* Remove BB from the current region and update the region data. */
4986static void
4987remove_bb_from_region (basic_block bb)
4988{
4989 int i, pos, bbi = -2, rgn;
4990
4991 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4992 bbi = BLOCK_TO_BB (bb->index);
4993 pos = RGN_BLOCKS (rgn) + bbi;
4994
4995 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4996 && ebb_head[bbi] == pos);
4997
4998 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4999 BLOCK_TO_BB (rgn_bb_table[i])--;
5000
5001 memmove (rgn_bb_table + pos,
5002 rgn_bb_table + pos + 1,
5003 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5004
5005 RGN_NR_BLOCKS (rgn)--;
5006 for (i = rgn + 1; i <= nr_regions; i++)
5007 RGN_BLOCKS (i)--;
5008}
5009
5010/* Add BB to the current region and update all data. If BB is NULL, add all
5011 blocks from last_added_blocks vector. */
5012static void
5013sel_add_bb (basic_block bb)
5014{
5015 /* Extend luids so that new notes will receive zero luids. */
5016 sched_init_luids (NULL, NULL, NULL, NULL);
5017 sched_init_bbs ();
5018 sel_init_bbs (last_added_blocks, NULL);
5019
5020 /* When bb is passed explicitly, the vector should contain
5021 the only element that equals to bb; otherwise, the vector
5022 should not be NULL. */
5023 gcc_assert (last_added_blocks != NULL);
5024
5025 if (bb != NULL)
5026 {
5027 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5028 && VEC_index (basic_block,
5029 last_added_blocks, 0) == bb);
5030 add_block_to_current_region (bb);
5031
5032 /* We associate creating/deleting data sets with the first insn
5033 appearing / disappearing in the bb. */
5034 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5035 create_initial_data_sets (bb);
5036
5037 VEC_free (basic_block, heap, last_added_blocks);
5038 }
5039 else
5040 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5041 {
5042 int i;
5043 basic_block temp_bb = NULL;
5044
5045 for (i = 0;
5046 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5047 {
5048 add_block_to_current_region (bb);
5049 temp_bb = bb;
5050 }
5051
5052 /* We need to fetch at least one bb so we know the region
5053 to update. */
5054 gcc_assert (temp_bb != NULL);
5055 bb = temp_bb;
5056
5057 VEC_free (basic_block, heap, last_added_blocks);
5058 }
5059
5060 rgn_setup_region (CONTAINING_RGN (bb->index));
5061}
5062
5063/* Remove BB from the current region and update all data.
5064 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5065static void
5066sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5067{
5068 unsigned idx = bb->index;
5069
5070 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5071
5072 remove_bb_from_region (bb);
5073 return_bb_to_pool (bb);
5074 bitmap_clear_bit (blocks_to_reschedule, idx);
5075
5076 if (remove_from_cfg_p)
5077 delete_and_free_basic_block (bb);
5078
5079 rgn_setup_region (CONTAINING_RGN (idx));
5080}
5081
5082/* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5083static void
5084move_bb_info (basic_block merge_bb, basic_block empty_bb)
5085{
5086 gcc_assert (in_current_region_p (merge_bb));
5087
5088 concat_note_lists (BB_NOTE_LIST (empty_bb),
5089 &BB_NOTE_LIST (merge_bb));
5090 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5091
5092}
5093
5094/* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5095 region, but keep it in CFG. */
5096static void
5097remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5098{
5099 /* The block should contain just a note or a label.
5100 We try to check whether it is unused below. */
5101 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5102 || LABEL_P (BB_HEAD (empty_bb)));
5103
5104 /* If basic block has predecessors or successors, redirect them. */
5105 if (remove_from_cfg_p
5106 && (EDGE_COUNT (empty_bb->preds) > 0
5107 || EDGE_COUNT (empty_bb->succs) > 0))
5108 {
5109 basic_block pred;
5110 basic_block succ;
5111
5112 /* We need to init PRED and SUCC before redirecting edges. */
5113 if (EDGE_COUNT (empty_bb->preds) > 0)
5114 {
5115 edge e;
5116
5117 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5118
5119 e = EDGE_PRED (empty_bb, 0);
5120 gcc_assert (e->src == empty_bb->prev_bb
5121 && (e->flags & EDGE_FALLTHRU));
5122
5123 pred = empty_bb->prev_bb;
5124 }
5125 else
5126 pred = NULL;
5127
5128 if (EDGE_COUNT (empty_bb->succs) > 0)
5129 {
5130 /* We do not check fallthruness here as above, because
5131 after removing a jump the edge may actually be not fallthru. */
5132 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5133 succ = EDGE_SUCC (empty_bb, 0)->dest;
5134 }
5135 else
5136 succ = NULL;
5137
5138 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5139 {
5140 edge e = EDGE_PRED (empty_bb, 0);
5141
5142 if (e->flags & EDGE_FALLTHRU)
5143 redirect_edge_succ_nodup (e, succ);
5144 else
5145 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5146 }
5147
5148 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5149 {
5150 edge e = EDGE_SUCC (empty_bb, 0);
5151
5152 if (find_edge (pred, e->dest) == NULL)
5153 redirect_edge_pred (e, pred);
5154 }
5155 }
5156
5157 /* Finish removing. */
5158 sel_remove_bb (empty_bb, remove_from_cfg_p);
5159}
5160
5161/* An implementation of create_basic_block hook, which additionally updates
5162 per-bb data structures. */
5163static basic_block
5164sel_create_basic_block (void *headp, void *endp, basic_block after)
5165{
5166 basic_block new_bb;
5167 insn_t new_bb_note;
5168
5169 gcc_assert (flag_sel_sched_pipelining_outer_loops
5170 || last_added_blocks == NULL);
5171
5172 new_bb_note = get_bb_note_from_pool ();
5173
5174 if (new_bb_note == NULL_RTX)
5175 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5176 else
5177 {
5178 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5179 new_bb_note, after);
5180 new_bb->aux = NULL;
5181 }
5182
5183 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5184
5185 return new_bb;
5186}
5187
5188/* Implement sched_init_only_bb (). */
5189static void
5190sel_init_only_bb (basic_block bb, basic_block after)
5191{
5192 gcc_assert (after == NULL);
5193
5194 extend_regions ();
5195 rgn_make_new_region_out_of_new_block (bb);
5196}
5197
5198/* Update the latch when we've split or merged it from block FROM to TO.
5199 This should be checked for all outer loops, too. */
5200static void
5201change_loops_latches (basic_block from, basic_block to)
5202{
5203 gcc_assert (from != to);
5204
5205 if (current_loop_nest)
5206 {
5207 struct loop *loop;
5208
5209 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5210 if (considered_for_pipelining_p (loop) && loop->latch == from)
5211 {
5212 gcc_assert (loop == current_loop_nest);
5213 loop->latch = to;
5214 gcc_assert (loop_latch_edge (loop));
5215 }
5216 }
5217}
5218
5219/* Splits BB into two basic blocks, adding it to the region and extending
5220 per-bb data structures. Returns the newly created bb. */
5221static basic_block
5222sel_split_block (basic_block bb, rtx after)
5223{
5224 basic_block new_bb;
5225 insn_t insn;
5226
5227 new_bb = sched_split_block_1 (bb, after);
5228 sel_add_bb (new_bb);
5229
5230 /* This should be called after sel_add_bb, because this uses
5231 CONTAINING_RGN for the new block, which is not yet initialized.
5232 FIXME: this function may be a no-op now. */
5233 change_loops_latches (bb, new_bb);
5234
5235 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5236 FOR_BB_INSNS (new_bb, insn)
5237 if (INSN_P (insn))
5238 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5239
5240 if (sel_bb_empty_p (bb))
5241 {
5242 gcc_assert (!sel_bb_empty_p (new_bb));
5243
5244 /* NEW_BB has data sets that need to be updated and BB holds
5245 data sets that should be removed. Exchange these data sets
5246 so that we won't lose BB's valid data sets. */
5247 exchange_data_sets (new_bb, bb);
5248 free_data_sets (bb);
5249 }
5250
5251 if (!sel_bb_empty_p (new_bb)
5252 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5253 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5254
5255 return new_bb;
5256}
5257
5258/* If BB ends with a jump insn whose UID is bigger than PREV_MAX_UID, return it.
5259 Otherwise returns NULL. */
5260static rtx
5261check_for_new_jump (basic_block bb, int prev_max_uid)
5262{
5263 rtx end;
5264
5265 end = sel_bb_end (bb);
5266 if (end && INSN_UID (end) >= prev_max_uid)
5267 return end;
5268 return NULL;
5269}
5270
5271/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5272 New means having UID at least equal to PREV_MAX_UID. */
5273static rtx
5274find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5275{
5276 rtx jump;
5277
5278 /* Return immediately if no new insns were emitted. */
5279 if (get_max_uid () == prev_max_uid)
5280 return NULL;
5281
5282 /* Now check both blocks for new jumps. There will only ever be one. */
5283 if ((jump = check_for_new_jump (from, prev_max_uid)))
5284 return jump;
5285
5286 if (jump_bb != NULL
5287 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5288 return jump;
5289 return NULL;
5290}
5291
5292/* Splits E and adds the newly created basic block to the current region.
5293 Returns this basic block. */
5294basic_block
5295sel_split_edge (edge e)
5296{
5297 basic_block new_bb, src, other_bb = NULL;
5298 int prev_max_uid;
5299 rtx jump;
5300
5301 src = e->src;
5302 prev_max_uid = get_max_uid ();
5303 new_bb = split_edge (e);
5304
5305 if (flag_sel_sched_pipelining_outer_loops
5306 && current_loop_nest)
5307 {
5308 int i;
5309 basic_block bb;
5310
5311 /* Some of the basic blocks might not have been added to the loop.
5312 Add them here, until this is fixed in force_fallthru. */
5313 for (i = 0;
5314 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5315 if (!bb->loop_father)
5316 {
5317 add_bb_to_loop (bb, e->dest->loop_father);
5318
5319 gcc_assert (!other_bb && (new_bb->index != bb->index));
5320 other_bb = bb;
5321 }
5322 }
5323
5324 /* Add all last_added_blocks to the region. */
5325 sel_add_bb (NULL);
5326
5327 jump = find_new_jump (src, new_bb, prev_max_uid);
5328 if (jump)
5329 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5330
5331 /* Put the correct lv set on this block. */
5332 if (other_bb && !sel_bb_empty_p (other_bb))
5333 compute_live (sel_bb_head (other_bb));
5334
5335 return new_bb;
5336}
5337
5338/* Implement sched_create_empty_bb (). */
5339static basic_block
5340sel_create_empty_bb (basic_block after)
5341{
5342 basic_block new_bb;
5343
5344 new_bb = sched_create_empty_bb_1 (after);
5345
5346 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5347 later. */
5348 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5349 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5350
5351 VEC_free (basic_block, heap, last_added_blocks);
5352 return new_bb;
5353}
5354
5355/* Implement sched_create_recovery_block. ORIG_INSN is where block
5356 will be split to insert a check. */
5357basic_block
5358sel_create_recovery_block (insn_t orig_insn)
5359{
5360 basic_block first_bb, second_bb, recovery_block;
5361 basic_block before_recovery = NULL;
5362 rtx jump;
5363
5364 first_bb = BLOCK_FOR_INSN (orig_insn);
5365 if (sel_bb_end_p (orig_insn))
5366 {
5367 /* Avoid introducing an empty block while splitting. */
5368 gcc_assert (single_succ_p (first_bb));
5369 second_bb = single_succ (first_bb);
5370 }
5371 else
5372 second_bb = sched_split_block (first_bb, orig_insn);
5373
5374 recovery_block = sched_create_recovery_block (&before_recovery);
5375 if (before_recovery)
5376 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5377
5378 gcc_assert (sel_bb_empty_p (recovery_block));
5379 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5380 if (current_loops != NULL)
5381 add_bb_to_loop (recovery_block, first_bb->loop_father);
5382
5383 sel_add_bb (recovery_block);
5384
5385 jump = BB_END (recovery_block);
5386 gcc_assert (sel_bb_head (recovery_block) == jump);
5387 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5388
5389 return recovery_block;
5390}
5391
5392/* Merge basic block B into basic block A. */
5393static void
5394sel_merge_blocks (basic_block a, basic_block b)
5395{
5396 gcc_assert (sel_bb_empty_p (b)
5397 && EDGE_COUNT (b->preds) == 1
5398 && EDGE_PRED (b, 0)->src == b->prev_bb);
5399
5400 move_bb_info (b->prev_bb, b);
5401 remove_empty_bb (b, false);
5402 merge_blocks (a, b);
5403 change_loops_latches (b, a);
5404}
5405
5406/* A wrapper for redirect_edge_and_branch_force, which also initializes
5407 data structures for a possibly created bb and insns. The newly created
5408 bb, if any, is added to the region via sel_add_bb rather than returned. */
5409void
5410sel_redirect_edge_and_branch_force (edge e, basic_block to)
5411{
5412 basic_block jump_bb, src;
5413 int prev_max_uid;
5414 rtx jump;
5415
5416 gcc_assert (!sel_bb_empty_p (e->src));
5417
5418 src = e->src;
5419 prev_max_uid = get_max_uid ();
5420 jump_bb = redirect_edge_and_branch_force (e, to);
5421
5422 if (jump_bb != NULL)
5423 sel_add_bb (jump_bb);
5424
5425 /* This function could not be used to spoil the loop structure by now,
5426 thus we don't care to update anything. But check it to be sure. */
5427 if (current_loop_nest
5428 && pipelining_p)
5429 gcc_assert (loop_latch_edge (current_loop_nest));
5430
5431 jump = find_new_jump (src, jump_bb, prev_max_uid);
5432 if (jump)
5433 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5434}
5435
5436/* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5437 redirected edge are in reverse topological order. */
5438bool
e855c69d
AB
5439sel_redirect_edge_and_branch (edge e, basic_block to)
5440{
5441 bool latch_edge_p;
5442 basic_block src;
5443 int prev_max_uid;
5444 rtx jump;
f2c45f08 5445 edge redirected;
5446 bool recompute_toporder_p = false;
e855c69d
AB
5447
5448 latch_edge_p = (pipelining_p
5449 && current_loop_nest
5450 && e == loop_latch_edge (current_loop_nest));
5451
5452 src = e->src;
5453 prev_max_uid = get_max_uid ();
5454
5455 redirected = redirect_edge_and_branch (e, to);
5456
5457 gcc_assert (redirected && last_added_blocks == NULL);
5458
5459 /* When we've redirected a latch edge, update the header. */
5460 if (latch_edge_p)
5461 {
5462 current_loop_nest->header = to;
5463 gcc_assert (loop_latch_edge (current_loop_nest));
5464 }
5465
5466 /* In rare situations, the topological relation between the blocks connected
5467 by the redirected edge can change (see PR42245 for an example). Update
5468 block_to_bb/bb_to_block. */
5469 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5470 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5471 recompute_toporder_p = true;
5472
5473 jump = find_new_jump (src, NULL, prev_max_uid);
5474 if (jump)
5475 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5476
5477 return recompute_toporder_p;
5478}
5479
5480/* This variable holds the cfg hooks used by the selective scheduler. */
5481static struct cfg_hooks sel_cfg_hooks;
5482
5483/* Register sel-sched cfg hooks. */
5484void
5485sel_register_cfg_hooks (void)
5486{
5487 sched_split_block = sel_split_block;
5488
5489 orig_cfg_hooks = get_cfg_hooks ();
5490 sel_cfg_hooks = orig_cfg_hooks;
5491
5492 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5493
5494 set_cfg_hooks (sel_cfg_hooks);
5495
5496 sched_init_only_bb = sel_init_only_bb;
5497 sched_split_block = sel_split_block;
5498 sched_create_empty_bb = sel_create_empty_bb;
5499}
5500
5501/* Unregister sel-sched cfg hooks. */
5502void
5503sel_unregister_cfg_hooks (void)
5504{
5505 sched_create_empty_bb = NULL;
5506 sched_split_block = NULL;
5507 sched_init_only_bb = NULL;
5508
5509 set_cfg_hooks (orig_cfg_hooks);
5510}
5511\f
5512
5513/* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5514 LABEL is where this jump should be directed. */
5515rtx
5516create_insn_rtx_from_pattern (rtx pattern, rtx label)
5517{
5518 rtx insn_rtx;
5519
5520 gcc_assert (!INSN_P (pattern));
5521
5522 start_sequence ();
5523
5524 if (label == NULL_RTX)
5525 insn_rtx = emit_insn (pattern);
5526 else if (DEBUG_INSN_P (label))
5527 insn_rtx = emit_debug_insn (pattern);
5528 else
5529 {
5530 insn_rtx = emit_jump_insn (pattern);
5531 JUMP_LABEL (insn_rtx) = label;
5532 ++LABEL_NUSES (label);
5533 }
5534
5535 end_sequence ();
5536
5537 sched_init_luids (NULL, NULL, NULL, NULL);
5538 sched_extend_target ();
5539 sched_deps_init (false);
5540
5541 /* Initialize INSN_CODE now. */
5542 recog_memoized (insn_rtx);
5543 return insn_rtx;
5544}
5545
5546/* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5547 must not be clonable. */
5548vinsn_t
5549create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5550{
5551 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5552
5553 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5554 return vinsn_create (insn_rtx, force_unique_p);
5555}
5556
5557/* Create a copy of INSN_RTX. */
5558rtx
5559create_copy_of_insn_rtx (rtx insn_rtx)
5560{
5561 rtx res;
5562
5563 if (DEBUG_INSN_P (insn_rtx))
5564 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5565 insn_rtx);
5566
5567 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5568
5569 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5570 NULL_RTX);
5571 return res;
5572}
5573
5574/* Change vinsn field of EXPR to hold NEW_VINSN. */
5575void
5576change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5577{
5578 vinsn_detach (EXPR_VINSN (expr));
5579
5580 EXPR_VINSN (expr) = new_vinsn;
5581 vinsn_attach (new_vinsn);
5582}
5583
5584/* Helpers for global init. */
5585/* This structure is used to be able to call existing bundling mechanism
5586 and calculate insn priorities. */
5587static struct haifa_sched_info sched_sel_haifa_sched_info =
5588{
5589 NULL, /* init_ready_list */
5590 NULL, /* can_schedule_ready_p */
5591 NULL, /* schedule_more_p */
5592 NULL, /* new_ready */
5593 NULL, /* rgn_rank */
5594 sel_print_insn, /* rgn_print_insn */
5595 contributes_to_priority,
5596 NULL, /* insn_finishes_block_p */
5597
5598 NULL, NULL,
5599 NULL, NULL,
5600 0, 0,
5601
5602 NULL, /* add_remove_insn */
5603 NULL, /* begin_schedule_ready */
5604 NULL, /* advance_target_bb */
5605 SEL_SCHED | NEW_BBS
5606};
5607
5608/* Setup special insns used in the scheduler. */
5609void
5610setup_nop_and_exit_insns (void)
5611{
5612 gcc_assert (nop_pattern == NULL_RTX
5613 && exit_insn == NULL_RTX);
5614
5615 nop_pattern = constm1_rtx;
5616
5617 start_sequence ();
5618 emit_insn (nop_pattern);
5619 exit_insn = get_insns ();
5620 end_sequence ();
5621 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5622}
5623
5624/* Free special insns used in the scheduler. */
5625void
5626free_nop_and_exit_insns (void)
5627{
5628 exit_insn = NULL_RTX;
5629 nop_pattern = NULL_RTX;
5630}
5631
5632/* Setup a special vinsn used in new insns initialization. */
5633void
5634setup_nop_vinsn (void)
5635{
5636 nop_vinsn = vinsn_create (exit_insn, false);
5637 vinsn_attach (nop_vinsn);
5638}
5639
5640/* Free a special vinsn used in new insns initialization. */
5641void
5642free_nop_vinsn (void)
5643{
5644 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5645 vinsn_detach (nop_vinsn);
5646 nop_vinsn = NULL;
5647}
5648
5649/* Call a set_sched_flags hook. */
5650void
5651sel_set_sched_flags (void)
5652{
5653 /* ??? This means that set_sched_flags was called, and we decided to
5654 support speculation. However, set_sched_flags also modifies flags
5655 on current_sched_info, doing this only at global init. And we
5656 sometimes change c_s_i later. So put the correct flags again. */
5657 if (spec_info && targetm.sched.set_sched_flags)
5658 targetm.sched.set_sched_flags (spec_info);
5659}
5660
5661/* Setup pointers to global sched info structures. */
5662void
5663sel_setup_sched_infos (void)
5664{
5665 rgn_setup_common_sched_info ();
5666
5667 memcpy (&sel_common_sched_info, common_sched_info,
5668 sizeof (sel_common_sched_info));
5669
5670 sel_common_sched_info.fix_recovery_cfg = NULL;
5671 sel_common_sched_info.add_block = NULL;
5672 sel_common_sched_info.estimate_number_of_insns
5673 = sel_estimate_number_of_insns;
5674 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5675 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5676
5677 common_sched_info = &sel_common_sched_info;
5678
5679 current_sched_info = &sched_sel_haifa_sched_info;
5680 current_sched_info->sched_max_insns_priority =
5681 get_rgn_sched_max_insns_priority ();
5682
5683 sel_set_sched_flags ();
5684}
5685\f
5686
5687/* Adds basic block BB to region RGN at the position *BB_ORD_INDEX,
5688 *BB_ORD_INDEX after that is increased. */
5689static void
5690sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5691{
5692 RGN_NR_BLOCKS (rgn) += 1;
5693 RGN_DONT_CALC_DEPS (rgn) = 0;
5694 RGN_HAS_REAL_EBB (rgn) = 0;
5695 CONTAINING_RGN (bb->index) = rgn;
5696 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5697 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5698 (*bb_ord_index)++;
5699
5700 /* FIXME: it is true only when not scheduling ebbs. */
5701 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5702}
5703
5704/* Functions to support pipelining of outer loops. */
5705
5706/* Creates a new empty region and returns its number. */
5707static int
5708sel_create_new_region (void)
5709{
5710 int new_rgn_number = nr_regions;
5711
5712 RGN_NR_BLOCKS (new_rgn_number) = 0;
5713
5714 /* FIXME: This will work only when EBBs are not created. */
5715 if (new_rgn_number != 0)
5716 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5717 RGN_NR_BLOCKS (new_rgn_number - 1);
5718 else
5719 RGN_BLOCKS (new_rgn_number) = 0;
5720
5721 /* Set the blocks of the next region so the other functions may
5722 calculate the number of blocks in the region. */
5723 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5724 RGN_NR_BLOCKS (new_rgn_number);
5725
5726 nr_regions++;
5727
5728 return new_rgn_number;
5729}
5730
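RGN_BLOCKS behaves as a prefix-sum index: region R owns
rgn_bb_table[RGN_BLOCKS (R) .. RGN_BLOCKS (R + 1) - 1], which is why
sel_create_new_region must also seed the next region's boundary. A sketch
of appending an empty region under that invariant, with plain arrays
standing in for the region macros:

/* RGN_BLOCKS analogue: rgn_blocks[r] is where region R starts in the
   block table; rgn_blocks[0] is assumed to be 0.  */
static void
append_empty_region (int *rgn_blocks, int *nr_regions)
{
  int r = (*nr_regions)++;

  /* The new region is empty, so its end boundary equals its start.  */
  rgn_blocks[r + 1] = rgn_blocks[r];
}
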
5731/* If X has a smaller topological sort number than Y, returns -1;
5732 if greater, returns 1. */
5733static int
5734bb_top_order_comparator (const void *x, const void *y)
5735{
5736 basic_block bb1 = *(const basic_block *) x;
5737 basic_block bb2 = *(const basic_block *) y;
5738
5739 gcc_assert (bb1 == bb2
5740 || rev_top_order_index[bb1->index]
5741 != rev_top_order_index[bb2->index]);
5742
5743 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5744 bbs with greater number should go earlier. */
5745 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5746 return -1;
5747 else
5748 return 1;
5749}
5750
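bb_top_order_comparator never returns 0; the assert guarantees distinct
indices, so a strict two-way ordering suffices for qsort. An analogous
self-contained comparator ranking indices by an external array (rank
values assumed distinct, as above; all names hypothetical):

#include <stdlib.h>

static const int *rank;   /* Must be set before sorting.  */

/* Sort indices so that larger ranks come first.  */
static int
by_rank_desc (const void *x, const void *y)
{
  int a = *(const int *) x;
  int b = *(const int *) y;

  return rank[a] > rank[b] ? -1 : 1;
}

/* Usage sketch:
     rank = some_rev_top_order_array;
     qsort (indices, n, sizeof (int), by_rank_desc);  */
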
5751/* Create a region for LOOP and return its number. If we don't want
5752 to pipeline LOOP, return -1. */
5753static int
5754make_region_from_loop (struct loop *loop)
5755{
5756 unsigned int i;
5757 int new_rgn_number = -1;
5758 struct loop *inner;
5759
5760 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5761 int bb_ord_index = 0;
5762 basic_block *loop_blocks;
5763 basic_block preheader_block;
5764
5765 if (loop->num_nodes
5766 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5767 return -1;
5768
5769 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5770 for (inner = loop->inner; inner; inner = inner->inner)
5771 if (flow_bb_inside_loop_p (inner, loop->latch))
5772 return -1;
5773
5774 loop->ninsns = num_loop_insns (loop);
5775 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5776 return -1;
5777
5778 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5779
5780 for (i = 0; i < loop->num_nodes; i++)
5781 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5782 {
5783 free (loop_blocks);
5784 return -1;
5785 }
5786
5787 preheader_block = loop_preheader_edge (loop)->src;
5788 gcc_assert (preheader_block);
5789 gcc_assert (loop_blocks[0] == loop->header);
5790
5791 new_rgn_number = sel_create_new_region ();
5792
5793 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5794 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5795
5796 for (i = 0; i < loop->num_nodes; i++)
5797 {
5798 /* Add only those blocks that haven't been scheduled in an inner loop.
5799 The exception is basic blocks with bookkeeping code - they should
b8698a0f 5800 be added to the region (they don't actually belong to the loop
5801 body, but to the region containing that loop body). */
5802
5803 gcc_assert (new_rgn_number >= 0);
5804
5805 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5806 {
b8698a0f 5807 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5808 new_rgn_number);
5809 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5810 }
5811 }
5812
5813 free (loop_blocks);
5814 MARK_LOOP_FOR_PIPELINING (loop);
5815
5816 return new_rgn_number;
5817}
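/* For orientation (hedged): in the resulting region, ordinal 0
   (BLOCK_TO_BB == 0) is the preheader and ordinal 1 is normally the loop
   header (loop_blocks[0]), with the rest of the body following in
   topological order.  sel_is_loop_preheader_p below relies on the
   preheader being ordinal 0.  */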
5818
5819/* Create a new region from preheader blocks LOOP_BLOCKS. */
5820void
5821make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5822{
5823 unsigned int i;
5824 int new_rgn_number = -1;
5825 basic_block bb;
5826
5827 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5828 int bb_ord_index = 0;
5829
5830 new_rgn_number = sel_create_new_region ();
5831
ac47786e 5832 FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
5833 {
5834 gcc_assert (new_rgn_number >= 0);
5835
5836 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
5837 }
5838
5839 VEC_free (basic_block, heap, *loop_blocks);
5840 gcc_assert (*loop_blocks == NULL);
5841}
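/* Caller-side note: the function consumes *LOOP_BLOCKS - the vector is
   freed and the caller's copy is cleared, as the assert above checks.
   See the non-empty-preheader path of sel_remove_loop_preheader below.  */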
5842
5843
5844/* Create region(s) from loop nest LOOP, such that inner loops will be
b8698a0f 5845 pipelined before outer loops. Returns true when a region for LOOP
5846 is created. */
5847static bool
5848make_regions_from_loop_nest (struct loop *loop)
b8698a0f 5849{
5850 struct loop *cur_loop;
5851 int rgn_number;
5852
5853 /* Traverse all inner nodes of the loop. */
5854 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
5855 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
5856 return false;
5857
5858 /* At this moment all regular inner loops should have been pipelined.
5859 Try to create a region from this loop. */
5860 rgn_number = make_region_from_loop (loop);
5861
5862 if (rgn_number < 0)
5863 return false;
5864
5865 VEC_safe_push (loop_p, heap, loop_nests, loop);
5866 return true;
5867}
5868
5869/* Initialize the data structures needed for pipelining. */
5870void
5871sel_init_pipelining (void)
5872{
5873 /* Collect loop information to be used in outer loops pipelining. */
5874 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
5875 | LOOPS_HAVE_FALLTHRU_PREHEADERS
5876 | LOOPS_HAVE_RECORDED_EXITS
5877 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
5878 current_loop_nest = NULL;
5879
5880 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
5881 sbitmap_zero (bbs_in_loop_rgns);
5882
5883 recompute_rev_top_order ();
5884}
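/* Lifetime note: bbs_in_loop_rgns is freed at the end of sel_find_rgns
   below, while rev_top_order_index (used by bb_top_order_comparator
   above and filled by recompute_rev_top_order) lives until
   sel_finish_pipelining.  */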
5885
5886/* Returns a struct loop for region RGN. */
5887loop_p
5888get_loop_nest_for_rgn (unsigned int rgn)
5889{
5890 /* Regions created with extend_rgns don't have corresponding loop nests,
5891 because they don't represent loops. */
5892 if (rgn < VEC_length (loop_p, loop_nests))
5893 return VEC_index (loop_p, loop_nests, rgn);
5894 else
5895 return NULL;
5896}
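/* Hedged usage sketch: regions made from loops occupy the low region
   numbers in creation order, so a NULL result identifies a region created
   by extend_rgns instead.  The helper name is hypothetical.  */
#if 0
static void
maybe_enter_loop_nest (unsigned int rgn)
{
  loop_p nest = get_loop_nest_for_rgn (rgn);

  /* Only regions that represent loops are pipelined.  */
  if (nest != NULL)
    current_loop_nest = nest;
}
#endif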
5897
5898/* True when LOOP was included in pipelining regions. */
5899bool
5900considered_for_pipelining_p (struct loop *loop)
5901{
5902 if (loop_depth (loop) == 0)
5903 return false;
5904
5905 /* Now, the loop could be too large or irreducible. Check whether its
5906 region is in LOOP_NESTS.
5907 We determine the region number of LOOP as the region number of its
5908 latch. We can't use the header here, because it could be a
5909 just-removed preheader, which would give us the wrong region number.
5910 The latch can't be in an inner loop, since make_region_from_loop refuses to pipeline such loops. */
8ec4d0ad 5911 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
5912 {
5913 int rgn = CONTAINING_RGN (loop->latch->index);
5914
5915 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
5916 return true;
5917 }
b8698a0f 5918
5919 return false;
5920}
5921
b8698a0f 5922/* Make regions from the remaining blocks, after loops have been chosen
5923 for pipelining. */
5924static void
5925make_regions_from_the_rest (void)
5926{
5927 int cur_rgn_blocks;
5928 int *loop_hdr;
5929 int i;
5930
5931 basic_block bb;
5932 edge e;
5933 edge_iterator ei;
5934 int *degree;
5935
5936 /* Index in rgn_bb_table where to start allocating new regions. */
5937 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
e855c69d 5938
b8698a0f 5939 /* Make regions from all the remaining basic blocks - those that don't
5940 belong to any loop or belong to irreducible loops. Prepare the data
5941 structures for extend_rgns. */
5942
5943 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
5944 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
5945 loop. */
5946 loop_hdr = XNEWVEC (int, last_basic_block);
5947 degree = XCNEWVEC (int, last_basic_block);
5948
5949
5950 /* For each basic block that belongs to some loop assign the number
5951 of innermost loop it belongs to. */
5952 for (i = 0; i < last_basic_block; i++)
5953 loop_hdr[i] = -1;
5954
5955 FOR_EACH_BB (bb)
5956 {
5957 if (bb->loop_father && bb->loop_father->num != 0
5958 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
5959 loop_hdr[bb->index] = bb->loop_father->num;
5960 }
5961
b8698a0f 5962 /* For each basic block, degree is calculated as the number of incoming
5963 edges coming from bbs that are not yet scheduled. Blocks already
5964 placed into a loop region are marked with degree -1 below. */
b8698a0f 5965 FOR_EACH_BB (bb)
5966 {
5967 degree[bb->index] = 0;
5968
5969 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
5970 {
5971 FOR_EACH_EDGE (e, ei, bb->preds)
5972 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
5973 degree[bb->index]++;
5974 }
5975 else
5976 degree[bb->index] = -1;
5977 }
5978
5979 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
5980
5981 /* Any block that did not end up in a region is placed into a region
5982 by itself. */
5983 FOR_EACH_BB (bb)
5984 if (degree[bb->index] >= 0)
5985 {
5986 rgn_bb_table[cur_rgn_blocks] = bb->index;
5987 RGN_NR_BLOCKS (nr_regions) = 1;
5988 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
5989 RGN_DONT_CALC_DEPS (nr_regions) = 0;
5990 RGN_HAS_REAL_EBB (nr_regions) = 0;
5991 CONTAINING_RGN (bb->index) = nr_regions++;
5992 BLOCK_TO_BB (bb->index) = 0;
5993 }
5994
5995 free (degree);
5996 free (loop_hdr);
5997}
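/* Worked example (illustrative): for a diamond B1 -> {B2, B3} -> B4 where
   no block sits in a loop region, degree[B2] = degree[B3] = 1 and
   degree[B4] = 2.  If B2 had already been placed into a loop region, it
   would get degree -1 and B4's degree would drop to 1, since edges from
   such blocks are not counted.  */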
5998
5999/* Free data structures used in pipelining of loops. */
6000void
sel_finish_pipelining (void)
6001{
6002 loop_iterator li;
6003 struct loop *loop;
6004
6005 /* Release aux fields so we don't free them later by mistake. */
6006 FOR_EACH_LOOP (li, loop, 0)
6007 loop->aux = NULL;
6008
6009 loop_optimizer_finalize ();
6010
6011 VEC_free (loop_p, heap, loop_nests);
6012
6013 free (rev_top_order_index);
6014 rev_top_order_index = NULL;
6015}
6016
b8698a0f 6017/* This function replaces find_rgns when
e855c69d 6018 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
b8698a0f 6019void
6020sel_find_rgns (void)
6021{
6022 sel_init_pipelining ();
6023 extend_regions ();
6024
6025 if (current_loops)
6026 {
6027 loop_p loop;
6028 loop_iterator li;
6029
6030 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6031 ? LI_FROM_INNERMOST
6032 : LI_ONLY_INNERMOST))
6033 make_regions_from_loop_nest (loop);
6034 }
6035
6036 /* Make regions from all the remaining basic blocks and schedule them.
b8698a0f 6037 These are the blocks that don't belong to any loop or that belong
6038 to irreducible loops. */
6039 make_regions_from_the_rest ();
6040
6041 /* We don't need bbs_in_loop_rgns anymore. */
6042 sbitmap_free (bbs_in_loop_rgns);
6043 bbs_in_loop_rgns = NULL;
6044}
6045
6046/* Add the preheader blocks of the previous loop to the current region,
6047 taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest).
6048 This function is only used with -fsel-sched-pipelining-outer-loops. */
6049void
6050sel_add_loop_preheaders (void)
6051{
6052 int i;
6053 basic_block bb;
b8698a0f 6054 VEC(basic_block, heap) *preheader_blocks
6055 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6056
6057 for (i = 0;
6058 VEC_iterate (basic_block, preheader_blocks, i, bb);
6059 i++)
6060 {
6061 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
e855c69d 6062 sel_add_bb (bb);
8ec4d0ad 6063 }
6064
6065 VEC_free (basic_block, heap, preheader_blocks);
6066}
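/* Pairing note: these blocks were stashed by sel_remove_loop_preheader
   below when the inner loop's region was scheduled; together the two
   functions move preheaders one nesting level outward at a time.  */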
6067
6068/* While pipelining outer loops, return TRUE if BB is a loop preheader.
6069 Note that the function must also work when pipelining_p is false,
6070 because it is used when deciding whether we should or should not
6071 reschedule pipelined code. */
6072bool
6073sel_is_loop_preheader_p (basic_block bb)
6074{
6075 if (current_loop_nest)
6076 {
6077 struct loop *outer;
6078
6079 if (preheader_removed)
6080 return false;
6081
6082 /* Preheader is the first block in the region. */
6083 if (BLOCK_TO_BB (bb->index) == 0)
6084 return true;
6085
6086 /* We used to find a preheader with the topological information.
6087 Check that the above code is equivalent to what we did before. */
6088
6089 if (in_current_region_p (current_loop_nest->header))
b8698a0f 6090 gcc_assert (!(BLOCK_TO_BB (bb->index)
6091 < BLOCK_TO_BB (current_loop_nest->header->index)));
6092
6093 /* Guard against the situation in which the latch block of an outer
6094 loop would come from this region; that must never happen. */
6095 for (outer = loop_outer (current_loop_nest);
6096 outer;
6097 outer = loop_outer (outer))
6098 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6099 gcc_unreachable ();
6100 }
6101
6102 return false;
6103}
6104
6105/* Checks whether JUMP leads to basic block DEST_BB and no other blocks. */
6106bool
6107jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
6108{
6109 basic_block jump_bb = BLOCK_FOR_INSN (jump);
6110
b8698a0f 6111 /* Return false if JUMP is not a jump, has side effects, or can
6112 lead to several basic blocks. */
6113 if (!onlyjump_p (jump)
6114 || !any_uncondjump_p (jump))
6115 return false;
6116
b8698a0f 6117 /* Return false on several outgoing edges, an abnormal edge, or a
6118 jump destination other than DEST_BB. */
6119 if (EDGE_COUNT (jump_bb->succs) != 1
6120 || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
6121 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6122 return false;
6123
6124 /* Otherwise, JUMP leads only to DEST_BB. */
6125 return true;
6126}
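/* Examples of rejected jumps (illustrative): a conditional branch fails
   any_uncondjump_p; a jump whose pattern also performs a side effect
   fails onlyjump_p; and a nonlocal-goto source carries an EDGE_ABNORMAL
   successor edge, failing the edge checks.  */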
6127
6128/* Remove the loop preheader blocks from the current region and save
b8698a0f 6129 them in PREHEADER_BLOCKS of the parent loop, so they can later be
6130 added to the region that represents the outer loop. */
6131static void
6132sel_remove_loop_preheader (void)
6133{
6134 int i, old_len;
6135 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6136 basic_block bb;
6137 bool all_empty_p = true;
b8698a0f 6138 VEC(basic_block, heap) *preheader_blocks
6139 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6140
6141 gcc_assert (current_loop_nest);
6142 old_len = VEC_length (basic_block, preheader_blocks);
6143
6144 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6145 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6146 {
6147 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6148
b8698a0f 6149 /* If the basic block belongs to the region but not to the
6150 corresponding loop, then it must be a preheader. */
6151 if (sel_is_loop_preheader_p (bb))
6152 {
6153 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6154 if (BB_END (bb) != bb_note (bb))
6155 all_empty_p = false;
6156 }
6157 }
b8698a0f 6158
6159 /* Remove these blocks only after iterating over the whole region. */
6160 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6161 i >= old_len;
6162 i--)
6163 {
b8698a0f 6164 bb = VEC_index (basic_block, preheader_blocks, i);
6165 sel_remove_bb (bb, false);
6166 }
6167
6168 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6169 {
6170 if (!all_empty_p)
6171 /* Immediately create new region from preheader. */
6172 make_region_from_loop_preheader (&preheader_blocks);
6173 else
6174 {
6175 /* If all preheader blocks are empty, don't create a new empty
6176 region; instead, remove them completely. */
ac47786e 6177 FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
6178 {
6179 edge e;
6180 edge_iterator ei;
6181 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6182
6183 /* Redirect all incoming edges to next basic block. */
6184 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6185 {
6186 if (! (e->flags & EDGE_FALLTHRU))
6187 redirect_edge_and_branch (e, bb->next_bb);
6188 else
6189 redirect_edge_succ (e, bb->next_bb);
6190 }
6191 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6192 delete_and_free_basic_block (bb);
6193
6194 /* Check whether, after deleting the preheader, there is an
6195 unconditional jump in PREV_BB that leads to the next basic block,
6196 NEXT_BB. If so, delete this jump and clear the data sets of its
6197 basic block if it becomes empty. */
6198 if (next_bb->prev_bb == prev_bb
6199 && prev_bb != ENTRY_BLOCK_PTR
6200 && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
6201 {
6202 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6203 if (BB_END (prev_bb) == bb_note (prev_bb))
6204 free_data_sets (prev_bb);
6205 }
6206 }
6207 }
6208 VEC_free (basic_block, heap, preheader_blocks);
6209 }
6210 else
6211 /* Store the preheader blocks in the parent loop's structure. */
6212 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6213 preheader_blocks);
6214}
6215#endif