e1ab7874 1/* Instruction scheduling pass. Selective scheduler and pipeliner.
d353bf18 2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
e1ab7874 3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
9ef16211 23#include "backend.h"
24#include "tree.h"
e1ab7874 25#include "rtl.h"
9ef16211 26#include "df.h"
27#include "diagnostic-core.h"
e1ab7874 28#include "tm_p.h"
e1ab7874 29#include "regs.h"
94ea8568 30#include "cfgrtl.h"
31#include "cfganal.h"
32#include "cfgbuild.h"
e1ab7874 33#include "flags.h"
34#include "insn-config.h"
35#include "insn-attr.h"
36#include "except.h"
e1ab7874 37#include "recog.h"
38#include "params.h"
39#include "target.h"
e1ab7874 40#include "sched-int.h"
e1ab7874 41#include "langhooks.h"
42#include "rtlhooks-def.h"
06f9d6ef 43#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
e1ab7874 44
45#ifdef INSN_SCHEDULING
9ef16211 46#include "regset.h"
47#include "cfgloop.h"
e1ab7874 48#include "sel-sched-ir.h"
49/* We need this only for sel_print_insn. */
50#include "sel-sched-dump.h"
51
52/* A vector holding bb info for the whole scheduling pass. */
f1f41a6c 53vec<sel_global_bb_info_def>
1e094109 54 sel_global_bb_info = vNULL;
e1ab7874 55
56/* A vector holding bb info for the current region. */
f1f41a6c 57vec<sel_region_bb_info_def>
1e094109 58 sel_region_bb_info = vNULL;
e1ab7874 59
60/* A pool for allocating all lists. */
e26b6f42 61pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
e1ab7874 62
63/* This contains information about successors for compute_av_set. */
64struct succs_info current_succs;
65
66/* Data structure to describe interaction with the generic scheduler utils. */
67static struct common_sched_info_def sel_common_sched_info;
68
69/* The loop nest being pipelined. */
70struct loop *current_loop_nest;
71
72/* LOOP_NESTS is a vector containing the corresponding loop nest for
73 each region. */
1e094109 74static vec<loop_p> loop_nests = vNULL;
e1ab7874 75
76/* Saves blocks already in loop regions, indexed by bb->index. */
77static sbitmap bbs_in_loop_rgns = NULL;
78
79/* CFG hooks that are saved before changing create_basic_block hook. */
80static struct cfg_hooks orig_cfg_hooks;
81\f
82
83/* Array containing reverse topological index of function basic blocks,
84 indexed by BB->INDEX. */
85static int *rev_top_order_index = NULL;
86
87/* Length of the above array. */
88static int rev_top_order_index_len = -1;
89
90/* A regset pool structure. */
91static struct
92{
93 /* The stack to which regsets are returned. */
94 regset *v;
95
96 /* Its stack pointer: the number of regsets currently on the stack. */
97 int n;
98
99 /* Its size. */
100 int s;
101
102 /* In VV we save all generated regsets so that, when destructing the
103 pool, we can compare it with V and check that every regset was returned
104 back to pool. */
105 regset *vv;
106
107 /* The stack pointer of VV. */
108 int nn;
109
110 /* Its size. */
111 int ss;
112
113 /* The difference between allocated and returned regsets. */
114 int diff;
115} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
116
117/* This represents the nop pool. */
118static struct
119{
120 /* The vector which holds previously emitted nops. */
121 insn_t *v;
122
123 /* Its stack pointer: the number of nops currently in the pool. */
124 int n;
125
126 /* Its size. */
48e1416a 127 int s;
e1ab7874 128} nop_pool = { NULL, 0, 0 };
129
130/* The pool for basic block notes. */
cef3d8ad 131static vec<rtx_note *> bb_note_pool;
e1ab7874 132
133/* A NOP pattern used to emit placeholder insns. */
134rtx nop_pattern = NULL_RTX;
135/* A special instruction that resides in EXIT_BLOCK.
136 EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
179c282d 137rtx_insn *exit_insn = NULL;
e1ab7874 138
48e1416a 139/* TRUE if, while scheduling the current region (which is a loop), its
e1ab7874 140 preheader was removed. */
141bool preheader_removed = false;
142\f
143
144/* Forward static declarations. */
145static void fence_clear (fence_t);
146
147static void deps_init_id (idata_t, insn_t, bool);
148static void init_id_from_df (idata_t, insn_t, bool);
149static expr_t set_insn_init (expr_t, vinsn_t, int);
150
151static void cfg_preds (basic_block, insn_t **, int *);
152static void prepare_insn_expr (insn_t, int);
f1f41a6c 153static void free_history_vect (vec<expr_history_def> &);
e1ab7874 154
155static void move_bb_info (basic_block, basic_block);
156static void remove_empty_bb (basic_block, bool);
0424f393 157static void sel_merge_blocks (basic_block, basic_block);
e1ab7874 158static void sel_remove_loop_preheader (void);
49087fba 159static bool bb_has_removable_jump_to_p (basic_block, basic_block);
e1ab7874 160
161static bool insn_is_the_only_one_in_bb_p (insn_t);
162static void create_initial_data_sets (basic_block);
163
9845d120 164static void free_av_set (basic_block);
e1ab7874 165static void invalidate_av_set (basic_block);
166static void extend_insn_data (void);
8d1881f5 167static void sel_init_new_insn (insn_t, int, int = -1);
e1ab7874 168static void finish_insns (void);
169\f
170/* Various list functions. */
171
172/* Copy an instruction list L. */
173ilist_t
174ilist_copy (ilist_t l)
175{
176 ilist_t head = NULL, *tailp = &head;
177
178 while (l)
179 {
180 ilist_add (tailp, ILIST_INSN (l));
181 tailp = &ILIST_NEXT (*tailp);
182 l = ILIST_NEXT (l);
183 }
184
185 return head;
186}
187
188/* Invert an instruction list L. */
189ilist_t
190ilist_invert (ilist_t l)
191{
192 ilist_t res = NULL;
193
194 while (l)
195 {
196 ilist_add (&res, ILIST_INSN (l));
197 l = ILIST_NEXT (l);
198 }
199
200 return res;
201}
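
/* For illustration (a hypothetical sketch, not a call made by the
   scheduler itself): since ilist_copy preserves the order of L and
   ilist_invert reverses it, a reversed duplicate of a list L is simply

     ilist_t rev = ilist_invert (l);
     ...
     ilist_clear (&rev);

   with ilist_clear returning the nodes to sched_lists_pool.  */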
202
203/* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
204void
205blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
206{
207 bnd_t bnd;
208
209 _list_add (lp);
210 bnd = BLIST_BND (*lp);
211
2f3c9801 212 BND_TO (bnd) = to;
e1ab7874 213 BND_PTR (bnd) = ptr;
214 BND_AV (bnd) = NULL;
215 BND_AV1 (bnd) = NULL;
216 BND_DC (bnd) = dc;
217}
218
219/* Remove the list node pointed to by LP. */
220void
221blist_remove (blist_t *lp)
222{
223 bnd_t b = BLIST_BND (*lp);
224
225 av_set_clear (&BND_AV (b));
226 av_set_clear (&BND_AV1 (b));
227 ilist_clear (&BND_PTR (b));
228
229 _list_remove (lp);
230}
231
232/* Init the fence list tail L. */
233void
234flist_tail_init (flist_tail_t l)
235{
236 FLIST_TAIL_HEAD (l) = NULL;
237 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
238}
239
240/* Try to find the fence corresponding to INSN in L. */
241fence_t
242flist_lookup (flist_t l, insn_t insn)
243{
244 while (l)
245 {
246 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
247 return FLIST_FENCE (l);
248
249 l = FLIST_NEXT (l);
250 }
251
252 return NULL;
253}
254
255/* Init the fields of F before running fill_insns. */
256static void
257init_fence_for_scheduling (fence_t f)
258{
259 FENCE_BNDS (f) = NULL;
260 FENCE_PROCESSED_P (f) = false;
261 FENCE_SCHEDULED_P (f) = false;
262}
263
264/* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
265static void
48e1416a 266flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
2f3c9801 267 insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
48e1416a 268 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
abb9c563 269 int cycle, int cycle_issued_insns, int issue_more,
e1ab7874 270 bool starts_cycle_p, bool after_stall_p)
271{
272 fence_t f;
273
274 _list_add (lp);
275 f = FLIST_FENCE (*lp);
276
277 FENCE_INSN (f) = insn;
278
279 gcc_assert (state != NULL);
280 FENCE_STATE (f) = state;
281
282 FENCE_CYCLE (f) = cycle;
283 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
284 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
285 FENCE_AFTER_STALL_P (f) = after_stall_p;
286
287 gcc_assert (dc != NULL);
288 FENCE_DC (f) = dc;
289
290 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
291 FENCE_TC (f) = tc;
292
293 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
abb9c563 294 FENCE_ISSUE_MORE (f) = issue_more;
e1ab7874 295 FENCE_EXECUTING_INSNS (f) = executing_insns;
296 FENCE_READY_TICKS (f) = ready_ticks;
297 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
298 FENCE_SCHED_NEXT (f) = sched_next;
299
300 init_fence_for_scheduling (f);
301}
302
303/* Remove the head node of the list pointed to by LP. */
304static void
305flist_remove (flist_t *lp)
306{
307 if (FENCE_INSN (FLIST_FENCE (*lp)))
308 fence_clear (FLIST_FENCE (*lp));
309 _list_remove (lp);
310}
311
312/* Clear the fence list pointed to by LP. */
313void
314flist_clear (flist_t *lp)
315{
316 while (*lp)
317 flist_remove (lp);
318}
319
320/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
321void
322def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
323{
324 def_t d;
48e1416a 325
e1ab7874 326 _list_add (dl);
327 d = DEF_LIST_DEF (*dl);
328
329 d->orig_insn = original_insn;
330 d->crosses_call = crosses_call;
331}
332\f
333
334/* Functions to work with target contexts. */
335
48e1416a 336/* Bulk target context. It is convenient for debugging purposes to ensure
e1ab7874 337 that there are no uninitialized (null) target contexts. */
338static tc_t bulk_tc = (tc_t) 1;
339
48e1416a 340/* Target hooks wrappers. In the future we can provide some default
e1ab7874 341 implementations for them. */
342
343/* Allocate a store for the target context. */
344static tc_t
345alloc_target_context (void)
346{
347 return (targetm.sched.alloc_sched_context
348 ? targetm.sched.alloc_sched_context () : bulk_tc);
349}
350
351/* Init target context TC.
352 If CLEAN_P is true, then make TC as it is at the beginning of the scheduler.
353 Otherwise, copy the current backend context to TC. */
354static void
355init_target_context (tc_t tc, bool clean_p)
356{
357 if (targetm.sched.init_sched_context)
358 targetm.sched.init_sched_context (tc, clean_p);
359}
360
361/* Allocate and initialize a target context. Meaning of CLEAN_P is the same as
362 in init_target_context (). */
363tc_t
364create_target_context (bool clean_p)
365{
366 tc_t tc = alloc_target_context ();
367
368 init_target_context (tc, clean_p);
369 return tc;
370}
371
372/* Copy TC to the current backend context. */
373void
374set_target_context (tc_t tc)
375{
376 if (targetm.sched.set_sched_context)
377 targetm.sched.set_sched_context (tc);
378}
379
380/* TC is about to be destroyed. Free any internal data. */
381static void
382clear_target_context (tc_t tc)
383{
384 if (targetm.sched.clear_sched_context)
385 targetm.sched.clear_sched_context (tc);
386}
387
388/* Clear and free TC. */
389static void
390delete_target_context (tc_t tc)
391{
392 clear_target_context (tc);
393
394 if (targetm.sched.free_sched_context)
395 targetm.sched.free_sched_context (tc);
396}
397
398/* Make a copy of FROM in TO.
399 NB: Maybe this should be a hook. */
400static void
401copy_target_context (tc_t to, tc_t from)
402{
403 tc_t tmp = create_target_context (false);
404
405 set_target_context (from);
406 init_target_context (to, false);
407
408 set_target_context (tmp);
409 delete_target_context (tmp);
410}
411
412/* Create a copy of TC. */
413static tc_t
414create_copy_of_target_context (tc_t tc)
415{
416 tc_t copy = alloc_target_context ();
417
418 copy_target_context (copy, tc);
419
420 return copy;
421}
422
423/* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
424 is the same as in init_target_context (). */
425void
426reset_target_context (tc_t tc, bool clean_p)
427{
428 clear_target_context (tc);
429 init_target_context (tc, clean_p);
430}
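
/* A sketch of how the wrappers above combine (hypothetical usage; in
   the scheduler proper the contexts live inside fences):

     tc_t tc = create_target_context (true);    ...save a clean context
     ...
     set_target_context (tc);                   ...make TC current
     ...
     delete_target_context (tc);                ...clear and free it  */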
431\f
48e1416a 432/* Functions to work with dependence contexts.
68e419a1 433 Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
e1ab7874 434 context. It accumulates information about processed insns to decide if
435 the current insn is dependent on the processed ones. */
436
437/* Make a copy of FROM in TO. */
438static void
439copy_deps_context (deps_t to, deps_t from)
440{
d9ab2038 441 init_deps (to, false);
e1ab7874 442 deps_join (to, from);
443}
444
445/* Allocate store for dep context. */
446static deps_t
447alloc_deps_context (void)
448{
68e419a1 449 return XNEW (struct deps_desc);
e1ab7874 450}
451
452/* Allocate and initialize dep context. */
453static deps_t
454create_deps_context (void)
455{
456 deps_t dc = alloc_deps_context ();
457
d9ab2038 458 init_deps (dc, false);
e1ab7874 459 return dc;
460}
461
462/* Create a copy of FROM. */
463static deps_t
464create_copy_of_deps_context (deps_t from)
465{
466 deps_t to = alloc_deps_context ();
467
468 copy_deps_context (to, from);
469 return to;
470}
471
472/* Clean up internal data of DC. */
473static void
474clear_deps_context (deps_t dc)
475{
476 free_deps (dc);
477}
478
479/* Clear and free DC. */
480static void
481delete_deps_context (deps_t dc)
482{
483 clear_deps_context (dc);
484 free (dc);
485}
486
487/* Clear and init DC. */
488static void
489reset_deps_context (deps_t dc)
490{
491 clear_deps_context (dc);
d9ab2038 492 init_deps (dc, false);
e1ab7874 493}
494
48e1416a 495/* This structure describes the dependence analysis hooks for advancing
e1ab7874 496 dependence context. */
497static struct sched_deps_info_def advance_deps_context_sched_deps_info =
498 {
499 NULL,
500
501 NULL, /* start_insn */
502 NULL, /* finish_insn */
503 NULL, /* start_lhs */
504 NULL, /* finish_lhs */
505 NULL, /* start_rhs */
506 NULL, /* finish_rhs */
507 haifa_note_reg_set,
508 haifa_note_reg_clobber,
509 haifa_note_reg_use,
510 NULL, /* note_mem_dep */
511 NULL, /* note_dep */
512
513 0, 0, 0
514 };
515
516/* Process INSN and record its impact in DC. */
517void
518advance_deps_context (deps_t dc, insn_t insn)
519{
520 sched_deps_info = &advance_deps_context_sched_deps_info;
2f3c9801 521 deps_analyze_insn (dc, insn);
e1ab7874 522}
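
/* E.g. (a hedged sketch) accumulating the dependence information of all
   insns of a basic block BB into a fresh context would look like:

     deps_t dc = create_deps_context ();
     insn_t insn;

     FOR_BB_INSNS (bb, insn)
       if (INSN_P (insn))
         advance_deps_context (dc, insn);
     ...
     delete_deps_context (dc);  */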
523\f
524
525/* Functions to work with DFA states. */
526
527/* Allocate store for a DFA state. */
528static state_t
529state_alloc (void)
530{
531 return xmalloc (dfa_state_size);
532}
533
534/* Allocate and initialize DFA state. */
535static state_t
536state_create (void)
537{
538 state_t state = state_alloc ();
539
540 state_reset (state);
541 advance_state (state);
542 return state;
543}
544
545/* Free DFA state. */
546static void
547state_free (state_t state)
548{
549 free (state);
550}
551
552/* Make a copy of FROM in TO. */
553static void
554state_copy (state_t to, state_t from)
555{
556 memcpy (to, from, dfa_state_size);
557}
558
559/* Create a copy of FROM. */
560static state_t
561state_create_copy (state_t from)
562{
563 state_t to = state_alloc ();
564
565 state_copy (to, from);
566 return to;
567}
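
/* As a DFA state is just a block of dfa_state_size bytes, these helpers
   make checkpointing the automaton trivial.  A sketch, for any state_t
   STATE the caller owns:

     state_t saved = state_create_copy (state);
     ...tentatively advance STATE...
     state_copy (state, saved);    ...roll back
     state_free (saved);  */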
568\f
569
570/* Functions to work with fences. */
571
572/* Clear the fence. */
573static void
574fence_clear (fence_t f)
575{
576 state_t s = FENCE_STATE (f);
577 deps_t dc = FENCE_DC (f);
578 void *tc = FENCE_TC (f);
579
580 ilist_clear (&FENCE_BNDS (f));
581
582 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
583 || (s == NULL && dc == NULL && tc == NULL));
584
dd045aee 585 free (s);
e1ab7874 586
587 if (dc != NULL)
588 delete_deps_context (dc);
589
590 if (tc != NULL)
591 delete_target_context (tc);
f1f41a6c 592 vec_free (FENCE_EXECUTING_INSNS (f));
e1ab7874 593 free (FENCE_READY_TICKS (f));
594 FENCE_READY_TICKS (f) = NULL;
595}
596
597/* Init a list of fences with successors of OLD_FENCE. */
598void
599init_fences (insn_t old_fence)
600{
601 insn_t succ;
602 succ_iterator si;
603 bool first = true;
604 int ready_ticks_size = get_max_uid () + 1;
48e1416a 605
606 FOR_EACH_SUCC_1 (succ, si, old_fence,
e1ab7874 607 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
608 {
48e1416a 609
e1ab7874 610 if (first)
611 first = false;
612 else
613 gcc_assert (flag_sel_sched_pipelining_outer_loops);
614
615 flist_add (&fences, succ,
616 state_create (),
617 create_deps_context () /* dc */,
618 create_target_context (true) /* tc */,
2f3c9801 619 NULL /* last_scheduled_insn */,
e1ab7874 620 NULL, /* executing_insns */
621 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
622 ready_ticks_size,
2f3c9801 623 NULL /* sched_next */,
48e1416a 624 1 /* cycle */, 0 /* cycle_issued_insns */,
abb9c563 625 issue_rate, /* issue_more */
48e1416a 626 1 /* starts_cycle_p */, 0 /* after_stall_p */);
e1ab7874 627 }
628}
629
630/* Merge two fences (filling fields of fence F with resulting values) by
631 the following rules: 1) state, target context and last scheduled insn are
48e1416a 632 propagated from the fallthrough edge if it is available;
e1ab7874 633 2) deps context and cycle are propagated from the more probable edge;
48e1416a 634 3) all other fields are set to the corresponding constant values.
e1ab7874 635
48e1416a 636 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
abb9c563 637 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
638 and AFTER_STALL_P are the corresponding fields of the second fence. */
e1ab7874 639static void
640merge_fences (fence_t f, insn_t insn,
48e1416a 641 state_t state, deps_t dc, void *tc,
2f3c9801 642 rtx_insn *last_scheduled_insn,
643 vec<rtx_insn *, va_gc> *executing_insns,
e1ab7874 644 int *ready_ticks, int ready_ticks_size,
abb9c563 645 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
e1ab7874 646{
647 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
648
649 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
650 && !sched_next && !FENCE_SCHED_NEXT (f));
651
48e1416a 652 /* Check if we can decide along which path the fences came.
e1ab7874 653 If we can't (or don't want to), reset all. */
654 if (last_scheduled_insn == NULL
655 || last_scheduled_insn_old == NULL
48e1416a 656 /* This is a case when INSN is reachable on several paths from
657 one insn (this can happen when pipelining of outer loops is on and
658 there are two edges: one going around the inner loop and the other
e1ab7874 659 right through it; in such a case just reset everything). */
660 || last_scheduled_insn == last_scheduled_insn_old)
661 {
662 state_reset (FENCE_STATE (f));
663 state_free (state);
48e1416a 664
e1ab7874 665 reset_deps_context (FENCE_DC (f));
666 delete_deps_context (dc);
48e1416a 667
e1ab7874 668 reset_target_context (FENCE_TC (f), true);
669 delete_target_context (tc);
670
671 if (cycle > FENCE_CYCLE (f))
672 FENCE_CYCLE (f) = cycle;
673
674 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
abb9c563 675 FENCE_ISSUE_MORE (f) = issue_rate;
f1f41a6c 676 vec_free (executing_insns);
e1ab7874 677 free (ready_ticks);
678 if (FENCE_EXECUTING_INSNS (f))
f1f41a6c 679 FENCE_EXECUTING_INSNS (f)->block_remove (0,
680 FENCE_EXECUTING_INSNS (f)->length ());
e1ab7874 681 if (FENCE_READY_TICKS (f))
682 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
683 }
684 else
685 {
686 edge edge_old = NULL, edge_new = NULL;
687 edge candidate;
688 succ_iterator si;
689 insn_t succ;
48e1416a 690
e1ab7874 691 /* Find fallthrough edge. */
692 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
7f58c05e 693 candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
e1ab7874 694
695 if (!candidate
696 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
697 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
698 {
699 /* No fallthrough edge leading to basic block of INSN. */
700 state_reset (FENCE_STATE (f));
701 state_free (state);
48e1416a 702
e1ab7874 703 reset_target_context (FENCE_TC (f), true);
704 delete_target_context (tc);
48e1416a 705
e1ab7874 706 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
abb9c563 707 FENCE_ISSUE_MORE (f) = issue_rate;
e1ab7874 708 }
709 else
710 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
711 {
48e1416a 712 /* It would be weird if the same insn were the successor of several
e1ab7874 713 fallthrough edges. */
714 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
715 != BLOCK_FOR_INSN (last_scheduled_insn_old));
716
717 state_free (FENCE_STATE (f));
718 FENCE_STATE (f) = state;
719
720 delete_target_context (FENCE_TC (f));
721 FENCE_TC (f) = tc;
722
723 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
abb9c563 724 FENCE_ISSUE_MORE (f) = issue_more;
e1ab7874 725 }
726 else
727 {
728 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
729 state_free (state);
730 delete_target_context (tc);
731
732 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
733 != BLOCK_FOR_INSN (last_scheduled_insn));
734 }
735
736 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
737 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
738 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
739 {
740 if (succ == insn)
741 {
742 /* No same successor allowed from several edges. */
743 gcc_assert (!edge_old);
744 edge_old = si.e1;
745 }
746 }
747 /* Find edge of second predecessor (last_scheduled_insn->insn). */
748 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
749 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
750 {
751 if (succ == insn)
752 {
753 /* No same successor allowed from several edges. */
754 gcc_assert (!edge_new);
755 edge_new = si.e1;
756 }
757 }
758
759 /* Check if we can choose the most probable predecessor. */
760 if (edge_old == NULL || edge_new == NULL)
761 {
762 reset_deps_context (FENCE_DC (f));
763 delete_deps_context (dc);
f1f41a6c 764 vec_free (executing_insns);
e1ab7874 765 free (ready_ticks);
48e1416a 766
e1ab7874 767 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
768 if (FENCE_EXECUTING_INSNS (f))
f1f41a6c 769 FENCE_EXECUTING_INSNS (f)->block_remove (0,
770 FENCE_EXECUTING_INSNS (f)->length ());
e1ab7874 771 if (FENCE_READY_TICKS (f))
772 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
773 }
774 else
775 if (edge_new->probability > edge_old->probability)
776 {
777 delete_deps_context (FENCE_DC (f));
778 FENCE_DC (f) = dc;
f1f41a6c 779 vec_free (FENCE_EXECUTING_INSNS (f));
e1ab7874 780 FENCE_EXECUTING_INSNS (f) = executing_insns;
781 free (FENCE_READY_TICKS (f));
782 FENCE_READY_TICKS (f) = ready_ticks;
783 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
784 FENCE_CYCLE (f) = cycle;
785 }
786 else
787 {
788 /* Leave DC and CYCLE untouched. */
789 delete_deps_context (dc);
f1f41a6c 790 vec_free (executing_insns);
e1ab7874 791 free (ready_ticks);
792 }
793 }
794
795 /* Fill remaining invariant fields. */
796 if (after_stall_p)
797 FENCE_AFTER_STALL_P (f) = 1;
798
799 FENCE_ISSUED_INSNS (f) = 0;
800 FENCE_STARTS_CYCLE_P (f) = 1;
801 FENCE_SCHED_NEXT (f) = NULL;
802}
803
48e1416a 804/* Add a new fence to NEW_FENCES list, initializing it from all
e1ab7874 805 other parameters. */
806static void
807add_to_fences (flist_tail_t new_fences, insn_t insn,
2f3c9801 808 state_t state, deps_t dc, void *tc,
809 rtx_insn *last_scheduled_insn,
810 vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
811 int ready_ticks_size, rtx_insn *sched_next, int cycle,
abb9c563 812 int cycle_issued_insns, int issue_rate,
813 bool starts_cycle_p, bool after_stall_p)
e1ab7874 814{
815 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
816
817 if (! f)
818 {
819 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
48e1416a 820 last_scheduled_insn, executing_insns, ready_ticks,
e1ab7874 821 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
abb9c563 822 issue_rate, starts_cycle_p, after_stall_p);
e1ab7874 823
824 FLIST_TAIL_TAILP (new_fences)
825 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
826 }
827 else
828 {
48e1416a 829 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
830 executing_insns, ready_ticks, ready_ticks_size,
abb9c563 831 sched_next, cycle, issue_rate, after_stall_p);
e1ab7874 832 }
833}
834
835/* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
836void
837move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
838{
839 fence_t f, old;
840 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
841
842 old = FLIST_FENCE (old_fences);
48e1416a 843 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
e1ab7874 844 FENCE_INSN (FLIST_FENCE (old_fences)));
845 if (f)
846 {
847 merge_fences (f, old->insn, old->state, old->dc, old->tc,
848 old->last_scheduled_insn, old->executing_insns,
849 old->ready_ticks, old->ready_ticks_size,
abb9c563 850 old->sched_next, old->cycle, old->issue_more,
e1ab7874 851 old->after_stall_p);
852 }
853 else
854 {
855 _list_add (tailp);
856 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
857 *FLIST_FENCE (*tailp) = *old;
858 init_fence_for_scheduling (FLIST_FENCE (*tailp));
859 }
860 FENCE_INSN (old) = NULL;
861}
862
48e1416a 863/* Add a new fence to NEW_FENCES list and initialize most of its data
e1ab7874 864 as a clean one. */
865void
866add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
867{
868 int ready_ticks_size = get_max_uid () + 1;
48e1416a 869
e1ab7874 870 add_to_fences (new_fences,
871 succ, state_create (), create_deps_context (),
872 create_target_context (true),
2f3c9801 873 NULL, NULL,
e1ab7874 874 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
2f3c9801 875 NULL, FENCE_CYCLE (fence) + 1,
abb9c563 876 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
e1ab7874 877}
878
48e1416a 879/* Add a new fence to NEW_FENCES list and initialize all of its data
e1ab7874 880 from FENCE and SUCC. */
881void
882add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
883{
48e1416a 884 int * new_ready_ticks
e1ab7874 885 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
48e1416a 886
e1ab7874 887 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
888 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
889 add_to_fences (new_fences,
890 succ, state_create_copy (FENCE_STATE (fence)),
891 create_copy_of_deps_context (FENCE_DC (fence)),
892 create_copy_of_target_context (FENCE_TC (fence)),
48e1416a 893 FENCE_LAST_SCHEDULED_INSN (fence),
f1f41a6c 894 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
e1ab7874 895 new_ready_ticks,
896 FENCE_READY_TICKS_SIZE (fence),
897 FENCE_SCHED_NEXT (fence),
898 FENCE_CYCLE (fence),
899 FENCE_ISSUED_INSNS (fence),
abb9c563 900 FENCE_ISSUE_MORE (fence),
e1ab7874 901 FENCE_STARTS_CYCLE_P (fence),
902 FENCE_AFTER_STALL_P (fence));
903}
904\f
905
906/* Functions to work with regset and nop pools. */
907
908/* Return a regset from the pool. It might have some of the bits set
909 from the previous usage. */
910regset
911get_regset_from_pool (void)
912{
913 regset rs;
914
915 if (regset_pool.n != 0)
916 rs = regset_pool.v[--regset_pool.n];
917 else
918 /* We need to create the regset. */
919 {
920 rs = ALLOC_REG_SET (&reg_obstack);
921
922 if (regset_pool.nn == regset_pool.ss)
923 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
924 (regset_pool.ss = 2 * regset_pool.ss + 1));
925 regset_pool.vv[regset_pool.nn++] = rs;
926 }
927
928 regset_pool.diff++;
929
930 return rs;
931}
932
933/* Same as above, but returns an empty regset. */
934regset
935get_clear_regset_from_pool (void)
936{
937 regset rs = get_regset_from_pool ();
938
939 CLEAR_REG_SET (rs);
940 return rs;
941}
942
943/* Return regset RS to the pool for future use. */
944void
945return_regset_to_pool (regset rs)
946{
bc9cb5ed 947 gcc_assert (rs);
e1ab7874 948 regset_pool.diff--;
949
950 if (regset_pool.n == regset_pool.s)
951 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
952 (regset_pool.s = 2 * regset_pool.s + 1));
953 regset_pool.v[regset_pool.n++] = rs;
954}
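
/* Usage sketch (hypothetical): every regset taken from the pool must
   eventually be returned to it, otherwise free_regset_pool will assert
   on the leak recorded in regset_pool.diff:

     regset tmp = get_clear_regset_from_pool ();
     ...fill and use TMP...
     return_regset_to_pool (tmp);  */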
955
dde7ed1e 956#ifdef ENABLE_CHECKING
e1ab7874 957/* This is used as a qsort callback for sorting regset pool stacks.
958 X and XX are addresses of two regsets. They are never equal. */
959static int
960cmp_v_in_regset_pool (const void *x, const void *xx)
961{
c72f63ac 962 uintptr_t r1 = (uintptr_t) *((const regset *) x);
963 uintptr_t r2 = (uintptr_t) *((const regset *) xx);
964 if (r1 > r2)
965 return 1;
966 else if (r1 < r2)
967 return -1;
968 gcc_unreachable ();
e1ab7874 969}
dde7ed1e 970#endif
e1ab7874 971
972/* Free the regset pool possibly checking for memory leaks. */
973void
974free_regset_pool (void)
975{
976#ifdef ENABLE_CHECKING
977 {
978 regset *v = regset_pool.v;
979 int i = 0;
980 int n = regset_pool.n;
48e1416a 981
e1ab7874 982 regset *vv = regset_pool.vv;
983 int ii = 0;
984 int nn = regset_pool.nn;
48e1416a 985
e1ab7874 986 int diff = 0;
48e1416a 987
e1ab7874 988 gcc_assert (n <= nn);
48e1416a 989
e1ab7874 990 /* Sort both vectors so it will be possible to compare them. */
991 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
992 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
48e1416a 993
e1ab7874 994 while (ii < nn)
995 {
996 if (v[i] == vv[ii])
997 i++;
998 else
999 /* VV[II] was lost. */
1000 diff++;
48e1416a 1001
e1ab7874 1002 ii++;
1003 }
48e1416a 1004
e1ab7874 1005 gcc_assert (diff == regset_pool.diff);
1006 }
1007#endif
48e1416a 1008
e1ab7874 1009 /* If not true - we have a memory leak. */
1010 gcc_assert (regset_pool.diff == 0);
48e1416a 1011
e1ab7874 1012 while (regset_pool.n)
1013 {
1014 --regset_pool.n;
1015 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1016 }
1017
1018 free (regset_pool.v);
1019 regset_pool.v = NULL;
1020 regset_pool.s = 0;
48e1416a 1021
e1ab7874 1022 free (regset_pool.vv);
1023 regset_pool.vv = NULL;
1024 regset_pool.nn = 0;
1025 regset_pool.ss = 0;
1026
1027 regset_pool.diff = 0;
1028}
1029\f
1030
48e1416a 1031/* Functions to work with nop pools. NOP insns are used as temporary
1032 placeholders of the insns being scheduled to allow correct update of
e1ab7874 1033 the data sets. When update is finished, NOPs are deleted. */
1034
1035/* A vinsn that is used to represent a nop. This vinsn is shared among all
1036 nops sel-sched generates. */
1037static vinsn_t nop_vinsn = NULL;
1038
1039/* Emit a nop before INSN, taking it from pool. */
1040insn_t
1041get_nop_from_pool (insn_t insn)
1042{
2f3c9801 1043 rtx nop_pat;
e1ab7874 1044 insn_t nop;
1045 bool old_p = nop_pool.n != 0;
1046 int flags;
1047
1048 if (old_p)
2f3c9801 1049 nop_pat = nop_pool.v[--nop_pool.n];
e1ab7874 1050 else
2f3c9801 1051 nop_pat = nop_pattern;
e1ab7874 1052
2f3c9801 1053 nop = emit_insn_before (nop_pat, insn);
e1ab7874 1054
1055 if (old_p)
1056 flags = INSN_INIT_TODO_SSID;
1057 else
1058 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1059
1060 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1061 sel_init_new_insn (nop, flags);
1062
1063 return nop;
1064}
1065
1066/* Remove NOP from the instruction stream and return it to the pool. */
1067void
9845d120 1068return_nop_to_pool (insn_t nop, bool full_tidying)
e1ab7874 1069{
1070 gcc_assert (INSN_IN_STREAM_P (nop));
9845d120 1071 sel_remove_insn (nop, false, full_tidying);
e1ab7874 1072
93ff53d3 1073 /* We'll recycle this nop. */
dd1286fb 1074 nop->set_undeleted ();
93ff53d3 1075
e1ab7874 1076 if (nop_pool.n == nop_pool.s)
2f3c9801 1077 nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
e1ab7874 1078 (nop_pool.s = 2 * nop_pool.s + 1));
1079 nop_pool.v[nop_pool.n++] = nop;
1080}
1081
1082/* Free the nop pool. */
1083void
1084free_nop_pool (void)
1085{
1086 nop_pool.n = 0;
1087 nop_pool.s = 0;
1088 free (nop_pool.v);
1089 nop_pool.v = NULL;
1090}
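
/* The intended nop lifecycle, as a sketch: a nop emitted while the data
   sets are updated is later removed from the stream and recycled, and
   free_nop_pool releases whatever is left at the end of scheduling:

     insn_t nop = get_nop_from_pool (insn);
     ...update the availability / liveness sets...
     return_nop_to_pool (nop, true);  */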
1091\f
1092
48e1416a 1093/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
e1ab7874 1094 The callback is given two rtxes XX and YY and writes the new rtxes
1095 to NX and NY in case something needs to be skipped. */
1096static int
1097skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1098{
1099 const_rtx x = *xx;
1100 const_rtx y = *yy;
48e1416a 1101
e1ab7874 1102 if (GET_CODE (x) == UNSPEC
1103 && (targetm.sched.skip_rtx_p == NULL
1104 || targetm.sched.skip_rtx_p (x)))
1105 {
1106 *nx = XVECEXP (x, 0, 0);
1107 *ny = CONST_CAST_RTX (y);
1108 return 1;
1109 }
48e1416a 1110
e1ab7874 1111 if (GET_CODE (y) == UNSPEC
1112 && (targetm.sched.skip_rtx_p == NULL
1113 || targetm.sched.skip_rtx_p (y)))
1114 {
1115 *nx = CONST_CAST_RTX (x);
1116 *ny = XVECEXP (y, 0, 0);
1117 return 1;
1118 }
48e1416a 1119
e1ab7874 1120 return 0;
1121}
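
/* E.g. on ia64 a speculative load is wrapped in an UNSPEC, so with this
   callback a pattern of the form (unspec [X] ...) can compare equal to
   the bare X it wraps, provided the target's skip_rtx_p hook accepts
   the wrapper.  */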
1122
48e1416a 1123/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
e1ab7874 1124 to support ia64 speculation. When changes are needed, new rtx X and new mode
1125 NMODE are written, and the callback returns true. */
1126static int
3754d046 1127hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
1128 rtx *nx, machine_mode* nmode)
e1ab7874 1129{
48e1416a 1130 if (GET_CODE (x) == UNSPEC
e1ab7874 1131 && targetm.sched.skip_rtx_p
1132 && targetm.sched.skip_rtx_p (x))
1133 {
1134 *nx = XVECEXP (x, 0 ,0);
8458f4ca 1135 *nmode = VOIDmode;
e1ab7874 1136 return 1;
1137 }
48e1416a 1138
e1ab7874 1139 return 0;
1140}
1141
1142/* Return true if LHS and RHS are ok to be scheduled separately. */
1143static bool
1144lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1145{
1146 if (lhs == NULL || rhs == NULL)
1147 return false;
1148
e913b5cd 1149 /* Do not schedule constants as rhs: there is no point in using a reg
1150 if a const can be used. Moreover, scheduling a const as rhs may lead
1151 to a mode mismatch because consts don't have modes, but they could be
1152 merged from branches where the same const is used in different modes. */
e1ab7874 1153 if (CONSTANT_P (rhs))
1154 return false;
1155
1156 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1157 if (COMPARISON_P (rhs))
1158 return false;
1159
1160 /* Do not allow single REG to be an rhs. */
1161 if (REG_P (rhs))
1162 return false;
1163
48e1416a 1164 /* See comment at find_used_regs_1 (*1) for explanation of this
e1ab7874 1165 restriction. */
1166 /* FIXME: remove this later. */
1167 if (MEM_P (lhs))
1168 return false;
1169
1170 /* This will filter all tricky things like ZERO_EXTRACT etc.
1171 For now we don't handle it. */
1172 if (!REG_P (lhs) && !MEM_P (lhs))
1173 return false;
1174
1175 return true;
1176}
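
/* For example, an insn like

     (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3)))

   is separable: its rhs may be computed into a different (renamed)
   register.  Stores to memory, comparisons and plain register copies
   are rejected by the checks above.  */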
1177
48e1416a 1178/* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1179 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
e1ab7874 1180 used e.g. for insns from recovery blocks. */
1181static void
1182vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1183{
1184 hash_rtx_callback_function hrcf;
1185 int insn_class;
1186
69c5a18c 1187 VINSN_INSN_RTX (vi) = insn;
e1ab7874 1188 VINSN_COUNT (vi) = 0;
1189 vi->cost = -1;
48e1416a 1190
bc9cb5ed 1191 if (INSN_NOP_P (insn))
1192 return;
1193
e1ab7874 1194 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1195 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1196 else
1197 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
48e1416a 1198
e1ab7874 1199 /* Hash vinsn depending on whether it is separable or not. */
1200 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1201 if (VINSN_SEPARABLE_P (vi))
1202 {
1203 rtx rhs = VINSN_RHS (vi);
1204
1205 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1206 NULL, NULL, false, hrcf);
1207 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1208 VOIDmode, NULL, NULL,
1209 false, hrcf);
1210 }
1211 else
1212 {
1213 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1214 NULL, NULL, false, hrcf);
1215 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1216 }
48e1416a 1217
e1ab7874 1218 insn_class = haifa_classify_insn (insn);
1219 if (insn_class >= 2
1220 && (!targetm.sched.get_insn_spec_ds
1221 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1222 == 0)))
1223 VINSN_MAY_TRAP_P (vi) = true;
1224 else
1225 VINSN_MAY_TRAP_P (vi) = false;
1226}
1227
1228/* Indicate that VI has become the part of an rtx object. */
1229void
1230vinsn_attach (vinsn_t vi)
1231{
1232 /* Assert that VI is not pending for deletion. */
1233 gcc_assert (VINSN_INSN_RTX (vi));
1234
1235 VINSN_COUNT (vi)++;
1236}
1237
48e1416a 1238/* Create and init VI from the INSN. Use FORCE_UNIQUE_P for determining the
e1ab7874 1239 VINSN_TYPE (VI). */
1240static vinsn_t
1241vinsn_create (insn_t insn, bool force_unique_p)
1242{
1243 vinsn_t vi = XCNEW (struct vinsn_def);
1244
1245 vinsn_init (vi, insn, force_unique_p);
1246 return vi;
1247}
1248
1249/* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1250 the copy. */
48e1416a 1251vinsn_t
e1ab7874 1252vinsn_copy (vinsn_t vi, bool reattach_p)
1253{
04d073df 1254 rtx_insn *copy;
e1ab7874 1255 bool unique = VINSN_UNIQUE_P (vi);
1256 vinsn_t new_vi;
48e1416a 1257
e1ab7874 1258 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1259 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1260 if (reattach_p)
1261 {
1262 vinsn_detach (vi);
1263 vinsn_attach (new_vi);
1264 }
1265
1266 return new_vi;
1267}
1268
1269/* Delete the VI vinsn and free its data. */
1270static void
1271vinsn_delete (vinsn_t vi)
1272{
1273 gcc_assert (VINSN_COUNT (vi) == 0);
1274
bc9cb5ed 1275 if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1276 {
1277 return_regset_to_pool (VINSN_REG_SETS (vi));
1278 return_regset_to_pool (VINSN_REG_USES (vi));
1279 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1280 }
e1ab7874 1281
1282 free (vi);
1283}
1284
48e1416a 1285/* Indicate that VI is no longer a part of some rtx object.
e1ab7874 1286 Remove VI if it is no longer needed. */
1287void
1288vinsn_detach (vinsn_t vi)
1289{
1290 gcc_assert (VINSN_COUNT (vi) > 0);
1291
1292 if (--VINSN_COUNT (vi) == 0)
1293 vinsn_delete (vi);
1294}
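
/* A reference-counting sketch: every object holding VI attaches it, and
   the last detach frees it:

     vinsn_attach (vi);     ...VINSN_COUNT (vi) becomes N + 1
     ...
     vinsn_detach (vi);     ...back to N; VI is deleted when it hits 0  */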
1295
1296/* Return TRUE if VI is a conditional branch. */
1297bool
1298vinsn_cond_branch_p (vinsn_t vi)
1299{
1300 insn_t insn;
1301
1302 if (!VINSN_UNIQUE_P (vi))
1303 return false;
1304
1305 insn = VINSN_INSN_RTX (vi);
1306 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1307 return false;
1308
1309 return control_flow_insn_p (insn);
1310}
1311
1312/* Return latency of INSN. */
1313static int
ed3e6e5d 1314sel_insn_rtx_cost (rtx_insn *insn)
e1ab7874 1315{
1316 int cost;
1317
1318 /* A USE insn, or something else we don't need to
1319 understand. We can't pass these directly to
1320 result_ready_cost or insn_default_latency because it will
1321 trigger a fatal error for unrecognizable insns. */
1322 if (recog_memoized (insn) < 0)
1323 cost = 0;
1324 else
1325 {
1326 cost = insn_default_latency (insn);
1327
1328 if (cost < 0)
1329 cost = 0;
1330 }
1331
1332 return cost;
1333}
1334
1335/* Return the cost of the VI.
1336 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1337int
1338sel_vinsn_cost (vinsn_t vi)
1339{
1340 int cost = vi->cost;
1341
1342 if (cost < 0)
1343 {
1344 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1345 vi->cost = cost;
1346 }
1347
1348 return cost;
1349}
1350\f
1351
1352/* Functions for insn emitting. */
1353
1354/* Emit new insn after AFTER based on PATTERN and initialize its data from
1355 EXPR and SEQNO. */
1356insn_t
1357sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1358{
1359 insn_t new_insn;
1360
1361 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1362
1363 new_insn = emit_insn_after (pattern, after);
1364 set_insn_init (expr, NULL, seqno);
1365 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1366
1367 return new_insn;
1368}
1369
1370/* Force newly generated vinsns to be unique. */
1371static bool init_insn_force_unique_p = false;
1372
1373/* Emit new speculation recovery insn after AFTER based on PATTERN and
1374 initialize its data from EXPR and SEQNO. */
1375insn_t
1376sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1377 insn_t after)
1378{
1379 insn_t insn;
1380
1381 gcc_assert (!init_insn_force_unique_p);
1382
1383 init_insn_force_unique_p = true;
1384 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1385 CANT_MOVE (insn) = 1;
1386 init_insn_force_unique_p = false;
1387
1388 return insn;
1389}
1390
1391/* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
48e1416a 1392 take it as a new vinsn instead of EXPR's vinsn.
1393 We simplify insns later, after scheduling region in
e1ab7874 1394 simplify_changed_insns. */
1395insn_t
48e1416a 1396sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
e1ab7874 1397 insn_t after)
1398{
1399 expr_t emit_expr;
1400 insn_t insn;
1401 int flags;
48e1416a 1402
1403 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
e1ab7874 1404 seqno);
1405 insn = EXPR_INSN_RTX (emit_expr);
2b7454f2 1406
1407 /* The insn may come from the transformation cache, which may hold already
1408 deleted insns, so mark it as not deleted. */
dd1286fb 1409 insn->set_undeleted ();
2b7454f2 1410
48e1416a 1411 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
e1ab7874 1412
1413 flags = INSN_INIT_TODO_SSID;
1414 if (INSN_LUID (insn) == 0)
1415 flags |= INSN_INIT_TODO_LUID;
1416 sel_init_new_insn (insn, flags);
1417
1418 return insn;
1419}
1420
1421/* Move insn from EXPR after AFTER. */
1422insn_t
1423sel_move_insn (expr_t expr, int seqno, insn_t after)
1424{
1425 insn_t insn = EXPR_INSN_RTX (expr);
1426 basic_block bb = BLOCK_FOR_INSN (after);
1427 insn_t next = NEXT_INSN (after);
1428
1429 /* Assert that in move_op we disconnected this insn properly. */
1430 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
4a57a2e8 1431 SET_PREV_INSN (insn) = after;
1432 SET_NEXT_INSN (insn) = next;
e1ab7874 1433
4a57a2e8 1434 SET_NEXT_INSN (after) = insn;
1435 SET_PREV_INSN (next) = insn;
e1ab7874 1436
1437 /* Update links from insn to bb and vice versa. */
1438 df_insn_change_bb (insn, bb);
1439 if (BB_END (bb) == after)
26bb3cb2 1440 BB_END (bb) = insn;
48e1416a 1441
e1ab7874 1442 prepare_insn_expr (insn, seqno);
1443 return insn;
1444}
1445
1446\f
1447/* Functions to work with right-hand sides. */
1448
48e1416a 1449/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
e1ab7874 1450 VECT and return true when found. Use NEW_VINSN for comparison only when
48e1416a 1451 COMPARE_VINSNS is true. Write to INDP the index on which
1452 the search has stopped, such that inserting the new element at INDP will
e1ab7874 1453 retain VECT's sort order. */
1454static bool
f1f41a6c 1455find_in_history_vect_1 (vec<expr_history_def> vect,
48e1416a 1456 unsigned uid, vinsn_t new_vinsn,
e1ab7874 1457 bool compare_vinsns, int *indp)
1458{
1459 expr_history_def *arr;
f1f41a6c 1460 int i, j, len = vect.length ();
e1ab7874 1461
1462 if (len == 0)
1463 {
1464 *indp = 0;
1465 return false;
1466 }
1467
f1f41a6c 1468 arr = vect.address ();
e1ab7874 1469 i = 0, j = len - 1;
1470
1471 while (i <= j)
1472 {
1473 unsigned auid = arr[i].uid;
48e1416a 1474 vinsn_t avinsn = arr[i].new_expr_vinsn;
e1ab7874 1475
1476 if (auid == uid
48e1416a 1477 /* When undoing transformation on a bookkeeping copy, the new vinsn
1478 may not be exactly equal to the one that is saved in the vector.
e1ab7874 1479 This is because the insn whose copy we're checking was possibly
1480 substituted itself. */
48e1416a 1481 && (! compare_vinsns
e1ab7874 1482 || vinsn_equal_p (avinsn, new_vinsn)))
1483 {
1484 *indp = i;
1485 return true;
1486 }
1487 else if (auid > uid)
1488 break;
1489 i++;
1490 }
1491
1492 *indp = i;
1493 return false;
1494}
1495
48e1416a 1496/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1497 the position found or -1, if no such value is in the vector.
e1ab7874 1498 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1499int
f1f41a6c 1500find_in_history_vect (vec<expr_history_def> vect, rtx insn,
e1ab7874 1501 vinsn_t new_vinsn, bool originators_p)
1502{
1503 int ind;
1504
48e1416a 1505 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
e1ab7874 1506 false, &ind))
1507 return ind;
1508
1509 if (INSN_ORIGINATORS (insn) && originators_p)
1510 {
1511 unsigned uid;
1512 bitmap_iterator bi;
1513
1514 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1515 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1516 return ind;
1517 }
48e1416a 1518
e1ab7874 1519 return -1;
1520}
1521
48e1416a 1522/* Insert new element in a sorted history vector pointed to by PVECT,
1523 if it is not there already. The element is searched using
e1ab7874 1524 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1525 the history of a transformation. */
1526void
f1f41a6c 1527insert_in_history_vect (vec<expr_history_def> *pvect,
e1ab7874 1528 unsigned uid, enum local_trans_type type,
48e1416a 1529 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
e1ab7874 1530 ds_t spec_ds)
1531{
f1f41a6c 1532 vec<expr_history_def> vect = *pvect;
e1ab7874 1533 expr_history_def temp;
1534 bool res;
1535 int ind;
1536
1537 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1538
1539 if (res)
1540 {
f1f41a6c 1541 expr_history_def *phist = &vect[ind];
e1ab7874 1542
48e1416a 1543 /* It is possible that speculation types of expressions that were
e1ab7874 1544 propagated through different paths will be different here. In this
1545 case, merge the status to get the correct check later. */
1546 if (phist->spec_ds != spec_ds)
1547 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1548 return;
1549 }
48e1416a 1550
e1ab7874 1551 temp.uid = uid;
1552 temp.old_expr_vinsn = old_expr_vinsn;
48e1416a 1553 temp.new_expr_vinsn = new_expr_vinsn;
e1ab7874 1554 temp.spec_ds = spec_ds;
1555 temp.type = type;
1556
1557 vinsn_attach (old_expr_vinsn);
1558 vinsn_attach (new_expr_vinsn);
f1f41a6c 1559 vect.safe_insert (ind, temp);
e1ab7874 1560 *pvect = vect;
1561}
1562
1563/* Free history vector PVECT. */
1564static void
f1f41a6c 1565free_history_vect (vec<expr_history_def> &pvect)
e1ab7874 1566{
1567 unsigned i;
1568 expr_history_def *phist;
1569
f1f41a6c 1570 if (! pvect.exists ())
e1ab7874 1571 return;
48e1416a 1572
f1f41a6c 1573 for (i = 0; pvect.iterate (i, &phist); i++)
e1ab7874 1574 {
1575 vinsn_detach (phist->old_expr_vinsn);
1576 vinsn_detach (phist->new_expr_vinsn);
1577 }
48e1416a 1578
f1f41a6c 1579 pvect.release ();
e1ab7874 1580}
1581
c53624fb 1582/* Merge vector FROM into PVECT. */
1583static void
f1f41a6c 1584merge_history_vect (vec<expr_history_def> *pvect,
1585 vec<expr_history_def> from)
c53624fb 1586{
1587 expr_history_def *phist;
1588 int i;
1589
1590 /* We keep this vector sorted. */
f1f41a6c 1591 for (i = 0; from.iterate (i, &phist); i++)
c53624fb 1592 insert_in_history_vect (pvect, phist->uid, phist->type,
1593 phist->old_expr_vinsn, phist->new_expr_vinsn,
1594 phist->spec_ds);
1595}
e1ab7874 1596
1597/* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1598bool
1599vinsn_equal_p (vinsn_t x, vinsn_t y)
1600{
1601 rtx_equal_p_callback_function repcf;
1602
1603 if (x == y)
1604 return true;
1605
1606 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1607 return false;
1608
1609 if (VINSN_HASH (x) != VINSN_HASH (y))
1610 return false;
1611
1612 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
48e1416a 1613 if (VINSN_SEPARABLE_P (x))
e1ab7874 1614 {
1615 /* Compare RHSes of VINSNs. */
1616 gcc_assert (VINSN_RHS (x));
1617 gcc_assert (VINSN_RHS (y));
1618
1619 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1620 }
1621
1622 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1623}
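
/* Consequently, for separable vinsns only the right-hand sides matter:
   e.g. (a sketch in pseudo-RTL)

     r1 = r3 + r4      and      r2 = r3 + r4

   compare equal here, which is what permits renaming the target
   register when moving an expression up.  */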
1624\f
1625
1626/* Functions for working with expressions. */
1627
1628/* Initialize EXPR. */
1629static void
1630init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1631 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1632 ds_t spec_to_check_ds, int orig_sched_cycle,
f1f41a6c 1633 vec<expr_history_def> history,
1634 signed char target_available,
e1ab7874 1635 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1636 bool cant_move)
1637{
1638 vinsn_attach (vi);
1639
1640 EXPR_VINSN (expr) = vi;
1641 EXPR_SPEC (expr) = spec;
1642 EXPR_USEFULNESS (expr) = use;
1643 EXPR_PRIORITY (expr) = priority;
1644 EXPR_PRIORITY_ADJ (expr) = 0;
1645 EXPR_SCHED_TIMES (expr) = sched_times;
1646 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1647 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1648 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1649 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1650
f1f41a6c 1651 if (history.exists ())
e1ab7874 1652 EXPR_HISTORY_OF_CHANGES (expr) = history;
1653 else
f1f41a6c 1654 EXPR_HISTORY_OF_CHANGES (expr).create (0);
e1ab7874 1655
1656 EXPR_TARGET_AVAILABLE (expr) = target_available;
1657 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1658 EXPR_WAS_RENAMED (expr) = was_renamed;
1659 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1660 EXPR_CANT_MOVE (expr) = cant_move;
1661}
1662
1663/* Make a copy of the expr FROM into the expr TO. */
1664void
1665copy_expr (expr_t to, expr_t from)
1666{
1e094109 1667 vec<expr_history_def> temp = vNULL;
e1ab7874 1668
f1f41a6c 1669 if (EXPR_HISTORY_OF_CHANGES (from).exists ())
e1ab7874 1670 {
1671 unsigned i;
1672 expr_history_def *phist;
1673
f1f41a6c 1674 temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
48e1416a 1675 for (i = 0;
f1f41a6c 1676 temp.iterate (i, &phist);
e1ab7874 1677 i++)
1678 {
1679 vinsn_attach (phist->old_expr_vinsn);
1680 vinsn_attach (phist->new_expr_vinsn);
1681 }
1682 }
1683
48e1416a 1684 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
e1ab7874 1685 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1686 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
48e1416a 1687 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
e1ab7874 1688 EXPR_ORIG_SCHED_CYCLE (from), temp,
48e1416a 1689 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
e1ab7874 1690 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1691 EXPR_CANT_MOVE (from));
1692}
1693
48e1416a 1694/* Same, but the final expr will never be in av sets, so don't copy
e1ab7874 1695 "uninteresting" data such as bitmap cache. */
1696void
1697copy_expr_onside (expr_t to, expr_t from)
1698{
1699 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1700 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
f1f41a6c 1701 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
1e094109 1702 vNULL,
e1ab7874 1703 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1704 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1705 EXPR_CANT_MOVE (from));
1706}
1707
1708/* Prepare the expr of INSN for scheduling. Used when moving insn and when
1709 initializing new insns. */
1710static void
1711prepare_insn_expr (insn_t insn, int seqno)
1712{
1713 expr_t expr = INSN_EXPR (insn);
1714 ds_t ds;
48e1416a 1715
e1ab7874 1716 INSN_SEQNO (insn) = seqno;
1717 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1718 EXPR_SPEC (expr) = 0;
1719 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1720 EXPR_WAS_SUBSTITUTED (expr) = 0;
1721 EXPR_WAS_RENAMED (expr) = 0;
1722 EXPR_TARGET_AVAILABLE (expr) = 1;
1723 INSN_LIVE_VALID_P (insn) = false;
1724
1725 /* ??? If this expression is speculative, make its dependence
1726 as weak as possible. We can filter this expression later
1727 in process_spec_exprs, because we do not distinguish
1728 between the status we got during compute_av_set and the
1729 existing status. To be fixed. */
1730 ds = EXPR_SPEC_DONE_DS (expr);
1731 if (ds)
1732 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1733
f1f41a6c 1734 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
e1ab7874 1735}
1736
1737/* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
48e1416a 1738 is non-null when expressions are merged from different successors at
e1ab7874 1739 a split point. */
1740static void
1741update_target_availability (expr_t to, expr_t from, insn_t split_point)
1742{
48e1416a 1743 if (EXPR_TARGET_AVAILABLE (to) < 0
e1ab7874 1744 || EXPR_TARGET_AVAILABLE (from) < 0)
1745 EXPR_TARGET_AVAILABLE (to) = -1;
1746 else
1747 {
1748 /* We try to detect the case when one of the expressions
1749 can only be reached through another one. In this case,
1750 we can do better. */
1751 if (split_point == NULL)
1752 {
1753 int toind, fromind;
1754
1755 toind = EXPR_ORIG_BB_INDEX (to);
1756 fromind = EXPR_ORIG_BB_INDEX (from);
48e1416a 1757
e1ab7874 1758 if (toind && toind == fromind)
48e1416a 1759 /* Do nothing -- everything is done in
e1ab7874 1760 merge_with_other_exprs. */
1761 ;
1762 else
1763 EXPR_TARGET_AVAILABLE (to) = -1;
1764 }
d6726470 1765 else if (EXPR_TARGET_AVAILABLE (from) == 0
1766 && EXPR_LHS (from)
1767 && REG_P (EXPR_LHS (from))
1768 && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1769 EXPR_TARGET_AVAILABLE (to) = -1;
e1ab7874 1770 else
1771 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1772 }
1773}
1774
1775/* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
48e1416a 1776 is non-null when expressions are merged from different successors at
e1ab7874 1777 a split point. */
1778static void
1779update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1780{
1781 ds_t old_to_ds, old_from_ds;
1782
1783 old_to_ds = EXPR_SPEC_DONE_DS (to);
1784 old_from_ds = EXPR_SPEC_DONE_DS (from);
48e1416a 1785
e1ab7874 1786 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1787 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1788 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1789
1790 /* When merging e.g. control & data speculative exprs, or a control
48e1416a 1791 speculative with a control&data speculative one, we really have
e1ab7874 1792 to change vinsn too. Also, when speculative status is changed,
1793 we also need to record this as a transformation in expr's history. */
1794 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1795 {
1796 old_to_ds = ds_get_speculation_types (old_to_ds);
1797 old_from_ds = ds_get_speculation_types (old_from_ds);
48e1416a 1798
e1ab7874 1799 if (old_to_ds != old_from_ds)
1800 {
1801 ds_t record_ds;
48e1416a 1802
1803 /* When both expressions are speculative, we need to change
e1ab7874 1804 the vinsn first. */
1805 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1806 {
1807 int res;
48e1416a 1808
e1ab7874 1809 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1810 gcc_assert (res >= 0);
1811 }
1812
1813 if (split_point != NULL)
1814 {
1815 /* Record the change with proper status. */
1816 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1817 record_ds &= ~(old_to_ds & SPECULATIVE);
1818 record_ds &= ~(old_from_ds & SPECULATIVE);
48e1416a 1819
1820 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1821 INSN_UID (split_point), TRANS_SPECULATION,
e1ab7874 1822 EXPR_VINSN (from), EXPR_VINSN (to),
1823 record_ds);
1824 }
1825 }
1826 }
1827}
1828
1829
1830/* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1831 this is done along different paths. */
1832void
1833merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1834{
32bbc704 1835 /* Choose the maximum of the specs of merged exprs. This is required
1836 for correctness of bookkeeping. */
1837 if (EXPR_SPEC (to) < EXPR_SPEC (from))
e1ab7874 1838 EXPR_SPEC (to) = EXPR_SPEC (from);
1839
1840 if (split_point)
1841 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1842 else
48e1416a 1843 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
e1ab7874 1844 EXPR_USEFULNESS (from));
1845
1846 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1847 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1848
1849 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1850 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1851
1852 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1853 EXPR_ORIG_BB_INDEX (to) = 0;
1854
48e1416a 1855 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
e1ab7874 1856 EXPR_ORIG_SCHED_CYCLE (from));
1857
e1ab7874 1858 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1859 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1860 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1861
c53624fb 1862 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1863 EXPR_HISTORY_OF_CHANGES (from));
e1ab7874 1864 update_target_availability (to, from, split_point);
1865 update_speculative_bits (to, from, split_point);
1866}
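
/* For instance, when merging at a split point two copies of an expr
   with EXPR_USEFULNESS 40 and 60, the result gets usefulness 100: the
   usefulness of the two paths adds up.  Without a split point the
   maximum of the two values is kept instead.  */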
1867
1868/* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
48e1416a 1869 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
e1ab7874 1870 are merged from different successors at a split point. */
1871void
1872merge_expr (expr_t to, expr_t from, insn_t split_point)
1873{
1874 vinsn_t to_vi = EXPR_VINSN (to);
1875 vinsn_t from_vi = EXPR_VINSN (from);
1876
1877 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1878
1879 /* Make sure that speculative pattern is propagated into exprs that
1880 have non-speculative one. This will provide us with consistent
1881 speculative bits and speculative patterns inside expr. */
936ab1d9 1882 if ((EXPR_SPEC_DONE_DS (from) != 0
1883 && EXPR_SPEC_DONE_DS (to) == 0)
1884 /* Do likewise for volatile insns, so that we always retain
1885 the may_trap_p bit on the resulting expression. */
1886 || (VINSN_MAY_TRAP_P (EXPR_VINSN (from))
1887 && !VINSN_MAY_TRAP_P (EXPR_VINSN (to))))
e1ab7874 1888 change_vinsn_in_expr (to, EXPR_VINSN (from));
1889
1890 merge_expr_data (to, from, split_point);
1891 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1892}
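
/* A hedged usage sketch, not part of the original sources: a typical
   caller locates an equivalent expr with av_set_lookup before merging,
   since merge_expr asserts vinsn equality.  The helper name is
   hypothetical.  */
static void ATTRIBUTE_UNUSED
merge_expr_into_set_example (av_set_t *setp, expr_t from, insn_t split_point)
{
  expr_t to = av_set_lookup (*setp, EXPR_VINSN (from));

  if (to != NULL)
    merge_expr (to, from, split_point);
  else
    av_set_add (setp, from);
}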
1893
1894/* Clear the information of this EXPR. */
1895void
1896clear_expr (expr_t expr)
1897{
e1ab7874 1899 vinsn_detach (EXPR_VINSN (expr));
1900 EXPR_VINSN (expr) = NULL;
1901
f1f41a6c 1902 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
e1ab7874 1903}
1904
1905/* For a given LV_SET, mark EXPR as having an unavailable target register. */
1906static void
1907set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1908{
1909 if (EXPR_SEPARABLE_P (expr))
1910 {
1911 if (REG_P (EXPR_LHS (expr))
1f53e226 1912 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
e1ab7874 1913 {
48e1416a 1914 /* If it's an insn like r1 = use (r1, ...), and it exists in
1915 different forms in each of the av_sets being merged, we can't say
1916 whether original destination register is available or not.
1917 However, this still works if destination register is not used
e1ab7874 1918 in the original expression: if the branch at which LV_SET we're
1919 looking here is not actually 'other branch' in sense that same
48e1416a 1920 expression is available through it (but it can't be determined
e1ab7874 1921 at computation stage because of transformations on one of the
48e1416a 1922 branches), it still won't affect the availability.
1923 Liveness of a register somewhere on a code motion path means
1924 it's either read somewhere on a code motion path, live on
e1ab7874 1925 'other' branch, live at the point immediately following
1926 the original operation, or is read by the original operation.
1927 The latter case is filtered out in the condition below.
1928 It still doesn't cover the case when register is defined and used
1929 somewhere within the code motion path, and in this case we could
1930 miss a unifying code motion along both branches using a renamed
1931 register, but it won't affect code correctness since upon
1932 an actual code motion, bookkeeping code would be generated. */
1f53e226 1933 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1934 EXPR_LHS (expr)))
e1ab7874 1935 EXPR_TARGET_AVAILABLE (expr) = -1;
1936 else
1937 EXPR_TARGET_AVAILABLE (expr) = false;
1938 }
1939 }
1940 else
1941 {
1942 unsigned regno;
1943 reg_set_iterator rsi;
48e1416a 1944
1945 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
e1ab7874 1946 0, regno, rsi)
1947 if (bitmap_bit_p (lv_set, regno))
1948 {
1949 EXPR_TARGET_AVAILABLE (expr) = false;
1950 break;
1951 }
1952
1953 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1954 0, regno, rsi)
1955 if (bitmap_bit_p (lv_set, regno))
1956 {
1957 EXPR_TARGET_AVAILABLE (expr) = false;
1958 break;
1959 }
1960 }
1961}
1962
48e1416a 1963/* Try to make EXPR speculative. Return 1 when EXPR's pattern
e1ab7874 1964 or dependence status has changed, 2 when also the target register
 1965 became unavailable, 0 if nothing had to be changed, -1 on failure. */
1966int
1967speculate_expr (expr_t expr, ds_t ds)
1968{
1969 int res;
04d073df 1970 rtx_insn *orig_insn_rtx;
e1ab7874 1971 rtx spec_pat;
1972 ds_t target_ds, current_ds;
1973
1974 /* Obtain the status we need to put on EXPR. */
1975 target_ds = (ds & SPECULATIVE);
1976 current_ds = EXPR_SPEC_DONE_DS (expr);
1977 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1978
1979 orig_insn_rtx = EXPR_INSN_RTX (expr);
1980
1981 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1982
1983 switch (res)
1984 {
1985 case 0:
1986 EXPR_SPEC_DONE_DS (expr) = ds;
1987 return current_ds != ds ? 1 : 0;
48e1416a 1988
e1ab7874 1989 case 1:
1990 {
04d073df 1991 rtx_insn *spec_insn_rtx =
1992 create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
e1ab7874 1993 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1994
1995 change_vinsn_in_expr (expr, spec_vinsn);
1996 EXPR_SPEC_DONE_DS (expr) = ds;
1997 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1998
48e1416a 1999 /* Do not allow clobbering the address register of speculative
e1ab7874 2000 insns. */
1f53e226 2001 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
2002 expr_dest_reg (expr)))
e1ab7874 2003 {
2004 EXPR_TARGET_AVAILABLE (expr) = false;
2005 return 2;
2006 }
2007
2008 return 1;
2009 }
2010
2011 case -1:
2012 return -1;
2013
2014 default:
2015 gcc_unreachable ();
2016 return -1;
2017 }
2018}
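
/* A hedged sketch of how the return codes above might be consumed; the
   helper name is hypothetical, not from the original code.  */
static bool ATTRIBUTE_UNUSED
speculate_expr_example (expr_t expr, ds_t ds)
{
  switch (speculate_expr (expr, ds))
    {
    case -1:
      return false;  /* Speculation failed; EXPR is unchanged.  */
    case 0:
      return true;   /* Nothing had to be changed.  */
    case 1:
      return true;   /* Pattern or dependence status changed.  */
    case 2:
      return true;   /* Changed, and the target register became
                        unavailable, so renaming is also needed.  */
    default:
      gcc_unreachable ();
    }
}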
2019
2020/* Return a destination register, if any, of EXPR. */
2021rtx
2022expr_dest_reg (expr_t expr)
2023{
2024 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2025
2026 if (dest != NULL_RTX && REG_P (dest))
2027 return dest;
2028
2029 return NULL_RTX;
2030}
2031
2032/* Returns the REGNO of EXPR's destination. */
2033unsigned
2034expr_dest_regno (expr_t expr)
2035{
2036 rtx dest = expr_dest_reg (expr);
2037
2038 gcc_assert (dest != NULL_RTX);
2039 return REGNO (dest);
2040}
2041
48e1416a 2042/* For a given LV_SET, mark all expressions in JOIN_SET that are not present
e1ab7874 2043 in AV_SET as having an unavailable target register. */
2044void
2045mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2046{
2047 expr_t expr;
2048 av_set_iterator avi;
2049
2050 FOR_EACH_EXPR (expr, avi, join_set)
2051 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2052 set_unavailable_target_for_expr (expr, lv_set);
2053}
2054\f
2055
1f53e226 2056/* Returns true if REG (at least partially) is present in REGS. */
2057bool
2058register_unavailable_p (regset regs, rtx reg)
2059{
2060 unsigned regno, end_regno;
2061
2062 regno = REGNO (reg);
2063 if (bitmap_bit_p (regs, regno))
2064 return true;
2065
2066 end_regno = END_REGNO (reg);
2067
2068 while (++regno < end_regno)
2069 if (bitmap_bit_p (regs, regno))
2070 return true;
2071
2072 return false;
2073}
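
/* A hedged illustration with hypothetical register numbers: the whole
   range [REGNO (reg), END_REGNO (reg)) is checked, so liveness of any
   covered word makes REG unavailable.  */
#if 0
  /* For a two-word hard register with REGNO (reg) == 10 and
     END_REGNO (reg) == 12, setting only regno 11 in REGS still makes
     register_unavailable_p (regs, reg) return true.  */
#endif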
2074
e1ab7874 2075/* Av set functions. */
2076
2077/* Add a new element to av set SETP.
2078 Return the element added. */
2079static av_set_t
2080av_set_add_element (av_set_t *setp)
2081{
2082 /* Insert at the beginning of the list. */
2083 _list_add (setp);
2084 return *setp;
2085}
2086
2087/* Add EXPR to SETP. */
2088void
2089av_set_add (av_set_t *setp, expr_t expr)
2090{
2091 av_set_t elem;
48e1416a 2092
e1ab7874 2093 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2094 elem = av_set_add_element (setp);
2095 copy_expr (_AV_SET_EXPR (elem), expr);
2096}
2097
2098/* Same, but do not copy EXPR. */
2099static void
2100av_set_add_nocopy (av_set_t *setp, expr_t expr)
2101{
2102 av_set_t elem;
2103
2104 elem = av_set_add_element (setp);
2105 *_AV_SET_EXPR (elem) = *expr;
2106}
2107
2108/* Remove expr pointed to by IP from the av_set. */
2109void
2110av_set_iter_remove (av_set_iterator *ip)
2111{
2112 clear_expr (_AV_SET_EXPR (*ip->lp));
2113 _list_iter_remove (ip);
2114}
2115
2116/* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the
2117 sense of vinsn_equal_p. Return NULL if no such expr
2118 is found in SET. */
2119expr_t
2120av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2121{
2122 expr_t expr;
2123 av_set_iterator i;
2124
2125 FOR_EACH_EXPR (expr, i, set)
2126 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2127 return expr;
2128 return NULL;
2129}
2130
2131/* Same, but also remove the EXPR found. */
2132static expr_t
2133av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2134{
2135 expr_t expr;
2136 av_set_iterator i;
2137
2138 FOR_EACH_EXPR_1 (expr, i, setp)
2139 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2140 {
2141 _list_iter_remove_nofree (&i);
2142 return expr;
2143 }
2144 return NULL;
2145}
2146
2147/* Search for an expr in SET, such that it's equivalent to EXPR in the
2148 sense of vinsn_equal_p function of their vinsns, but not EXPR itself.
2149 Returns NULL if no such expr is found in SET. */
2150static expr_t
2151av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2152{
2153 expr_t cur_expr;
2154 av_set_iterator i;
2155
2156 FOR_EACH_EXPR (cur_expr, i, set)
2157 {
2158 if (cur_expr == expr)
2159 continue;
2160 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2161 return cur_expr;
2162 }
2163
2164 return NULL;
2165}
2166
2167/* If an equivalent expression is already in AVP, merge them and remove one. */
2168expr_t
2169merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2170{
2171 expr_t expr2;
2172
2173 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2174 if (expr2 != NULL)
2175 {
2176 /* Reset target availability on merge, since taking it from only one
2177 of the exprs could be wrong when they come from different code. */
2178 EXPR_TARGET_AVAILABLE (expr2) = -1;
2179 EXPR_USEFULNESS (expr2) = 0;
2180
2181 merge_expr (expr2, expr, NULL);
48e1416a 2182
e1ab7874 2183 /* Fix usefulness, as it should now be REG_BR_PROB_BASE. */
2184 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
48e1416a 2185
e1ab7874 2186 av_set_iter_remove (ip);
2187 return expr2;
2188 }
2189
2190 return expr;
2191}
2192
2193/* Return true if there is an expr that corresponds to VI in SET. */
2194bool
2195av_set_is_in_p (av_set_t set, vinsn_t vi)
2196{
2197 return av_set_lookup (set, vi) != NULL;
2198}
2199
2200/* Return a copy of SET. */
2201av_set_t
2202av_set_copy (av_set_t set)
2203{
2204 expr_t expr;
2205 av_set_iterator i;
2206 av_set_t res = NULL;
2207
2208 FOR_EACH_EXPR (expr, i, set)
2209 av_set_add (&res, expr);
2210
2211 return res;
2212}
2213
2214/* Join two av sets that do not have common elements by attaching second set
2215 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2216 _AV_SET_NEXT of first set's last element). */
2217static void
2218join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2219{
2220 gcc_assert (*to_tailp == NULL);
2221 *to_tailp = *fromp;
2222 *fromp = NULL;
2223}
2224
2225/* Make the set pointed to by TOP the union of TOP and FROMP. Clear
2226 the av_set pointed to by FROMP afterwards. */
2227void
2228av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2229{
2230 expr_t expr1;
2231 av_set_iterator i;
2232
2233 /* Delete from TOP all exprs that are present in FROMP. */
2234 FOR_EACH_EXPR_1 (expr1, i, top)
2235 {
2236 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2237
2238 if (expr2)
2239 {
2240 merge_expr (expr2, expr1, insn);
2241 av_set_iter_remove (&i);
2242 }
2243 }
2244
2245 join_distinct_sets (i.lp, fromp);
2246}
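
/* A hedged usage sketch (hypothetical helper, not in the original
   sources): computing the union of the availability sets of two
   successors.  */
static av_set_t ATTRIBUTE_UNUSED
av_set_union_example (av_set_t set1, av_set_t set2, insn_t insn)
{
  av_set_t res = av_set_copy (set1);
  av_set_t tmp = av_set_copy (set2);

  /* Equivalent exprs are merged in place; the rest of TMP is spliced
     onto RES, and TMP ends up cleared.  */
  av_set_union_and_clear (&res, &tmp, insn);
  gcc_assert (tmp == NULL);
  return res;
}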
2247
48e1416a 2248/* Same as above, but also update availability of target register in
e1ab7874 2249 TOP judging by TO_LV_SET and FROM_LV_SET. */
2250void
2251av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2252 regset from_lv_set, insn_t insn)
2253{
2254 expr_t expr1;
2255 av_set_iterator i;
2256 av_set_t *to_tailp, in_both_set = NULL;
2257
2258 /* Delete from TOP all exprs that are present in FROMP. */
2259 FOR_EACH_EXPR_1 (expr1, i, top)
2260 {
2261 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2262
2263 if (expr2)
2264 {
48e1416a 2265 /* It may be that the expressions have different destination
e1ab7874 2266 registers, in which case we need to check liveness here. */
2267 if (EXPR_SEPARABLE_P (expr1))
2268 {
48e1416a 2269 int regno1 = (REG_P (EXPR_LHS (expr1))
e1ab7874 2270 ? (int) expr_dest_regno (expr1) : -1);
48e1416a 2271 int regno2 = (REG_P (EXPR_LHS (expr2))
e1ab7874 2272 ? (int) expr_dest_regno (expr2) : -1);
48e1416a 2273
2274 /* ??? We don't have a way to check restrictions for
e1ab7874 2275 the *other* register on the current path; we did it only
2276 for the current target register. Give up. */
2277 if (regno1 != regno2)
2278 EXPR_TARGET_AVAILABLE (expr2) = -1;
2279 }
2280 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2281 EXPR_TARGET_AVAILABLE (expr2) = -1;
2282
2283 merge_expr (expr2, expr1, insn);
2284 av_set_add_nocopy (&in_both_set, expr2);
2285 av_set_iter_remove (&i);
2286 }
2287 else
48e1416a 2288 /* EXPR1 is present in TOP, but not in FROMP. Check it on
e1ab7874 2289 FROM_LV_SET. */
2290 set_unavailable_target_for_expr (expr1, from_lv_set);
2291 }
2292 to_tailp = i.lp;
2293
2294 /* These expressions are not present in TOP. Check liveness
2295 restrictions on TO_LV_SET. */
2296 FOR_EACH_EXPR (expr1, i, *fromp)
2297 set_unavailable_target_for_expr (expr1, to_lv_set);
2298
2299 join_distinct_sets (i.lp, &in_both_set);
2300 join_distinct_sets (to_tailp, fromp);
2301}
2302
2303/* Clear av_set pointed to by SETP. */
2304void
2305av_set_clear (av_set_t *setp)
2306{
2307 expr_t expr;
2308 av_set_iterator i;
2309
2310 FOR_EACH_EXPR_1 (expr, i, setp)
2311 av_set_iter_remove (&i);
2312
2313 gcc_assert (*setp == NULL);
2314}
2315
2316/* Leave only one non-speculative element in the set pointed to by SETP. */
2317void
2318av_set_leave_one_nonspec (av_set_t *setp)
2319{
2320 expr_t expr;
2321 av_set_iterator i;
2322 bool has_one_nonspec = false;
2323
48e1416a 2324 /* Keep all speculative exprs, and leave one non-speculative
e1ab7874 2325 (the first one). */
2326 FOR_EACH_EXPR_1 (expr, i, setp)
2327 {
2328 if (!EXPR_SPEC_DONE_DS (expr))
2329 {
2330 if (has_one_nonspec)
2331 av_set_iter_remove (&i);
2332 else
2333 has_one_nonspec = true;
2334 }
2335 }
2336}
2337
2338/* Return the N'th element of the SET. */
2339expr_t
2340av_set_element (av_set_t set, int n)
2341{
2342 expr_t expr;
2343 av_set_iterator i;
2344
2345 FOR_EACH_EXPR (expr, i, set)
2346 if (n-- == 0)
2347 return expr;
2348
2349 gcc_unreachable ();
2350 return NULL;
2351}
2352
2353/* Deletes all expressions from AVP that are conditional branches (IFs). */
2354void
2355av_set_substract_cond_branches (av_set_t *avp)
2356{
2357 av_set_iterator i;
2358 expr_t expr;
2359
2360 FOR_EACH_EXPR_1 (expr, i, avp)
2361 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2362 av_set_iter_remove (&i);
2363}
2364
48e1416a 2365/* Multiply the usefulness attribute of each member of the av-set AV by
e1ab7874 2366 the value PROB / ALL_PROB. */
2367void
2368av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2369{
2370 av_set_iterator i;
2371 expr_t expr;
2372
2373 FOR_EACH_EXPR (expr, i, av)
48e1416a 2374 EXPR_USEFULNESS (expr) = (all_prob
e1ab7874 2375 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2376 : 0);
2377}
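
/* A hedged arithmetic example with hypothetical numbers: scaling
   usefulness by the probability of the path being taken.  */
#if 0
  /* For a path taken with probability 0.4, PROB == 4000 and
     ALL_PROB == REG_BR_PROB_BASE == 10000, so an expr with usefulness
     10000 is rescaled to (10000 * 4000) / 10000 == 4000.  */
  av_set_split_usefulness (av, 4000, REG_BR_PROB_BASE);
#endif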
2378
2379/* Leave in AVP only those expressions that are present in AV,
c53624fb 2380 merging the history of their transformations. */
e1ab7874 2381void
c53624fb 2382av_set_code_motion_filter (av_set_t *avp, av_set_t av)
e1ab7874 2383{
2384 av_set_iterator i;
c53624fb 2385 expr_t expr, expr2;
e1ab7874 2386
2387 FOR_EACH_EXPR_1 (expr, i, avp)
c53624fb 2388 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
e1ab7874 2389 av_set_iter_remove (&i);
c53624fb 2390 else
2391 /* When updating av sets in bookkeeping blocks, we can add more insns
2392 there which will be transformed but the upper av sets will not
2393 reflect those transformations. We then fail to undo those
2394 when searching for such insns. So merge the history saved
2395 in the av set of the block we are processing. */
2396 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2397 EXPR_HISTORY_OF_CHANGES (expr2));
e1ab7874 2398}
2399
2400\f
2401
2402/* Dependence hooks to initialize insn data. */
2403
2404/* This is used in hooks callable from dependence analysis when initializing
2405 instruction's data. */
2406static struct
2407{
2408 /* Where the dependence was found (lhs/rhs). */
2409 deps_where_t where;
2410
2411 /* The actual data object to initialize. */
2412 idata_t id;
2413
2414 /* True when the insn should not be made clonable. */
2415 bool force_unique_p;
2416
2417 /* True when insn should be treated as of type USE, i.e. never renamed. */
2418 bool force_use_p;
2419} deps_init_id_data;
2420
2421
48e1416a 2422/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
e1ab7874 2423 clonable. */
2424static void
2425setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2426{
2427 int type;
48e1416a 2428
e1ab7874 2429 /* Determine whether INSN could be cloned and return appropriate vinsn type.
2430 Those clonable insns which can be separated into lhs and rhs have type SET.
2431 Other clonable insns have type USE. */
2432 type = GET_CODE (insn);
2433
2434 /* Only regular insns can be cloned. */
2435 if (type == INSN && !force_unique_p)
2436 type = SET;
2437 else if (type == JUMP_INSN && simplejump_p (insn))
2438 type = PC;
9845d120 2439 else if (type == DEBUG_INSN)
2440 type = !force_unique_p ? USE : INSN;
48e1416a 2441
e1ab7874 2442 IDATA_TYPE (id) = type;
2443 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2444 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2445 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2446}
2447
2448/* Start initializing insn data. */
2449static void
2450deps_init_id_start_insn (insn_t insn)
2451{
2452 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2453
2454 setup_id_for_insn (deps_init_id_data.id, insn,
2455 deps_init_id_data.force_unique_p);
2456 deps_init_id_data.where = DEPS_IN_INSN;
2457}
2458
2459/* Start initializing lhs data. */
2460static void
2461deps_init_id_start_lhs (rtx lhs)
2462{
2463 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2464 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2465
2466 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2467 {
2468 IDATA_LHS (deps_init_id_data.id) = lhs;
2469 deps_init_id_data.where = DEPS_IN_LHS;
2470 }
2471}
2472
2473/* Finish initializing lhs data. */
2474static void
2475deps_init_id_finish_lhs (void)
2476{
2477 deps_init_id_data.where = DEPS_IN_INSN;
2478}
2479
2480/* Note a set of REGNO. */
2481static void
2482deps_init_id_note_reg_set (int regno)
2483{
2484 haifa_note_reg_set (regno);
2485
2486 if (deps_init_id_data.where == DEPS_IN_RHS)
2487 deps_init_id_data.force_use_p = true;
2488
2489 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2490 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2491
2492#ifdef STACK_REGS
48e1416a 2493 /* Make instructions that set stack registers ineligible for
e1ab7874 2494 renaming to avoid issues with find_used_regs. */
2495 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2496 deps_init_id_data.force_use_p = true;
2497#endif
2498}
2499
2500/* Note a clobber of REGNO. */
2501static void
2502deps_init_id_note_reg_clobber (int regno)
2503{
2504 haifa_note_reg_clobber (regno);
2505
2506 if (deps_init_id_data.where == DEPS_IN_RHS)
2507 deps_init_id_data.force_use_p = true;
2508
2509 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2510 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2511}
2512
2513/* Note a use of REGNO. */
2514static void
2515deps_init_id_note_reg_use (int regno)
2516{
2517 haifa_note_reg_use (regno);
2518
2519 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2520 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2521}
2522
2523/* Start initializing rhs data. */
2524static void
2525deps_init_id_start_rhs (rtx rhs)
2526{
2527 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2528
2529 /* And there was no sel_deps_reset_to_insn (). */
2530 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2531 {
2532 IDATA_RHS (deps_init_id_data.id) = rhs;
2533 deps_init_id_data.where = DEPS_IN_RHS;
2534 }
2535}
2536
2537/* Finish initializing rhs data. */
2538static void
2539deps_init_id_finish_rhs (void)
2540{
2541 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2542 || deps_init_id_data.where == DEPS_IN_INSN);
2543 deps_init_id_data.where = DEPS_IN_INSN;
2544}
2545
2546/* Finish initializing insn data. */
2547static void
2548deps_init_id_finish_insn (void)
2549{
2550 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2551
2552 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2553 {
2554 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2555 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2556
2557 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2558 || deps_init_id_data.force_use_p)
2559 {
48e1416a 2560 /* This should be a USE, as we don't want to schedule its RHS
e1ab7874 2561 separately. However, we still want the LHS and RHS recorded
48e1416a 2562 for the purposes of substitution. That's why we don't
e1ab7874 2563 simply call downgrade_to_use () here. */
2564 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2565 gcc_assert (!lhs == !rhs);
2566
2567 IDATA_TYPE (deps_init_id_data.id) = USE;
2568 }
2569 }
2570
2571 deps_init_id_data.where = DEPS_IN_NOWHERE;
2572}
2573
2574/* This is dependence info used for initializing insn's data. */
2575static struct sched_deps_info_def deps_init_id_sched_deps_info;
2576
2577/* This initializes most of the static part of the above structure. */
2578static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2579 {
2580 NULL,
2581
2582 deps_init_id_start_insn,
2583 deps_init_id_finish_insn,
2584 deps_init_id_start_lhs,
2585 deps_init_id_finish_lhs,
2586 deps_init_id_start_rhs,
2587 deps_init_id_finish_rhs,
2588 deps_init_id_note_reg_set,
2589 deps_init_id_note_reg_clobber,
2590 deps_init_id_note_reg_use,
2591 NULL, /* note_mem_dep */
2592 NULL, /* note_dep */
2593
2594 0, /* use_cselib */
2595 0, /* use_deps_list */
2596 0 /* generate_spec_deps */
2597 };
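
/* A hedged summary of the hook-driven state machine above (not part of
   the original comments): deps_init_id_data.where starts at
   DEPS_IN_NOWHERE, moves to DEPS_IN_INSN on start_insn, dips into
   DEPS_IN_LHS or DEPS_IN_RHS while the corresponding operand of a SET
   is scanned, and returns to DEPS_IN_NOWHERE on finish_insn.  A register
   set or clobber noted while in DEPS_IN_RHS forces the insn to be
   treated as a USE, i.e. never renamed.  */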
2598
2599/* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2600 we don't actually need information about lhs and rhs. */
2601static void
2602setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2603{
2604 rtx pat = PATTERN (insn);
48e1416a 2605
971ba038 2606 if (NONJUMP_INSN_P (insn)
48e1416a 2607 && GET_CODE (pat) == SET
e1ab7874 2608 && !force_unique_p)
2609 {
2610 IDATA_RHS (id) = SET_SRC (pat);
2611 IDATA_LHS (id) = SET_DEST (pat);
2612 }
2613 else
2614 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2615}
2616
2617/* Possibly downgrade INSN to USE. */
2618static void
2619maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2620{
2621 bool must_be_use = false;
be10bb5a 2622 df_ref def;
e1ab7874 2623 rtx lhs = IDATA_LHS (id);
2624 rtx rhs = IDATA_RHS (id);
48e1416a 2625
e1ab7874 2626 /* We downgrade only SETs. */
2627 if (IDATA_TYPE (id) != SET)
2628 return;
2629
2630 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2631 {
2632 IDATA_TYPE (id) = USE;
2633 return;
2634 }
48e1416a 2635
be10bb5a 2636 FOR_EACH_INSN_DEF (def, insn)
e1ab7874 2637 {
e1ab7874 2638 if (DF_REF_INSN (def)
2639 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2640 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2641 {
2642 must_be_use = true;
2643 break;
2644 }
2645
2646#ifdef STACK_REGS
48e1416a 2647 /* Make instructions that set stack registers ineligible for
e1ab7874 2648 renaming to avoid issues with find_used_regs. */
2649 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2650 {
2651 must_be_use = true;
2652 break;
2653 }
2654#endif
48e1416a 2655 }
2656
e1ab7874 2657 if (must_be_use)
2658 IDATA_TYPE (id) = USE;
2659}
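
/* A hedged illustration of the pre/post-modify case above; the insn
   shape is hypothetical and target-dependent.  */
#if 0
  /* On a target with auto-increment addressing, an insn like
       (set (reg r0) (mem (post_inc (reg r1))))
     also writes r1.  That def is flagged DF_REF_PRE_POST_MODIFY and its
     location is mentioned in the RHS, so the insn is downgraded to USE
     and will never be renamed.  */
#endif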
2660
2661/* Setup register sets describing INSN in ID. */
2662static void
2663setup_id_reg_sets (idata_t id, insn_t insn)
2664{
be10bb5a 2665 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2666 df_ref def, use;
e1ab7874 2667 regset tmp = get_clear_regset_from_pool ();
48e1416a 2668
be10bb5a 2669 FOR_EACH_INSN_INFO_DEF (def, insn_info)
e1ab7874 2670 {
e1ab7874 2671 unsigned int regno = DF_REF_REGNO (def);
48e1416a 2672
e1ab7874 2673 /* Post modifies are treated like clobbers by sched-deps.c. */
2674 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2675 | DF_REF_PRE_POST_MODIFY)))
2676 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2677 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2678 {
2679 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2680
2681#ifdef STACK_REGS
48e1416a 2682 /* For stack registers, treat writes to them as writes
e1ab7874 2683 to the first one to be consistent with sched-deps.c. */
2684 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2685 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2686#endif
2687 }
2688 /* Mark special refs that generate read/write def pair. */
2689 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2690 || regno == STACK_POINTER_REGNUM)
2691 bitmap_set_bit (tmp, regno);
2692 }
48e1416a 2693
be10bb5a 2694 FOR_EACH_INSN_INFO_USE (use, insn_info)
e1ab7874 2695 {
e1ab7874 2696 unsigned int regno = DF_REF_REGNO (use);
2697
2698 /* When these refs are met for the first time, skip them, as
2699 these uses are just counterparts of some defs. */
2700 if (bitmap_bit_p (tmp, regno))
2701 bitmap_clear_bit (tmp, regno);
2702 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2703 {
2704 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2705
2706#ifdef STACK_REGS
48e1416a 2707 /* For stack registers, treat reads from them as reads from
e1ab7874 2708 the first one to be consistent with sched-deps.c. */
2709 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2710 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2711#endif
2712 }
2713 }
2714
2715 return_regset_to_pool (tmp);
2716}
2717
2718/* Initialize instruction data for INSN in ID using DF's data. */
2719static void
2720init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2721{
2722 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2723
2724 setup_id_for_insn (id, insn, force_unique_p);
2725 setup_id_lhs_rhs (id, insn, force_unique_p);
2726
2727 if (INSN_NOP_P (insn))
2728 return;
2729
2730 maybe_downgrade_id_to_use (id, insn);
2731 setup_id_reg_sets (id, insn);
2732}
2733
2734/* Initialize instruction data for INSN in ID. */
2735static void
2736deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2737{
68e419a1 2738 struct deps_desc _dc, *dc = &_dc;
e1ab7874 2739
2740 deps_init_id_data.where = DEPS_IN_NOWHERE;
2741 deps_init_id_data.id = id;
2742 deps_init_id_data.force_unique_p = force_unique_p;
2743 deps_init_id_data.force_use_p = false;
2744
d9ab2038 2745 init_deps (dc, false);
e1ab7874 2746
2747 memcpy (&deps_init_id_sched_deps_info,
2748 &const_deps_init_id_sched_deps_info,
2749 sizeof (deps_init_id_sched_deps_info));
2750
2751 if (spec_info != NULL)
2752 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2753
2754 sched_deps_info = &deps_init_id_sched_deps_info;
2755
2f3c9801 2756 deps_analyze_insn (dc, insn);
e1ab7874 2757
2758 free_deps (dc);
2759
2760 deps_init_id_data.id = NULL;
2761}
2762
2763\f
52d7e28c 2764struct sched_scan_info_def
2765{
2766 /* This hook tells the scheduler frontend to extend its internal per basic
2767 block data structures. This hook should be called once before a series of
2768 calls to bb_init (). */
2769 void (*extend_bb) (void);
2770
2771 /* This hook makes the scheduler frontend initialize its internal data
2772 structures for the passed basic block. */
2773 void (*init_bb) (basic_block);
2774
2775 /* This hook tells the scheduler frontend to extend its internal per insn data
2776 structures. This hook should be called once before a series of calls to
2777 insn_init (). */
2778 void (*extend_insn) (void);
2779
2780 /* This hook makes the scheduler frontend initialize its internal data
2781 structures for the passed insn. */
2f3c9801 2782 void (*init_insn) (insn_t);
52d7e28c 2783};
2784
2785/* A driver function to add a set of basic blocks (BBS) to the
2786 scheduling region. */
2787static void
2788sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2789{
2790 unsigned i;
2791 basic_block bb;
2792
2793 if (ssi->extend_bb)
2794 ssi->extend_bb ();
2795
2796 if (ssi->init_bb)
f1f41a6c 2797 FOR_EACH_VEC_ELT (bbs, i, bb)
52d7e28c 2798 ssi->init_bb (bb);
2799
2800 if (ssi->extend_insn)
2801 ssi->extend_insn ();
2802
2803 if (ssi->init_insn)
f1f41a6c 2804 FOR_EACH_VEC_ELT (bbs, i, bb)
52d7e28c 2805 {
2f3c9801 2806 rtx_insn *insn;
52d7e28c 2807
2808 FOR_BB_INSNS (bb, insn)
2809 ssi->init_insn (insn);
2810 }
2811}
e1ab7874 2812
2813/* Implement hooks for collecting fundamental insn properties like if insn is
2814 an ASM or is within a SCHED_GROUP. */
2815
2816/* True when the "one-time init" data for INSN has not been inited
 yet, i.e. we see INSN for the first time. */
2817static bool
2818first_time_insn_init (insn_t insn)
2819{
2820 return INSN_LIVE (insn) == NULL;
2821}
2822
2823/* Hash an entry in a transformed_insns hashtable. */
2824static hashval_t
2825hash_transformed_insns (const void *p)
2826{
2827 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2828}
2829
2830/* Compare the entries in a transformed_insns hashtable. */
2831static int
2832eq_transformed_insns (const void *p, const void *q)
2833{
04d073df 2834 rtx_insn *i1 =
2835 VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2836 rtx_insn *i2 =
2837 VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
e1ab7874 2838
2839 if (INSN_UID (i1) == INSN_UID (i2))
2840 return 1;
2841 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2842}
2843
2844/* Free an entry in a transformed_insns hashtable. */
2845static void
2846free_transformed_insns (void *p)
2847{
2848 struct transformed_insns *pti = (struct transformed_insns *) p;
2849
2850 vinsn_detach (pti->vinsn_old);
2851 vinsn_detach (pti->vinsn_new);
2852 free (pti);
2853}
2854
48e1416a 2855/* Init the s_i_d data for INSN which should be inited just once, when
e1ab7874 2856 we first see the insn. */
2857static void
2858init_first_time_insn_data (insn_t insn)
2859{
2860 /* INSN_LIVE should not be set yet if this is the first time we init
2861 data for this insn. */
2862 gcc_assert (first_time_insn_init (insn));
48e1416a 2863
e1ab7874 2864 /* These are needed for nops too. */
2865 INSN_LIVE (insn) = get_regset_from_pool ();
2866 INSN_LIVE_VALID_P (insn) = false;
d9ab2038 2867
e1ab7874 2868 if (!INSN_NOP_P (insn))
2869 {
2870 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2871 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
48e1416a 2872 INSN_TRANSFORMED_INSNS (insn)
e1ab7874 2873 = htab_create (16, hash_transformed_insns,
2874 eq_transformed_insns, free_transformed_insns);
d9ab2038 2875 init_deps (&INSN_DEPS_CONTEXT (insn), true);
e1ab7874 2876 }
2877}
2878
48e1416a 2879/* Free almost all above data for INSN that is scheduled already.
d9ab2038 2880 Used for extra-large basic blocks. */
2881void
2882free_data_for_scheduled_insn (insn_t insn)
e1ab7874 2883{
2884 gcc_assert (! first_time_insn_init (insn));
48e1416a 2885
d9ab2038 2886 if (! INSN_ANALYZED_DEPS (insn))
2887 return;
48e1416a 2888
e1ab7874 2889 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2890 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2891 htab_delete (INSN_TRANSFORMED_INSNS (insn));
48e1416a 2892
e1ab7874 2893 /* This is allocated only for bookkeeping insns. */
2894 if (INSN_ORIGINATORS (insn))
2895 BITMAP_FREE (INSN_ORIGINATORS (insn));
2896 free_deps (&INSN_DEPS_CONTEXT (insn));
d9ab2038 2897
2898 INSN_ANALYZED_DEPS (insn) = NULL;
2899
48e1416a 2900 /* Clear the readonly flag so we would ICE when trying to recalculate
d9ab2038 2901 the deps context (as we believe that it should not happen). */
2902 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2903}
2904
2905/* Free the same data as above for INSN. */
2906static void
2907free_first_time_insn_data (insn_t insn)
2908{
2909 gcc_assert (! first_time_insn_init (insn));
2910
2911 free_data_for_scheduled_insn (insn);
2912 return_regset_to_pool (INSN_LIVE (insn));
2913 INSN_LIVE (insn) = NULL;
2914 INSN_LIVE_VALID_P (insn) = false;
e1ab7874 2915}
2916
2917/* Initialize region-scope data structures for basic blocks. */
2918static void
2919init_global_and_expr_for_bb (basic_block bb)
2920{
2921 if (sel_bb_empty_p (bb))
2922 return;
2923
2924 invalidate_av_set (bb);
2925}
2926
2927/* Data for global dependency analysis (to initialize CANT_MOVE and
2928 SCHED_GROUP_P). */
2929static struct
2930{
2931 /* Previous insn. */
2932 insn_t prev_insn;
2933} init_global_data;
2934
2935/* Determine if INSN is in the sched_group, is an asm, or should not be
2936 cloned. After that, initialize its expr. */
2937static void
2938init_global_and_expr_for_insn (insn_t insn)
2939{
2940 if (LABEL_P (insn))
2941 return;
2942
2943 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2944 {
2f3c9801 2945 init_global_data.prev_insn = NULL;
e1ab7874 2946 return;
2947 }
2948
2949 gcc_assert (INSN_P (insn));
2950
2951 if (SCHED_GROUP_P (insn))
2952 /* Setup a sched_group. */
2953 {
2954 insn_t prev_insn = init_global_data.prev_insn;
2955
2956 if (prev_insn)
2957 INSN_SCHED_NEXT (prev_insn) = insn;
2958
2959 init_global_data.prev_insn = insn;
2960 }
2961 else
2f3c9801 2962 init_global_data.prev_insn = NULL;
e1ab7874 2963
2964 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2965 || asm_noperands (PATTERN (insn)) >= 0)
2966 /* Mark INSN as an asm. */
2967 INSN_ASM_P (insn) = true;
2968
2969 {
2970 bool force_unique_p;
2971 ds_t spec_done_ds;
2972
982b0787 2973 /* Certain instructions cannot be cloned, and frame related insns and
2974 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2975 their block. */
2976 if (prologue_epilogue_contains (insn))
2977 {
2978 if (RTX_FRAME_RELATED_P (insn))
2979 CANT_MOVE (insn) = 1;
2980 else
2981 {
2982 rtx note;
2983 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2984 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2985 && ((enum insn_note) INTVAL (XEXP (note, 0))
2986 == NOTE_INSN_EPILOGUE_BEG))
2987 {
2988 CANT_MOVE (insn) = 1;
2989 break;
2990 }
2991 }
2992 force_unique_p = true;
2993 }
e1ab7874 2994 else
982b0787 2995 if (CANT_MOVE (insn)
2996 || INSN_ASM_P (insn)
2997 || SCHED_GROUP_P (insn)
a8d6ade3 2998 || CALL_P (insn)
982b0787 2999 /* Exception handling insns are always unique. */
3000 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
3001 /* TRAP_IF though have an INSN code is control_flow_insn_p (). */
13434dcb 3002 || control_flow_insn_p (insn)
3003 || volatile_insn_p (PATTERN (insn))
3004 || (targetm.cannot_copy_insn_p
3005 && targetm.cannot_copy_insn_p (insn)))
982b0787 3006 force_unique_p = true;
3007 else
3008 force_unique_p = false;
e1ab7874 3009
3010 if (targetm.sched.get_insn_spec_ds)
3011 {
3012 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3013 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3014 }
3015 else
3016 spec_done_ds = 0;
3017
3018 /* Initialize INSN's expr. */
3019 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3020 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
1e094109 3021 spec_done_ds, 0, 0, vNULL, true,
f1f41a6c 3022 false, false, false, CANT_MOVE (insn));
e1ab7874 3023 }
3024
3025 init_first_time_insn_data (insn);
3026}
3027
3028/* Scan the region and initialize instruction data for basic blocks BBS. */
3029void
3030sel_init_global_and_expr (bb_vec_t bbs)
3031{
3032 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3033 const struct sched_scan_info_def ssi =
3034 {
3035 NULL, /* extend_bb */
3036 init_global_and_expr_for_bb, /* init_bb */
3037 extend_insn_data, /* extend_insn */
3038 init_global_and_expr_for_insn /* init_insn */
3039 };
48e1416a 3040
52d7e28c 3041 sched_scan (&ssi, bbs);
e1ab7874 3042}
3043
3044/* Finalize region-scope data structures for basic blocks. */
3045static void
3046finish_global_and_expr_for_bb (basic_block bb)
3047{
3048 av_set_clear (&BB_AV_SET (bb));
3049 BB_AV_LEVEL (bb) = 0;
3050}
3051
3052/* Finalize INSN's data. */
3053static void
3054finish_global_and_expr_insn (insn_t insn)
3055{
3056 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3057 return;
3058
3059 gcc_assert (INSN_P (insn));
3060
3061 if (INSN_LUID (insn) > 0)
3062 {
3063 free_first_time_insn_data (insn);
3064 INSN_WS_LEVEL (insn) = 0;
3065 CANT_MOVE (insn) = 0;
48e1416a 3066
3067 /* We can no longer assert this, as vinsns of this insn could be
3068 easily live in other insns' caches. This should be changed to
e1ab7874 3069 a counter-like approach among all vinsns. */
3070 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3071 clear_expr (INSN_EXPR (insn));
3072 }
3073}
3074
3075/* Finalize per instruction data for the whole region. */
3076void
3077sel_finish_global_and_expr (void)
3078{
3079 {
3080 bb_vec_t bbs;
3081 int i;
3082
f1f41a6c 3083 bbs.create (current_nr_blocks);
e1ab7874 3084
3085 for (i = 0; i < current_nr_blocks; i++)
f5a6b05f 3086 bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
e1ab7874 3087
3088 /* Clear AV_SETs and INSN_EXPRs. */
3089 {
3090 const struct sched_scan_info_def ssi =
3091 {
3092 NULL, /* extend_bb */
3093 finish_global_and_expr_for_bb, /* init_bb */
3094 NULL, /* extend_insn */
3095 finish_global_and_expr_insn /* init_insn */
3096 };
3097
52d7e28c 3098 sched_scan (&ssi, bbs);
e1ab7874 3099 }
3100
f1f41a6c 3101 bbs.release ();
e1ab7874 3102 }
3103
3104 finish_insns ();
3105}
3106\f
3107
48e1416a 3108/* In the below hooks, we merely calculate whether or not a dependence
3109 exists, and in what part of the insn. However, we will need more data
e1ab7874 3110 when we start caching dependence requests. */
3111
3112/* Container to hold information for dependency analysis. */
3113static struct
3114{
3115 deps_t dc;
3116
3117 /* A variable to track which part of rtx we are scanning in
3118 sched-deps.c: sched_analyze_insn (). */
3119 deps_where_t where;
3120
3121 /* Current producer. */
3122 insn_t pro;
3123
3124 /* Current consumer. */
3125 vinsn_t con;
3126
3127 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3128 X is from { INSN, LHS, RHS }. */
3129 ds_t has_dep_p[DEPS_IN_NOWHERE];
3130} has_dependence_data;
3131
3132/* Start analyzing dependencies of INSN. */
3133static void
3134has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3135{
3136 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3137
3138 has_dependence_data.where = DEPS_IN_INSN;
3139}
3140
3141/* Finish analyzing dependencies of an insn. */
3142static void
3143has_dependence_finish_insn (void)
3144{
3145 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3146
3147 has_dependence_data.where = DEPS_IN_NOWHERE;
3148}
3149
3150/* Start analyzing dependencies of LHS. */
3151static void
3152has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3153{
3154 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3155
3156 if (VINSN_LHS (has_dependence_data.con) != NULL)
3157 has_dependence_data.where = DEPS_IN_LHS;
3158}
3159
3160/* Finish analyzing dependencies of an lhs. */
3161static void
3162has_dependence_finish_lhs (void)
3163{
3164 has_dependence_data.where = DEPS_IN_INSN;
3165}
3166
3167/* Start analyzing dependencies of RHS. */
3168static void
3169has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3170{
3171 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3172
3173 if (VINSN_RHS (has_dependence_data.con) != NULL)
3174 has_dependence_data.where = DEPS_IN_RHS;
3175}
3176
3177/* Start analyzing dependencies of an rhs. */
3178static void
3179has_dependence_finish_rhs (void)
3180{
3181 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3182 || has_dependence_data.where == DEPS_IN_INSN);
3183
3184 has_dependence_data.where = DEPS_IN_INSN;
3185}
3186
3187/* Note a set of REGNO. */
3188static void
3189has_dependence_note_reg_set (int regno)
3190{
3191 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3192
3193 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3194 VINSN_INSN_RTX
3195 (has_dependence_data.con)))
3196 {
3197 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3198
3199 if (reg_last->sets != NULL
3200 || reg_last->clobbers != NULL)
3201 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3202
a9bfd373 3203 if (reg_last->uses || reg_last->implicit_sets)
e1ab7874 3204 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3205 }
3206}
3207
3208/* Note a clobber of REGNO. */
3209static void
3210has_dependence_note_reg_clobber (int regno)
3211{
3212 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3213
3214 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3215 VINSN_INSN_RTX
3216 (has_dependence_data.con)))
3217 {
3218 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3219
3220 if (reg_last->sets)
3221 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
48e1416a 3222
a9bfd373 3223 if (reg_last->uses || reg_last->implicit_sets)
e1ab7874 3224 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3225 }
3226}
3227
3228/* Note a use of REGNO. */
3229static void
3230has_dependence_note_reg_use (int regno)
3231{
3232 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3233
3234 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3235 VINSN_INSN_RTX
3236 (has_dependence_data.con)))
3237 {
3238 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3239
3240 if (reg_last->sets)
3241 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3242
a9bfd373 3243 if (reg_last->clobbers || reg_last->implicit_sets)
e1ab7874 3244 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3245
b0691607 3246 /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3247 is actually a check insn. We need to do this for any register
3248 read-read dependency with the check unless we track properly
3249 all registers written by BE_IN_SPEC-speculated insns, as
3250 we don't have explicit dependence lists. See PR 53975. */
e1ab7874 3251 if (reg_last->uses)
3252 {
3253 ds_t pro_spec_checked_ds;
3254
3255 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3256 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3257
b0691607 3258 if (pro_spec_checked_ds != 0)
e1ab7874 3259 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3260 NULL_RTX, NULL_RTX);
3261 }
3262 }
3263}
3264
3265/* Note a memory dependence. */
3266static void
3267has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3268 rtx pending_mem ATTRIBUTE_UNUSED,
3269 insn_t pending_insn ATTRIBUTE_UNUSED,
3270 ds_t ds ATTRIBUTE_UNUSED)
3271{
3272 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3273 VINSN_INSN_RTX (has_dependence_data.con)))
3274 {
3275 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3276
3277 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3278 }
3279}
3280
3281/* Note a dependence. */
3282static void
3283has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3284 ds_t ds ATTRIBUTE_UNUSED)
3285{
3286 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3287 VINSN_INSN_RTX (has_dependence_data.con)))
3288 {
3289 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3290
3291 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3292 }
3293}
3294
3295/* Mark the insn as having a hard dependence that prevents speculation. */
3296void
3297sel_mark_hard_insn (rtx insn)
3298{
3299 int i;
3300
3301 /* Only work when we're in has_dependence_p mode.
3302 ??? This is a hack, this should actually be a hook. */
3303 if (!has_dependence_data.dc || !has_dependence_data.pro)
3304 return;
3305
3306 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3307 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3308
3309 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3310 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3311}
3312
3313/* This structure holds the hooks for the dependency analysis used when
3314 actually processing dependencies in the scheduler. */
3315static struct sched_deps_info_def has_dependence_sched_deps_info;
3316
3317/* This initializes most of the fields of the above structure. */
3318static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3319 {
3320 NULL,
3321
3322 has_dependence_start_insn,
3323 has_dependence_finish_insn,
3324 has_dependence_start_lhs,
3325 has_dependence_finish_lhs,
3326 has_dependence_start_rhs,
3327 has_dependence_finish_rhs,
3328 has_dependence_note_reg_set,
3329 has_dependence_note_reg_clobber,
3330 has_dependence_note_reg_use,
3331 has_dependence_note_mem_dep,
3332 has_dependence_note_dep,
3333
3334 0, /* use_cselib */
3335 0, /* use_deps_list */
3336 0 /* generate_spec_deps */
3337 };
3338
3339/* Initialize has_dependence_sched_deps_info with extra spec field. */
3340static void
3341setup_has_dependence_sched_deps_info (void)
3342{
3343 memcpy (&has_dependence_sched_deps_info,
3344 &const_has_dependence_sched_deps_info,
3345 sizeof (has_dependence_sched_deps_info));
3346
3347 if (spec_info != NULL)
3348 has_dependence_sched_deps_info.generate_spec_deps = 1;
3349
3350 sched_deps_info = &has_dependence_sched_deps_info;
3351}
3352
3353/* Remove all dependences found and recorded in has_dependence_data array. */
3354void
3355sel_clear_has_dependence (void)
3356{
3357 int i;
3358
3359 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3360 has_dependence_data.has_dep_p[i] = 0;
3361}
3362
3363/* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3364 to the dependence information array in HAS_DEP_PP. */
3365ds_t
3366has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3367{
3368 int i;
3369 ds_t ds;
68e419a1 3370 struct deps_desc *dc;
e1ab7874 3371
3372 if (INSN_SIMPLEJUMP_P (pred))
3373 /* Unconditional jump is just a transfer of control flow.
3374 Ignore it. */
3375 return false;
3376
3377 dc = &INSN_DEPS_CONTEXT (pred);
d9ab2038 3378
3379 /* We init this field lazily. */
3380 if (dc->reg_last == NULL)
3381 init_deps_reg_last (dc);
48e1416a 3382
e1ab7874 3383 if (!dc->readonly)
3384 {
3385 has_dependence_data.pro = NULL;
3386 /* Initialize empty dep context with information about PRED. */
3387 advance_deps_context (dc, pred);
3388 dc->readonly = 1;
3389 }
3390
3391 has_dependence_data.where = DEPS_IN_NOWHERE;
3392 has_dependence_data.pro = pred;
3393 has_dependence_data.con = EXPR_VINSN (expr);
3394 has_dependence_data.dc = dc;
3395
3396 sel_clear_has_dependence ();
3397
3398 /* Now catch all dependencies that would be generated between PRED and
3399 INSN. */
3400 setup_has_dependence_sched_deps_info ();
3401 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3402 has_dependence_data.dc = NULL;
3403
3404 /* When a barrier was found, set DEPS_IN_INSN bits. */
3405 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3406 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3407 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3408 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3409
3410 /* Do not allow stores to memory to move through checks. Currently
3411 we don't move this to sched-deps.c as the check doesn't have
48e1416a 3412 obvious places to which this dependence can be attached.
e1ab7874 3413 FIXME: this should go to a hook. */
3414 if (EXPR_LHS (expr)
3415 && MEM_P (EXPR_LHS (expr))
3416 && sel_insn_is_speculation_check (pred))
3417 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
48e1416a 3418
e1ab7874 3419 *has_dep_pp = has_dependence_data.has_dep_p;
3420 ds = 0;
3421 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3422 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3423 NULL_RTX, NULL_RTX);
3424
3425 return ds;
3426}
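
/* A hedged usage sketch (hypothetical helper): checking whether any
   dependence at all blocks moving EXPR up through PRED.  */
static bool ATTRIBUTE_UNUSED
has_any_dependence_example (expr_t expr, insn_t pred)
{
  ds_t *has_dep_p;
  ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

  /* FULL_DS is zero when no dependence was found; otherwise
     HAS_DEP_P[DEPS_IN_INSN] .. HAS_DEP_P[DEPS_IN_RHS] show in which
     part of the insn each dependence lies.  */
  return full_ds != 0;
}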
3427\f
3428
48e1416a 3429/* Dependence hooks implementation that checks dependence latency constraints
3430 on the insns being scheduled. The entry point for these routines is
3431 tick_check_p predicate. */
e1ab7874 3432
3433static struct
3434{
3435 /* An expr we are currently checking. */
3436 expr_t expr;
3437
3438 /* A minimal cycle for its scheduling. */
3439 int cycle;
3440
3441 /* Whether we have seen a true dependence while checking. */
3442 bool seen_true_dep_p;
3443} tick_check_data;
3444
3445/* Update the minimal scheduling cycle for the insn being checked, given
3446 that it depends on PRO_INSN with status DS and weight DW. */
3447static void
3448tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3449{
3450 expr_t con_expr = tick_check_data.expr;
3451 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3452
3453 if (con_insn != pro_insn)
3454 {
3455 enum reg_note dt;
3456 int tick;
3457
3458 if (/* PROducer was removed from above due to pipelining. */
3459 !INSN_IN_STREAM_P (pro_insn)
3460 /* Or PROducer was originally on the next iteration regarding the
3461 CONsumer. */
3462 || (INSN_SCHED_TIMES (pro_insn)
3463 - EXPR_SCHED_TIMES (con_expr)) > 1)
3464 /* Don't count this dependence. */
3465 return;
3466
3467 dt = ds_to_dt (ds);
3468 if (dt == REG_DEP_TRUE)
3469 tick_check_data.seen_true_dep_p = true;
3470
3471 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3472
3473 {
3474 dep_def _dep, *dep = &_dep;
3475
3476 init_dep (dep, pro_insn, con_insn, dt);
3477
3478 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3479 }
3480
3481 /* When there are several kinds of dependencies between pro and con,
3482 only REG_DEP_TRUE should be taken into account. */
3483 if (tick > tick_check_data.cycle
3484 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3485 tick_check_data.cycle = tick;
3486 }
3487}
3488
3489/* An implementation of note_dep hook. */
3490static void
3491tick_check_note_dep (insn_t pro, ds_t ds)
3492{
3493 tick_check_dep_with_dw (pro, ds, 0);
3494}
3495
3496/* An implementation of note_mem_dep hook. */
3497static void
3498tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3499{
3500 dw_t dw;
3501
3502 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3503 ? estimate_dep_weak (mem1, mem2)
3504 : 0);
3505
3506 tick_check_dep_with_dw (pro, ds, dw);
3507}
3508
3509/* This structure contains hooks for dependence analysis used when determining
3510 whether an insn is ready for scheduling. */
3511static struct sched_deps_info_def tick_check_sched_deps_info =
3512 {
3513 NULL,
3514
3515 NULL,
3516 NULL,
3517 NULL,
3518 NULL,
3519 NULL,
3520 NULL,
3521 haifa_note_reg_set,
3522 haifa_note_reg_clobber,
3523 haifa_note_reg_use,
3524 tick_check_note_mem_dep,
3525 tick_check_note_dep,
3526
3527 0, 0, 0
3528 };
3529
3530/* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3531 scheduled. Return 0 if all data from producers in DC is ready. */
3532int
3533tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3534{
3535 int cycles_left;
3536 /* Initialize variables. */
3537 tick_check_data.expr = expr;
3538 tick_check_data.cycle = 0;
3539 tick_check_data.seen_true_dep_p = false;
3540 sched_deps_info = &tick_check_sched_deps_info;
48e1416a 3541
e1ab7874 3542 gcc_assert (!dc->readonly);
3543 dc->readonly = 1;
3544 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3545 dc->readonly = 0;
3546
3547 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3548
3549 return cycles_left >= 0 ? cycles_left : 0;
3550}
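
/* A hedged usage sketch (hypothetical helper): an expr is ready on the
   current fence cycle exactly when tick_check_p reports zero cycles
   left to wait.  */
static bool ATTRIBUTE_UNUSED
expr_ready_on_fence_example (expr_t expr, deps_t dc, fence_t fence)
{
  return tick_check_p (expr, dc, fence) == 0;
}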
3551\f
3552
3553/* Functions to work with insns. */
3554
3555/* Returns true if LHS of INSN is the same as DEST of an insn
3556 being moved. */
3557bool
3558lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3559{
3560 rtx lhs = INSN_LHS (insn);
3561
3562 if (lhs == NULL || dest == NULL)
3563 return false;
48e1416a 3564
e1ab7874 3565 return rtx_equal_p (lhs, dest);
3566}
3567
3568/* Return s_i_d entry of INSN. Callable from debugger. */
3569sel_insn_data_def
3570insn_sid (insn_t insn)
3571{
3572 return *SID (insn);
3573}
3574
3575/* True when INSN is a speculative check. We can tell this by looking
3576 at the data structures of the selective scheduler, not by examining
3577 the pattern. */
3578bool
3579sel_insn_is_speculation_check (rtx insn)
3580{
f1f41a6c 3581 return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
e1ab7874 3582}
3583
48e1416a 3584/* Extracts machine mode MODE and destination location DST_LOC
e1ab7874 3585 for given INSN. */
3586void
3754d046 3587get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode)
e1ab7874 3588{
3589 rtx pat = PATTERN (insn);
3590
3591 gcc_assert (dst_loc);
3592 gcc_assert (GET_CODE (pat) == SET);
3593
3594 *dst_loc = SET_DEST (pat);
3595
3596 gcc_assert (*dst_loc);
3597 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3598
3599 if (mode)
3600 *mode = GET_MODE (*dst_loc);
3601}
3602
48e1416a 3603/* Returns true when moving through JUMP will result in bookkeeping
e1ab7874 3604 creation. */
3605bool
3606bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3607{
3608 insn_t succ;
3609 succ_iterator si;
3610
3611 FOR_EACH_SUCC (succ, si, jump)
3612 if (sel_num_cfg_preds_gt_1 (succ))
3613 return true;
3614
3615 return false;
3616}
3617
3618/* Return 'true' if INSN is the only one in its basic block. */
3619static bool
3620insn_is_the_only_one_in_bb_p (insn_t insn)
3621{
3622 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3623}
3624
3625#ifdef ENABLE_CHECKING
48e1416a 3626/* Check that the region we're scheduling still has at most one
e1ab7874 3627 backedge. */
3628static void
3629verify_backedges (void)
3630{
3631 if (pipelining_p)
3632 {
3633 int i, n = 0;
3634 edge e;
3635 edge_iterator ei;
48e1416a 3636
e1ab7874 3637 for (i = 0; i < current_nr_blocks; i++)
f5a6b05f 3638 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
e1ab7874 3639 if (in_current_region_p (e->dest)
3640 && BLOCK_TO_BB (e->dest->index) < i)
3641 n++;
48e1416a 3642
e1ab7874 3643 gcc_assert (n <= 1);
3644 }
3645}
3646#endif
3647\f
3648
3649/* Functions to work with control flow. */
3650
93919afc 3651/* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks
3652 are sorted in topological order (it might have been invalidated by
3653 redirecting an edge). */
3654static void
3655sel_recompute_toporder (void)
3656{
3657 int i, n, rgn;
3658 int *postorder, n_blocks;
3659
a28770e1 3660 postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
93919afc 3661 n_blocks = post_order_compute (postorder, false, false);
3662
3663 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3664 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3665 if (CONTAINING_RGN (postorder[i]) == rgn)
3666 {
3667 BLOCK_TO_BB (postorder[i]) = n;
3668 BB_TO_BLOCK (n) = postorder[i];
3669 n++;
3670 }
3671
3672 /* Assert that we updated info for all blocks. We may miss some blocks if
3673 this function is called when redirecting an edge made a block
3674 unreachable, but that block is not deleted yet. */
3675 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3676}
3677
e1ab7874 3678/* Tidy the possibly empty block BB. */
81d1ad0f 3679static bool
6f0e7980 3680maybe_tidy_empty_bb (basic_block bb)
e1ab7874 3681{
ef4cf572 3682 basic_block succ_bb, pred_bb, note_bb;
f1f41a6c 3683 vec<basic_block> dom_bbs;
df6266b9 3684 edge e;
3685 edge_iterator ei;
e1ab7874 3686 bool rescan_p;
3687
3688 /* Keep empty bb only if this block immediately precedes EXIT and
61e213e2 3689 has an incoming non-fallthrough edge, or it has no predecessors or
3690 successors. Otherwise remove it. */
9845d120 3691 if (!sel_bb_empty_p (bb)
48e1416a 3692 || (single_succ_p (bb)
34154e27 3693 && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
48e1416a 3694 && (!single_pred_p (bb)
61e213e2 3695 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3696 || EDGE_COUNT (bb->preds) == 0
3697 || EDGE_COUNT (bb->succs) == 0)
e1ab7874 3698 return false;
3699
df6266b9 3700 /* Do not attempt to redirect complex edges. */
3701 FOR_EACH_EDGE (e, ei, bb->preds)
3702 if (e->flags & EDGE_COMPLEX)
3703 return false;
a62f9dca 3704 else if (e->flags & EDGE_FALLTHRU)
3705 {
3706 rtx note;
3707 /* If prev bb ends with asm goto, see if any of the
3708 ASM_OPERANDS_LABELs don't point to the fallthru
3709 label. Do not attempt to redirect it in that case. */
3710 if (JUMP_P (BB_END (e->src))
3711 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
3712 {
3713 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
3714
3715 for (i = 0; i < n; ++i)
3716 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
3717 return false;
3718 }
3719 }
df6266b9 3720
e1ab7874 3721 free_data_sets (bb);
3722
3723 /* Do not delete BB if it has more than one successor.
3724 That can occur when we are moving a jump. */
3725 if (!single_succ_p (bb))
3726 {
3727 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3728 sel_merge_blocks (bb->prev_bb, bb);
3729 return true;
3730 }
3731
3732 succ_bb = single_succ (bb);
3733 rescan_p = true;
3734 pred_bb = NULL;
f1f41a6c 3735 dom_bbs.create (0);
e1ab7874 3736
ef4cf572 3737 /* Save a pred/succ from the current region to attach the notes to. */
3738 note_bb = NULL;
3739 FOR_EACH_EDGE (e, ei, bb->preds)
3740 if (in_current_region_p (e->src))
3741 {
3742 note_bb = e->src;
3743 break;
3744 }
3745 if (note_bb == NULL)
3746 note_bb = succ_bb;
3747
e1ab7874 3748 /* Redirect all non-fallthru edges to the next bb. */
3749 while (rescan_p)
3750 {
e1ab7874 3751 rescan_p = false;
3752
3753 FOR_EACH_EDGE (e, ei, bb->preds)
3754 {
3755 pred_bb = e->src;
3756
3757 if (!(e->flags & EDGE_FALLTHRU))
3758 {
6f0e7980 3759 /* We cannot invalidate the computed topological order by moving
1a5dbaab 3760 the edge destination block (E->SUCC) along a fallthru edge.
3761
3762 We will update dominators here only when we get
3763 an unreachable block when redirecting, otherwise
3764 sel_redirect_edge_and_branch will take care of it. */
3765 if (e->dest != bb
3766 && single_pred_p (e->dest))
f1f41a6c 3767 dom_bbs.safe_push (e->dest);
6f0e7980 3768 sel_redirect_edge_and_branch (e, succ_bb);
e1ab7874 3769 rescan_p = true;
3770 break;
3771 }
6f0e7980 3772 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3773 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3774 still have to adjust it. */
3775 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3776 {
3777 /* If possible, try to remove the unneeded conditional jump. */
3778 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3779 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3780 {
3781 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3782 tidy_fallthru_edge (e);
3783 }
3784 else
3785 sel_redirect_edge_and_branch (e, succ_bb);
3786 rescan_p = true;
3787 break;
3788 }
e1ab7874 3789 }
3790 }
3791
e1ab7874 3792 if (can_merge_blocks_p (bb->prev_bb, bb))
3793 sel_merge_blocks (bb->prev_bb, bb);
3794 else
e1ab7874 3795 {
0424f393 3796 /* This is a block without a fallthru predecessor. Just delete it. */
ef4cf572 3797 gcc_assert (note_bb);
3798 move_bb_info (note_bb, bb);
e1ab7874 3799 remove_empty_bb (bb, true);
3800 }
3801
f1f41a6c 3802 if (!dom_bbs.is_empty ())
1a5dbaab 3803 {
f1f41a6c 3804 dom_bbs.safe_push (succ_bb);
1a5dbaab 3805 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
f1f41a6c 3806 dom_bbs.release ();
1a5dbaab 3807 }
3808
e1ab7874 3809 return true;
3810}
3811
48e1416a 3812/* Tidy the control flow after we have removed the original insn from
e1ab7874 3813 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3814 is true, also try to optimize control flow on non-empty blocks. */
3815bool
3816tidy_control_flow (basic_block xbb, bool full_tidying)
3817{
3818 bool changed = true;
9845d120 3819 insn_t first, last;
48e1416a 3820
e1ab7874 3821 /* First check whether XBB is empty. */
6f0e7980 3822 changed = maybe_tidy_empty_bb (xbb);
e1ab7874 3823 if (changed || !full_tidying)
3824 return changed;
48e1416a 3825
e1ab7874 3826 /* Check if there is an unnecessary jump left after the insn. */
49087fba 3827 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
e1ab7874 3828 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3829 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3830 {
3831 if (sel_remove_insn (BB_END (xbb), false, false))
3832 return true;
3833 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3834 }
3835
9845d120 3836 first = sel_bb_head (xbb);
3837 last = sel_bb_end (xbb);
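  /* With debug insns present, narrow FIRST and LAST past any leading or
     trailing debug insns and notes, so that the nop-only test below is
     not confused by them.  */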
3838 if (MAY_HAVE_DEBUG_INSNS)
3839 {
3840 if (first != last && DEBUG_INSN_P (first))
3841 do
3842 first = NEXT_INSN (first);
3843 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3844
3845 if (first != last && DEBUG_INSN_P (last))
3846 do
3847 last = PREV_INSN (last);
3848 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3849 }
e1ab7874 3850 /* Check if, after removing INSN from the stream, an unnecessary jump
48e1416a 3851 is left in the previous basic block leading to the next basic block.
3852 If so, remove that jump and redirect the edge to the current
3853 basic block (where INSN was before deletion). This way,
3854 when the NOP is deleted several instructions later together with its
3855 basic block, we will not get a jump to the next instruction, which
e1ab7874 3856 can be harmful. */
9845d120 3857 if (first == last
e1ab7874 3858 && !sel_bb_empty_p (xbb)
9845d120 3859 && INSN_NOP_P (last)
e1ab7874 3860 /* Flow goes fallthru from current block to the next. */
3861 && EDGE_COUNT (xbb->succs) == 1
3862 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3863 /* When successor is an EXIT block, it may not be the next block. */
34154e27 3864 && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
e1ab7874 3865 /* And unconditional jump in previous basic block leads to
3866 next basic block of XBB and this jump can be safely removed. */
3867 && in_current_region_p (xbb->prev_bb)
49087fba 3868 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
e1ab7874 3869 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3870 /* Also this jump is not at the scheduling boundary. */
3871 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3872 {
93919afc 3873 bool recompute_toporder_p;
e1ab7874 3874 /* Clear data structures of jump - jump itself will be removed
3875 by sel_redirect_edge_and_branch. */
3876 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
93919afc 3877 recompute_toporder_p
3878 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3879
e1ab7874 3880 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3881
3882 /* It can turn out that, after removing the unused jump, the basic block
3883 that contained that jump becomes empty too. In that case
3884 remove it as well. */
3885 if (sel_bb_empty_p (xbb->prev_bb))
6f0e7980 3886 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3887 if (recompute_toporder_p)
93919afc 3888 sel_recompute_toporder ();
e1ab7874 3889 }
7af466ad 3890
3891#ifdef ENABLE_CHECKING
3892 verify_backedges ();
1a5dbaab 3893 verify_dominators (CDI_DOMINATORS);
7af466ad 3894#endif
3895
e1ab7874 3896 return changed;
3897}
3898
93919afc 3899/* Purge meaningless empty blocks in the middle of a region. */
3900void
3901purge_empty_blocks (void)
3902{
a6e634c6 3903 int i;
93919afc 3904
a6e634c6 3905 /* Do not attempt to delete the first basic block in the region. */
3906 for (i = 1; i < current_nr_blocks; )
93919afc 3907 {
f5a6b05f 3908 basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
93919afc 3909
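      /* If the empty block at position I was removed, the following blocks
	 shift into its place, so retry at the same index; otherwise advance.  */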
6f0e7980 3910 if (maybe_tidy_empty_bb (b))
93919afc 3911 continue;
3912
3913 i++;
3914 }
3915}
3916
48e1416a 3917/* Rip INSN out of the insn stream. When ONLY_DISCONNECT is true,
3918 do not delete the insn's data, because it will be re-emitted later.
e1ab7874 3919 Return true if we have removed some blocks afterwards. */
3920bool
3921sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3922{
3923 basic_block bb = BLOCK_FOR_INSN (insn);
3924
3925 gcc_assert (INSN_IN_STREAM_P (insn));
3926
9845d120 3927 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3928 {
3929 expr_t expr;
3930 av_set_iterator i;
3931
3932 /* When we remove a debug insn that is head of a BB, it remains
3933 in the AV_SET of the block, but it shouldn't. */
3934 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3935 if (EXPR_INSN_RTX (expr) == insn)
3936 {
3937 av_set_iter_remove (&i);
3938 break;
3939 }
3940 }
3941
e1ab7874 3942 if (only_disconnect)
93ff53d3 3943 remove_insn (insn);
e1ab7874 3944 else
3945 {
93ff53d3 3946 delete_insn (insn);
e1ab7874 3947 clear_expr (INSN_EXPR (insn));
3948 }
3949
93ff53d3 3950 /* It is necessary to NULL these fields in case we are going to re-insert
3951 INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT
3952 case, but also for NOPs that we will return to the nop pool. */
4a57a2e8 3953 SET_PREV_INSN (insn) = NULL_RTX;
3954 SET_NEXT_INSN (insn) = NULL_RTX;
93ff53d3 3955 set_block_for_insn (insn, NULL);
e1ab7874 3956
3957 return tidy_control_flow (bb, full_tidying);
3958}
3959
3960/* Estimate number of the insns in BB. */
3961static int
3962sel_estimate_number_of_insns (basic_block bb)
3963{
3964 int res = 0;
3965 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3966
3967 for (; insn != next_tail; insn = NEXT_INSN (insn))
9845d120 3968 if (NONDEBUG_INSN_P (insn))
e1ab7874 3969 res++;
3970
3971 return res;
3972}
3973
3974/* We don't need separate luids for notes or labels. */
3975static int
3976sel_luid_for_non_insn (rtx x)
3977{
3978 gcc_assert (NOTE_P (x) || LABEL_P (x));
3979
3980 return -1;
3981}
3982
bdcc104c 3983/* Find the proper seqno for inserting at INSN by successors.
3984 Return -1 if no successors with positive seqno exist. */
e1ab7874 3985static int
2f3c9801 3986get_seqno_by_succs (rtx_insn *insn)
bdcc104c 3987{
3988 basic_block bb = BLOCK_FOR_INSN (insn);
2f3c9801 3989 rtx_insn *tmp = insn, *end = BB_END (bb);
bdcc104c 3990 int seqno;
3991 insn_t succ = NULL;
3992 succ_iterator si;
3993
3994 while (tmp != end)
3995 {
3996 tmp = NEXT_INSN (tmp);
3997 if (INSN_P (tmp))
3998 return INSN_SEQNO (tmp);
3999 }
4000
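  /* No real insn follows INSN in its block: take the minimum positive
     seqno among the normal successors of the block end.  */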
4001 seqno = INT_MAX;
4002
4003 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
4004 if (INSN_SEQNO (succ) > 0)
4005 seqno = MIN (seqno, INSN_SEQNO (succ));
4006
4007 if (seqno == INT_MAX)
4008 return -1;
4009
4010 return seqno;
4011}
4012
8d1881f5 4013/* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute
4014 seqno in corner cases. */
bdcc104c 4015static int
8d1881f5 4016get_seqno_for_a_jump (insn_t insn, int old_seqno)
e1ab7874 4017{
4018 int seqno;
4019
4020 gcc_assert (INSN_SIMPLEJUMP_P (insn));
4021
4022 if (!sel_bb_head_p (insn))
4023 seqno = INSN_SEQNO (PREV_INSN (insn));
4024 else
4025 {
4026 basic_block bb = BLOCK_FOR_INSN (insn);
4027
4028 if (single_pred_p (bb)
4029 && !in_current_region_p (single_pred (bb)))
4030 {
4031 /* We can have preds outside a region when splitting edges
48e1416a 4032 for pipelining of an outer loop. Use succ instead.
e1ab7874 4033 There should be only one of them. */
4034 insn_t succ = NULL;
4035 succ_iterator si;
4036 bool first = true;
48e1416a 4037
e1ab7874 4038 gcc_assert (flag_sel_sched_pipelining_outer_loops
4039 && current_loop_nest);
48e1416a 4040 FOR_EACH_SUCC_1 (succ, si, insn,
e1ab7874 4041 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4042 {
4043 gcc_assert (first);
4044 first = false;
4045 }
4046
4047 gcc_assert (succ != NULL);
4048 seqno = INSN_SEQNO (succ);
4049 }
4050 else
4051 {
4052 insn_t *preds;
4053 int n;
4054
4055 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
e1ab7874 4056
bdcc104c 4057 gcc_assert (n > 0);
4058 /* For one predecessor, use simple method. */
4059 if (n == 1)
4060 seqno = INSN_SEQNO (preds[0]);
4061 else
4062 seqno = get_seqno_by_preds (insn);
48e1416a 4063
e1ab7874 4064 free (preds);
4065 }
4066 }
4067
bdcc104c 4068 /* We were unable to find a good seqno among preds. */
4069 if (seqno < 0)
4070 seqno = get_seqno_by_succs (insn);
4071
8d1881f5 4072 if (seqno < 0)
4073 {
4074 /* The only case where this could be here legally is that the only
4075 unscheduled insn was a conditional jump that got removed and turned
4076 into this unconditional one. Initialize from the old seqno
4077 of that jump passed down to here. */
4078 seqno = old_seqno;
4079 }
bdcc104c 4080
8d1881f5 4081 gcc_assert (seqno >= 0);
e1ab7874 4082 return seqno;
4083}
4084
961d3eb8 4085/* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4086 with positive seqno exist. */
e1ab7874 4087int
91a55c11 4088get_seqno_by_preds (rtx_insn *insn)
e1ab7874 4089{
4090 basic_block bb = BLOCK_FOR_INSN (insn);
91a55c11 4091 rtx_insn *tmp = insn, *head = BB_HEAD (bb);
e1ab7874 4092 insn_t *preds;
4093 int n, i, seqno;
4094
4095 while (tmp != head)
bdcc104c 4096 {
e1ab7874 4097 tmp = PREV_INSN (tmp);
bdcc104c 4098 if (INSN_P (tmp))
4099 return INSN_SEQNO (tmp);
4100 }
48e1416a 4101
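  /* No real insn precedes INSN in its block: take the maximum seqno
     among the ends of the region predecessor blocks.  */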
e1ab7874 4102 cfg_preds (bb, &preds, &n);
4103 for (i = 0, seqno = -1; i < n; i++)
4104 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4105
e1ab7874 4106 return seqno;
4107}
4108
4109\f
4110
4111/* Extend pass-scope data structures for basic blocks. */
4112void
4113sel_extend_global_bb_info (void)
4114{
fe672ac0 4115 sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
e1ab7874 4116}
4117
4118/* Extend region-scope data structures for basic blocks. */
4119static void
4120extend_region_bb_info (void)
4121{
fe672ac0 4122 sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
e1ab7874 4123}
4124
4125/* Extend all data structures to fit for all basic blocks. */
4126static void
4127extend_bb_info (void)
4128{
4129 sel_extend_global_bb_info ();
4130 extend_region_bb_info ();
4131}
4132
4133/* Finalize pass-scope data structures for basic blocks. */
4134void
4135sel_finish_global_bb_info (void)
4136{
f1f41a6c 4137 sel_global_bb_info.release ();
e1ab7874 4138}
4139
4140/* Finalize region-scope data structures for basic blocks. */
4141static void
4142finish_region_bb_info (void)
4143{
f1f41a6c 4144 sel_region_bb_info.release ();
e1ab7874 4145}
4146\f
4147
4148/* Data for each insn in current region. */
1e094109 4149vec<sel_insn_data_def> s_i_d = vNULL;
e1ab7874 4150
e1ab7874 4151/* Extend data structures for insns from current region. */
4152static void
4153extend_insn_data (void)
4154{
4155 int reserve;
48e1416a 4156
e1ab7874 4157 sched_extend_target ();
4158 sched_deps_init (false);
4159
4160 /* Extend data structures for insns from current region. */
f1f41a6c 4161 reserve = (sched_max_luid + 1 - s_i_d.length ());
4162 if (reserve > 0 && ! s_i_d.space (reserve))
d9ab2038 4163 {
4164 int size;
4165
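      /* Grow geometrically (1.5x) while the vector is small; once it is
	 large, add a fixed slack of 1024 entries instead to limit the
	 memory overhead.  */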
4166 if (sched_max_luid / 2 > 1024)
4167 size = sched_max_luid + 1024;
4168 else
4169 size = 3 * sched_max_luid / 2;
48e1416a 4170
d9ab2038 4171
f1f41a6c 4172 s_i_d.safe_grow_cleared (size);
d9ab2038 4173 }
e1ab7874 4174}
4175
4176/* Finalize data structures for insns from current region. */
4177static void
4178finish_insns (void)
4179{
4180 unsigned i;
4181
4182 /* Clear here all dependence contexts that may have left from insns that were
4183 removed during the scheduling. */
f1f41a6c 4184 for (i = 0; i < s_i_d.length (); i++)
e1ab7874 4185 {
f1f41a6c 4186 sel_insn_data_def *sid_entry = &s_i_d[i];
48e1416a 4187
e1ab7874 4188 if (sid_entry->live)
4189 return_regset_to_pool (sid_entry->live);
4190 if (sid_entry->analyzed_deps)
4191 {
4192 BITMAP_FREE (sid_entry->analyzed_deps);
4193 BITMAP_FREE (sid_entry->found_deps);
4194 htab_delete (sid_entry->transformed_insns);
4195 free_deps (&sid_entry->deps_context);
4196 }
4197 if (EXPR_VINSN (&sid_entry->expr))
4198 {
4199 clear_expr (&sid_entry->expr);
48e1416a 4200
e1ab7874 4201 /* Also, clear CANT_MOVE bit here, because we really don't want it
4202 to be passed to the next region. */
4203 CANT_MOVE_BY_LUID (i) = 0;
4204 }
4205 }
48e1416a 4206
f1f41a6c 4207 s_i_d.release ();
e1ab7874 4208}
4209
4210/* A proxy to pass initialization data to init_insn (). */
4211static sel_insn_data_def _insn_init_ssid;
4212static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4213
4214/* If true create a new vinsn. Otherwise use the one from EXPR. */
4215static bool insn_init_create_new_vinsn_p;
4216
4217/* Set all necessary data for initialization of the new insn[s]. */
4218static expr_t
4219set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4220{
4221 expr_t x = &insn_init_ssid->expr;
4222
4223 copy_expr_onside (x, expr);
4224 if (vi != NULL)
4225 {
4226 insn_init_create_new_vinsn_p = false;
4227 change_vinsn_in_expr (x, vi);
4228 }
4229 else
4230 insn_init_create_new_vinsn_p = true;
4231
4232 insn_init_ssid->seqno = seqno;
4233 return x;
4234}
4235
4236/* Init data for INSN. */
4237static void
4238init_insn_data (insn_t insn)
4239{
4240 expr_t expr;
4241 sel_insn_data_t ssid = insn_init_ssid;
4242
4243 /* The fields mentioned below are special and hence are not being
4244 propagated to the new insns. */
4245 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4246 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4247 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4248
4249 expr = INSN_EXPR (insn);
4250 copy_expr (expr, &ssid->expr);
4251 prepare_insn_expr (insn, ssid->seqno);
4252
4253 if (insn_init_create_new_vinsn_p)
4254 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
48e1416a 4255
e1ab7874 4256 if (first_time_insn_init (insn))
4257 init_first_time_insn_data (insn);
4258}
4259
4260/* This is used to initialize spurious jumps generated by
8d1881f5 4261 sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos
4262 in corner cases within get_seqno_for_a_jump. */
e1ab7874 4263static void
8d1881f5 4264init_simplejump_data (insn_t insn, int old_seqno)
e1ab7874 4265{
4266 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
f1f41a6c 4267 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
1e094109 4268 vNULL, true, false, false,
e1ab7874 4269 false, true);
8d1881f5 4270 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno);
e1ab7874 4271 init_first_time_insn_data (insn);
4272}
4273
48e1416a 4274/* Perform deferred initialization of insns. This is used to process
8d1881f5 4275 a new jump that may be created by redirect_edge. OLD_SEQNO is used
4276 for initializing simplejumps in init_simplejump_data. */
4277static void
4278sel_init_new_insn (insn_t insn, int flags, int old_seqno)
e1ab7874 4279{
4280 /* We create data structures for bb when the first insn is emitted in it. */
4281 if (INSN_P (insn)
4282 && INSN_IN_STREAM_P (insn)
4283 && insn_is_the_only_one_in_bb_p (insn))
4284 {
4285 extend_bb_info ();
4286 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4287 }
48e1416a 4288
e1ab7874 4289 if (flags & INSN_INIT_TODO_LUID)
52d7e28c 4290 {
4291 sched_extend_luids ();
4292 sched_init_insn_luid (insn);
4293 }
e1ab7874 4294
4295 if (flags & INSN_INIT_TODO_SSID)
4296 {
4297 extend_insn_data ();
4298 init_insn_data (insn);
4299 clear_expr (&insn_init_ssid->expr);
4300 }
4301
4302 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4303 {
4304 extend_insn_data ();
8d1881f5 4305 init_simplejump_data (insn, old_seqno);
e1ab7874 4306 }
48e1416a 4307
e1ab7874 4308 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4309 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4310}
4311\f
4312
4313/* Functions to init/finish work with lv sets. */
4314
4315/* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4316static void
4317init_lv_set (basic_block bb)
4318{
4319 gcc_assert (!BB_LV_SET_VALID_P (bb));
4320
4321 BB_LV_SET (bb) = get_regset_from_pool ();
48e1416a 4322 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
e1ab7874 4323 BB_LV_SET_VALID_P (bb) = true;
4324}
4325
4326/* Copy liveness information to BB from FROM_BB. */
4327static void
4328copy_lv_set_from (basic_block bb, basic_block from_bb)
4329{
4330 gcc_assert (!BB_LV_SET_VALID_P (bb));
48e1416a 4331
e1ab7874 4332 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4333 BB_LV_SET_VALID_P (bb) = true;
48e1416a 4334}
e1ab7874 4335
4336/* Initialize lv set of all bb headers. */
4337void
4338init_lv_sets (void)
4339{
4340 basic_block bb;
4341
4342 /* Initialize of LV sets. */
fc00614f 4343 FOR_EACH_BB_FN (bb, cfun)
e1ab7874 4344 init_lv_set (bb);
4345
4346 /* Don't forget EXIT_BLOCK. */
34154e27 4347 init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
e1ab7874 4348}
4349
4350/* Release lv set of HEAD. */
4351static void
4352free_lv_set (basic_block bb)
4353{
4354 gcc_assert (BB_LV_SET (bb) != NULL);
4355
4356 return_regset_to_pool (BB_LV_SET (bb));
4357 BB_LV_SET (bb) = NULL;
4358 BB_LV_SET_VALID_P (bb) = false;
4359}
4360
4361/* Finalize lv sets of all bb headers. */
4362void
4363free_lv_sets (void)
4364{
4365 basic_block bb;
4366
4367 /* Don't forget EXIT_BLOCK. */
34154e27 4368 free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
e1ab7874 4369
4370 /* Free LV sets. */
fc00614f 4371 FOR_EACH_BB_FN (bb, cfun)
e1ab7874 4372 if (BB_LV_SET (bb))
4373 free_lv_set (bb);
4374}
4375
c1c8a3d0 4376/* Mark AV_SET for BB as invalid, so this set will be updated the next time
4377 compute_av() processes BB. This function is called when creating new basic
4378 blocks, as well as for blocks (either new or existing) where new jumps are
4379 created when the control flow is being updated. */
e1ab7874 4380static void
4381invalidate_av_set (basic_block bb)
4382{
e1ab7874 4383 BB_AV_LEVEL (bb) = -1;
4384}
4385
4386/* Create initial data sets for BB (they will be invalid). */
4387static void
4388create_initial_data_sets (basic_block bb)
4389{
4390 if (BB_LV_SET (bb))
4391 BB_LV_SET_VALID_P (bb) = false;
4392 else
4393 BB_LV_SET (bb) = get_regset_from_pool ();
4394 invalidate_av_set (bb);
4395}
4396
4397/* Free av set of BB. */
4398static void
4399free_av_set (basic_block bb)
4400{
4401 av_set_clear (&BB_AV_SET (bb));
4402 BB_AV_LEVEL (bb) = 0;
4403}
4404
4405/* Free data sets of BB. */
4406void
4407free_data_sets (basic_block bb)
4408{
4409 free_lv_set (bb);
4410 free_av_set (bb);
4411}
4412
e1ab7874 4413/* Exchange data sets of TO and FROM. */
4414void
4415exchange_data_sets (basic_block to, basic_block from)
4416{
a4f59596 4417 /* Exchange lv sets of TO and FROM. */
4418 std::swap (BB_LV_SET (from), BB_LV_SET (to));
4419 std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to));
4420
4421 /* Exchange av sets of TO and FROM. */
4422 std::swap (BB_AV_SET (from), BB_AV_SET (to));
4423 std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to));
e1ab7874 4424}
4425
4426/* Copy data sets of FROM to TO. */
4427void
4428copy_data_sets (basic_block to, basic_block from)
4429{
4430 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4431 gcc_assert (BB_AV_SET (to) == NULL);
4432
4433 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4434 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4435
4436 if (BB_AV_SET_VALID_P (from))
4437 {
4438 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4439 }
4440 if (BB_LV_SET_VALID_P (from))
4441 {
4442 gcc_assert (BB_LV_SET (to) != NULL);
4443 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4444 }
4445}
4446
4447/* Return an av set for INSN, if any. */
4448av_set_t
4449get_av_set (insn_t insn)
4450{
4451 av_set_t av_set;
4452
4453 gcc_assert (AV_SET_VALID_P (insn));
4454
4455 if (sel_bb_head_p (insn))
4456 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4457 else
4458 av_set = NULL;
4459
4460 return av_set;
4461}
4462
4463/* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4464int
4465get_av_level (insn_t insn)
4466{
4467 int av_level;
4468
4469 gcc_assert (INSN_P (insn));
4470
4471 if (sel_bb_head_p (insn))
4472 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4473 else
4474 av_level = INSN_WS_LEVEL (insn);
4475
4476 return av_level;
4477}
4478
4479\f
4480
4481/* Variables to work with control-flow graph. */
4482
4483/* The basic blocks that have already been processed by sched_data_update (),
4484 but haven't been passed to sel_add_bb () yet. */
f1f41a6c 4485static vec<basic_block>
1e094109 4486 last_added_blocks = vNULL;
e1ab7874 4487
4488/* A pool for allocating successor infos. */
4489static struct
4490{
4491 /* A stack for saving succs_info structures. */
4492 struct succs_info *stack;
4493
4494 /* Its size. */
4495 int size;
4496
4497 /* Top of the stack. */
4498 int top;
4499
4500 /* Maximal value of the top. */
4501 int max_top;
4502} succs_info_pool;
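/* The pool behaves as a stack: free_succs_info checks that the structure
   being freed is the most recently allocated one, so allocations and
   frees must be properly nested.  */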
4503
4504/* Functions to work with control-flow graph. */
4505
4506/* Return the head (the first real insn) of BB, or NULL if BB is empty. */
179c282d 4507rtx_insn *
e1ab7874 4508sel_bb_head (basic_block bb)
4509{
179c282d 4510 rtx_insn *head;
e1ab7874 4511
34154e27 4512 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
e1ab7874 4513 {
4514 gcc_assert (exit_insn != NULL_RTX);
4515 head = exit_insn;
4516 }
4517 else
4518 {
9ed997be 4519 rtx_note *note = bb_note (bb);
e1ab7874 4520 head = next_nonnote_insn (note);
4521
cabd2128 4522 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
179c282d 4523 head = NULL;
e1ab7874 4524 }
4525
4526 return head;
4527}
4528
4529/* Return true if INSN is a basic block header. */
4530bool
4531sel_bb_head_p (insn_t insn)
4532{
4533 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4534}
4535
4536/* Return last insn of BB. */
179c282d 4537rtx_insn *
e1ab7874 4538sel_bb_end (basic_block bb)
4539{
4540 if (sel_bb_empty_p (bb))
179c282d 4541 return NULL;
e1ab7874 4542
34154e27 4543 gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
e1ab7874 4544
4545 return BB_END (bb);
4546}
4547
4548/* Return true if INSN is the last insn in its basic block. */
4549bool
4550sel_bb_end_p (insn_t insn)
4551{
4552 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4553}
4554
4555/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4556bool
4557sel_bb_empty_p (basic_block bb)
4558{
4559 return sel_bb_head (bb) == NULL;
4560}
4561
4562/* True when BB belongs to the current scheduling region. */
4563bool
4564in_current_region_p (basic_block bb)
4565{
4566 if (bb->index < NUM_FIXED_BLOCKS)
4567 return false;
4568
4569 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4570}
4571
4572/* Return the block which is a fallthru bb of a conditional jump JUMP. */
4573basic_block
93ee8dfb 4574fallthru_bb_of_jump (const rtx_insn *jump)
e1ab7874 4575{
4576 if (!JUMP_P (jump))
4577 return NULL;
4578
e1ab7874 4579 if (!any_condjump_p (jump))
4580 return NULL;
4581
bf19734b 4582 /* A basic block that ends with a conditional jump may still have one successor
4583 (and be followed by a barrier); we are not interested in such blocks. */
4584 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4585 return NULL;
4586
e1ab7874 4587 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4588}
4589
4590/* Remove all notes from BB, saving them in BB_NOTE_LIST. */
4591static void
4592init_bb (basic_block bb)
4593{
4594 remove_notes (bb_note (bb), BB_END (bb));
e97a173d 4595 BB_NOTE_LIST (bb) = note_list;
e1ab7874 4596}
4597
4598void
52d7e28c 4599sel_init_bbs (bb_vec_t bbs)
e1ab7874 4600{
4601 const struct sched_scan_info_def ssi =
4602 {
4603 extend_bb_info, /* extend_bb */
4604 init_bb, /* init_bb */
4605 NULL, /* extend_insn */
4606 NULL /* init_insn */
4607 };
4608
52d7e28c 4609 sched_scan (&ssi, bbs);
e1ab7874 4610}
4611
3baa98a0 4612/* Restore notes for the whole region. */
e1ab7874 4613static void
3baa98a0 4614sel_restore_notes (void)
e1ab7874 4615{
4616 int bb;
3baa98a0 4617 insn_t insn;
e1ab7874 4618
4619 for (bb = 0; bb < current_nr_blocks; bb++)
4620 {
4621 basic_block first, last;
4622
4623 first = EBB_FIRST_BB (bb);
4624 last = EBB_LAST_BB (bb)->next_bb;
4625
4626 do
4627 {
4628 note_list = BB_NOTE_LIST (first);
4629 restore_other_notes (NULL, first);
e97a173d 4630 BB_NOTE_LIST (first) = NULL;
e1ab7874 4631
3baa98a0 4632 FOR_BB_INSNS (first, insn)
4633 if (NONDEBUG_INSN_P (insn))
4634 reemit_notes (insn);
4635
e1ab7874 4636 first = first->next_bb;
4637 }
4638 while (first != last);
4639 }
4640}
4641
4642/* Free per-bb data structures. */
4643void
4644sel_finish_bbs (void)
4645{
3baa98a0 4646 sel_restore_notes ();
e1ab7874 4647
4648 /* Remove current loop preheader from this loop. */
4649 if (current_loop_nest)
4650 sel_remove_loop_preheader ();
4651
4652 finish_region_bb_info ();
4653}
4654
4655/* Return true if INSN has at most one successor of type FLAGS. */
4656bool
4657sel_insn_has_single_succ_p (insn_t insn, int flags)
4658{
4659 insn_t succ;
4660 succ_iterator si;
4661 bool first_p = true;
4662
4663 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4664 {
4665 if (first_p)
4666 first_p = false;
4667 else
4668 return false;
4669 }
4670
4671 return true;
4672}
4673
4674/* Allocate successor's info. */
4675static struct succs_info *
4676alloc_succs_info (void)
4677{
4678 if (succs_info_pool.top == succs_info_pool.max_top)
4679 {
4680 int i;
48e1416a 4681
e1ab7874 4682 if (++succs_info_pool.max_top >= succs_info_pool.size)
4683 gcc_unreachable ();
4684
4685 i = ++succs_info_pool.top;
f1f41a6c 4686 succs_info_pool.stack[i].succs_ok.create (10);
4687 succs_info_pool.stack[i].succs_other.create (10);
4688 succs_info_pool.stack[i].probs_ok.create (10);
e1ab7874 4689 }
4690 else
4691 succs_info_pool.top++;
4692
4693 return &succs_info_pool.stack[succs_info_pool.top];
4694}
4695
4696/* Free successor's info. */
4697void
4698free_succs_info (struct succs_info * sinfo)
4699{
48e1416a 4700 gcc_assert (succs_info_pool.top >= 0
e1ab7874 4701 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4702 succs_info_pool.top--;
4703
4704 /* Clear stale info. */
f1f41a6c 4705 sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
4706 sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
4707 sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
e1ab7874 4708 sinfo->all_prob = 0;
4709 sinfo->succs_ok_n = 0;
4710 sinfo->all_succs_n = 0;
4711}
4712
48e1416a 4713/* Compute successor info for INSN. FLAGS are the flags passed
e1ab7874 4714 to the FOR_EACH_SUCC_1 iterator. */
4715struct succs_info *
4716compute_succs_info (insn_t insn, short flags)
4717{
4718 succ_iterator si;
4719 insn_t succ;
4720 struct succs_info *sinfo = alloc_succs_info ();
4721
4722 /* Traverse *all* successors and decide what to do with each. */
4723 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4724 {
4725 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4726 perform code motion through inner loops. */
4727 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4728
4729 if (current_flags & flags)
4730 {
f1f41a6c 4731 sinfo->succs_ok.safe_push (succ);
4732 sinfo->probs_ok.safe_push (
4733 /* FIXME: Improve calculation when skipping
4734 inner loop to exits. */
4735 si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
e1ab7874 4736 sinfo->succs_ok_n++;
4737 }
4738 else
f1f41a6c 4739 sinfo->succs_other.safe_push (succ);
e1ab7874 4740
4741 /* Compute all_prob. */
4742 if (!si.bb_end)
4743 sinfo->all_prob = REG_BR_PROB_BASE;
4744 else
4745 sinfo->all_prob += si.e1->probability;
4746
4747 sinfo->all_succs_n++;
4748 }
4749
4750 return sinfo;
4751}
4752
48e1416a 4753/* Return the predecessors of BB in PREDS and their number in N.
e1ab7874 4754 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4755static void
4756cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4757{
4758 edge e;
4759 edge_iterator ei;
4760
4761 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4762
4763 FOR_EACH_EDGE (e, ei, bb->preds)
4764 {
4765 basic_block pred_bb = e->src;
4766 insn_t bb_end = BB_END (pred_bb);
4767
f1ec9c64 4768 if (!in_current_region_p (pred_bb))
4769 {
4770 gcc_assert (flag_sel_sched_pipelining_outer_loops
4771 && current_loop_nest);
4772 continue;
4773 }
e1ab7874 4774
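      /* An empty block contains no real insn that could act as a
	 predecessor; look through it to its own predecessors instead.  */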
4775 if (sel_bb_empty_p (pred_bb))
4776 cfg_preds_1 (pred_bb, preds, n, size);
4777 else
4778 {
4779 if (*n == *size)
48e1416a 4780 *preds = XRESIZEVEC (insn_t, *preds,
e1ab7874 4781 (*size = 2 * *size + 1));
4782 (*preds)[(*n)++] = bb_end;
4783 }
4784 }
4785
f1ec9c64 4786 gcc_assert (*n != 0
4787 || (flag_sel_sched_pipelining_outer_loops
4788 && current_loop_nest));
e1ab7874 4789}
4790
48e1416a 4791/* Find all predecessors of BB and record them in PREDS and their number
4792 in N. Empty blocks are skipped, and only normal (forward in-region)
e1ab7874 4793 edges are processed. */
4794static void
4795cfg_preds (basic_block bb, insn_t **preds, int *n)
4796{
4797 int size = 0;
4798
4799 *preds = NULL;
4800 *n = 0;
4801 cfg_preds_1 (bb, preds, n, &size);
4802}
4803
4804/* Return true if we are moving INSN through a join point. */
4805bool
4806sel_num_cfg_preds_gt_1 (insn_t insn)
4807{
4808 basic_block bb;
4809
4810 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4811 return false;
4812
4813 bb = BLOCK_FOR_INSN (insn);
4814
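  /* Walk up through a chain of empty single-pred blocks: a join point
     may be hidden behind them.  */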
4815 while (1)
4816 {
4817 if (EDGE_COUNT (bb->preds) > 1)
4818 return true;
4819
4820 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4821 bb = EDGE_PRED (bb, 0)->src;
4822
4823 if (!sel_bb_empty_p (bb))
4824 break;
4825 }
4826
4827 return false;
4828}
4829
48e1416a 4830/* Returns true when BB should be the end of an ebb. Adapted from the
e1ab7874 4831 code in sched-ebb.c. */
4832bool
4833bb_ends_ebb_p (basic_block bb)
4834{
4835 basic_block next_bb = bb_next_bb (bb);
4836 edge e;
48e1416a 4837
34154e27 4838 if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
e1ab7874 4839 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4840 || (LABEL_P (BB_HEAD (next_bb))
4841 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4842 Work around that. */
4843 && !single_pred_p (next_bb)))
4844 return true;
4845
4846 if (!in_current_region_p (next_bb))
4847 return true;
4848
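  /* If BB has a fallthru edge, it must lead to NEXT_BB and the ebb may
     continue there; without a fallthru edge, BB ends the ebb.  */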
7f58c05e 4849 e = find_fallthru_edge (bb->succs);
4850 if (e)
4851 {
4852 gcc_assert (e->dest == next_bb);
4853
4854 return false;
4855 }
e1ab7874 4856
4857 return true;
4858}
4859
4860/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4861 successor of INSN. */
4862bool
4863in_same_ebb_p (insn_t insn, insn_t succ)
4864{
4865 basic_block ptr = BLOCK_FOR_INSN (insn);
4866
9af5ce0c 4867 for (;;)
e1ab7874 4868 {
4869 if (ptr == BLOCK_FOR_INSN (succ))
4870 return true;
48e1416a 4871
e1ab7874 4872 if (bb_ends_ebb_p (ptr))
4873 return false;
4874
4875 ptr = bb_next_bb (ptr);
4876 }
4877
4878 gcc_unreachable ();
4879 return false;
4880}
4881
4882/* Recomputes the reverse topological order for the function and
4883 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4884 modified appropriately. */
4885static void
4886recompute_rev_top_order (void)
4887{
4888 int *postorder;
4889 int n_blocks, i;
4890
fe672ac0 4891 if (!rev_top_order_index
4892 || rev_top_order_index_len < last_basic_block_for_fn (cfun))
e1ab7874 4893 {
fe672ac0 4894 rev_top_order_index_len = last_basic_block_for_fn (cfun);
e1ab7874 4895 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4896 rev_top_order_index_len);
4897 }
4898
a28770e1 4899 postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
e1ab7874 4900
4901 n_blocks = post_order_compute (postorder, true, false);
a28770e1 4902 gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
e1ab7874 4903
4904 /* Build reverse function: for each basic block with BB->INDEX == K
4905 rev_top_order_index[K] is its reverse topological sort number. */
4906 for (i = 0; i < n_blocks; i++)
4907 {
4908 gcc_assert (postorder[i] < rev_top_order_index_len);
4909 rev_top_order_index[postorder[i]] = i;
4910 }
4911
4912 free (postorder);
4913}
4914
4915/* Clear all flags from insns in BB that could spoil its rescheduling. */
4916void
4917clear_outdated_rtx_info (basic_block bb)
4918{
91a55c11 4919 rtx_insn *insn;
e1ab7874 4920
4921 FOR_BB_INSNS (bb, insn)
4922 if (INSN_P (insn))
4923 {
4924 SCHED_GROUP_P (insn) = 0;
4925 INSN_AFTER_STALL_P (insn) = 0;
4926 INSN_SCHED_TIMES (insn) = 0;
4927 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4928
4929 /* We cannot use the changed caches, as previously we could ignore
48e1416a 4930 the LHS dependence due to enabled renaming and transform
e1ab7874 4931 the expression, and currently we'll be unable to do this. */
4932 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4933 }
4934}
4935
4936/* Add BB_NOTE to the pool of available basic block notes. */
4937static void
4938return_bb_to_pool (basic_block bb)
4939{
9ed997be 4940 rtx_note *note = bb_note (bb);
e1ab7874 4941
4942 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4943 && bb->aux == NULL);
4944
4945 /* It turns out that current cfg infrastructure does not support
4946 reuse of basic blocks. Don't bother for now. */
f1f41a6c 4947 /*bb_note_pool.safe_push (note);*/
e1ab7874 4948}
4949
4950/* Get a bb_note from pool or return NULL_RTX if pool is empty. */
cef3d8ad 4951static rtx_note *
e1ab7874 4952get_bb_note_from_pool (void)
4953{
f1f41a6c 4954 if (bb_note_pool.is_empty ())
cef3d8ad 4955 return NULL;
e1ab7874 4956 else
4957 {
cef3d8ad 4958 rtx_note *note = bb_note_pool.pop ();
e1ab7874 4959
4a57a2e8 4960 SET_PREV_INSN (note) = NULL_RTX;
4961 SET_NEXT_INSN (note) = NULL_RTX;
e1ab7874 4962
4963 return note;
4964 }
4965}
4966
4967/* Free bb_note_pool. */
4968void
4969free_bb_note_pool (void)
4970{
f1f41a6c 4971 bb_note_pool.release ();
e1ab7874 4972}
4973
4974/* Setup scheduler pool and successor structure. */
4975void
4976alloc_sched_pools (void)
4977{
4978 int succs_size;
4979
4980 succs_size = MAX_WS + 1;
48e1416a 4981 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
e1ab7874 4982 succs_info_pool.size = succs_size;
4983 succs_info_pool.top = -1;
4984 succs_info_pool.max_top = -1;
e1ab7874 4985}
4986
4987/* Free the pools. */
4988void
4989free_sched_pools (void)
4990{
4991 int i;
48e1416a 4992
e26b6f42 4993 sched_lists_pool.release ();
e1ab7874 4994 gcc_assert (succs_info_pool.top == -1);
862c1934 4995 for (i = 0; i <= succs_info_pool.max_top; i++)
e1ab7874 4996 {
f1f41a6c 4997 succs_info_pool.stack[i].succs_ok.release ();
4998 succs_info_pool.stack[i].succs_other.release ();
4999 succs_info_pool.stack[i].probs_ok.release ();
e1ab7874 5000 }
5001 free (succs_info_pool.stack);
5002}
5003\f
5004
48e1416a 5005/* Returns a position in RGN where BB can be inserted retaining
e1ab7874 5006 topological order. */
5007static int
5008find_place_to_insert_bb (basic_block bb, int rgn)
5009{
5010 bool has_preds_outside_rgn = false;
5011 edge e;
5012 edge_iterator ei;
48e1416a 5013
e1ab7874 5014 /* Find whether we have preds outside the region. */
5015 FOR_EACH_EDGE (e, ei, bb->preds)
5016 if (!in_current_region_p (e->src))
5017 {
5018 has_preds_outside_rgn = true;
5019 break;
5020 }
48e1416a 5021
e1ab7874 5022 /* Recompute the top order -- needed when we have > 1 pred
5023 and in case we don't have preds outside. */
5024 if (flag_sel_sched_pipelining_outer_loops
5025 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5026 {
5027 int i, bbi = bb->index, cur_bbi;
5028
5029 recompute_rev_top_order ();
5030 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5031 {
5032 cur_bbi = BB_TO_BLOCK (i);
48e1416a 5033 if (rev_top_order_index[bbi]
e1ab7874 5034 < rev_top_order_index[cur_bbi])
5035 break;
5036 }
48e1416a 5037
9d75589a 5038 /* The loop went one block past the right place, so increase I; the
e1ab7874 5039 caller will add one to the result itself, so decrease it back. */
5040 return (i + 1) - 1;
5041 }
5042 else if (has_preds_outside_rgn)
5043 {
5044 /* This is the case when we generate an extra empty block
5045 to serve as region head during pipelining. */
5046 e = EDGE_SUCC (bb, 0);
5047 gcc_assert (EDGE_COUNT (bb->succs) == 1
5048 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5049 && (BLOCK_TO_BB (e->dest->index) == 0));
5050 return -1;
5051 }
5052
5053 /* We don't have preds outside the region. We should have
5054 a single pred, because the multiple-preds case comes from
5055 the pipelining of outer loops, and that is handled above.
5056 Just take the bbi of this single pred. */
5057 if (EDGE_COUNT (bb->succs) > 0)
5058 {
5059 int pred_bbi;
48e1416a 5060
e1ab7874 5061 gcc_assert (EDGE_COUNT (bb->preds) == 1);
48e1416a 5062
e1ab7874 5063 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5064 return BLOCK_TO_BB (pred_bbi);
5065 }
5066 else
5067 /* BB has no successors. It is safe to put it in the end. */
5068 return current_nr_blocks - 1;
5069}
5070
5071/* Deletes an empty basic block freeing its data. */
5072static void
5073delete_and_free_basic_block (basic_block bb)
5074{
5075 gcc_assert (sel_bb_empty_p (bb));
5076
5077 if (BB_LV_SET (bb))
5078 free_lv_set (bb);
5079
5080 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5081
48e1416a 5082 /* Can't assert av_set properties because we use sel_aremove_bb
5083 when removing loop preheader from the region. At the point of
e1ab7874 5084 removing the preheader we already have deallocated sel_region_bb_info. */
5085 gcc_assert (BB_LV_SET (bb) == NULL
5086 && !BB_LV_SET_VALID_P (bb)
5087 && BB_AV_LEVEL (bb) == 0
5088 && BB_AV_SET (bb) == NULL);
48e1416a 5089
e1ab7874 5090 delete_basic_block (bb);
5091}
5092
5093/* Add BB to the current region and update the region data. */
5094static void
5095add_block_to_current_region (basic_block bb)
5096{
5097 int i, pos, bbi = -2, rgn;
5098
5099 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5100 bbi = find_place_to_insert_bb (bb, rgn);
5101 bbi += 1;
5102 pos = RGN_BLOCKS (rgn) + bbi;
5103
5104 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5105 && ebb_head[bbi] == pos);
48e1416a 5106
e1ab7874 5107 /* Make a place for the new block. */
5108 extend_regions ();
5109
5110 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5111 BLOCK_TO_BB (rgn_bb_table[i])++;
48e1416a 5112
e1ab7874 5113 memmove (rgn_bb_table + pos + 1,
5114 rgn_bb_table + pos,
5115 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5116
5117 /* Initialize data for BB. */
5118 rgn_bb_table[pos] = bb->index;
5119 BLOCK_TO_BB (bb->index) = bbi;
5120 CONTAINING_RGN (bb->index) = rgn;
5121
5122 RGN_NR_BLOCKS (rgn)++;
48e1416a 5123
e1ab7874 5124 for (i = rgn + 1; i <= nr_regions; i++)
5125 RGN_BLOCKS (i)++;
5126}
5127
5128/* Remove BB from the current region and update the region data. */
5129static void
5130remove_bb_from_region (basic_block bb)
5131{
5132 int i, pos, bbi = -2, rgn;
5133
5134 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5135 bbi = BLOCK_TO_BB (bb->index);
5136 pos = RGN_BLOCKS (rgn) + bbi;
5137
5138 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5139 && ebb_head[bbi] == pos);
5140
5141 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5142 BLOCK_TO_BB (rgn_bb_table[i])--;
5143
5144 memmove (rgn_bb_table + pos,
5145 rgn_bb_table + pos + 1,
5146 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5147
5148 RGN_NR_BLOCKS (rgn)--;
5149 for (i = rgn + 1; i <= nr_regions; i++)
5150 RGN_BLOCKS (i)--;
5151}
5152
48e1416a 5153/* Add BB to the current region and update all data. If BB is NULL, add all
e1ab7874 5154 blocks from last_added_blocks vector. */
5155static void
5156sel_add_bb (basic_block bb)
5157{
5158 /* Extend luids so that new notes will receive zero luids. */
52d7e28c 5159 sched_extend_luids ();
e1ab7874 5160 sched_init_bbs ();
52d7e28c 5161 sel_init_bbs (last_added_blocks);
e1ab7874 5162
48e1416a 5163 /* When BB is passed explicitly, the vector should contain
e1ab7874 5164 a single element equal to BB; otherwise, the vector
5165 should not be empty. */
f1f41a6c 5166 gcc_assert (last_added_blocks.exists ());
48e1416a 5167
e1ab7874 5168 if (bb != NULL)
5169 {
f1f41a6c 5170 gcc_assert (last_added_blocks.length () == 1
5171 && last_added_blocks[0] == bb);
e1ab7874 5172 add_block_to_current_region (bb);
5173
5174 /* We associate creating/deleting data sets with the first insn
5175 appearing / disappearing in the bb. */
5176 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5177 create_initial_data_sets (bb);
48e1416a 5178
f1f41a6c 5179 last_added_blocks.release ();
e1ab7874 5180 }
5181 else
5182 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5183 {
5184 int i;
5185 basic_block temp_bb = NULL;
5186
48e1416a 5187 for (i = 0;
f1f41a6c 5188 last_added_blocks.iterate (i, &bb); i++)
e1ab7874 5189 {
5190 add_block_to_current_region (bb);
5191 temp_bb = bb;
5192 }
5193
48e1416a 5194 /* We need to fetch at least one bb so we know the region
e1ab7874 5195 to update. */
5196 gcc_assert (temp_bb != NULL);
5197 bb = temp_bb;
5198
f1f41a6c 5199 last_added_blocks.release ();
e1ab7874 5200 }
5201
5202 rgn_setup_region (CONTAINING_RGN (bb->index));
5203}
5204
48e1416a 5205/* Remove BB from the current region and update all data.
e1ab7874 5206 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5207static void
5208sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5209{
0424f393 5210 unsigned idx = bb->index;
5211
e1ab7874 5212 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
48e1416a 5213
e1ab7874 5214 remove_bb_from_region (bb);
5215 return_bb_to_pool (bb);
0424f393 5216 bitmap_clear_bit (blocks_to_reschedule, idx);
48e1416a 5217
e1ab7874 5218 if (remove_from_cfg_p)
1a5dbaab 5219 {
5220 basic_block succ = single_succ (bb);
5221 delete_and_free_basic_block (bb);
5222 set_immediate_dominator (CDI_DOMINATORS, succ,
5223 recompute_dominator (CDI_DOMINATORS, succ));
5224 }
e1ab7874 5225
0424f393 5226 rgn_setup_region (CONTAINING_RGN (idx));
e1ab7874 5227}
5228
5229/* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5230static void
5231move_bb_info (basic_block merge_bb, basic_block empty_bb)
5232{
ef4cf572 5233 if (in_current_region_p (merge_bb))
5234 concat_note_lists (BB_NOTE_LIST (empty_bb),
e97a173d 5235 &BB_NOTE_LIST (merge_bb));
5236 BB_NOTE_LIST (empty_bb) = NULL;
e1ab7874 5237
5238}
5239
e1ab7874 5240/* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5241 region, but keep it in CFG. */
5242static void
5243remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5244{
5245 /* The block should contain just a note or a label.
5246 We try to check whether it is unused below. */
5247 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5248 || LABEL_P (BB_HEAD (empty_bb)));
5249
5250 /* If basic block has predecessors or successors, redirect them. */
5251 if (remove_from_cfg_p
5252 && (EDGE_COUNT (empty_bb->preds) > 0
5253 || EDGE_COUNT (empty_bb->succs) > 0))
5254 {
5255 basic_block pred;
5256 basic_block succ;
5257
5258 /* We need to init PRED and SUCC before redirecting edges. */
5259 if (EDGE_COUNT (empty_bb->preds) > 0)
5260 {
5261 edge e;
5262
5263 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5264
5265 e = EDGE_PRED (empty_bb, 0);
5266 gcc_assert (e->src == empty_bb->prev_bb
5267 && (e->flags & EDGE_FALLTHRU));
5268
5269 pred = empty_bb->prev_bb;
5270 }
5271 else
5272 pred = NULL;
5273
5274 if (EDGE_COUNT (empty_bb->succs) > 0)
5275 {
5276 /* We do not check fallthruness here as above, because
5277 after removing a jump the edge may actually not be fallthru. */
5278 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5279 succ = EDGE_SUCC (empty_bb, 0)->dest;
5280 }
5281 else
5282 succ = NULL;
5283
5284 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5285 {
5286 edge e = EDGE_PRED (empty_bb, 0);
5287
5288 if (e->flags & EDGE_FALLTHRU)
5289 redirect_edge_succ_nodup (e, succ);
5290 else
5291 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5292 }
5293
5294 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5295 {
5296 edge e = EDGE_SUCC (empty_bb, 0);
5297
5298 if (find_edge (pred, e->dest) == NULL)
5299 redirect_edge_pred (e, pred);
5300 }
5301 }
5302
5303 /* Finish removing. */
5304 sel_remove_bb (empty_bb, remove_from_cfg_p);
5305}
5306
48e1416a 5307/* An implementation of create_basic_block hook, which additionally updates
e1ab7874 5308 per-bb data structures. */
5309static basic_block
5310sel_create_basic_block (void *headp, void *endp, basic_block after)
5311{
5312 basic_block new_bb;
cef3d8ad 5313 rtx_note *new_bb_note;
48e1416a 5314
5315 gcc_assert (flag_sel_sched_pipelining_outer_loops
f1f41a6c 5316 || !last_added_blocks.exists ());
e1ab7874 5317
5318 new_bb_note = get_bb_note_from_pool ();
5319
5320 if (new_bb_note == NULL_RTX)
5321 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5322 else
5323 {
3c3f97b4 5324 new_bb = create_basic_block_structure ((rtx_insn *) headp,
5325 (rtx_insn *) endp,
e1ab7874 5326 new_bb_note, after);
5327 new_bb->aux = NULL;
5328 }
5329
f1f41a6c 5330 last_added_blocks.safe_push (new_bb);
e1ab7874 5331
5332 return new_bb;
5333}
5334
5335/* Implement sched_init_only_bb (). */
5336static void
5337sel_init_only_bb (basic_block bb, basic_block after)
5338{
5339 gcc_assert (after == NULL);
5340
5341 extend_regions ();
5342 rgn_make_new_region_out_of_new_block (bb);
5343}
5344
5345/* Update the latch when we've split or merged it from block FROM to TO.
5346 This should be checked for all outer loops, too. */
5347static void
5348change_loops_latches (basic_block from, basic_block to)
5349{
5350 gcc_assert (from != to);
5351
5352 if (current_loop_nest)
5353 {
5354 struct loop *loop;
5355
5356 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5357 if (considered_for_pipelining_p (loop) && loop->latch == from)
5358 {
5359 gcc_assert (loop == current_loop_nest);
5360 loop->latch = to;
5361 gcc_assert (loop_latch_edge (loop));
5362 }
5363 }
5364}
5365
48e1416a 5366/* Split BB into two basic blocks, adding the new block to the region and
e1ab7874 5367 extending per-bb data structures. Return the newly created bb. */
5368static basic_block
5369sel_split_block (basic_block bb, rtx after)
5370{
5371 basic_block new_bb;
5372 insn_t insn;
5373
5374 new_bb = sched_split_block_1 (bb, after);
5375 sel_add_bb (new_bb);
5376
5377 /* This should be called after sel_add_bb, because this uses
48e1416a 5378 CONTAINING_RGN for the new block, which is not yet initialized.
e1ab7874 5379 FIXME: this function may be a no-op now. */
5380 change_loops_latches (bb, new_bb);
5381
5382 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5383 FOR_BB_INSNS (new_bb, insn)
5384 if (INSN_P (insn))
5385 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5386
5387 if (sel_bb_empty_p (bb))
5388 {
5389 gcc_assert (!sel_bb_empty_p (new_bb));
5390
5391 /* NEW_BB has data sets that need to be updated and BB holds
5392 data sets that should be removed. Exchange these data sets
5393 so that we won't lose BB's valid data sets. */
5394 exchange_data_sets (new_bb, bb);
5395 free_data_sets (bb);
5396 }
5397
5398 if (!sel_bb_empty_p (new_bb)
5399 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5400 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5401
5402 return new_bb;
5403}
5404
5405/* If BB ends with a jump insn whose UID is bigger than PREV_MAX_UID, return it.
5406 Otherwise returns NULL. */
04d073df 5407static rtx_insn *
e1ab7874 5408check_for_new_jump (basic_block bb, int prev_max_uid)
5409{
04d073df 5410 rtx_insn *end;
e1ab7874 5411
5412 end = sel_bb_end (bb);
5413 if (end && INSN_UID (end) >= prev_max_uid)
5414 return end;
5415 return NULL;
5416}
5417
48e1416a 5418/* Look for a new jump either in the FROM block or in the newly created JUMP_BB block.
e1ab7874 5419 New means having UID at least equal to PREV_MAX_UID. */
04d073df 5420static rtx_insn *
e1ab7874 5421find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5422{
04d073df 5423 rtx_insn *jump;
e1ab7874 5424
5425 /* Return immediately if no new insns were emitted. */
5426 if (get_max_uid () == prev_max_uid)
5427 return NULL;
48e1416a 5428
e1ab7874 5429 /* Now check both blocks for new jumps. There can only ever be one. */
5430 if ((jump = check_for_new_jump (from, prev_max_uid)))
5431 return jump;
5432
5433 if (jump_bb != NULL
5434 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5435 return jump;
5436 return NULL;
5437}
5438
5439/* Splits E and adds the newly created basic block to the current region.
5440 Returns this basic block. */
5441basic_block
5442sel_split_edge (edge e)
5443{
5444 basic_block new_bb, src, other_bb = NULL;
5445 int prev_max_uid;
04d073df 5446 rtx_insn *jump;
e1ab7874 5447
5448 src = e->src;
5449 prev_max_uid = get_max_uid ();
5450 new_bb = split_edge (e);
5451
48e1416a 5452 if (flag_sel_sched_pipelining_outer_loops
e1ab7874 5453 && current_loop_nest)
5454 {
5455 int i;
5456 basic_block bb;
5457
48e1416a 5458 /* Some of the basic blocks might not have been added to the loop.
e1ab7874 5459 Add them here, until this is fixed in force_fallthru. */
48e1416a 5460 for (i = 0;
f1f41a6c 5461 last_added_blocks.iterate (i, &bb); i++)
e1ab7874 5462 if (!bb->loop_father)
5463 {
5464 add_bb_to_loop (bb, e->dest->loop_father);
5465
5466 gcc_assert (!other_bb && (new_bb->index != bb->index));
5467 other_bb = bb;
5468 }
5469 }
5470
5471 /* Add all last_added_blocks to the region. */
5472 sel_add_bb (NULL);
5473
5474 jump = find_new_jump (src, new_bb, prev_max_uid);
5475 if (jump)
5476 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5477
5478 /* Put the correct lv set on this block. */
5479 if (other_bb && !sel_bb_empty_p (other_bb))
5480 compute_live (sel_bb_head (other_bb));
5481
5482 return new_bb;
5483}
5484
5485/* Implement sched_create_empty_bb (). */
5486static basic_block
5487sel_create_empty_bb (basic_block after)
5488{
5489 basic_block new_bb;
5490
5491 new_bb = sched_create_empty_bb_1 (after);
5492
5493 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5494 later. */
f1f41a6c 5495 gcc_assert (last_added_blocks.length () == 1
5496 && last_added_blocks[0] == new_bb);
e1ab7874 5497
f1f41a6c 5498 last_added_blocks.release ();
e1ab7874 5499 return new_bb;
5500}
5501
5502/* Implement sched_create_recovery_block. ORIG_INSN is where the block
5503 will be split to insert a check. */
5504basic_block
5505sel_create_recovery_block (insn_t orig_insn)
5506{
5507 basic_block first_bb, second_bb, recovery_block;
5508 basic_block before_recovery = NULL;
04d073df 5509 rtx_insn *jump;
e1ab7874 5510
5511 first_bb = BLOCK_FOR_INSN (orig_insn);
5512 if (sel_bb_end_p (orig_insn))
5513 {
5514 /* Avoid introducing an empty block while splitting. */
5515 gcc_assert (single_succ_p (first_bb));
5516 second_bb = single_succ (first_bb);
5517 }
5518 else
5519 second_bb = sched_split_block (first_bb, orig_insn);
5520
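  /* sched_create_recovery_block may create a fresh BEFORE_RECOVERY block
     placed right before EXIT; if so, the exit block's live set is valid
     for it as well.  */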
5521 recovery_block = sched_create_recovery_block (&before_recovery);
5522 if (before_recovery)
34154e27 5523 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
e1ab7874 5524
5525 gcc_assert (sel_bb_empty_p (recovery_block));
5526 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5527 if (current_loops != NULL)
5528 add_bb_to_loop (recovery_block, first_bb->loop_father);
48e1416a 5529
e1ab7874 5530 sel_add_bb (recovery_block);
48e1416a 5531
e1ab7874 5532 jump = BB_END (recovery_block);
5533 gcc_assert (sel_bb_head (recovery_block) == jump);
5534 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5535
5536 return recovery_block;
5537}
5538
5539/* Merge basic block B into basic block A. */
0424f393 5540static void
e1ab7874 5541sel_merge_blocks (basic_block a, basic_block b)
5542{
0424f393 5543 gcc_assert (sel_bb_empty_p (b)
5544 && EDGE_COUNT (b->preds) == 1
5545 && EDGE_PRED (b, 0)->src == b->prev_bb);
e1ab7874 5546
0424f393 5547 move_bb_info (b->prev_bb, b);
5548 remove_empty_bb (b, false);
5549 merge_blocks (a, b);
e1ab7874 5550 change_loops_latches (b, a);
5551}
5552
5553/* A wrapper for redirect_edge_and_branch_force, which also initializes
8d1881f5 5554 data structures for a possibly created bb and insns. */
e1ab7874 5555void
5556sel_redirect_edge_and_branch_force (edge e, basic_block to)
5557{
1a5dbaab 5558 basic_block jump_bb, src, orig_dest = e->dest;
e1ab7874 5559 int prev_max_uid;
04d073df 5560 rtx_insn *jump;
8d1881f5 5561 int old_seqno = -1;
48e1416a 5562
1a5dbaab 5563 /* This function is now used only for bookkeeping code creation, where
5564 we'll never get the single pred of orig_dest block and thus will not
5565 hit unreachable blocks when updating dominator info. */
5566 gcc_assert (!sel_bb_empty_p (e->src)
5567 && !single_pred_p (orig_dest));
e1ab7874 5568 src = e->src;
5569 prev_max_uid = get_max_uid ();
8d1881f5 5570 /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5571 when the conditional jump being redirected may become unconditional. */
5572 if (any_condjump_p (BB_END (src))
5573 && INSN_SEQNO (BB_END (src)) >= 0)
5574 old_seqno = INSN_SEQNO (BB_END (src));
e1ab7874 5575
8d1881f5 5576 jump_bb = redirect_edge_and_branch_force (e, to);
e1ab7874 5577 if (jump_bb != NULL)
5578 sel_add_bb (jump_bb);
5579
5580 /* This function could not be used to spoil the loop structure by now,
5581 thus we don't care to update anything. But check it to be sure. */
5582 if (current_loop_nest
5583 && pipelining_p)
5584 gcc_assert (loop_latch_edge (current_loop_nest));
48e1416a 5585
e1ab7874 5586 jump = find_new_jump (src, jump_bb, prev_max_uid);
5587 if (jump)
8d1881f5 5588 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
5589 old_seqno);
1a5dbaab 5590 set_immediate_dominator (CDI_DOMINATORS, to,
5591 recompute_dominator (CDI_DOMINATORS, to));
5592 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5593 recompute_dominator (CDI_DOMINATORS, orig_dest));
e1ab7874 5594}
5595
93919afc 5596/* A wrapper for redirect_edge_and_branch. Return TRUE if the blocks connected by
5597 the redirected edge are now in reverse topological order, so the order must be recomputed. */
5598bool
e1ab7874 5599sel_redirect_edge_and_branch (edge e, basic_block to)
5600{
5601 bool latch_edge_p;
1a5dbaab 5602 basic_block src, orig_dest = e->dest;
e1ab7874 5603 int prev_max_uid;
04d073df 5604 rtx_insn *jump;
df6266b9 5605 edge redirected;
93919afc 5606 bool recompute_toporder_p = false;
1a5dbaab 5607 bool maybe_unreachable = single_pred_p (orig_dest);
8d1881f5 5608 int old_seqno = -1;
e1ab7874 5609
5610 latch_edge_p = (pipelining_p
5611 && current_loop_nest
5612 && e == loop_latch_edge (current_loop_nest));
5613
5614 src = e->src;
5615 prev_max_uid = get_max_uid ();
df6266b9 5616
8d1881f5 5617 /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5618 when the conditional jump being redirected may become unconditional. */
5619 if (any_condjump_p (BB_END (src))
5620 && INSN_SEQNO (BB_END (src)) >= 0)
5621 old_seqno = INSN_SEQNO (BB_END (src));
5622
df6266b9 5623 redirected = redirect_edge_and_branch (e, to);
5624
f1f41a6c 5625 gcc_assert (redirected && !last_added_blocks.exists ());
e1ab7874 5626
5627 /* When we've redirected a latch edge, update the header. */
5628 if (latch_edge_p)
5629 {
5630 current_loop_nest->header = to;
5631 gcc_assert (loop_latch_edge (current_loop_nest));
5632 }
5633
93919afc 5634 /* In rare situations, the topological relation between the blocks connected
5635 by the redirected edge can change (see PR42245 for an example). Update
5636 block_to_bb/bb_to_block. */
5637 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5638 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5639 recompute_toporder_p = true;
5640
e1ab7874 5641 jump = find_new_jump (src, NULL, prev_max_uid);
5642 if (jump)
8d1881f5 5643 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno);
93919afc 5644
1a5dbaab 5645 /* Only update dominator info when we don't have unreachable blocks.
5646 Otherwise we'll update in maybe_tidy_empty_bb. */
5647 if (!maybe_unreachable)
5648 {
5649 set_immediate_dominator (CDI_DOMINATORS, to,
5650 recompute_dominator (CDI_DOMINATORS, to));
5651 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5652 recompute_dominator (CDI_DOMINATORS, orig_dest));
5653 }
93919afc 5654 return recompute_toporder_p;
e1ab7874 5655}
5656
5657/* This variable holds the cfg hooks used by the selective scheduler. */
5658static struct cfg_hooks sel_cfg_hooks;
5659
5660/* Register sel-sched cfg hooks. */
5661void
5662sel_register_cfg_hooks (void)
5663{
5664 sched_split_block = sel_split_block;
5665
5666 orig_cfg_hooks = get_cfg_hooks ();
5667 sel_cfg_hooks = orig_cfg_hooks;
5668
5669 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5670
5671 set_cfg_hooks (sel_cfg_hooks);
5672
5673 sched_init_only_bb = sel_init_only_bb;
5674 sched_split_block = sel_split_block;
5675 sched_create_empty_bb = sel_create_empty_bb;
5676}
5677
5678/* Unregister sel-sched cfg hooks. */
5679void
5680sel_unregister_cfg_hooks (void)
5681{
5682 sched_create_empty_bb = NULL;
5683 sched_split_block = NULL;
5684 sched_init_only_bb = NULL;
5685
5686 set_cfg_hooks (orig_cfg_hooks);
5687}
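
/* Illustrative sketch (not part of GCC; all names are hypothetical) of the
   save/override/restore pattern used by sel_register_cfg_hooks and
   sel_unregister_cfg_hooks above: copy the active table of function
   pointers aside, override selected entries, and restore the copy later.  */
#if 0
struct hook_table { void (*create_block) (void); void (*split_block) (void); };

static struct hook_table active_hooks;  /* The table consulted by clients.  */
static struct hook_table saved_hooks;   /* Copy kept for restoration.  */

static void my_create_block (void) { }  /* The specialized replacement.  */

static void
register_my_hooks (void)
{
  saved_hooks = active_hooks;                   /* Save the original table.  */
  active_hooks.create_block = my_create_block;  /* Override one entry.  */
}

static void
unregister_my_hooks (void)
{
  active_hooks = saved_hooks;                   /* Restore the original table.  */
}
#endif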
5688\f
5689
5690/* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5691 LABEL is where this jump should be directed. */
3aaa3eec 5692rtx_insn *
e1ab7874 5693create_insn_rtx_from_pattern (rtx pattern, rtx label)
5694{
3aaa3eec 5695 rtx_insn *insn_rtx;
e1ab7874 5696
5697 gcc_assert (!INSN_P (pattern));
5698
5699 start_sequence ();
5700
5701 if (label == NULL_RTX)
5702 insn_rtx = emit_insn (pattern);
9845d120 5703 else if (DEBUG_INSN_P (label))
5704 insn_rtx = emit_debug_insn (pattern);
e1ab7874 5705 else
5706 {
5707 insn_rtx = emit_jump_insn (pattern);
5708 JUMP_LABEL (insn_rtx) = label;
5709 ++LABEL_NUSES (label);
5710 }
5711
5712 end_sequence ();
5713
52d7e28c 5714 sched_extend_luids ();
e1ab7874 5715 sched_extend_target ();
5716 sched_deps_init (false);
5717
5718 /* Initialize INSN_CODE now. */
5719 recog_memoized (insn_rtx);
5720 return insn_rtx;
5721}
5722
5723/* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5724 must not be clonable. */
5725vinsn_t
2f3c9801 5726create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
e1ab7874 5727{
5728 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5729
5730 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5731 return vinsn_create (insn_rtx, force_unique_p);
5732}
5733
5734/* Create a copy of INSN_RTX. */
3aaa3eec 5735rtx_insn *
e1ab7874 5736create_copy_of_insn_rtx (rtx insn_rtx)
5737{
3aaa3eec 5738 rtx_insn *res;
5739 rtx link;
e1ab7874 5740
9845d120 5741 if (DEBUG_INSN_P (insn_rtx))
5742 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5743 insn_rtx);
5744
e1ab7874 5745 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5746
5747 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5748 NULL_RTX);
114c1eb1 5749
5750 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5751 since mark_jump_label will make them. REG_LABEL_TARGETs are created
5752 there too, but are supposed to be sticky, so we copy them. */
5753 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5754 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5755 && REG_NOTE_KIND (link) != REG_EQUAL
5756 && REG_NOTE_KIND (link) != REG_EQUIV)
5757 {
5758 if (GET_CODE (link) == EXPR_LIST)
5759 add_reg_note (res, REG_NOTE_KIND (link),
5760 copy_insn_1 (XEXP (link, 0)));
5761 else
5762 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5763 }
5764
e1ab7874 5765 return res;
5766}
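
/* Illustrative sketch (not part of GCC; the types and deep_copy are
   hypothetical) of the note-copying loop above in generic form: walk a
   linked list, skip certain kinds, and deep-copy the payload only for
   list-type nodes, as is done for EXPR_LISTs via copy_insn_1.  */
#if 0
#include <stdlib.h>

enum note_kind { NOTE_EQUAL, NOTE_EQUIV, NOTE_LABEL_OPERAND, NOTE_OTHER };

struct note { enum note_kind kind; int is_expr_list; void *payload;
	      struct note *next; };

extern void *deep_copy (void *);  /* Stands in for copy_insn_1.  */

static struct note *
copy_notes_filtered (const struct note *src)
{
  struct note *head = NULL;
  for (; src; src = src->next)
    if (src->kind != NOTE_EQUAL
	&& src->kind != NOTE_EQUIV
	&& src->kind != NOTE_LABEL_OPERAND)
      {
	struct note *n = (struct note *) malloc (sizeof *n);
	n->kind = src->kind;
	n->is_expr_list = src->is_expr_list;
	/* Deep-copy the payload of list-type notes; others share it.  */
	n->payload = src->is_expr_list ? deep_copy (src->payload)
				       : src->payload;
	n->next = head;  /* Prepends, so this sketch reverses the order.  */
	head = n;
      }
  return head;
}
#endif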
5767
5768/* Change vinsn field of EXPR to hold NEW_VINSN. */
5769void
5770change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5771{
5772 vinsn_detach (EXPR_VINSN (expr));
5773
5774 EXPR_VINSN (expr) = new_vinsn;
5775 vinsn_attach (new_vinsn);
5776}
5777
5778/* Helpers for global init. */
5779/* This structure is used so that we can call the existing bundling
5780   mechanism and calculate insn priorities.  */
48e1416a 5781static struct haifa_sched_info sched_sel_haifa_sched_info =
e1ab7874 5782{
5783 NULL, /* init_ready_list */
5784 NULL, /* can_schedule_ready_p */
5785 NULL, /* schedule_more_p */
5786 NULL, /* new_ready */
5787 NULL, /* rgn_rank */
5788 sel_print_insn, /* rgn_print_insn */
5789 contributes_to_priority,
4db82bc9 5790 NULL, /* insn_finishes_block_p */
e1ab7874 5791
5792 NULL, NULL,
5793 NULL, NULL,
5794 0, 0,
5795
5796 NULL, /* add_remove_insn */
5797 NULL, /* begin_schedule_ready */
d2412f57 5798 NULL, /* begin_move_insn */
e1ab7874 5799 NULL, /* advance_target_bb */
e2f4a6ff 5800
5801 NULL,
5802 NULL,
5803
e1ab7874 5804 SEL_SCHED | NEW_BBS
5805};
5806
5807/* Setup special insns used in the scheduler. */
48e1416a 5808void
e1ab7874 5809setup_nop_and_exit_insns (void)
5810{
5811 gcc_assert (nop_pattern == NULL_RTX
5812 && exit_insn == NULL_RTX);
5813
bc9cb5ed 5814 nop_pattern = constm1_rtx;
e1ab7874 5815
5816 start_sequence ();
5817 emit_insn (nop_pattern);
5818 exit_insn = get_insns ();
5819 end_sequence ();
34154e27 5820 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
e1ab7874 5821}
5822
5823/* Free special insns used in the scheduler. */
5824void
5825free_nop_and_exit_insns (void)
5826{
179c282d 5827 exit_insn = NULL;
e1ab7874 5828 nop_pattern = NULL_RTX;
5829}
5830
5831/* Setup a special vinsn used in new insns initialization. */
5832void
5833setup_nop_vinsn (void)
5834{
5835 nop_vinsn = vinsn_create (exit_insn, false);
5836 vinsn_attach (nop_vinsn);
5837}
5838
5839/* Free a special vinsn used in new insns initialization. */
5840void
5841free_nop_vinsn (void)
5842{
5843 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5844 vinsn_detach (nop_vinsn);
5845 nop_vinsn = NULL;
5846}
5847
5848/* Call a set_sched_flags hook. */
5849void
5850sel_set_sched_flags (void)
5851{
48e1416a 5852  /* ??? This means that set_sched_flags was called, and we decided to
e1ab7874 5853     support speculation.  However, set_sched_flags also modifies flags
48e1416a 5854     on current_sched_info, and does so only at global init.  As we
e1ab7874 5855     sometimes change c_s_i later, set the correct flags again here.  */
5856 if (spec_info && targetm.sched.set_sched_flags)
5857 targetm.sched.set_sched_flags (spec_info);
5858}
5859
5860/* Setup pointers to global sched info structures. */
5861void
5862sel_setup_sched_infos (void)
5863{
5864 rgn_setup_common_sched_info ();
5865
5866 memcpy (&sel_common_sched_info, common_sched_info,
5867 sizeof (sel_common_sched_info));
5868
5869 sel_common_sched_info.fix_recovery_cfg = NULL;
5870 sel_common_sched_info.add_block = NULL;
5871 sel_common_sched_info.estimate_number_of_insns
5872 = sel_estimate_number_of_insns;
5873 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5874 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5875
5876 common_sched_info = &sel_common_sched_info;
5877
5878 current_sched_info = &sched_sel_haifa_sched_info;
48e1416a 5879 current_sched_info->sched_max_insns_priority =
e1ab7874 5880 get_rgn_sched_max_insns_priority ();
48e1416a 5881
e1ab7874 5882 sel_set_sched_flags ();
5883}
5884\f
5885
5886/* Adds basic block BB to region RGN at position *BB_ORD_INDEX,
5887   then increments *BB_ORD_INDEX.  */
5888static void
5889sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5890{
5891 RGN_NR_BLOCKS (rgn) += 1;
5892 RGN_DONT_CALC_DEPS (rgn) = 0;
5893 RGN_HAS_REAL_EBB (rgn) = 0;
5894 CONTAINING_RGN (bb->index) = rgn;
5895 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5896 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5897 (*bb_ord_index)++;
5898
 5899  /* FIXME: this is true only when not scheduling EBBs.  */
5900 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5901}
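
/* Illustrative sketch (not part of GCC; array sizes are arbitrary) of the
   bookkeeping done above over the flat region tables.  RGN_BLOCKS behaves
   like a prefix sum: region R owns rgn_bb_table entries from RGN_BLOCKS (R)
   up to RGN_BLOCKS (R) + RGN_NR_BLOCKS (R) - 1, so appending a block to the
   last region only bumps that region's size and the next region's start.  */
#if 0
#define MAX_RGNS 16
#define MAX_BLKS 64

static int rgn_blocks_[MAX_RGNS + 1];  /* Start offset of each region.  */
static int rgn_nr_blocks_[MAX_RGNS];   /* Number of blocks per region.  */
static int rgn_bb_table_[MAX_BLKS];    /* Block indices, region-major.  */

static void
add_block_to_last_region (int rgn, int bb_index, int *bb_ord_index)
{
  rgn_nr_blocks_[rgn] += 1;
  rgn_bb_table_[rgn_blocks_[rgn] + *bb_ord_index] = bb_index;
  (*bb_ord_index)++;
  /* Maintain the prefix-sum invariant for the following region.  */
  rgn_blocks_[rgn + 1] = rgn_blocks_[rgn] + rgn_nr_blocks_[rgn];
}
#endif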
5902
5903/* Functions to support pipelining of outer loops. */
5904
5905/* Creates a new empty region and returns its number.  */
5906static int
5907sel_create_new_region (void)
5908{
5909 int new_rgn_number = nr_regions;
5910
5911 RGN_NR_BLOCKS (new_rgn_number) = 0;
5912
5913 /* FIXME: This will work only when EBBs are not created. */
5914 if (new_rgn_number != 0)
48e1416a 5915 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
e1ab7874 5916 RGN_NR_BLOCKS (new_rgn_number - 1);
5917 else
5918 RGN_BLOCKS (new_rgn_number) = 0;
5919
5920 /* Set the blocks of the next region so the other functions may
5921 calculate the number of blocks in the region. */
48e1416a 5922 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
e1ab7874 5923 RGN_NR_BLOCKS (new_rgn_number);
5924
5925 nr_regions++;
5926
5927 return new_rgn_number;
5928}
5929
5930/* If X has a smaller topological sort number than Y, returns -1;
5931 if greater, returns 1. */
5932static int
5933bb_top_order_comparator (const void *x, const void *y)
5934{
5935 basic_block bb1 = *(const basic_block *) x;
5936 basic_block bb2 = *(const basic_block *) y;
5937
48e1416a 5938 gcc_assert (bb1 == bb2
5939 || rev_top_order_index[bb1->index]
e1ab7874 5940 != rev_top_order_index[bb2->index]);
5941
 5942  /* REV_TOP_ORDER_INDEX holds a reverse topological order, so bbs with
 5943     a greater index should go earlier.  */
5944 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5945 return -1;
5946 else
5947 return 1;
5948}
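
/* Illustrative sketch (not part of GCC; the index array is made up) of how
   a comparator over a reverse topological index yields nodes in forward
   topological order, as bb_top_order_comparator does when passed to
   get_loop_body_in_custom_order below.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* rev_index_[i] is node i's position in a *reverse* topological order;
   a larger value means the node comes earlier in forward order.  */
static const int rev_index_[] = { 2, 0, 3, 1 };

static int
top_order_cmp (const void *x, const void *y)
{
  int a = *(const int *) x, b = *(const int *) y;
  /* Greater reverse index goes first, as in bb_top_order_comparator.  */
  return rev_index_[a] > rev_index_[b] ? -1 : 1;
}

int
main (void)
{
  int nodes[] = { 0, 1, 2, 3 };
  qsort (nodes, 4, sizeof (int), top_order_cmp);
  for (int i = 0; i < 4; i++)
    printf ("%d ", nodes[i]);  /* Prints: 2 0 3 1  */
  return 0;
}
#endif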
5949
48e1416a 5950/* Create a region for LOOP and return its number. If we don't want
e1ab7874 5951 to pipeline LOOP, return -1. */
5952static int
5953make_region_from_loop (struct loop *loop)
5954{
5955 unsigned int i;
5956 int new_rgn_number = -1;
5957 struct loop *inner;
5958
5959 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5960 int bb_ord_index = 0;
5961 basic_block *loop_blocks;
5962 basic_block preheader_block;
5963
48e1416a 5964 if (loop->num_nodes
e1ab7874 5965 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5966 return -1;
48e1416a 5967
e1ab7874 5968 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5969 for (inner = loop->inner; inner; inner = inner->inner)
5970 if (flow_bb_inside_loop_p (inner, loop->latch))
5971 return -1;
5972
5973 loop->ninsns = num_loop_insns (loop);
5974 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5975 return -1;
5976
5977 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5978
5979 for (i = 0; i < loop->num_nodes; i++)
5980 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5981 {
5982 free (loop_blocks);
5983 return -1;
5984 }
5985
5986 preheader_block = loop_preheader_edge (loop)->src;
5987 gcc_assert (preheader_block);
5988 gcc_assert (loop_blocks[0] == loop->header);
5989
5990 new_rgn_number = sel_create_new_region ();
5991
5992 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
08b7917c 5993 bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
e1ab7874 5994
5995 for (i = 0; i < loop->num_nodes; i++)
5996 {
 5997      /* Add only those blocks that haven't been scheduled in the inner loop.
 5998	 The exception is basic blocks with bookkeeping code - they should
48e1416a 5999	 be added to the region (they don't actually belong to the loop
e1ab7874 6000	 body, but to the region containing that loop body).  */
6001
6002 gcc_assert (new_rgn_number >= 0);
6003
08b7917c 6004 if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
e1ab7874 6005 {
48e1416a 6006 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
e1ab7874 6007 new_rgn_number);
08b7917c 6008 bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
e1ab7874 6009 }
6010 }
6011
6012 free (loop_blocks);
6013 MARK_LOOP_FOR_PIPELINING (loop);
6014
6015 return new_rgn_number;
6016}
6017
6018/* Create a new region from preheader blocks LOOP_BLOCKS. */
6019void
f1f41a6c 6020make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
e1ab7874 6021{
6022 unsigned int i;
6023 int new_rgn_number = -1;
6024 basic_block bb;
6025
6026 /* Basic block index, to be assigned to BLOCK_TO_BB. */
6027 int bb_ord_index = 0;
6028
6029 new_rgn_number = sel_create_new_region ();
6030
f1f41a6c 6031 FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
e1ab7874 6032 {
6033 gcc_assert (new_rgn_number >= 0);
6034
6035 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6036 }
6037
f1f41a6c 6038 vec_free (loop_blocks);
e1ab7874 6039}
6040
6041
6042/* Create region(s) from loop nest LOOP, such that inner loops will be
48e1416a 6043 pipelined before outer loops. Returns true when a region for LOOP
e1ab7874 6044 is created. */
6045static bool
6046make_regions_from_loop_nest (struct loop *loop)
48e1416a 6047{
e1ab7874 6048 struct loop *cur_loop;
6049 int rgn_number;
6050
6051 /* Traverse all inner nodes of the loop. */
6052 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
08b7917c 6053 if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
e1ab7874 6054 return false;
6055
6056 /* At this moment all regular inner loops should have been pipelined.
6057 Try to create a region from this loop. */
6058 rgn_number = make_region_from_loop (loop);
6059
6060 if (rgn_number < 0)
6061 return false;
6062
f1f41a6c 6063 loop_nests.safe_push (loop);
e1ab7874 6064 return true;
6065}
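
/* Illustrative sketch (not part of GCC; toy_loop is a simplified stand-in
   for struct loop) of processing inner loops before their enclosing loops,
   the order make_regions_from_loop_nest relies on and which sel_find_rgns
   obtains with LI_FROM_INNERMOST.  */
#if 0
struct toy_loop { struct toy_loop *inner, *next; };

extern void make_region (struct toy_loop *);  /* Hypothetical action.  */

static void
walk_innermost_first (struct toy_loop *loop)
{
  /* Post-order walk: visit all children (and their siblings) first.  */
  for (struct toy_loop *l = loop->inner; l; l = l->next)
    walk_innermost_first (l);
  make_region (loop);
}
#endif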
6066
6067/* Initialize the data structures needed for pipelining.  */
6068void
6069sel_init_pipelining (void)
6070{
6071 /* Collect loop information to be used in outer loops pipelining. */
6072 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6073 | LOOPS_HAVE_FALLTHRU_PREHEADERS
6074 | LOOPS_HAVE_RECORDED_EXITS
6075 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6076 current_loop_nest = NULL;
6077
fe672ac0 6078 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
53c5d9d4 6079 bitmap_clear (bbs_in_loop_rgns);
e1ab7874 6080
6081 recompute_rev_top_order ();
6082}
6083
6084/* Returns a struct loop for region RGN. */
6085loop_p
6086get_loop_nest_for_rgn (unsigned int rgn)
6087{
6088 /* Regions created with extend_rgns don't have corresponding loop nests,
6089 because they don't represent loops. */
f1f41a6c 6090 if (rgn < loop_nests.length ())
6091 return loop_nests[rgn];
e1ab7874 6092 else
6093 return NULL;
6094}
6095
6096/* True when LOOP was included in a pipelining region.  */
6097bool
6098considered_for_pipelining_p (struct loop *loop)
6099{
6100 if (loop_depth (loop) == 0)
6101 return false;
6102
48e1416a 6103 /* Now, the loop could be too large or irreducible. Check whether its
6104 region is in LOOP_NESTS.
6105 We determine the region number of LOOP as the region number of its
 6106     latch.  We can't use the header here, because it could be a just-removed
e1ab7874 6107     preheader, which would give us the wrong region number.  The latch alone
 6108     can't be trusted either, as it may lie in an inner loop; hence the check below.  */
a2d56a0e 6109 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
e1ab7874 6110 {
6111 int rgn = CONTAINING_RGN (loop->latch->index);
6112
f1f41a6c 6113 gcc_assert ((unsigned) rgn < loop_nests.length ());
e1ab7874 6114 return true;
6115 }
48e1416a 6116
e1ab7874 6117 return false;
6118}
6119
48e1416a 6120/* Makes regions from the rest of the blocks, after loops are chosen
e1ab7874 6121 for pipelining. */
6122static void
6123make_regions_from_the_rest (void)
6124{
6125 int cur_rgn_blocks;
6126 int *loop_hdr;
6127 int i;
6128
6129 basic_block bb;
6130 edge e;
6131 edge_iterator ei;
6132 int *degree;
e1ab7874 6133
6134 /* Index in rgn_bb_table where to start allocating new regions. */
6135 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
e1ab7874 6136
48e1416a 6137  /* Make regions from all the remaining basic blocks - those that don't belong
e1ab7874 6138     to any loop or that belong to irreducible loops.  Prepare the data structures
6139 for extend_rgns. */
6140
6141 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6142 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6143 loop. */
fe672ac0 6144 loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
6145 degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));
e1ab7874 6146
6147
 6148  /* For each basic block that belongs to some loop, assign the number
 6149     of the innermost loop it belongs to.  */
fe672ac0 6150 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
e1ab7874 6151 loop_hdr[i] = -1;
6152
fc00614f 6153 FOR_EACH_BB_FN (bb, cfun)
e1ab7874 6154 {
9c26ddef 6155 if (bb->loop_father && bb->loop_father->num != 0
e1ab7874 6156 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6157 loop_hdr[bb->index] = bb->loop_father->num;
6158 }
6159
48e1416a 6160  /* For each basic block, the degree is calculated as the number of incoming
e1ab7874 6161     edges coming from basic blocks that are not yet scheduled.
 6162     Basic blocks that are already scheduled have a degree of zero.  */
fc00614f 6163 FOR_EACH_BB_FN (bb, cfun)
e1ab7874 6164 {
6165 degree[bb->index] = 0;
6166
08b7917c 6167 if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
e1ab7874 6168 {
6169 FOR_EACH_EDGE (e, ei, bb->preds)
08b7917c 6170 if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
e1ab7874 6171 degree[bb->index]++;
6172 }
6173 else
6174 degree[bb->index] = -1;
6175 }
6176
6177 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6178
6179 /* Any block that did not end up in a region is placed into a region
6180 by itself. */
fc00614f 6181 FOR_EACH_BB_FN (bb, cfun)
e1ab7874 6182 if (degree[bb->index] >= 0)
6183 {
6184 rgn_bb_table[cur_rgn_blocks] = bb->index;
6185 RGN_NR_BLOCKS (nr_regions) = 1;
6186 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6187 RGN_DONT_CALC_DEPS (nr_regions) = 0;
6188 RGN_HAS_REAL_EBB (nr_regions) = 0;
6189 CONTAINING_RGN (bb->index) = nr_regions++;
6190 BLOCK_TO_BB (bb->index) = 0;
6191 }
6192
6193 free (degree);
6194 free (loop_hdr);
6195}
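
/* Illustrative sketch (not part of GCC; the graph representation is made
   up) of the degree computation above in isolation: for every block not
   yet placed in a region, count the predecessors that are also still
   unplaced; already-placed blocks get degree -1.  */
#if 0
#define N_BLOCKS 8

static int preds_[N_BLOCKS][N_BLOCKS];  /* preds_[b][i]: i-th pred of b.  */
static int n_preds_[N_BLOCKS];
static int placed_[N_BLOCKS];           /* Nonzero once in some region.  */
static int degree_[N_BLOCKS];

static void
compute_degrees (void)
{
  for (int b = 0; b < N_BLOCKS; b++)
    if (placed_[b])
      degree_[b] = -1;
    else
      {
	degree_[b] = 0;
	for (int i = 0; i < n_preds_[b]; i++)
	  if (!placed_[preds_[b][i]])
	    degree_[b]++;
      }
}
#endif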
6196
6197/* Free data structures used in pipelining of loops. */
6198void sel_finish_pipelining (void)
6199{
e1ab7874 6200 struct loop *loop;
6201
6202 /* Release aux fields so we don't free them later by mistake. */
f21d4d00 6203 FOR_EACH_LOOP (loop, 0)
e1ab7874 6204 loop->aux = NULL;
6205
6206 loop_optimizer_finalize ();
6207
f1f41a6c 6208 loop_nests.release ();
e1ab7874 6209
6210 free (rev_top_order_index);
6211 rev_top_order_index = NULL;
6212}
6213
48e1416a 6214/* This function replaces find_rgns when
e1ab7874 6215 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
48e1416a 6216void
e1ab7874 6217sel_find_rgns (void)
6218{
6219 sel_init_pipelining ();
6220 extend_regions ();
6221
6222 if (current_loops)
6223 {
6224 loop_p loop;
e1ab7874 6225
f21d4d00 6226 FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
6227 ? LI_FROM_INNERMOST
6228 : LI_ONLY_INNERMOST))
e1ab7874 6229 make_regions_from_loop_nest (loop);
6230 }
6231
 6232  /* Make regions from all the remaining basic blocks and schedule them.
48e1416a 6233 These blocks include blocks that don't belong to any loop or belong
e1ab7874 6234 to irreducible loops. */
6235 make_regions_from_the_rest ();
6236
6237 /* We don't need bbs_in_loop_rgns anymore. */
6238 sbitmap_free (bbs_in_loop_rgns);
6239 bbs_in_loop_rgns = NULL;
6240}
6241
b73edd22 6242/* Add the preheader blocks of the previous loop to the current region,
 6243   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest); record them in *BBS.
e1ab7874 6244 This function is only used with -fsel-sched-pipelining-outer-loops. */
6245void
b73edd22 6246sel_add_loop_preheaders (bb_vec_t *bbs)
e1ab7874 6247{
6248 int i;
6249 basic_block bb;
f1f41a6c 6250 vec<basic_block> *preheader_blocks
e1ab7874 6251 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6252
f1f41a6c 6253 if (!preheader_blocks)
6254 return;
6255
6256 for (i = 0; preheader_blocks->iterate (i, &bb); i++)
a2d56a0e 6257 {
f1f41a6c 6258 bbs->safe_push (bb);
6259 last_added_blocks.safe_push (bb);
e1ab7874 6260 sel_add_bb (bb);
a2d56a0e 6261 }
e1ab7874 6262
f1f41a6c 6263 vec_free (preheader_blocks);
e1ab7874 6264}
6265
48e1416a 6266/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6267 Please note that the function should also work when pipelining_p is
6268 false, because it is used when deciding whether we should or should
e1ab7874 6269 not reschedule pipelined code. */
6270bool
6271sel_is_loop_preheader_p (basic_block bb)
6272{
6273 if (current_loop_nest)
6274 {
6275 struct loop *outer;
6276
6277 if (preheader_removed)
6278 return false;
6279
 6280      /* Preheader is the first block in the region (make_region_from_loop adds it first).  */
6281 if (BLOCK_TO_BB (bb->index) == 0)
6282 return true;
6283
6284 /* We used to find a preheader with the topological information.
6285 Check that the above code is equivalent to what we did before. */
6286
6287 if (in_current_region_p (current_loop_nest->header))
48e1416a 6288 gcc_assert (!(BLOCK_TO_BB (bb->index)
e1ab7874 6289 < BLOCK_TO_BB (current_loop_nest->header->index)));
6290
 6291      /* Check that BB is not the latch block of some outer loop
 6292	 considered for pipelining; that situation cannot happen here.  */
6293 for (outer = loop_outer (current_loop_nest);
6294 outer;
6295 outer = loop_outer (outer))
6296 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6297 gcc_unreachable ();
6298 }
6299
6300 return false;
6301}
6302
49087fba 6303/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6304 can be removed, making the corresponding edge fallthrough (assuming that
6305 all basic blocks between JUMP_BB and DEST_BB are empty). */
6306static bool
6307bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
e1ab7874 6308{
4b816303 6309 if (!onlyjump_p (BB_END (jump_bb))
6310 || tablejump_p (BB_END (jump_bb), NULL, NULL))
e1ab7874 6311 return false;
6312
48e1416a 6313  /* Reject several outgoing edges, an abnormal or crossing edge, or a
e1ab7874 6314     jump whose destination is not DEST_BB.  */
6315 if (EDGE_COUNT (jump_bb->succs) != 1
49087fba 6316 || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
e1ab7874 6317 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6318 return false;
6319
 6320  /* None of the above applies, so the jump is removable.  */
6321 return true;
6322}
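
/* For illustration: if JUMP_BB ends in an unconditional "jmp L1", has a
   single regular successor edge, and that edge leads to DEST_BB starting
   at L1, then the jump is redundant and the edge can become a fallthru:

	JUMP_BB: ...			JUMP_BB: ...
		 jmp L1		==>	  |  (fallthru)
	DEST_BB: L1: ...		DEST_BB: ...  */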
6323
6324/* Removes the loop preheader from the current region and saves its blocks
48e1416a 6325   in PREHEADER_BLOCKS of the father loop, so they will be added later to
e1ab7874 6326   the region that represents an outer loop.  */
6327static void
6328sel_remove_loop_preheader (void)
6329{
6330 int i, old_len;
6331 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6332 basic_block bb;
6333 bool all_empty_p = true;
f1f41a6c 6334 vec<basic_block> *preheader_blocks
e1ab7874 6335 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6336
f1f41a6c 6337 vec_check_alloc (preheader_blocks, 0);
6338
e1ab7874 6339 gcc_assert (current_loop_nest);
f1f41a6c 6340 old_len = preheader_blocks->length ();
e1ab7874 6341
6342 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6343 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6344 {
f5a6b05f 6345 bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
e1ab7874 6346
48e1416a 6347      /* If the basic block belongs to the region, but doesn't belong to the
e1ab7874 6348	 corresponding loop, then it should be a preheader.  */
6349 if (sel_is_loop_preheader_p (bb))
6350 {
f1f41a6c 6351 preheader_blocks->safe_push (bb);
e1ab7874 6352 if (BB_END (bb) != bb_note (bb))
6353 all_empty_p = false;
6354 }
6355 }
48e1416a 6356
e1ab7874 6357 /* Remove these blocks only after iterating over the whole region. */
f1f41a6c 6358 for (i = preheader_blocks->length () - 1; i >= old_len; i--)
e1ab7874 6359 {
f1f41a6c 6360 bb = (*preheader_blocks)[i];
e1ab7874 6361 sel_remove_bb (bb, false);
6362 }
6363
6364 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6365 {
6366 if (!all_empty_p)
 6367	/* Immediately create a new region from the preheader.  */
f1f41a6c 6368 make_region_from_loop_preheader (preheader_blocks);
e1ab7874 6369 else
6370 {
 6371	  /* If all preheader blocks are empty, don't create a new empty region.
6372 Instead, remove them completely. */
f1f41a6c 6373 FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
e1ab7874 6374 {
6375 edge e;
6376 edge_iterator ei;
6377 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6378
6379 /* Redirect all incoming edges to next basic block. */
6380 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6381 {
6382 if (! (e->flags & EDGE_FALLTHRU))
6383 redirect_edge_and_branch (e, bb->next_bb);
6384 else
6385 redirect_edge_succ (e, bb->next_bb);
6386 }
6387 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6388 delete_and_free_basic_block (bb);
6389
48e1416a 6390	      /* Check whether, after deleting the preheader, there is an
 6391		 unconditional jump in PREV_BB that leads to the next basic block
 6392		 NEXT_BB.  If so, delete this jump and clear the data sets of its
e1ab7874 6393		 basic block if it becomes empty.  */
6394 if (next_bb->prev_bb == prev_bb
34154e27 6395 && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
49087fba 6396 && bb_has_removable_jump_to_p (prev_bb, next_bb))
e1ab7874 6397 {
6398 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6399 if (BB_END (prev_bb) == bb_note (prev_bb))
6400 free_data_sets (prev_bb);
6401 }
1a5dbaab 6402
6403 set_immediate_dominator (CDI_DOMINATORS, next_bb,
6404 recompute_dominator (CDI_DOMINATORS,
6405 next_bb));
e1ab7874 6406 }
6407 }
f1f41a6c 6408 vec_free (preheader_blocks);
e1ab7874 6409 }
6410 else
6411 /* Store preheader within the father's loop structure. */
6412 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6413 preheader_blocks);
6414}
7c5928c3 6415
e1ab7874 6416#endif