Commit | Line | Data |
---|---|---|
e1ab7874 | 1 | /* Instruction scheduling pass. Selective scheduler and pipeliner. |
3aea1f79 | 2 | Copyright (C) 2006-2014 Free Software Foundation, Inc. |
e1ab7874 | 3 | |
4 | This file is part of GCC. | |
5 | ||
6 | GCC is free software; you can redistribute it and/or modify it under | |
7 | the terms of the GNU General Public License as published by the Free | |
8 | Software Foundation; either version 3, or (at your option) any later | |
9 | version. | |
10 | ||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
17 | along with GCC; see the file COPYING3. If not see | |
18 | <http://www.gnu.org/licenses/>. */ | |
19 | ||
20 | #include "config.h" | |
21 | #include "system.h" | |
22 | #include "coretypes.h" | |
23 | #include "tm.h" | |
0b205f4c | 24 | #include "diagnostic-core.h" |
e1ab7874 | 25 | #include "rtl.h" |
26 | #include "tm_p.h" | |
27 | #include "hard-reg-set.h" | |
28 | #include "regs.h" | |
a3020f2f | 29 | #include "hashtab.h" |
30 | #include "hash-set.h" | |
31 | #include "vec.h" | |
32 | #include "machmode.h" | |
33 | #include "input.h" | |
e1ab7874 | 34 | #include "function.h" |
94ea8568 | 35 | #include "predict.h" |
36 | #include "dominance.h" | |
37 | #include "cfg.h" | |
38 | #include "cfgrtl.h" | |
39 | #include "cfganal.h" | |
40 | #include "cfgbuild.h" | |
41 | #include "basic-block.h" | |
e1ab7874 | 42 | #include "flags.h" |
43 | #include "insn-config.h" | |
44 | #include "insn-attr.h" | |
45 | #include "except.h" | |
e1ab7874 | 46 | #include "recog.h" |
47 | #include "params.h" | |
48 | #include "target.h" | |
e1ab7874 | 49 | #include "sched-int.h" |
50 | #include "ggc.h" | |
51 | #include "tree.h" | |
e1ab7874 | 52 | #include "langhooks.h" |
53 | #include "rtlhooks-def.h" | |
06f9d6ef | 54 | #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */ |
e1ab7874 | 55 | |
56 | #ifdef INSN_SCHEDULING | |
57 | #include "sel-sched-ir.h" | |
58 | /* We don't have to use it except for sel_print_insn. */ | |
59 | #include "sel-sched-dump.h" | |
60 | ||
61 | /* A vector holding bb info for the whole scheduling pass. */ | |
f1f41a6c | 62 | vec<sel_global_bb_info_def> |
1e094109 | 63 | sel_global_bb_info = vNULL; |
e1ab7874 | 64 | |
65 | /* A vector holding bb info for the current region. */ | |
f1f41a6c | 66 | vec<sel_region_bb_info_def> |
1e094109 | 67 | sel_region_bb_info = vNULL; |
e1ab7874 | 68 | |
69 | /* A pool for allocating all lists. */ | |
70 | alloc_pool sched_lists_pool; | |
71 | ||
72 | /* This contains information about successors for compute_av_set. */ | |
73 | struct succs_info current_succs; | |
74 | ||
75 | /* Data structure to describe interaction with the generic scheduler utils. */ | |
76 | static struct common_sched_info_def sel_common_sched_info; | |
77 | ||
78 | /* The loop nest being pipelined. */ | |
79 | struct loop *current_loop_nest; | |
80 | ||
81 | /* LOOP_NESTS is a vector containing the corresponding loop nest for | |
82 | each region. */ | |
1e094109 | 83 | static vec<loop_p> loop_nests = vNULL; |
e1ab7874 | 84 | |
85 | /* Saves blocks already in loop regions, indexed by bb->index. */ | |
86 | static sbitmap bbs_in_loop_rgns = NULL; | |
87 | ||
88 | /* CFG hooks that are saved before changing create_basic_block hook. */ | |
89 | static struct cfg_hooks orig_cfg_hooks; | |
90 | \f | |
91 | ||
92 | /* Array containing reverse topological index of function basic blocks, | |
93 | indexed by BB->INDEX. */ | |
94 | static int *rev_top_order_index = NULL; | |
95 | ||
96 | /* Length of the above array. */ | |
97 | static int rev_top_order_index_len = -1; | |
98 | ||
99 | /* A regset pool structure. */ | |
100 | static struct | |
101 | { | |
102 | /* The stack to which regsets are returned. */ | |
103 | regset *v; | |
104 | ||
105 | /* Its pointer. */ | |
106 | int n; | |
107 | ||
108 | /* Its size. */ | |
109 | int s; | |
110 | ||
111 | /* In VV we save all generated regsets so that, when destructing the | |
112 | pool, we can compare it with V and check that every regset was returned | |
113 | back to pool. */ | |
114 | regset *vv; | |
115 | ||
116 | /* The stack pointer of VV. */ | |
117 | int nn; | |
118 | ||
119 | /* Its size. */ | |
120 | int ss; | |
121 | ||
122 | /* The difference between allocated and returned regsets. */ | |
123 | int diff; | |
124 | } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 }; | |
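
The V/N/S stack, the VV/NN/SS registry, and DIFF above implement a small object pool with leak checking: V holds regsets currently returned to the pool, VV records every regset ever created, and DIFF counts how many are still checked out. A minimal standalone sketch of the same pattern, using plain malloc'd buffers instead of regsets (all names here are illustrative, not GCC's):

```c
#include <stdlib.h>
#include <assert.h>

typedef int *obj_t;                    /* stand-in for a regset */

static struct
{
  obj_t *v;  int n,  s;                /* stack of objects returned to the pool */
  obj_t *vv; int nn, ss;               /* every object ever created */
  int diff;                            /* currently checked-out objects */
} pool = { NULL, 0, 0, NULL, 0, 0, 0 };

static obj_t
pool_get (void)
{
  obj_t x;

  if (pool.n != 0)
    x = pool.v[--pool.n];              /* reuse a returned object */
  else
    {
      x = malloc (16 * sizeof (int));  /* create a new one and remember it */
      if (pool.nn == pool.ss)
        pool.vv = realloc (pool.vv,
                           (pool.ss = 2 * pool.ss + 1) * sizeof (obj_t));
      pool.vv[pool.nn++] = x;
    }
  pool.diff++;
  return x;
}

static void
pool_return (obj_t x)
{
  pool.diff--;
  if (pool.n == pool.s)
    pool.v = realloc (pool.v, (pool.s = 2 * pool.s + 1) * sizeof (obj_t));
  pool.v[pool.n++] = x;
}

static void
pool_free (void)
{
  assert (pool.diff == 0);             /* every object must have been returned */
  while (pool.n)
    free (pool.v[--pool.n]);
  free (pool.v);
  free (pool.vv);
}
```

The `2 * s + 1` growth step is the same doubling strategy the real pool vectors in this file use.
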
125 | ||
126 | /* This represents the nop pool. */ | |
127 | static struct | |
128 | { | |
129 | /* The vector which holds previously emitted nops. */ | |
130 | insn_t *v; | |
131 | ||
132 | /* Its pointer. */ | |
133 | int n; | |
134 | ||
135 | /* Its size. */ | |
48e1416a | 136 | int s; |
e1ab7874 | 137 | } nop_pool = { NULL, 0, 0 }; |
138 | ||
139 | /* The pool for basic block notes. */ | |
cef3d8ad | 140 | static vec<rtx_note *> bb_note_pool; |
e1ab7874 | 141 | |
142 | /* A NOP pattern used to emit placeholder insns. */ | |
143 | rtx nop_pattern = NULL_RTX; | |
144 | /* A special instruction that resides in EXIT_BLOCK. | |
145 | EXIT_INSN is successor of the insns that lead to EXIT_BLOCK. */ | |
179c282d | 146 | rtx_insn *exit_insn = NULL; |
e1ab7874 | 147 | |
48e1416a | 148 | /* TRUE if, while scheduling the current region (which is a loop), its |
e1ab7874 | 149 | preheader was removed. */ |
150 | bool preheader_removed = false; | |
151 | \f | |
152 | ||
153 | /* Forward static declarations. */ | |
154 | static void fence_clear (fence_t); | |
155 | ||
156 | static void deps_init_id (idata_t, insn_t, bool); | |
157 | static void init_id_from_df (idata_t, insn_t, bool); | |
158 | static expr_t set_insn_init (expr_t, vinsn_t, int); | |
159 | ||
160 | static void cfg_preds (basic_block, insn_t **, int *); | |
161 | static void prepare_insn_expr (insn_t, int); | |
f1f41a6c | 162 | static void free_history_vect (vec<expr_history_def> &); |
e1ab7874 | 163 | |
164 | static void move_bb_info (basic_block, basic_block); | |
165 | static void remove_empty_bb (basic_block, bool); | |
0424f393 | 166 | static void sel_merge_blocks (basic_block, basic_block); |
e1ab7874 | 167 | static void sel_remove_loop_preheader (void); |
49087fba | 168 | static bool bb_has_removable_jump_to_p (basic_block, basic_block); |
e1ab7874 | 169 | |
170 | static bool insn_is_the_only_one_in_bb_p (insn_t); | |
171 | static void create_initial_data_sets (basic_block); | |
172 | ||
9845d120 | 173 | static void free_av_set (basic_block); |
e1ab7874 | 174 | static void invalidate_av_set (basic_block); |
175 | static void extend_insn_data (void); | |
8d1881f5 | 176 | static void sel_init_new_insn (insn_t, int, int = -1); |
e1ab7874 | 177 | static void finish_insns (void); |
178 | \f | |
179 | /* Various list functions. */ | |
180 | ||
181 | /* Copy an instruction list L. */ | |
182 | ilist_t | |
183 | ilist_copy (ilist_t l) | |
184 | { | |
185 | ilist_t head = NULL, *tailp = &head; | |
186 | ||
187 | while (l) | |
188 | { | |
189 | ilist_add (tailp, ILIST_INSN (l)); | |
190 | tailp = &ILIST_NEXT (*tailp); | |
191 | l = ILIST_NEXT (l); | |
192 | } | |
193 | ||
194 | return head; | |
195 | } | |
196 | ||
197 | /* Invert an instruction list L. */ | |
198 | ilist_t | |
199 | ilist_invert (ilist_t l) | |
200 | { | |
201 | ilist_t res = NULL; | |
202 | ||
203 | while (l) | |
204 | { | |
205 | ilist_add (&res, ILIST_INSN (l)); | |
206 | l = ILIST_NEXT (l); | |
207 | } | |
208 | ||
209 | return res; | |
210 | } | |
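
ilist_copy preserves the original order by appending through a pointer to the tail pointer (TAILP), while ilist_invert reverses it by pushing onto the head. A self-contained sketch of both idioms on an ordinary singly linked list (types and names here are invented for illustration):

```c
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Copy LIST preserving order: append each element through *TAILP,
   then advance TAILP to the new node's next field.  */
static struct node *
list_copy (struct node *list)
{
  struct node *head = NULL, **tailp = &head;

  for (; list; list = list->next)
    {
      struct node *n = malloc (sizeof *n);
      n->val = list->val;
      n->next = NULL;
      *tailp = n;                 /* append at the tail */
      tailp = &n->next;           /* the tail pointer now lives inside N */
    }
  return head;
}

/* Reverse copy: pushing onto the head inverts the order.  */
static struct node *
list_invert (struct node *list)
{
  struct node *res = NULL;

  for (; list; list = list->next)
    {
      struct node *n = malloc (sizeof *n);
      n->val = list->val;
      n->next = res;
      res = n;
    }
  return res;
}
```
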
211 | ||
212 | /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */ | |
213 | void | |
214 | blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc) | |
215 | { | |
216 | bnd_t bnd; | |
217 | ||
218 | _list_add (lp); | |
219 | bnd = BLIST_BND (*lp); | |
220 | ||
2f3c9801 | 221 | BND_TO (bnd) = to; |
e1ab7874 | 222 | BND_PTR (bnd) = ptr; |
223 | BND_AV (bnd) = NULL; | |
224 | BND_AV1 (bnd) = NULL; | |
225 | BND_DC (bnd) = dc; | |
226 | } | |
227 | ||
228 | /* Remove the list node pointed to by LP. */ | |
229 | void | |
230 | blist_remove (blist_t *lp) | |
231 | { | |
232 | bnd_t b = BLIST_BND (*lp); | |
233 | ||
234 | av_set_clear (&BND_AV (b)); | |
235 | av_set_clear (&BND_AV1 (b)); | |
236 | ilist_clear (&BND_PTR (b)); | |
237 | ||
238 | _list_remove (lp); | |
239 | } | |
240 | ||
241 | /* Init a fence tail L. */ | |
242 | void | |
243 | flist_tail_init (flist_tail_t l) | |
244 | { | |
245 | FLIST_TAIL_HEAD (l) = NULL; | |
246 | FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l); | |
247 | } | |
248 | ||
249 | /* Try to find fence corresponding to INSN in L. */ | |
250 | fence_t | |
251 | flist_lookup (flist_t l, insn_t insn) | |
252 | { | |
253 | while (l) | |
254 | { | |
255 | if (FENCE_INSN (FLIST_FENCE (l)) == insn) | |
256 | return FLIST_FENCE (l); | |
257 | ||
258 | l = FLIST_NEXT (l); | |
259 | } | |
260 | ||
261 | return NULL; | |
262 | } | |
263 | ||
264 | /* Init the fields of F before running fill_insns. */ | |
265 | static void | |
266 | init_fence_for_scheduling (fence_t f) | |
267 | { | |
268 | FENCE_BNDS (f) = NULL; | |
269 | FENCE_PROCESSED_P (f) = false; | |
270 | FENCE_SCHEDULED_P (f) = false; | |
271 | } | |
272 | ||
273 | /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */ | |
274 | static void | |
48e1416a | 275 | flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc, |
2f3c9801 | 276 | insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns, |
48e1416a | 277 | int *ready_ticks, int ready_ticks_size, insn_t sched_next, |
abb9c563 | 278 | int cycle, int cycle_issued_insns, int issue_more, |
e1ab7874 | 279 | bool starts_cycle_p, bool after_stall_p) |
280 | { | |
281 | fence_t f; | |
282 | ||
283 | _list_add (lp); | |
284 | f = FLIST_FENCE (*lp); | |
285 | ||
286 | FENCE_INSN (f) = insn; | |
287 | ||
288 | gcc_assert (state != NULL); | |
289 | FENCE_STATE (f) = state; | |
290 | ||
291 | FENCE_CYCLE (f) = cycle; | |
292 | FENCE_ISSUED_INSNS (f) = cycle_issued_insns; | |
293 | FENCE_STARTS_CYCLE_P (f) = starts_cycle_p; | |
294 | FENCE_AFTER_STALL_P (f) = after_stall_p; | |
295 | ||
296 | gcc_assert (dc != NULL); | |
297 | FENCE_DC (f) = dc; | |
298 | ||
299 | gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL); | |
300 | FENCE_TC (f) = tc; | |
301 | ||
302 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; | |
abb9c563 | 303 | FENCE_ISSUE_MORE (f) = issue_more; |
e1ab7874 | 304 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
305 | FENCE_READY_TICKS (f) = ready_ticks; | |
306 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; | |
307 | FENCE_SCHED_NEXT (f) = sched_next; | |
308 | ||
309 | init_fence_for_scheduling (f); | |
310 | } | |
311 | ||
312 | /* Remove the head node of the list pointed to by LP. */ | |
313 | static void | |
314 | flist_remove (flist_t *lp) | |
315 | { | |
316 | if (FENCE_INSN (FLIST_FENCE (*lp))) | |
317 | fence_clear (FLIST_FENCE (*lp)); | |
318 | _list_remove (lp); | |
319 | } | |
320 | ||
321 | /* Clear the fence list pointed to by LP. */ | |
322 | void | |
323 | flist_clear (flist_t *lp) | |
324 | { | |
325 | while (*lp) | |
326 | flist_remove (lp); | |
327 | } | |
328 | ||
329 | /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */ | |
330 | void | |
331 | def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call) | |
332 | { | |
333 | def_t d; | |
48e1416a | 334 | |
e1ab7874 | 335 | _list_add (dl); |
336 | d = DEF_LIST_DEF (*dl); | |
337 | ||
338 | d->orig_insn = original_insn; | |
339 | d->crosses_call = crosses_call; | |
340 | } | |
341 | \f | |
342 | ||
343 | /* Functions to work with target contexts. */ | |
344 | ||
48e1416a | 345 | /* Bulk target context. It is convenient for debugging purposes to ensure |
e1ab7874 | 346 | that there are no uninitialized (null) target contexts. */ |
347 | static tc_t bulk_tc = (tc_t) 1; | |
348 | ||
48e1416a | 349 | /* Target hooks wrappers. In the future we can provide some default |
e1ab7874 | 350 | implementations for them. */ |
351 | ||
352 | /* Allocate a store for the target context. */ | |
353 | static tc_t | |
354 | alloc_target_context (void) | |
355 | { | |
356 | return (targetm.sched.alloc_sched_context | |
357 | ? targetm.sched.alloc_sched_context () : bulk_tc); | |
358 | } | |
359 | ||
360 | /* Init target context TC. | |
361 | If CLEAN_P is true, then make TC as it is at the beginning of the scheduler. | |
362 | Otherwise, copy the current backend context to TC. */ | |
363 | static void | |
364 | init_target_context (tc_t tc, bool clean_p) | |
365 | { | |
366 | if (targetm.sched.init_sched_context) | |
367 | targetm.sched.init_sched_context (tc, clean_p); | |
368 | } | |
369 | ||
370 | /* Allocate and initialize a target context. Meaning of CLEAN_P is the same as | |
371 | in init_target_context (). */ | |
372 | tc_t | |
373 | create_target_context (bool clean_p) | |
374 | { | |
375 | tc_t tc = alloc_target_context (); | |
376 | ||
377 | init_target_context (tc, clean_p); | |
378 | return tc; | |
379 | } | |
380 | ||
381 | /* Copy TC to the current backend context. */ | |
382 | void | |
383 | set_target_context (tc_t tc) | |
384 | { | |
385 | if (targetm.sched.set_sched_context) | |
386 | targetm.sched.set_sched_context (tc); | |
387 | } | |
388 | ||
389 | /* TC is about to be destroyed. Free any internal data. */ | |
390 | static void | |
391 | clear_target_context (tc_t tc) | |
392 | { | |
393 | if (targetm.sched.clear_sched_context) | |
394 | targetm.sched.clear_sched_context (tc); | |
395 | } | |
396 | ||
397 | /* Clear and free TC. */ | |
398 | static void | |
399 | delete_target_context (tc_t tc) | |
400 | { | |
401 | clear_target_context (tc); | |
402 | ||
403 | if (targetm.sched.free_sched_context) | |
404 | targetm.sched.free_sched_context (tc); | |
405 | } | |
406 | ||
407 | /* Make a copy of FROM in TO. | |
408 | NB: Maybe this should be a hook. */ | |
409 | static void | |
410 | copy_target_context (tc_t to, tc_t from) | |
411 | { | |
412 | tc_t tmp = create_target_context (false); | |
413 | ||
414 | set_target_context (from); | |
415 | init_target_context (to, false); | |
416 | ||
417 | set_target_context (tmp); | |
418 | delete_target_context (tmp); | |
419 | } | |
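
Since the backend only exposes hooks to initialize a context from the current one and to make a given context current, copy_target_context has to copy FROM into TO indirectly: save the live context into a temporary, switch to FROM, initialize TO, then restore the temporary. A standalone sketch of that save/switch/restore pattern (the context type and helper names below are made up for illustration):

```c
#include <stdlib.h>

typedef struct { char data[64]; } ctx_t;  /* opaque target context */

static ctx_t live;                        /* the backend's "current" context */

static ctx_t *ctx_alloc (void) { return malloc (sizeof (ctx_t)); }
static void ctx_set  (ctx_t *c) { live = *c; }   /* make C current */
static void ctx_init (ctx_t *c) { *c = live; }   /* init C from the current context */
static void ctx_free (ctx_t *c) { free (c); }

/* Copy FROM into TO using only the hooks above.  */
static void
ctx_copy (ctx_t *to, ctx_t *from)
{
  ctx_t *tmp = ctx_alloc ();
  ctx_init (tmp);       /* save the live context */

  ctx_set (from);       /* make FROM current ... */
  ctx_init (to);        /* ... so TO becomes a copy of FROM */

  ctx_set (tmp);        /* restore the previously live context */
  ctx_free (tmp);
}
```
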
420 | ||
421 | /* Create a copy of TC. */ | |
422 | static tc_t | |
423 | create_copy_of_target_context (tc_t tc) | |
424 | { | |
425 | tc_t copy = alloc_target_context (); | |
426 | ||
427 | copy_target_context (copy, tc); | |
428 | ||
429 | return copy; | |
430 | } | |
431 | ||
432 | /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P | |
433 | is the same as in init_target_context (). */ | |
434 | void | |
435 | reset_target_context (tc_t tc, bool clean_p) | |
436 | { | |
437 | clear_target_context (tc); | |
438 | init_target_context (tc, clean_p); | |
439 | } | |
440 | \f | |
48e1416a | 441 | /* Functions to work with dependence contexts. |
68e419a1 | 442 | Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence |
e1ab7874 | 443 | context. It accumulates information about processed insns to decide if |
444 | the current insn is dependent on the processed ones. */ | |
445 | ||
446 | /* Make a copy of FROM in TO. */ | |
447 | static void | |
448 | copy_deps_context (deps_t to, deps_t from) | |
449 | { | |
d9ab2038 | 450 | init_deps (to, false); |
e1ab7874 | 451 | deps_join (to, from); |
452 | } | |
453 | ||
454 | /* Allocate store for dep context. */ | |
455 | static deps_t | |
456 | alloc_deps_context (void) | |
457 | { | |
68e419a1 | 458 | return XNEW (struct deps_desc); |
e1ab7874 | 459 | } |
460 | ||
461 | /* Allocate and initialize dep context. */ | |
462 | static deps_t | |
463 | create_deps_context (void) | |
464 | { | |
465 | deps_t dc = alloc_deps_context (); | |
466 | ||
d9ab2038 | 467 | init_deps (dc, false); |
e1ab7874 | 468 | return dc; |
469 | } | |
470 | ||
471 | /* Create a copy of FROM. */ | |
472 | static deps_t | |
473 | create_copy_of_deps_context (deps_t from) | |
474 | { | |
475 | deps_t to = alloc_deps_context (); | |
476 | ||
477 | copy_deps_context (to, from); | |
478 | return to; | |
479 | } | |
480 | ||
481 | /* Clean up internal data of DC. */ | |
482 | static void | |
483 | clear_deps_context (deps_t dc) | |
484 | { | |
485 | free_deps (dc); | |
486 | } | |
487 | ||
488 | /* Clear and free DC. */ | |
489 | static void | |
490 | delete_deps_context (deps_t dc) | |
491 | { | |
492 | clear_deps_context (dc); | |
493 | free (dc); | |
494 | } | |
495 | ||
496 | /* Clear and init DC. */ | |
497 | static void | |
498 | reset_deps_context (deps_t dc) | |
499 | { | |
500 | clear_deps_context (dc); | |
d9ab2038 | 501 | init_deps (dc, false); |
e1ab7874 | 502 | } |
503 | ||
48e1416a | 504 | /* This structure describes the dependence analysis hooks for advancing |
e1ab7874 | 505 | dependence context. */ |
506 | static struct sched_deps_info_def advance_deps_context_sched_deps_info = | |
507 | { | |
508 | NULL, | |
509 | ||
510 | NULL, /* start_insn */ | |
511 | NULL, /* finish_insn */ | |
512 | NULL, /* start_lhs */ | |
513 | NULL, /* finish_lhs */ | |
514 | NULL, /* start_rhs */ | |
515 | NULL, /* finish_rhs */ | |
516 | haifa_note_reg_set, | |
517 | haifa_note_reg_clobber, | |
518 | haifa_note_reg_use, | |
519 | NULL, /* note_mem_dep */ | |
520 | NULL, /* note_dep */ | |
521 | ||
522 | 0, 0, 0 | |
523 | }; | |
524 | ||
525 | /* Process INSN and add its impact on DC. */ | |
526 | void | |
527 | advance_deps_context (deps_t dc, insn_t insn) | |
528 | { | |
529 | sched_deps_info = &advance_deps_context_sched_deps_info; | |
2f3c9801 | 530 | deps_analyze_insn (dc, insn); |
e1ab7874 | 531 | } |
532 | \f | |
533 | ||
534 | /* Functions to work with DFA states. */ | |
535 | ||
536 | /* Allocate store for a DFA state. */ | |
537 | static state_t | |
538 | state_alloc (void) | |
539 | { | |
540 | return xmalloc (dfa_state_size); | |
541 | } | |
542 | ||
543 | /* Allocate and initialize DFA state. */ | |
544 | static state_t | |
545 | state_create (void) | |
546 | { | |
547 | state_t state = state_alloc (); | |
548 | ||
549 | state_reset (state); | |
550 | advance_state (state); | |
551 | return state; | |
552 | } | |
553 | ||
554 | /* Free DFA state. */ | |
555 | static void | |
556 | state_free (state_t state) | |
557 | { | |
558 | free (state); | |
559 | } | |
560 | ||
561 | /* Make a copy of FROM in TO. */ | |
562 | static void | |
563 | state_copy (state_t to, state_t from) | |
564 | { | |
565 | memcpy (to, from, dfa_state_size); | |
566 | } | |
567 | ||
568 | /* Create a copy of FROM. */ | |
569 | static state_t | |
570 | state_create_copy (state_t from) | |
571 | { | |
572 | state_t to = state_alloc (); | |
573 | ||
574 | state_copy (to, from); | |
575 | return to; | |
576 | } | |
577 | \f | |
578 | ||
579 | /* Functions to work with fences. */ | |
580 | ||
581 | /* Clear the fence. */ | |
582 | static void | |
583 | fence_clear (fence_t f) | |
584 | { | |
585 | state_t s = FENCE_STATE (f); | |
586 | deps_t dc = FENCE_DC (f); | |
587 | void *tc = FENCE_TC (f); | |
588 | ||
589 | ilist_clear (&FENCE_BNDS (f)); | |
590 | ||
591 | gcc_assert ((s != NULL && dc != NULL && tc != NULL) | |
592 | || (s == NULL && dc == NULL && tc == NULL)); | |
593 | ||
dd045aee | 594 | free (s); |
e1ab7874 | 595 | |
596 | if (dc != NULL) | |
597 | delete_deps_context (dc); | |
598 | ||
599 | if (tc != NULL) | |
600 | delete_target_context (tc); | |
f1f41a6c | 601 | vec_free (FENCE_EXECUTING_INSNS (f)); |
e1ab7874 | 602 | free (FENCE_READY_TICKS (f)); |
603 | FENCE_READY_TICKS (f) = NULL; | |
604 | } | |
605 | ||
606 | /* Init a list of fences with successors of OLD_FENCE. */ | |
607 | void | |
608 | init_fences (insn_t old_fence) | |
609 | { | |
610 | insn_t succ; | |
611 | succ_iterator si; | |
612 | bool first = true; | |
613 | int ready_ticks_size = get_max_uid () + 1; | |
48e1416a | 614 | |
615 | FOR_EACH_SUCC_1 (succ, si, old_fence, | |
e1ab7874 | 616 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
617 | { | |
48e1416a | 618 | |
e1ab7874 | 619 | if (first) |
620 | first = false; | |
621 | else | |
622 | gcc_assert (flag_sel_sched_pipelining_outer_loops); | |
623 | ||
624 | flist_add (&fences, succ, | |
625 | state_create (), | |
626 | create_deps_context () /* dc */, | |
627 | create_target_context (true) /* tc */, | |
2f3c9801 | 628 | NULL /* last_scheduled_insn */, |
e1ab7874 | 629 | NULL, /* executing_insns */ |
630 | XCNEWVEC (int, ready_ticks_size), /* ready_ticks */ | |
631 | ready_ticks_size, | |
2f3c9801 | 632 | NULL /* sched_next */, |
48e1416a | 633 | 1 /* cycle */, 0 /* cycle_issued_insns */, |
abb9c563 | 634 | issue_rate, /* issue_more */ |
48e1416a | 635 | 1 /* starts_cycle_p */, 0 /* after_stall_p */); |
e1ab7874 | 636 | } |
637 | } | |
638 | ||
639 | /* Merges two fences (filling fields of fence F with resulting values) by the | |
640 | following rules: 1) state, target context and last scheduled insn are | |
48e1416a | 641 | propagated from the fallthrough edge if it is available; |
e1ab7874 | 642 | 2) deps context and cycle are propagated from the more probable edge; |
48e1416a | 643 | 3) all other fields are set to corresponding constant values. |
e1ab7874 | 644 | |
48e1416a | 645 | INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS, |
abb9c563 | 646 | READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE |
647 | and AFTER_STALL_P are the corresponding fields of the second fence. */ | |
e1ab7874 | 648 | static void |
649 | merge_fences (fence_t f, insn_t insn, | |
48e1416a | 650 | state_t state, deps_t dc, void *tc, |
2f3c9801 | 651 | rtx_insn *last_scheduled_insn, |
652 | vec<rtx_insn *, va_gc> *executing_insns, | |
e1ab7874 | 653 | int *ready_ticks, int ready_ticks_size, |
abb9c563 | 654 | rtx sched_next, int cycle, int issue_more, bool after_stall_p) |
e1ab7874 | 655 | { |
656 | insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f); | |
657 | ||
658 | gcc_assert (sel_bb_head_p (FENCE_INSN (f)) | |
659 | && !sched_next && !FENCE_SCHED_NEXT (f)); | |
660 | ||
48e1416a | 661 | /* Check if we can decide which path the fences came from. |
e1ab7874 | 662 | If we can't (or don't want to), reset all. */ |
663 | if (last_scheduled_insn == NULL | |
664 | || last_scheduled_insn_old == NULL | |
48e1416a | 665 | /* This is a case when INSN is reachable on several paths from |
666 | one insn (this can happen when pipelining of outer loops is on and | |
667 | there are two edges: one going around the inner loop and the other | |
e1ab7874 | 668 | right through it; in such a case just reset everything). */ |
669 | || last_scheduled_insn == last_scheduled_insn_old) | |
670 | { | |
671 | state_reset (FENCE_STATE (f)); | |
672 | state_free (state); | |
48e1416a | 673 | |
e1ab7874 | 674 | reset_deps_context (FENCE_DC (f)); |
675 | delete_deps_context (dc); | |
48e1416a | 676 | |
e1ab7874 | 677 | reset_target_context (FENCE_TC (f), true); |
678 | delete_target_context (tc); | |
679 | ||
680 | if (cycle > FENCE_CYCLE (f)) | |
681 | FENCE_CYCLE (f) = cycle; | |
682 | ||
683 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; | |
abb9c563 | 684 | FENCE_ISSUE_MORE (f) = issue_rate; |
f1f41a6c | 685 | vec_free (executing_insns); |
e1ab7874 | 686 | free (ready_ticks); |
687 | if (FENCE_EXECUTING_INSNS (f)) | |
f1f41a6c | 688 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
689 | FENCE_EXECUTING_INSNS (f)->length ()); | |
e1ab7874 | 690 | if (FENCE_READY_TICKS (f)) |
691 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); | |
692 | } | |
693 | else | |
694 | { | |
695 | edge edge_old = NULL, edge_new = NULL; | |
696 | edge candidate; | |
697 | succ_iterator si; | |
698 | insn_t succ; | |
48e1416a | 699 | |
e1ab7874 | 700 | /* Find fallthrough edge. */ |
701 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb); | |
7f58c05e | 702 | candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb); |
e1ab7874 | 703 | |
704 | if (!candidate | |
705 | || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn) | |
706 | && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old))) | |
707 | { | |
708 | /* No fallthrough edge leading to basic block of INSN. */ | |
709 | state_reset (FENCE_STATE (f)); | |
710 | state_free (state); | |
48e1416a | 711 | |
e1ab7874 | 712 | reset_target_context (FENCE_TC (f), true); |
713 | delete_target_context (tc); | |
48e1416a | 714 | |
e1ab7874 | 715 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
abb9c563 | 716 | FENCE_ISSUE_MORE (f) = issue_rate; |
e1ab7874 | 717 | } |
718 | else | |
719 | if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn)) | |
720 | { | |
48e1416a | 721 | /* It would be weird if the same insn were the successor of several |
e1ab7874 | 722 | fallthrough edges. */ |
723 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb | |
724 | != BLOCK_FOR_INSN (last_scheduled_insn_old)); | |
725 | ||
726 | state_free (FENCE_STATE (f)); | |
727 | FENCE_STATE (f) = state; | |
728 | ||
729 | delete_target_context (FENCE_TC (f)); | |
730 | FENCE_TC (f) = tc; | |
731 | ||
732 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; | |
abb9c563 | 733 | FENCE_ISSUE_MORE (f) = issue_more; |
e1ab7874 | 734 | } |
735 | else | |
736 | { | |
737 | /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */ | |
738 | state_free (state); | |
739 | delete_target_context (tc); | |
740 | ||
741 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb | |
742 | != BLOCK_FOR_INSN (last_scheduled_insn)); | |
743 | } | |
744 | ||
745 | /* Find edge of first predecessor (last_scheduled_insn_old->insn). */ | |
746 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old, | |
747 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
748 | { | |
749 | if (succ == insn) | |
750 | { | |
751 | /* No same successor allowed from several edges. */ | |
752 | gcc_assert (!edge_old); | |
753 | edge_old = si.e1; | |
754 | } | |
755 | } | |
756 | /* Find edge of second predecessor (last_scheduled_insn->insn). */ | |
757 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn, | |
758 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
759 | { | |
760 | if (succ == insn) | |
761 | { | |
762 | /* No same successor allowed from several edges. */ | |
763 | gcc_assert (!edge_new); | |
764 | edge_new = si.e1; | |
765 | } | |
766 | } | |
767 | ||
768 | /* Check if we can choose most probable predecessor. */ | |
769 | if (edge_old == NULL || edge_new == NULL) | |
770 | { | |
771 | reset_deps_context (FENCE_DC (f)); | |
772 | delete_deps_context (dc); | |
f1f41a6c | 773 | vec_free (executing_insns); |
e1ab7874 | 774 | free (ready_ticks); |
48e1416a | 775 | |
e1ab7874 | 776 | FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle); |
777 | if (FENCE_EXECUTING_INSNS (f)) | |
f1f41a6c | 778 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
779 | FENCE_EXECUTING_INSNS (f)->length ()); | |
e1ab7874 | 780 | if (FENCE_READY_TICKS (f)) |
781 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); | |
782 | } | |
783 | else | |
784 | if (edge_new->probability > edge_old->probability) | |
785 | { | |
786 | delete_deps_context (FENCE_DC (f)); | |
787 | FENCE_DC (f) = dc; | |
f1f41a6c | 788 | vec_free (FENCE_EXECUTING_INSNS (f)); |
e1ab7874 | 789 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
790 | free (FENCE_READY_TICKS (f)); | |
791 | FENCE_READY_TICKS (f) = ready_ticks; | |
792 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; | |
793 | FENCE_CYCLE (f) = cycle; | |
794 | } | |
795 | else | |
796 | { | |
797 | /* Leave DC and CYCLE untouched. */ | |
798 | delete_deps_context (dc); | |
f1f41a6c | 799 | vec_free (executing_insns); |
e1ab7874 | 800 | free (ready_ticks); |
801 | } | |
802 | } | |
803 | ||
804 | /* Fill remaining invariant fields. */ | |
805 | if (after_stall_p) | |
806 | FENCE_AFTER_STALL_P (f) = 1; | |
807 | ||
808 | FENCE_ISSUED_INSNS (f) = 0; | |
809 | FENCE_STARTS_CYCLE_P (f) = 1; | |
810 | FENCE_SCHED_NEXT (f) = NULL; | |
811 | } | |
812 | ||
48e1416a | 813 | /* Add a new fence to NEW_FENCES list, initializing it from all |
e1ab7874 | 814 | other parameters. */ |
815 | static void | |
816 | add_to_fences (flist_tail_t new_fences, insn_t insn, | |
2f3c9801 | 817 | state_t state, deps_t dc, void *tc, |
818 | rtx_insn *last_scheduled_insn, | |
819 | vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks, | |
820 | int ready_ticks_size, rtx_insn *sched_next, int cycle, | |
abb9c563 | 821 | int cycle_issued_insns, int issue_rate, |
822 | bool starts_cycle_p, bool after_stall_p) | |
e1ab7874 | 823 | { |
824 | fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn); | |
825 | ||
826 | if (! f) | |
827 | { | |
828 | flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc, | |
48e1416a | 829 | last_scheduled_insn, executing_insns, ready_ticks, |
e1ab7874 | 830 | ready_ticks_size, sched_next, cycle, cycle_issued_insns, |
abb9c563 | 831 | issue_rate, starts_cycle_p, after_stall_p); |
e1ab7874 | 832 | |
833 | FLIST_TAIL_TAILP (new_fences) | |
834 | = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences)); | |
835 | } | |
836 | else | |
837 | { | |
48e1416a | 838 | merge_fences (f, insn, state, dc, tc, last_scheduled_insn, |
839 | executing_insns, ready_ticks, ready_ticks_size, | |
abb9c563 | 840 | sched_next, cycle, issue_rate, after_stall_p); |
e1ab7874 | 841 | } |
842 | } | |
843 | ||
844 | /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */ | |
845 | void | |
846 | move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences) | |
847 | { | |
848 | fence_t f, old; | |
849 | flist_t *tailp = FLIST_TAIL_TAILP (new_fences); | |
850 | ||
851 | old = FLIST_FENCE (old_fences); | |
48e1416a | 852 | f = flist_lookup (FLIST_TAIL_HEAD (new_fences), |
e1ab7874 | 853 | FENCE_INSN (FLIST_FENCE (old_fences))); |
854 | if (f) | |
855 | { | |
856 | merge_fences (f, old->insn, old->state, old->dc, old->tc, | |
857 | old->last_scheduled_insn, old->executing_insns, | |
858 | old->ready_ticks, old->ready_ticks_size, | |
abb9c563 | 859 | old->sched_next, old->cycle, old->issue_more, |
e1ab7874 | 860 | old->after_stall_p); |
861 | } | |
862 | else | |
863 | { | |
864 | _list_add (tailp); | |
865 | FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp); | |
866 | *FLIST_FENCE (*tailp) = *old; | |
867 | init_fence_for_scheduling (FLIST_FENCE (*tailp)); | |
868 | } | |
869 | FENCE_INSN (old) = NULL; | |
870 | } | |
871 | ||
48e1416a | 872 | /* Add a new fence to NEW_FENCES list and initialize most of its data |
e1ab7874 | 873 | as a clean one. */ |
874 | void | |
875 | add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) | |
876 | { | |
877 | int ready_ticks_size = get_max_uid () + 1; | |
48e1416a | 878 | |
e1ab7874 | 879 | add_to_fences (new_fences, |
880 | succ, state_create (), create_deps_context (), | |
881 | create_target_context (true), | |
2f3c9801 | 882 | NULL, NULL, |
e1ab7874 | 883 | XCNEWVEC (int, ready_ticks_size), ready_ticks_size, |
2f3c9801 | 884 | NULL, FENCE_CYCLE (fence) + 1, |
abb9c563 | 885 | 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence)); |
e1ab7874 | 886 | } |
887 | ||
48e1416a | 888 | /* Add a new fence to NEW_FENCES list and initialize all of its data |
e1ab7874 | 889 | from FENCE and SUCC. */ |
890 | void | |
891 | add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) | |
892 | { | |
48e1416a | 893 | int * new_ready_ticks |
e1ab7874 | 894 | = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence)); |
48e1416a | 895 | |
e1ab7874 | 896 | memcpy (new_ready_ticks, FENCE_READY_TICKS (fence), |
897 | FENCE_READY_TICKS_SIZE (fence) * sizeof (int)); | |
898 | add_to_fences (new_fences, | |
899 | succ, state_create_copy (FENCE_STATE (fence)), | |
900 | create_copy_of_deps_context (FENCE_DC (fence)), | |
901 | create_copy_of_target_context (FENCE_TC (fence)), | |
48e1416a | 902 | FENCE_LAST_SCHEDULED_INSN (fence), |
f1f41a6c | 903 | vec_safe_copy (FENCE_EXECUTING_INSNS (fence)), |
e1ab7874 | 904 | new_ready_ticks, |
905 | FENCE_READY_TICKS_SIZE (fence), | |
906 | FENCE_SCHED_NEXT (fence), | |
907 | FENCE_CYCLE (fence), | |
908 | FENCE_ISSUED_INSNS (fence), | |
abb9c563 | 909 | FENCE_ISSUE_MORE (fence), |
e1ab7874 | 910 | FENCE_STARTS_CYCLE_P (fence), |
911 | FENCE_AFTER_STALL_P (fence)); | |
912 | } | |
913 | \f | |
914 | ||
915 | /* Functions to work with regset and nop pools. */ | |
916 | ||
917 | /* Return a new regset from the pool. It might have some of the bits set | |
918 | from previous usage. */ | |
919 | regset | |
920 | get_regset_from_pool (void) | |
921 | { | |
922 | regset rs; | |
923 | ||
924 | if (regset_pool.n != 0) | |
925 | rs = regset_pool.v[--regset_pool.n]; | |
926 | else | |
927 | /* We need to create the regset. */ | |
928 | { | |
929 | rs = ALLOC_REG_SET (®_obstack); | |
930 | ||
931 | if (regset_pool.nn == regset_pool.ss) | |
932 | regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv, | |
933 | (regset_pool.ss = 2 * regset_pool.ss + 1)); | |
934 | regset_pool.vv[regset_pool.nn++] = rs; | |
935 | } | |
936 | ||
937 | regset_pool.diff++; | |
938 | ||
939 | return rs; | |
940 | } | |
941 | ||
942 | /* Same as above, but returns the empty regset. */ | |
943 | regset | |
944 | get_clear_regset_from_pool (void) | |
945 | { | |
946 | regset rs = get_regset_from_pool (); | |
947 | ||
948 | CLEAR_REG_SET (rs); | |
949 | return rs; | |
950 | } | |
951 | ||
952 | /* Return regset RS to the pool for future use. */ | |
953 | void | |
954 | return_regset_to_pool (regset rs) | |
955 | { | |
bc9cb5ed | 956 | gcc_assert (rs); |
e1ab7874 | 957 | regset_pool.diff--; |
958 | ||
959 | if (regset_pool.n == regset_pool.s) | |
960 | regset_pool.v = XRESIZEVEC (regset, regset_pool.v, | |
961 | (regset_pool.s = 2 * regset_pool.s + 1)); | |
962 | regset_pool.v[regset_pool.n++] = rs; | |
963 | } | |
964 | ||
dde7ed1e | 965 | #ifdef ENABLE_CHECKING |
e1ab7874 | 966 | /* This is used as a qsort callback for sorting regset pool stacks. |
967 | X and XX are addresses of two regsets. They are never equal. */ | |
968 | static int | |
969 | cmp_v_in_regset_pool (const void *x, const void *xx) | |
970 | { | |
c72f63ac | 971 | uintptr_t r1 = (uintptr_t) *((const regset *) x); |
972 | uintptr_t r2 = (uintptr_t) *((const regset *) xx); | |
973 | if (r1 > r2) | |
974 | return 1; | |
975 | else if (r1 < r2) | |
976 | return -1; | |
977 | gcc_unreachable (); | |
e1ab7874 | 978 | } |
dde7ed1e | 979 | #endif |
e1ab7874 | 980 | |
981 | /* Free the regset pool possibly checking for memory leaks. */ | |
982 | void | |
983 | free_regset_pool (void) | |
984 | { | |
985 | #ifdef ENABLE_CHECKING | |
986 | { | |
987 | regset *v = regset_pool.v; | |
988 | int i = 0; | |
989 | int n = regset_pool.n; | |
48e1416a | 990 | |
e1ab7874 | 991 | regset *vv = regset_pool.vv; |
992 | int ii = 0; | |
993 | int nn = regset_pool.nn; | |
48e1416a | 994 | |
e1ab7874 | 995 | int diff = 0; |
48e1416a | 996 | |
e1ab7874 | 997 | gcc_assert (n <= nn); |
48e1416a | 998 | |
e1ab7874 | 999 | /* Sort both vectors so it will be possible to compare them. */ |
1000 | qsort (v, n, sizeof (*v), cmp_v_in_regset_pool); | |
1001 | qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool); | |
48e1416a | 1002 | |
e1ab7874 | 1003 | while (ii < nn) |
1004 | { | |
1005 | if (v[i] == vv[ii]) | |
1006 | i++; | |
1007 | else | |
1008 | /* VV[II] was lost. */ | |
1009 | diff++; | |
48e1416a | 1010 | |
e1ab7874 | 1011 | ii++; |
1012 | } | |
48e1416a | 1013 | |
e1ab7874 | 1014 | gcc_assert (diff == regset_pool.diff); |
1015 | } | |
1016 | #endif | |
48e1416a | 1017 | |
e1ab7874 | 1018 | /* If not true - we have a memory leak. */ |
1019 | gcc_assert (regset_pool.diff == 0); | |
48e1416a | 1020 | |
e1ab7874 | 1021 | while (regset_pool.n) |
1022 | { | |
1023 | --regset_pool.n; | |
1024 | FREE_REG_SET (regset_pool.v[regset_pool.n]); | |
1025 | } | |
1026 | ||
1027 | free (regset_pool.v); | |
1028 | regset_pool.v = NULL; | |
1029 | regset_pool.s = 0; | |
48e1416a | 1030 | |
e1ab7874 | 1031 | free (regset_pool.vv); |
1032 | regset_pool.vv = NULL; | |
1033 | regset_pool.nn = 0; | |
1034 | regset_pool.ss = 0; | |
1035 | ||
1036 | regset_pool.diff = 0; | |
1037 | } | |
1038 | \f | |
1039 | ||
48e1416a | 1040 | /* Functions to work with nop pools. NOP insns are used as temporary |
1041 | placeholders for the insns being scheduled to allow correct update of | |
e1ab7874 | 1042 | the data sets. When update is finished, NOPs are deleted. */ |
1043 | ||
1044 | /* A vinsn that is used to represent a nop. This vinsn is shared among all | |
1045 | nops sel-sched generates. */ | |
1046 | static vinsn_t nop_vinsn = NULL; | |
1047 | ||
1048 | /* Emit a nop before INSN, taking it from pool. */ | |
1049 | insn_t | |
1050 | get_nop_from_pool (insn_t insn) | |
1051 | { | |
2f3c9801 | 1052 | rtx nop_pat; |
e1ab7874 | 1053 | insn_t nop; |
1054 | bool old_p = nop_pool.n != 0; | |
1055 | int flags; | |
1056 | ||
1057 | if (old_p) | |
2f3c9801 | 1058 | nop_pat = nop_pool.v[--nop_pool.n]; |
e1ab7874 | 1059 | else |
2f3c9801 | 1060 | nop_pat = nop_pattern; |
e1ab7874 | 1061 | |
2f3c9801 | 1062 | nop = emit_insn_before (nop_pat, insn); |
e1ab7874 | 1063 | |
1064 | if (old_p) | |
1065 | flags = INSN_INIT_TODO_SSID; | |
1066 | else | |
1067 | flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID; | |
1068 | ||
1069 | set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn)); | |
1070 | sel_init_new_insn (nop, flags); | |
1071 | ||
1072 | return nop; | |
1073 | } | |
1074 | ||
1075 | /* Remove NOP from the instruction stream and return it to the pool. */ | |
1076 | void | |
9845d120 | 1077 | return_nop_to_pool (insn_t nop, bool full_tidying) |
e1ab7874 | 1078 | { |
1079 | gcc_assert (INSN_IN_STREAM_P (nop)); | |
9845d120 | 1080 | sel_remove_insn (nop, false, full_tidying); |
e1ab7874 | 1081 | |
93ff53d3 | 1082 | /* We'll recycle this nop. */ |
dd1286fb | 1083 | nop->set_undeleted (); |
93ff53d3 | 1084 | |
e1ab7874 | 1085 | if (nop_pool.n == nop_pool.s) |
2f3c9801 | 1086 | nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v, |
e1ab7874 | 1087 | (nop_pool.s = 2 * nop_pool.s + 1)); |
1088 | nop_pool.v[nop_pool.n++] = nop; | |
1089 | } | |
1090 | ||
1091 | /* Free the nop pool. */ | |
1092 | void | |
1093 | free_nop_pool (void) | |
1094 | { | |
1095 | nop_pool.n = 0; | |
1096 | nop_pool.s = 0; | |
1097 | free (nop_pool.v); | |
1098 | nop_pool.v = NULL; | |
1099 | } | |
1100 | \f | |
1101 | ||
48e1416a | 1102 | /* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb. |
e1ab7874 | 1103 | The callback is given two rtxes XX and YY and writes the new rtxes |
1104 | to NX and NY in case some of them need to be skipped. */ | |
1105 | static int | |
1106 | skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny) | |
1107 | { | |
1108 | const_rtx x = *xx; | |
1109 | const_rtx y = *yy; | |
48e1416a | 1110 | |
e1ab7874 | 1111 | if (GET_CODE (x) == UNSPEC |
1112 | && (targetm.sched.skip_rtx_p == NULL | |
1113 | || targetm.sched.skip_rtx_p (x))) | |
1114 | { | |
1115 | *nx = XVECEXP (x, 0, 0); | |
1116 | *ny = CONST_CAST_RTX (y); | |
1117 | return 1; | |
1118 | } | |
48e1416a | 1119 | |
e1ab7874 | 1120 | if (GET_CODE (y) == UNSPEC |
1121 | && (targetm.sched.skip_rtx_p == NULL | |
1122 | || targetm.sched.skip_rtx_p (y))) | |
1123 | { | |
1124 | *nx = CONST_CAST_RTX (x); | |
1125 | *ny = XVECEXP (y, 0, 0); | |
1126 | return 1; | |
1127 | } | |
48e1416a | 1128 | |
e1ab7874 | 1129 | return 0; |
1130 | } | |
1131 | ||
48e1416a | 1132 | /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way |
e1ab7874 | 1133 | to support ia64 speculation. When changes are needed, new rtx X and new mode |
1134 | NMODE are written, and the callback returns true. */ | |
1135 | static int | |
3754d046 | 1136 | hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED, |
1137 | rtx *nx, machine_mode* nmode) | |
e1ab7874 | 1138 | { |
48e1416a | 1139 | if (GET_CODE (x) == UNSPEC |
e1ab7874 | 1140 | && targetm.sched.skip_rtx_p |
1141 | && targetm.sched.skip_rtx_p (x)) | |
1142 | { | |
1143 | *nx = XVECEXP (x, 0 ,0); | |
8458f4ca | 1144 | *nmode = VOIDmode; |
e1ab7874 | 1145 | return 1; |
1146 | } | |
48e1416a | 1147 | |
e1ab7874 | 1148 | return 0; |
1149 | } | |
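
Both callbacks above follow the same idea: before rtx_equal_p_cb or hash_rtx_cb examines a node, the callback may replace it (here, unwrapping an UNSPEC down to its first operand) so that speculative and non-speculative forms of an insn compare and hash the same. A simplified standalone analogue on a toy expression tree (the types and the unwrap rule are invented for illustration):

```c
#include <stdbool.h>

enum kind { LEAF, WRAP };               /* WRAP plays the role of UNSPEC */

struct node { enum kind k; int val; struct node *inner; };

/* Callback: if either node is a wrapper, strip it and report a change.  */
typedef bool (*skip_fn) (const struct node **x, const struct node **y);

static bool
skip_wrappers (const struct node **x, const struct node **y)
{
  if ((*x)->k == WRAP) { *x = (*x)->inner; return true; }
  if ((*y)->k == WRAP) { *y = (*y)->inner; return true; }
  return false;
}

/* Structural equality that lets CB rewrite each pair before comparing,
   so wrapped and unwrapped forms compare equal.  */
static bool
node_equal_cb (const struct node *x, const struct node *y, skip_fn cb)
{
  while (cb && cb (&x, &y))
    ;                                   /* keep unwrapping while CB asks to */

  if (x->k != y->k)
    return false;
  if (x->k == LEAF)
    return x->val == y->val;
  return node_equal_cb (x->inner, y->inner, cb);
}
```

With this, `node_equal_cb (a, b, skip_wrappers)` treats a wrapped node and its operand as equal, which is exactly what the UNSPEC-skipping comparison above achieves for insn patterns.
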
1150 | ||
1151 | /* Return true if LHS and RHS are ok to be scheduled separately. */ | |
1152 | static bool | |
1153 | lhs_and_rhs_separable_p (rtx lhs, rtx rhs) | |
1154 | { | |
1155 | if (lhs == NULL || rhs == NULL) | |
1156 | return false; | |
1157 | ||
e913b5cd | 1158 | /* Do not schedule constants as rhs: no point in using a reg if a const |
1159 | can be used. Moreover, scheduling a const as rhs may lead to a mode | |
1160 | mismatch because consts don't have modes but they could be merged | |
1161 | from branches where the same const is used in different modes. */ | |
e1ab7874 | 1162 | if (CONSTANT_P (rhs)) |
1163 | return false; | |
1164 | ||
1165 | /* ??? Do not rename predicate registers to avoid ICEs in bundling. */ | |
1166 | if (COMPARISON_P (rhs)) | |
1167 | return false; | |
1168 | ||
1169 | /* Do not allow single REG to be an rhs. */ | |
1170 | if (REG_P (rhs)) | |
1171 | return false; | |
1172 | ||
48e1416a | 1173 | /* See comment at find_used_regs_1 (*1) for explanation of this |
e1ab7874 | 1174 | restriction. */ |
1175 | /* FIXME: remove this later. */ | |
1176 | if (MEM_P (lhs)) | |
1177 | return false; | |
1178 | ||
1179 | /* This will filter all tricky things like ZERO_EXTRACT etc. | |
1180 | For now we don't handle it. */ | |
1181 | if (!REG_P (lhs) && !MEM_P (lhs)) | |
1182 | return false; | |
1183 | ||
1184 | return true; | |
1185 | } | |
1186 | ||
48e1416a | 1187 | /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When |
1188 | FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is | |
e1ab7874 | 1189 | used e.g. for insns from recovery blocks. */ |
1190 | static void | |
1191 | vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p) | |
1192 | { | |
1193 | hash_rtx_callback_function hrcf; | |
1194 | int insn_class; | |
1195 | ||
69c5a18c | 1196 | VINSN_INSN_RTX (vi) = insn; |
e1ab7874 | 1197 | VINSN_COUNT (vi) = 0; |
1198 | vi->cost = -1; | |
48e1416a | 1199 | |
bc9cb5ed | 1200 | if (INSN_NOP_P (insn)) |
1201 | return; | |
1202 | ||
e1ab7874 | 1203 | if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL) |
1204 | init_id_from_df (VINSN_ID (vi), insn, force_unique_p); | |
1205 | else | |
1206 | deps_init_id (VINSN_ID (vi), insn, force_unique_p); | |
48e1416a | 1207 | |
e1ab7874 | 1208 | /* Hash vinsn depending on whether it is separable or not. */ |
1209 | hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL; | |
1210 | if (VINSN_SEPARABLE_P (vi)) | |
1211 | { | |
1212 | rtx rhs = VINSN_RHS (vi); | |
1213 | ||
1214 | VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs), | |
1215 | NULL, NULL, false, hrcf); | |
1216 | VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi), | |
1217 | VOIDmode, NULL, NULL, | |
1218 | false, hrcf); | |
1219 | } | |
1220 | else | |
1221 | { | |
1222 | VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode, | |
1223 | NULL, NULL, false, hrcf); | |
1224 | VINSN_HASH_RTX (vi) = VINSN_HASH (vi); | |
1225 | } | |
48e1416a | 1226 | |
e1ab7874 | 1227 | insn_class = haifa_classify_insn (insn); |
1228 | if (insn_class >= 2 | |
1229 | && (!targetm.sched.get_insn_spec_ds | |
1230 | || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL) | |
1231 | == 0))) | |
1232 | VINSN_MAY_TRAP_P (vi) = true; | |
1233 | else | |
1234 | VINSN_MAY_TRAP_P (vi) = false; | |
1235 | } | |
1236 | ||
1237 | /* Indicate that VI has become a part of an rtx object. */ | |
1238 | void | |
1239 | vinsn_attach (vinsn_t vi) | |
1240 | { | |
1241 | /* Assert that VI is not pending for deletion. */ | |
1242 | gcc_assert (VINSN_INSN_RTX (vi)); | |
1243 | ||
1244 | VINSN_COUNT (vi)++; | |
1245 | } | |
1246 | ||
48e1416a | 1247 | /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the correct |
e1ab7874 | 1248 | VINSN_TYPE (VI). */ |
1249 | static vinsn_t | |
1250 | vinsn_create (insn_t insn, bool force_unique_p) | |
1251 | { | |
1252 | vinsn_t vi = XCNEW (struct vinsn_def); | |
1253 | ||
1254 | vinsn_init (vi, insn, force_unique_p); | |
1255 | return vi; | |
1256 | } | |
1257 | ||
1258 | /* Return a copy of VI. When REATTACH_P is true, detach VI and attach | |
1259 | the copy. */ | |
48e1416a | 1260 | vinsn_t |
e1ab7874 | 1261 | vinsn_copy (vinsn_t vi, bool reattach_p) |
1262 | { | |
04d073df | 1263 | rtx_insn *copy; |
e1ab7874 | 1264 | bool unique = VINSN_UNIQUE_P (vi); |
1265 | vinsn_t new_vi; | |
48e1416a | 1266 | |
e1ab7874 | 1267 | copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi)); |
1268 | new_vi = create_vinsn_from_insn_rtx (copy, unique); | |
1269 | if (reattach_p) | |
1270 | { | |
1271 | vinsn_detach (vi); | |
1272 | vinsn_attach (new_vi); | |
1273 | } | |
1274 | ||
1275 | return new_vi; | |
1276 | } | |
1277 | ||
1278 | /* Delete the VI vinsn and free its data. */ | |
1279 | static void | |
1280 | vinsn_delete (vinsn_t vi) | |
1281 | { | |
1282 | gcc_assert (VINSN_COUNT (vi) == 0); | |
1283 | ||
bc9cb5ed | 1284 | if (!INSN_NOP_P (VINSN_INSN_RTX (vi))) |
1285 | { | |
1286 | return_regset_to_pool (VINSN_REG_SETS (vi)); | |
1287 | return_regset_to_pool (VINSN_REG_USES (vi)); | |
1288 | return_regset_to_pool (VINSN_REG_CLOBBERS (vi)); | |
1289 | } | |
e1ab7874 | 1290 | |
1291 | free (vi); | |
1292 | } | |
1293 | ||
48e1416a | 1294 | /* Indicate that VI is no longer a part of some rtx object. |
e1ab7874 | 1295 | Remove VI if it is no longer needed. */ |
1296 | void | |
1297 | vinsn_detach (vinsn_t vi) | |
1298 | { | |
1299 | gcc_assert (VINSN_COUNT (vi) > 0); | |
1300 | ||
1301 | if (--VINSN_COUNT (vi) == 0) | |
1302 | vinsn_delete (vi); | |
1303 | } | |
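
vinsn_attach and vinsn_detach above are plain manual reference counting: every expression or insn that starts pointing at a vinsn attaches it, and the vinsn is destroyed when the last holder detaches. A minimal standalone version of the pattern (illustrative names only):

```c
#include <stdlib.h>
#include <assert.h>

struct refobj { int refcount; /* payload would go here */ };

static struct refobj *
refobj_create (void)
{
  return calloc (1, sizeof (struct refobj));   /* refcount starts at 0 */
}

static void
refobj_attach (struct refobj *o)               /* a new holder appears */
{
  o->refcount++;
}

static void
refobj_detach (struct refobj *o)               /* a holder goes away */
{
  assert (o->refcount > 0);
  if (--o->refcount == 0)
    free (o);                                  /* last reference: destroy */
}
```
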
1304 | ||
1305 | /* Returns TRUE if VI is a branch. */ | |
1306 | bool | |
1307 | vinsn_cond_branch_p (vinsn_t vi) | |
1308 | { | |
1309 | insn_t insn; | |
1310 | ||
1311 | if (!VINSN_UNIQUE_P (vi)) | |
1312 | return false; | |
1313 | ||
1314 | insn = VINSN_INSN_RTX (vi); | |
1315 | if (BB_END (BLOCK_FOR_INSN (insn)) != insn) | |
1316 | return false; | |
1317 | ||
1318 | return control_flow_insn_p (insn); | |
1319 | } | |
1320 | ||
1321 | /* Return latency of INSN. */ | |
1322 | static int | |
ed3e6e5d | 1323 | sel_insn_rtx_cost (rtx_insn *insn) |
e1ab7874 | 1324 | { |
1325 | int cost; | |
1326 | ||
1327 | /* A USE insn, or something else we don't need to | |
1328 | understand. We can't pass these directly to | |
1329 | result_ready_cost or insn_default_latency because it will | |
1330 | trigger a fatal error for unrecognizable insns. */ | |
1331 | if (recog_memoized (insn) < 0) | |
1332 | cost = 0; | |
1333 | else | |
1334 | { | |
1335 | cost = insn_default_latency (insn); | |
1336 | ||
1337 | if (cost < 0) | |
1338 | cost = 0; | |
1339 | } | |
1340 | ||
1341 | return cost; | |
1342 | } | |
1343 | ||
1344 | /* Return the cost of the VI. | |
1345 | !!! FIXME: Unify with haifa-sched.c: insn_cost (). */ | |
1346 | int | |
1347 | sel_vinsn_cost (vinsn_t vi) | |
1348 | { | |
1349 | int cost = vi->cost; | |
1350 | ||
1351 | if (cost < 0) | |
1352 | { | |
1353 | cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi)); | |
1354 | vi->cost = cost; | |
1355 | } | |
1356 | ||
1357 | return cost; | |
1358 | } | |
1359 | \f | |
1360 | ||
1361 | /* Functions for insn emitting. */ | |
1362 | ||
1363 | /* Emit new insn after AFTER based on PATTERN and initialize its data from | |
1364 | EXPR and SEQNO. */ | |
1365 | insn_t | |
1366 | sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after) | |
1367 | { | |
1368 | insn_t new_insn; | |
1369 | ||
1370 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true); | |
1371 | ||
1372 | new_insn = emit_insn_after (pattern, after); | |
1373 | set_insn_init (expr, NULL, seqno); | |
1374 | sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID); | |
1375 | ||
1376 | return new_insn; | |
1377 | } | |
1378 | ||
1379 | /* Force newly generated vinsns to be unique. */ | |
1380 | static bool init_insn_force_unique_p = false; | |
1381 | ||
1382 | /* Emit new speculation recovery insn after AFTER based on PATTERN and | |
1383 | initialize its data from EXPR and SEQNO. */ | |
1384 | insn_t | |
1385 | sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, | |
1386 | insn_t after) | |
1387 | { | |
1388 | insn_t insn; | |
1389 | ||
1390 | gcc_assert (!init_insn_force_unique_p); | |
1391 | ||
1392 | init_insn_force_unique_p = true; | |
1393 | insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after); | |
1394 | CANT_MOVE (insn) = 1; | |
1395 | init_insn_force_unique_p = false; | |
1396 | ||
1397 | return insn; | |
1398 | } | |
1399 | ||
1400 | /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL, | |
48e1416a | 1401 | take it as a new vinsn instead of EXPR's vinsn. |
1402 | We simplify insns later, after scheduling region in | |
e1ab7874 | 1403 | simplify_changed_insns. */ |
1404 | insn_t | |
48e1416a | 1405 | sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, |
e1ab7874 | 1406 | insn_t after) |
1407 | { | |
1408 | expr_t emit_expr; | |
1409 | insn_t insn; | |
1410 | int flags; | |
48e1416a | 1411 | |
1412 | emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr), | |
e1ab7874 | 1413 | seqno); |
1414 | insn = EXPR_INSN_RTX (emit_expr); | |
2b7454f2 | 1415 | |
1416 | /* The insn may come from the transformation cache, which may hold already | |
1417 | deleted insns, so mark it as not deleted. */ | |
dd1286fb | 1418 | insn->set_undeleted (); |
2b7454f2 | 1419 | |
48e1416a | 1420 | add_insn_after (insn, after, BLOCK_FOR_INSN (insn)); |
e1ab7874 | 1421 | |
1422 | flags = INSN_INIT_TODO_SSID; | |
1423 | if (INSN_LUID (insn) == 0) | |
1424 | flags |= INSN_INIT_TODO_LUID; | |
1425 | sel_init_new_insn (insn, flags); | |
1426 | ||
1427 | return insn; | |
1428 | } | |
1429 | ||
1430 | /* Move insn from EXPR after AFTER. */ | |
1431 | insn_t | |
1432 | sel_move_insn (expr_t expr, int seqno, insn_t after) | |
1433 | { | |
1434 | insn_t insn = EXPR_INSN_RTX (expr); | |
1435 | basic_block bb = BLOCK_FOR_INSN (after); | |
1436 | insn_t next = NEXT_INSN (after); | |
1437 | ||
1438 | /* Assert that in move_op we disconnected this insn properly. */ | |
1439 | gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL); | |
4a57a2e8 | 1440 | SET_PREV_INSN (insn) = after; |
1441 | SET_NEXT_INSN (insn) = next; | |
e1ab7874 | 1442 | |
4a57a2e8 | 1443 | SET_NEXT_INSN (after) = insn; |
1444 | SET_PREV_INSN (next) = insn; | |
e1ab7874 | 1445 | |
1446 | /* Update links from insn to bb and vice versa. */ | |
1447 | df_insn_change_bb (insn, bb); | |
1448 | if (BB_END (bb) == after) | |
26bb3cb2 | 1449 | BB_END (bb) = insn; |
48e1416a | 1450 | |
e1ab7874 | 1451 | prepare_insn_expr (insn, seqno); |
1452 | return insn; | |
1453 | } | |
1454 | ||
1455 | \f | |
1456 | /* Functions to work with right-hand sides. */ | |
1457 | ||
48e1416a | 1458 | /* Search for an entry determined by UID/NEW_VINSN in a sorted vector |
e1ab7874 | 1459 | VECT and return true when found. Use NEW_VINSN for comparison only when |
48e1416a | 1460 | COMPARE_VINSNS is true. Write to INDP the index on which |
1461 | the search has stopped, such that inserting the new element at INDP will | |
e1ab7874 | 1462 | retain VECT's sort order. */ |
1463 | static bool | |
f1f41a6c | 1464 | find_in_history_vect_1 (vec<expr_history_def> vect, |
48e1416a | 1465 | unsigned uid, vinsn_t new_vinsn, |
e1ab7874 | 1466 | bool compare_vinsns, int *indp) |
1467 | { | |
1468 | expr_history_def *arr; | |
f1f41a6c | 1469 | int i, j, len = vect.length (); |
e1ab7874 | 1470 | |
1471 | if (len == 0) | |
1472 | { | |
1473 | *indp = 0; | |
1474 | return false; | |
1475 | } | |
1476 | ||
f1f41a6c | 1477 | arr = vect.address (); |
e1ab7874 | 1478 | i = 0, j = len - 1; |
1479 | ||
1480 | while (i <= j) | |
1481 | { | |
1482 | unsigned auid = arr[i].uid; | |
48e1416a | 1483 | vinsn_t avinsn = arr[i].new_expr_vinsn; |
e1ab7874 | 1484 | |
1485 | if (auid == uid | |
48e1416a | 1486 | /* When undoing transformation on a bookkeeping copy, the new vinsn |
1487 | may not be exactly equal to the one that is saved in the vector. | |
e1ab7874 | 1488 | This is because the insn whose copy we're checking was possibly |
1489 | substituted itself. */ | |
48e1416a | 1490 | && (! compare_vinsns |
e1ab7874 | 1491 | || vinsn_equal_p (avinsn, new_vinsn))) |
1492 | { | |
1493 | *indp = i; | |
1494 | return true; | |
1495 | } | |
1496 | else if (auid > uid) | |
1497 | break; | |
1498 | i++; | |
1499 | } | |
1500 | ||
1501 | *indp = i; | |
1502 | return false; | |
1503 | } | |
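
Besides reporting a hit, find_in_history_vect_1 always stores in *INDP the position at which an entry with this UID would have to be inserted to keep the vector sorted; insert_in_history_vect relies on that below. A self-contained sketch of that search contract on a plain ascending int array (hypothetical names):

```c
#include <stdbool.h>

/* Search the ascending array ARR[0..LEN-1] for KEY.  Return true on a hit;
   in every case store in *INDP the index where KEY belongs, so that
   inserting at *INDP keeps the array sorted.  */
static bool
find_sorted (const int *arr, int len, int key, int *indp)
{
  int i = 0;

  while (i < len)
    {
      if (arr[i] == key)
        {
          *indp = i;
          return true;
        }
      if (arr[i] > key)
        break;                    /* passed the place where KEY would sit */
      i++;
    }

  *indp = i;
  return false;
}
```
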
1504 | ||
48e1416a | 1505 | /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return |
1506 | the position found or -1, if no such value is in vector. | |
e1ab7874 | 1507 | Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */ |
1508 | int | |
f1f41a6c | 1509 | find_in_history_vect (vec<expr_history_def> vect, rtx insn, |
e1ab7874 | 1510 | vinsn_t new_vinsn, bool originators_p) |
1511 | { | |
1512 | int ind; | |
1513 | ||
48e1416a | 1514 | if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, |
e1ab7874 | 1515 | false, &ind)) |
1516 | return ind; | |
1517 | ||
1518 | if (INSN_ORIGINATORS (insn) && originators_p) | |
1519 | { | |
1520 | unsigned uid; | |
1521 | bitmap_iterator bi; | |
1522 | ||
1523 | EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi) | |
1524 | if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind)) | |
1525 | return ind; | |
1526 | } | |
48e1416a | 1527 | |
e1ab7874 | 1528 | return -1; |
1529 | } | |
1530 | ||
48e1416a | 1531 | /* Insert new element in a sorted history vector pointed to by PVECT, |
1532 | if it is not there already. The element is searched using | |
e1ab7874 | 1533 | UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save |
1534 | the history of a transformation. */ | |
1535 | void | |
f1f41a6c | 1536 | insert_in_history_vect (vec<expr_history_def> *pvect, |
e1ab7874 | 1537 | unsigned uid, enum local_trans_type type, |
48e1416a | 1538 | vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, |
e1ab7874 | 1539 | ds_t spec_ds) |
1540 | { | |
f1f41a6c | 1541 | vec<expr_history_def> vect = *pvect; |
e1ab7874 | 1542 | expr_history_def temp; |
1543 | bool res; | |
1544 | int ind; | |
1545 | ||
1546 | res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind); | |
1547 | ||
1548 | if (res) | |
1549 | { | |
f1f41a6c | 1550 | expr_history_def *phist = &vect[ind]; |
e1ab7874 | 1551 | |
48e1416a | 1552 | /* It is possible that speculation types of expressions that were |
e1ab7874 | 1553 | propagated through different paths will be different here. In this |
1554 | case, merge the status to get the correct check later. */ | |
1555 | if (phist->spec_ds != spec_ds) | |
1556 | phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds); | |
1557 | return; | |
1558 | } | |
48e1416a | 1559 | |
e1ab7874 | 1560 | temp.uid = uid; |
1561 | temp.old_expr_vinsn = old_expr_vinsn; | |
48e1416a | 1562 | temp.new_expr_vinsn = new_expr_vinsn; |
e1ab7874 | 1563 | temp.spec_ds = spec_ds; |
1564 | temp.type = type; | |
1565 | ||
1566 | vinsn_attach (old_expr_vinsn); | |
1567 | vinsn_attach (new_expr_vinsn); | |
f1f41a6c | 1568 | vect.safe_insert (ind, temp); |
e1ab7874 | 1569 | *pvect = vect; |
1570 | } | |
1571 | ||
1572 | /* Free history vector PVECT. */ | |
1573 | static void | |
f1f41a6c | 1574 | free_history_vect (vec<expr_history_def> &pvect) |
e1ab7874 | 1575 | { |
1576 | unsigned i; | |
1577 | expr_history_def *phist; | |
1578 | ||
f1f41a6c | 1579 | if (! pvect.exists ()) |
e1ab7874 | 1580 | return; |
48e1416a | 1581 | |
f1f41a6c | 1582 | for (i = 0; pvect.iterate (i, &phist); i++) |
e1ab7874 | 1583 | { |
1584 | vinsn_detach (phist->old_expr_vinsn); | |
1585 | vinsn_detach (phist->new_expr_vinsn); | |
1586 | } | |
48e1416a | 1587 | |
f1f41a6c | 1588 | pvect.release (); |
e1ab7874 | 1589 | } |
1590 | ||
c53624fb | 1591 | /* Merge vector FROM into PVECT. */ |
1592 | static void | |
f1f41a6c | 1593 | merge_history_vect (vec<expr_history_def> *pvect, |
1594 | vec<expr_history_def> from) | |
c53624fb | 1595 | { |
1596 | expr_history_def *phist; | |
1597 | int i; | |
1598 | ||
1599 | /* We keep this vector sorted. */ | |
f1f41a6c | 1600 | for (i = 0; from.iterate (i, &phist); i++) |
c53624fb | 1601 | insert_in_history_vect (pvect, phist->uid, phist->type, |
1602 | phist->old_expr_vinsn, phist->new_expr_vinsn, | |
1603 | phist->spec_ds); | |
1604 | } | |
e1ab7874 | 1605 | |
1606 | /* Compare two vinsns as rhses if possible and as vinsns otherwise. */ | |
1607 | bool | |
1608 | vinsn_equal_p (vinsn_t x, vinsn_t y) | |
1609 | { | |
1610 | rtx_equal_p_callback_function repcf; | |
1611 | ||
1612 | if (x == y) | |
1613 | return true; | |
1614 | ||
1615 | if (VINSN_TYPE (x) != VINSN_TYPE (y)) | |
1616 | return false; | |
1617 | ||
1618 | if (VINSN_HASH (x) != VINSN_HASH (y)) | |
1619 | return false; | |
1620 | ||
1621 | repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL; | |
48e1416a | 1622 | if (VINSN_SEPARABLE_P (x)) |
e1ab7874 | 1623 | { |
1624 | /* Compare RHSes of VINSNs. */ | |
1625 | gcc_assert (VINSN_RHS (x)); | |
1626 | gcc_assert (VINSN_RHS (y)); | |
1627 | ||
1628 | return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf); | |
1629 | } | |
1630 | ||
1631 | return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf); | |
1632 | } | |
1633 | \f | |
1634 | ||
1635 | /* Functions for working with expressions. */ | |
1636 | ||
1637 | /* Initialize EXPR. */ | |
1638 | static void | |
1639 | init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority, | |
1640 | int sched_times, int orig_bb_index, ds_t spec_done_ds, | |
1641 | ds_t spec_to_check_ds, int orig_sched_cycle, | |
f1f41a6c | 1642 | vec<expr_history_def> history, |
1643 | signed char target_available, | |
e1ab7874 | 1644 | bool was_substituted, bool was_renamed, bool needs_spec_check_p, |
1645 | bool cant_move) | |
1646 | { | |
1647 | vinsn_attach (vi); | |
1648 | ||
1649 | EXPR_VINSN (expr) = vi; | |
1650 | EXPR_SPEC (expr) = spec; | |
1651 | EXPR_USEFULNESS (expr) = use; | |
1652 | EXPR_PRIORITY (expr) = priority; | |
1653 | EXPR_PRIORITY_ADJ (expr) = 0; | |
1654 | EXPR_SCHED_TIMES (expr) = sched_times; | |
1655 | EXPR_ORIG_BB_INDEX (expr) = orig_bb_index; | |
1656 | EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle; | |
1657 | EXPR_SPEC_DONE_DS (expr) = spec_done_ds; | |
1658 | EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds; | |
1659 | ||
f1f41a6c | 1660 | if (history.exists ()) |
e1ab7874 | 1661 | EXPR_HISTORY_OF_CHANGES (expr) = history; |
1662 | else | |
f1f41a6c | 1663 | EXPR_HISTORY_OF_CHANGES (expr).create (0); |
e1ab7874 | 1664 | |
1665 | EXPR_TARGET_AVAILABLE (expr) = target_available; | |
1666 | EXPR_WAS_SUBSTITUTED (expr) = was_substituted; | |
1667 | EXPR_WAS_RENAMED (expr) = was_renamed; | |
1668 | EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p; | |
1669 | EXPR_CANT_MOVE (expr) = cant_move; | |
1670 | } | |
1671 | ||
1672 | /* Make a copy of the expr FROM into the expr TO. */ | |
1673 | void | |
1674 | copy_expr (expr_t to, expr_t from) | |
1675 | { | |
1e094109 | 1676 | vec<expr_history_def> temp = vNULL; |
e1ab7874 | 1677 | |
f1f41a6c | 1678 | if (EXPR_HISTORY_OF_CHANGES (from).exists ()) |
e1ab7874 | 1679 | { |
1680 | unsigned i; | |
1681 | expr_history_def *phist; | |
1682 | ||
f1f41a6c | 1683 | temp = EXPR_HISTORY_OF_CHANGES (from).copy (); |
48e1416a | 1684 | for (i = 0; |
f1f41a6c | 1685 | temp.iterate (i, &phist); |
e1ab7874 | 1686 | i++) |
1687 | { | |
1688 | vinsn_attach (phist->old_expr_vinsn); | |
1689 | vinsn_attach (phist->new_expr_vinsn); | |
1690 | } | |
1691 | } | |
1692 | ||
48e1416a | 1693 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), |
e1ab7874 | 1694 | EXPR_USEFULNESS (from), EXPR_PRIORITY (from), |
1695 | EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from), | |
48e1416a | 1696 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), |
e1ab7874 | 1697 | EXPR_ORIG_SCHED_CYCLE (from), temp, |
48e1416a | 1698 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
e1ab7874 | 1699 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
1700 | EXPR_CANT_MOVE (from)); | |
1701 | } | |
1702 | ||
48e1416a | 1703 | /* Same, but the final expr will not ever be in av sets, so don't copy |
e1ab7874 | 1704 | "uninteresting" data such as bitmap cache. */ |
1705 | void | |
1706 | copy_expr_onside (expr_t to, expr_t from) | |
1707 | { | |
1708 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from), | |
1709 | EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0, | |
f1f41a6c | 1710 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, |
1e094109 | 1711 | vNULL, |
e1ab7874 | 1712 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
1713 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), | |
1714 | EXPR_CANT_MOVE (from)); | |
1715 | } | |
1716 | ||
1717 | /* Prepare the expr of INSN for scheduling. Used when moving insn and when | |
1718 | initializing new insns. */ | |
1719 | static void | |
1720 | prepare_insn_expr (insn_t insn, int seqno) | |
1721 | { | |
1722 | expr_t expr = INSN_EXPR (insn); | |
1723 | ds_t ds; | |
48e1416a | 1724 | |
e1ab7874 | 1725 | INSN_SEQNO (insn) = seqno; |
1726 | EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn); | |
1727 | EXPR_SPEC (expr) = 0; | |
1728 | EXPR_ORIG_SCHED_CYCLE (expr) = 0; | |
1729 | EXPR_WAS_SUBSTITUTED (expr) = 0; | |
1730 | EXPR_WAS_RENAMED (expr) = 0; | |
1731 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
1732 | INSN_LIVE_VALID_P (insn) = false; | |
1733 | ||
1734 | /* ??? If this expression is speculative, make its dependence | |
1735 | as weak as possible. We can filter this expression later | |
1736 | in process_spec_exprs, because we do not distinguish | |
1737 | between the status we got during compute_av_set and the | |
1738 | existing status. To be fixed. */ | |
1739 | ds = EXPR_SPEC_DONE_DS (expr); | |
1740 | if (ds) | |
1741 | EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds); | |
1742 | ||
f1f41a6c | 1743 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1744 | } |
1745 | ||
1746 | /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1747 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1748 | a split point. */ |
1749 | static void | |
1750 | update_target_availability (expr_t to, expr_t from, insn_t split_point) | |
1751 | { | |
48e1416a | 1752 | if (EXPR_TARGET_AVAILABLE (to) < 0 |
e1ab7874 | 1753 | || EXPR_TARGET_AVAILABLE (from) < 0) |
1754 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1755 | else | |
1756 | { | |
1757 | /* We try to detect the case when one of the expressions | |
1758 | can only be reached through another one. In this case, | |
1759 | we can do better. */ | |
1760 | if (split_point == NULL) | |
1761 | { | |
1762 | int toind, fromind; | |
1763 | ||
1764 | toind = EXPR_ORIG_BB_INDEX (to); | |
1765 | fromind = EXPR_ORIG_BB_INDEX (from); | |
48e1416a | 1766 | |
e1ab7874 | 1767 | if (toind && toind == fromind) |
48e1416a | 1768 | /* Do nothing -- everything is done in |
e1ab7874 | 1769 | merge_with_other_exprs. */ |
1770 | ; | |
1771 | else | |
1772 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1773 | } | |
d6726470 | 1774 | else if (EXPR_TARGET_AVAILABLE (from) == 0 |
1775 | && EXPR_LHS (from) | |
1776 | && REG_P (EXPR_LHS (from)) | |
1777 | && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from))) | |
1778 | EXPR_TARGET_AVAILABLE (to) = -1; | |
e1ab7874 | 1779 | else |
1780 | EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from); | |
1781 | } | |
1782 | } | |
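(Editorial note: a worked summary of the merge above, assuming the ternary
encoding of EXPR_TARGET_AVAILABLE that the code implies: 1 = target register
available, 0 = unavailable, -1 = unknown.)

    /* either value < 0                           -> TO = -1  (unknown wins)
       no split point, same nonzero ORIG_BB       -> TO kept  (handled later in
                                                    merge_with_other_exprs)
       no split point, different ORIG_BB          -> TO = -1  (cannot tell)
       split point, FROM == 0, different LHS regs -> TO = -1
       otherwise                                  -> TO &= FROM  (1&1=1, 1&0=0) */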
1783 | ||
1784 | /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1785 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1786 | a split point. */ |
1787 | static void | |
1788 | update_speculative_bits (expr_t to, expr_t from, insn_t split_point) | |
1789 | { | |
1790 | ds_t old_to_ds, old_from_ds; | |
1791 | ||
1792 | old_to_ds = EXPR_SPEC_DONE_DS (to); | |
1793 | old_from_ds = EXPR_SPEC_DONE_DS (from); | |
48e1416a | 1794 | |
e1ab7874 | 1795 | EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds); |
1796 | EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from); | |
1797 | EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from); | |
1798 | ||
1799 | /* When merging e.g. control & data speculative exprs, or a control | |
48e1416a | 1800 | speculative with a control&data speculative one, we really have |
e1ab7874 | 1801 | to change vinsn too. Also, when speculative status is changed, |
1802 | we also need to record this as a transformation in expr's history. */ | |
1803 | if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE)) | |
1804 | { | |
1805 | old_to_ds = ds_get_speculation_types (old_to_ds); | |
1806 | old_from_ds = ds_get_speculation_types (old_from_ds); | |
48e1416a | 1807 | |
e1ab7874 | 1808 | if (old_to_ds != old_from_ds) |
1809 | { | |
1810 | ds_t record_ds; | |
48e1416a | 1811 | |
1812 | /* When both expressions are speculative, we need to change | |
e1ab7874 | 1813 | the vinsn first. */ |
1814 | if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE)) | |
1815 | { | |
1816 | int res; | |
48e1416a | 1817 | |
e1ab7874 | 1818 | res = speculate_expr (to, EXPR_SPEC_DONE_DS (to)); |
1819 | gcc_assert (res >= 0); | |
1820 | } | |
1821 | ||
1822 | if (split_point != NULL) | |
1823 | { | |
1824 | /* Record the change with proper status. */ | |
1825 | record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE; | |
1826 | record_ds &= ~(old_to_ds & SPECULATIVE); | |
1827 | record_ds &= ~(old_from_ds & SPECULATIVE); | |
48e1416a | 1828 | |
1829 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), | |
1830 | INSN_UID (split_point), TRANS_SPECULATION, | |
e1ab7874 | 1831 | EXPR_VINSN (from), EXPR_VINSN (to), |
1832 | record_ds); | |
1833 | } | |
1834 | } | |
1835 | } | |
1836 | } | |
1837 | ||
1838 | ||
1839 | /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL, | |
1840 | this is done along different paths. */ | |
1841 | void | |
1842 | merge_expr_data (expr_t to, expr_t from, insn_t split_point) | |
1843 | { | |
32bbc704 | 1844 | /* Choose the maximum of the specs of merged exprs. This is required |
1845 | for correctness of bookkeeping. */ | |
1846 | if (EXPR_SPEC (to) < EXPR_SPEC (from)) | |
e1ab7874 | 1847 | EXPR_SPEC (to) = EXPR_SPEC (from); |
1848 | ||
1849 | if (split_point) | |
1850 | EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from); | |
1851 | else | |
48e1416a | 1852 | EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), |
e1ab7874 | 1853 | EXPR_USEFULNESS (from)); |
1854 | ||
1855 | if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from)) | |
1856 | EXPR_PRIORITY (to) = EXPR_PRIORITY (from); | |
1857 | ||
1858 | if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from)) | |
1859 | EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from); | |
1860 | ||
1861 | if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from)) | |
1862 | EXPR_ORIG_BB_INDEX (to) = 0; | |
1863 | ||
48e1416a | 1864 | EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), |
e1ab7874 | 1865 | EXPR_ORIG_SCHED_CYCLE (from)); |
1866 | ||
e1ab7874 | 1867 | EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from); |
1868 | EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from); | |
1869 | EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from); | |
1870 | ||
c53624fb | 1871 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
1872 | EXPR_HISTORY_OF_CHANGES (from)); | |
e1ab7874 | 1873 | update_target_availability (to, from, split_point); |
1874 | update_speculative_bits (to, from, split_point); | |
1875 | } | |
1876 | ||
1877 | /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal | |
48e1416a | 1878 | in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions |
e1ab7874 | 1879 | are merged from different successors at a split point. */ |
1880 | void | |
1881 | merge_expr (expr_t to, expr_t from, insn_t split_point) | |
1882 | { | |
1883 | vinsn_t to_vi = EXPR_VINSN (to); | |
1884 | vinsn_t from_vi = EXPR_VINSN (from); | |
1885 | ||
1886 | gcc_assert (vinsn_equal_p (to_vi, from_vi)); | |
1887 | ||
1888 | /* Make sure that speculative pattern is propagated into exprs that | |
1889 | have non-speculative one. This will provide us with consistent | |
1890 | speculative bits and speculative patterns inside expr. */ | |
936ab1d9 | 1891 | if ((EXPR_SPEC_DONE_DS (from) != 0 |
1892 | && EXPR_SPEC_DONE_DS (to) == 0) | |
1893 | /* Do likewise for volatile insns, so that we always retain | |
1894 | the may_trap_p bit on the resulting expression. */ | |
1895 | || (VINSN_MAY_TRAP_P (EXPR_VINSN (from)) | |
1896 | && !VINSN_MAY_TRAP_P (EXPR_VINSN (to)))) | |
e1ab7874 | 1897 | change_vinsn_in_expr (to, EXPR_VINSN (from)); |
1898 | ||
1899 | merge_expr_data (to, from, split_point); | |
1900 | gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE); | |
1901 | } | |
1902 | ||
1903 | /* Clear the information of this EXPR. */ | |
1904 | void | |
1905 | clear_expr (expr_t expr) | |
1906 | { | |
48e1416a | 1907 | |
e1ab7874 | 1908 | vinsn_detach (EXPR_VINSN (expr)); |
1909 | EXPR_VINSN (expr) = NULL; | |
1910 | ||
f1f41a6c | 1911 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1912 | } |
1913 | ||
1914 | /* For a given LV_SET, mark EXPR having unavailable target register. */ | |
1915 | static void | |
1916 | set_unavailable_target_for_expr (expr_t expr, regset lv_set) | |
1917 | { | |
1918 | if (EXPR_SEPARABLE_P (expr)) | |
1919 | { | |
1920 | if (REG_P (EXPR_LHS (expr)) | |
1f53e226 | 1921 | && register_unavailable_p (lv_set, EXPR_LHS (expr))) |
e1ab7874 | 1922 | { |
48e1416a | 1923 | /* If it's an insn like r1 = use (r1, ...), and it exists in |
1924 | different forms in each of the av_sets being merged, we can't say | |
1925 | whether the original destination register is available or not. | |
1926 | However, this still works if the destination register is not used | |
e1ab7874 | 1927 | in the original expression: if the branch at which LV_SET we're |
1928 | looking here is not actually the 'other branch' in the sense that the same | |
48e1416a | 1929 | expression is available through it (but it can't be determined |
e1ab7874 | 1930 | at computation stage because of transformations on one of the |
48e1416a | 1931 | branches), it still won't affect the availability. |
1932 | Liveness of a register somewhere on a code motion path means | |
1933 | it's either read somewhere on a code motion path, live on | |
e1ab7874 | 1934 | 'other' branch, live at the point immediately following |
1935 | the original operation, or is read by the original operation. | |
1936 | The latter case is filtered out in the condition below. | |
1937 | It still doesn't cover the case when register is defined and used | |
1938 | somewhere within the code motion path, and in this case we could | |
1939 | miss a unifying code motion along both branches using a renamed | |
1940 | register, but it won't affect code correctness since upon | |
1941 | an actual code motion the bookkeeping code would be generated. */ | |
1f53e226 | 1942 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1943 | EXPR_LHS (expr))) | |
e1ab7874 | 1944 | EXPR_TARGET_AVAILABLE (expr) = -1; |
1945 | else | |
1946 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1947 | } | |
1948 | } | |
1949 | else | |
1950 | { | |
1951 | unsigned regno; | |
1952 | reg_set_iterator rsi; | |
48e1416a | 1953 | |
1954 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), | |
e1ab7874 | 1955 | 0, regno, rsi) |
1956 | if (bitmap_bit_p (lv_set, regno)) | |
1957 | { | |
1958 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1959 | break; | |
1960 | } | |
1961 | ||
1962 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)), | |
1963 | 0, regno, rsi) | |
1964 | if (bitmap_bit_p (lv_set, regno)) | |
1965 | { | |
1966 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1967 | break; | |
1968 | } | |
1969 | } | |
1970 | } | |
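(Editorial note: two illustrative cases for the r1 = use (r1, ...) discussion
above, assuming r1 is live in LV_SET; the insns are hypothetical.)

    /* r1 = r2 + r3  -- r1 is set but not read by the expr:
                        EXPR_TARGET_AVAILABLE = false; the expr can still be
                        moved up if it is renamed to another register.
       r1 = r1 + r3  -- r1 is also in VINSN_REG_USES of the expr:
                        EXPR_TARGET_AVAILABLE = -1; we cannot tell whether the
                        liveness comes from this same expr on the other branch
                        or from an unrelated use.  */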
1971 | ||
48e1416a | 1972 | /* Try to make EXPR speculative. Return 1 when EXPR's pattern |
e1ab7874 | 1973 | or dependence status have changed, 2 when also the target register |
1974 | became unavailable, 0 if nothing had to be changed. */ | |
1975 | int | |
1976 | speculate_expr (expr_t expr, ds_t ds) | |
1977 | { | |
1978 | int res; | |
04d073df | 1979 | rtx_insn *orig_insn_rtx; |
e1ab7874 | 1980 | rtx spec_pat; |
1981 | ds_t target_ds, current_ds; | |
1982 | ||
1983 | /* Obtain the status we need to put on EXPR. */ | |
1984 | target_ds = (ds & SPECULATIVE); | |
1985 | current_ds = EXPR_SPEC_DONE_DS (expr); | |
1986 | ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX); | |
1987 | ||
1988 | orig_insn_rtx = EXPR_INSN_RTX (expr); | |
1989 | ||
1990 | res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat); | |
1991 | ||
1992 | switch (res) | |
1993 | { | |
1994 | case 0: | |
1995 | EXPR_SPEC_DONE_DS (expr) = ds; | |
1996 | return current_ds != ds ? 1 : 0; | |
48e1416a | 1997 | |
e1ab7874 | 1998 | case 1: |
1999 | { | |
04d073df | 2000 | rtx_insn *spec_insn_rtx = |
2001 | create_insn_rtx_from_pattern (spec_pat, NULL_RTX); | |
e1ab7874 | 2002 | vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false); |
2003 | ||
2004 | change_vinsn_in_expr (expr, spec_vinsn); | |
2005 | EXPR_SPEC_DONE_DS (expr) = ds; | |
2006 | EXPR_NEEDS_SPEC_CHECK_P (expr) = true; | |
2007 | ||
48e1416a | 2008 | /* Do not allow clobbering the address register of speculative |
e1ab7874 | 2009 | insns. */ |
1f53e226 | 2010 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
2011 | expr_dest_reg (expr))) | |
e1ab7874 | 2012 | { |
2013 | EXPR_TARGET_AVAILABLE (expr) = false; | |
2014 | return 2; | |
2015 | } | |
2016 | ||
2017 | return 1; | |
2018 | } | |
2019 | ||
2020 | case -1: | |
2021 | return -1; | |
2022 | ||
2023 | default: | |
2024 | gcc_unreachable (); | |
2025 | return -1; | |
2026 | } | |
2027 | } | |
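(Editorial note: an illustrative caller sketch; the return values follow the
comment above the function: 0 = nothing changed, 1 = pattern or dependence
status changed, 2 = additionally the target register became unavailable,
negative = the insn cannot be speculated.)

    int res = speculate_expr (expr, ds);
    if (res < 0)
      ;   /* Give up: EXPR cannot be turned into a speculative form.  */
    else if (res == 2)
      ;   /* Speculative form created, but the current target register would
             clobber a register used by the speculative pattern.  */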
2028 | ||
2029 | /* Return a destination register, if any, of EXPR. */ | |
2030 | rtx | |
2031 | expr_dest_reg (expr_t expr) | |
2032 | { | |
2033 | rtx dest = VINSN_LHS (EXPR_VINSN (expr)); | |
2034 | ||
2035 | if (dest != NULL_RTX && REG_P (dest)) | |
2036 | return dest; | |
2037 | ||
2038 | return NULL_RTX; | |
2039 | } | |
2040 | ||
2041 | /* Returns the REGNO of EXPR's destination. */ | |
2042 | unsigned | |
2043 | expr_dest_regno (expr_t expr) | |
2044 | { | |
2045 | rtx dest = expr_dest_reg (expr); | |
2046 | ||
2047 | gcc_assert (dest != NULL_RTX); | |
2048 | return REGNO (dest); | |
2049 | } | |
2050 | ||
48e1416a | 2051 | /* For a given LV_SET, mark all expressions in JOIN_SET, but not present in |
e1ab7874 | 2052 | AV_SET having unavailable target register. */ |
2053 | void | |
2054 | mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) | |
2055 | { | |
2056 | expr_t expr; | |
2057 | av_set_iterator avi; | |
2058 | ||
2059 | FOR_EACH_EXPR (expr, avi, join_set) | |
2060 | if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) | |
2061 | set_unavailable_target_for_expr (expr, lv_set); | |
2062 | } | |
2063 | \f | |
2064 | ||
1f53e226 | 2065 | /* Returns true if REG (at least partially) is present in REGS. */ |
2066 | bool | |
2067 | register_unavailable_p (regset regs, rtx reg) | |
2068 | { | |
2069 | unsigned regno, end_regno; | |
2070 | ||
2071 | regno = REGNO (reg); | |
2072 | if (bitmap_bit_p (regs, regno)) | |
2073 | return true; | |
2074 | ||
2075 | end_regno = END_REGNO (reg); | |
2076 | ||
2077 | while (++regno < end_regno) | |
2078 | if (bitmap_bit_p (regs, regno)) | |
2079 | return true; | |
2080 | ||
2081 | return false; | |
2082 | } | |
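(Editorial sketch, not in the original source: the loop covers every hard
register word of REG, so liveness of any part makes the whole register
unavailable.  gen_rtx_REG and DImode are standard RTL names; regno 10 is an
arbitrary example.)

    rtx reg = gen_rtx_REG (DImode, 10);   /* say, hard regs 10 and 11 */
    if (register_unavailable_p (lv_set, reg))
      ;   /* At least one of hard regs 10..11 is set in LV_SET.  */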
2083 | ||
e1ab7874 | 2084 | /* Av set functions. */ |
2085 | ||
2086 | /* Add a new element to av set SETP. | |
2087 | Return the element added. */ | |
2088 | static av_set_t | |
2089 | av_set_add_element (av_set_t *setp) | |
2090 | { | |
2091 | /* Insert at the beginning of the list. */ | |
2092 | _list_add (setp); | |
2093 | return *setp; | |
2094 | } | |
2095 | ||
2096 | /* Add EXPR to SETP. */ | |
2097 | void | |
2098 | av_set_add (av_set_t *setp, expr_t expr) | |
2099 | { | |
2100 | av_set_t elem; | |
48e1416a | 2101 | |
e1ab7874 | 2102 | gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); |
2103 | elem = av_set_add_element (setp); | |
2104 | copy_expr (_AV_SET_EXPR (elem), expr); | |
2105 | } | |
2106 | ||
2107 | /* Same, but do not copy EXPR. */ | |
2108 | static void | |
2109 | av_set_add_nocopy (av_set_t *setp, expr_t expr) | |
2110 | { | |
2111 | av_set_t elem; | |
2112 | ||
2113 | elem = av_set_add_element (setp); | |
2114 | *_AV_SET_EXPR (elem) = *expr; | |
2115 | } | |
2116 | ||
2117 | /* Remove expr pointed to by IP from the av_set. */ | |
2118 | void | |
2119 | av_set_iter_remove (av_set_iterator *ip) | |
2120 | { | |
2121 | clear_expr (_AV_SET_EXPR (*ip->lp)); | |
2122 | _list_iter_remove (ip); | |
2123 | } | |
2124 | ||
2125 | /* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the | |
2126 | sense of the vinsn_equal_p function. Return NULL if no such expr | |
2127 | was found in SET. */ | |
2128 | expr_t | |
2129 | av_set_lookup (av_set_t set, vinsn_t sought_vinsn) | |
2130 | { | |
2131 | expr_t expr; | |
2132 | av_set_iterator i; | |
2133 | ||
2134 | FOR_EACH_EXPR (expr, i, set) | |
2135 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2136 | return expr; | |
2137 | return NULL; | |
2138 | } | |
2139 | ||
2140 | /* Same, but also remove the EXPR found. */ | |
2141 | static expr_t | |
2142 | av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) | |
2143 | { | |
2144 | expr_t expr; | |
2145 | av_set_iterator i; | |
2146 | ||
2147 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2148 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2149 | { | |
2150 | _list_iter_remove_nofree (&i); | |
2151 | return expr; | |
2152 | } | |
2153 | return NULL; | |
2154 | } | |
2155 | ||
2156 | /* Search for an expr in SET, such that it's equivalent to EXPR in the | |
2157 | sense of the vinsn_equal_p function of their vinsns, but not EXPR itself. | |
2158 | Returns NULL if no such expr was found in SET. */ | |
2159 | static expr_t | |
2160 | av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) | |
2161 | { | |
2162 | expr_t cur_expr; | |
2163 | av_set_iterator i; | |
2164 | ||
2165 | FOR_EACH_EXPR (cur_expr, i, set) | |
2166 | { | |
2167 | if (cur_expr == expr) | |
2168 | continue; | |
2169 | if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) | |
2170 | return cur_expr; | |
2171 | } | |
2172 | ||
2173 | return NULL; | |
2174 | } | |
2175 | ||
2176 | /* If another equivalent expression is already in AVP, remove one of them. */ | |
2177 | expr_t | |
2178 | merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) | |
2179 | { | |
2180 | expr_t expr2; | |
2181 | ||
2182 | expr2 = av_set_lookup_other_equiv_expr (*avp, expr); | |
2183 | if (expr2 != NULL) | |
2184 | { | |
2185 | /* Reset target availability on merge, since taking it only from one | |
2186 | of the exprs would be controversial for different code. */ | |
2187 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2188 | EXPR_USEFULNESS (expr2) = 0; | |
2189 | ||
2190 | merge_expr (expr2, expr, NULL); | |
48e1416a | 2191 | |
e1ab7874 | 2192 | /* Fix usefulness as it should be now REG_BR_PROB_BASE. */ |
2193 | EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; | |
48e1416a | 2194 | |
e1ab7874 | 2195 | av_set_iter_remove (ip); |
2196 | return expr2; | |
2197 | } | |
2198 | ||
2199 | return expr; | |
2200 | } | |
2201 | ||
2202 | /* Return true if there is an expr that correlates to VI in SET. */ | |
2203 | bool | |
2204 | av_set_is_in_p (av_set_t set, vinsn_t vi) | |
2205 | { | |
2206 | return av_set_lookup (set, vi) != NULL; | |
2207 | } | |
2208 | ||
2209 | /* Return a copy of SET. */ | |
2210 | av_set_t | |
2211 | av_set_copy (av_set_t set) | |
2212 | { | |
2213 | expr_t expr; | |
2214 | av_set_iterator i; | |
2215 | av_set_t res = NULL; | |
2216 | ||
2217 | FOR_EACH_EXPR (expr, i, set) | |
2218 | av_set_add (&res, expr); | |
2219 | ||
2220 | return res; | |
2221 | } | |
2222 | ||
2223 | /* Join two av sets that do not have common elements by attaching second set | |
2224 | (pointed to by FROMP) to the end of first set (TO_TAILP must point to | |
2225 | _AV_SET_NEXT of first set's last element). */ | |
2226 | static void | |
2227 | join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) | |
2228 | { | |
2229 | gcc_assert (*to_tailp == NULL); | |
2230 | *to_tailp = *fromp; | |
2231 | *fromp = NULL; | |
2232 | } | |
2233 | ||
2234 | /* Make the set pointed to by TOP the union of TOP and FROMP. Clear the av_set | |
2235 | pointed to by FROMP afterwards. */ | |
2236 | void | |
2237 | av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) | |
2238 | { | |
2239 | expr_t expr1; | |
2240 | av_set_iterator i; | |
2241 | ||
2242 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2243 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2244 | { | |
2245 | expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); | |
2246 | ||
2247 | if (expr2) | |
2248 | { | |
2249 | merge_expr (expr2, expr1, insn); | |
2250 | av_set_iter_remove (&i); | |
2251 | } | |
2252 | } | |
2253 | ||
2254 | join_distinct_sets (i.lp, fromp); | |
2255 | } | |
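(Editorial note: a typical use, sketched: accumulating the av sets of an insn's
successors.  Expressions present in both sets are merged via merge_expr with
INSN as the split point, so their usefulness values add up; the rest of
SUCC_SET is spliced onto the tail and SUCC_SET ends up NULL.)

    av_set_t av = NULL, succ_set;
    /* ... compute SUCC_SET for one successor ...  */
    av_set_union_and_clear (&av, &succ_set, insn);   /* consumes SUCC_SET */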
2256 | ||
48e1416a | 2257 | /* Same as above, but also update availability of target register in |
e1ab7874 | 2258 | TOP judging by TO_LV_SET and FROM_LV_SET. */ |
2259 | void | |
2260 | av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, | |
2261 | regset from_lv_set, insn_t insn) | |
2262 | { | |
2263 | expr_t expr1; | |
2264 | av_set_iterator i; | |
2265 | av_set_t *to_tailp, in_both_set = NULL; | |
2266 | ||
2267 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2268 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2269 | { | |
2270 | expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); | |
2271 | ||
2272 | if (expr2) | |
2273 | { | |
48e1416a | 2274 | /* It may be that the expressions have different destination |
e1ab7874 | 2275 | registers, in which case we need to check liveness here. */ |
2276 | if (EXPR_SEPARABLE_P (expr1)) | |
2277 | { | |
48e1416a | 2278 | int regno1 = (REG_P (EXPR_LHS (expr1)) |
e1ab7874 | 2279 | ? (int) expr_dest_regno (expr1) : -1); |
48e1416a | 2280 | int regno2 = (REG_P (EXPR_LHS (expr2)) |
e1ab7874 | 2281 | ? (int) expr_dest_regno (expr2) : -1); |
48e1416a | 2282 | |
2283 | /* ??? We don't have a way to check restrictions for | |
e1ab7874 | 2284 | the *other* register on the current path; we did it only | |
2285 | for the current target register. Give up. */ | |
2286 | if (regno1 != regno2) | |
2287 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2288 | } | |
2289 | else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) | |
2290 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2291 | ||
2292 | merge_expr (expr2, expr1, insn); | |
2293 | av_set_add_nocopy (&in_both_set, expr2); | |
2294 | av_set_iter_remove (&i); | |
2295 | } | |
2296 | else | |
48e1416a | 2297 | /* EXPR1 is present in TOP, but not in FROMP. Check it on |
e1ab7874 | 2298 | FROM_LV_SET. */ |
2299 | set_unavailable_target_for_expr (expr1, from_lv_set); | |
2300 | } | |
2301 | to_tailp = i.lp; | |
2302 | ||
2303 | /* These expressions are not present in TOP. Check liveness | |
2304 | restrictions on TO_LV_SET. */ | |
2305 | FOR_EACH_EXPR (expr1, i, *fromp) | |
2306 | set_unavailable_target_for_expr (expr1, to_lv_set); | |
2307 | ||
2308 | join_distinct_sets (i.lp, &in_both_set); | |
2309 | join_distinct_sets (to_tailp, fromp); | |
2310 | } | |
2311 | ||
2312 | /* Clear av_set pointed to by SETP. */ | |
2313 | void | |
2314 | av_set_clear (av_set_t *setp) | |
2315 | { | |
2316 | expr_t expr; | |
2317 | av_set_iterator i; | |
2318 | ||
2319 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2320 | av_set_iter_remove (&i); | |
2321 | ||
2322 | gcc_assert (*setp == NULL); | |
2323 | } | |
2324 | ||
2325 | /* Leave only one non-speculative element in the SETP. */ | |
2326 | void | |
2327 | av_set_leave_one_nonspec (av_set_t *setp) | |
2328 | { | |
2329 | expr_t expr; | |
2330 | av_set_iterator i; | |
2331 | bool has_one_nonspec = false; | |
2332 | ||
48e1416a | 2333 | /* Keep all speculative exprs, and leave one non-speculative |
e1ab7874 | 2334 | (the first one). */ |
2335 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2336 | { | |
2337 | if (!EXPR_SPEC_DONE_DS (expr)) | |
2338 | { | |
2339 | if (has_one_nonspec) | |
2340 | av_set_iter_remove (&i); | |
2341 | else | |
2342 | has_one_nonspec = true; | |
2343 | } | |
2344 | } | |
2345 | } | |
2346 | ||
2347 | /* Return the N'th element of the SET. */ | |
2348 | expr_t | |
2349 | av_set_element (av_set_t set, int n) | |
2350 | { | |
2351 | expr_t expr; | |
2352 | av_set_iterator i; | |
2353 | ||
2354 | FOR_EACH_EXPR (expr, i, set) | |
2355 | if (n-- == 0) | |
2356 | return expr; | |
2357 | ||
2358 | gcc_unreachable (); | |
2359 | return NULL; | |
2360 | } | |
2361 | ||
2362 | /* Deletes all expressions from AVP that are conditional branches (IFs). */ | |
2363 | void | |
2364 | av_set_substract_cond_branches (av_set_t *avp) | |
2365 | { | |
2366 | av_set_iterator i; | |
2367 | expr_t expr; | |
2368 | ||
2369 | FOR_EACH_EXPR_1 (expr, i, avp) | |
2370 | if (vinsn_cond_branch_p (EXPR_VINSN (expr))) | |
2371 | av_set_iter_remove (&i); | |
2372 | } | |
2373 | ||
48e1416a | 2374 | /* Multiplies the usefulness attribute of each member of av-set AV by | |
e1ab7874 | 2375 | value PROB / ALL_PROB. */ |
2376 | void | |
2377 | av_set_split_usefulness (av_set_t av, int prob, int all_prob) | |
2378 | { | |
2379 | av_set_iterator i; | |
2380 | expr_t expr; | |
2381 | ||
2382 | FOR_EACH_EXPR (expr, i, av) | |
48e1416a | 2383 | EXPR_USEFULNESS (expr) = (all_prob |
e1ab7874 | 2384 | ? (EXPR_USEFULNESS (expr) * prob) / all_prob |
2385 | : 0); | |
2386 | } | |
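(Editorial note: worked numbers, assuming probabilities on the
REG_BR_PROB_BASE (10000) scale.)  An expression with EXPR_USEFULNESS of 10000
reaching a successor taken with probability 4000/10000 ends up with
usefulness (10000 * 4000) / 10000 = 4000; when the per-successor sets are
later unioned at the split point, merge_expr_data adds the scaled values back
together, and merge_expr asserts that the total never exceeds
REG_BR_PROB_BASE.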
2387 | ||
2388 | /* Leave in AVP only those expressions which are present in AV, | |
c53624fb | 2389 | and return it, merging history expressions. */ |
e1ab7874 | 2390 | void |
c53624fb | 2391 | av_set_code_motion_filter (av_set_t *avp, av_set_t av) |
e1ab7874 | 2392 | { |
2393 | av_set_iterator i; | |
c53624fb | 2394 | expr_t expr, expr2; |
e1ab7874 | 2395 | |
2396 | FOR_EACH_EXPR_1 (expr, i, avp) | |
c53624fb | 2397 | if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) |
e1ab7874 | 2398 | av_set_iter_remove (&i); |
c53624fb | 2399 | else |
2400 | /* When updating av sets in bookkeeping blocks, we can add more insns | |
2401 | there which will be transformed but the upper av sets will not | |
2402 | reflect those transformations. We then fail to undo those | |
2403 | when searching for such insns. So merge the history saved | |
2404 | in the av set of the block we are processing. */ | |
2405 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), | |
2406 | EXPR_HISTORY_OF_CHANGES (expr2)); | |
e1ab7874 | 2407 | } |
2408 | ||
2409 | \f | |
2410 | ||
2411 | /* Dependence hooks to initialize insn data. */ | |
2412 | ||
2413 | /* This is used in hooks callable from dependence analysis when initializing | |
2414 | instruction's data. */ | |
2415 | static struct | |
2416 | { | |
2417 | /* Where the dependence was found (lhs/rhs). */ | |
2418 | deps_where_t where; | |
2419 | ||
2420 | /* The actual data object to initialize. */ | |
2421 | idata_t id; | |
2422 | ||
2423 | /* True when the insn should not be made clonable. */ | |
2424 | bool force_unique_p; | |
2425 | ||
2426 | /* True when insn should be treated as of type USE, i.e. never renamed. */ | |
2427 | bool force_use_p; | |
2428 | } deps_init_id_data; | |
2429 | ||
2430 | ||
48e1416a | 2431 | /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be |
e1ab7874 | 2432 | clonable. */ |
2433 | static void | |
2434 | setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) | |
2435 | { | |
2436 | int type; | |
48e1416a | 2437 | |
e1ab7874 | 2438 | /* Determine whether INSN could be cloned and return appropriate vinsn type. |
2439 | Those clonable insns which can be separated into lhs and rhs have type SET. | |
2440 | Other clonable insns have type USE. */ | |
2441 | type = GET_CODE (insn); | |
2442 | ||
2443 | /* Only regular insns could be cloned. */ | |
2444 | if (type == INSN && !force_unique_p) | |
2445 | type = SET; | |
2446 | else if (type == JUMP_INSN && simplejump_p (insn)) | |
2447 | type = PC; | |
9845d120 | 2448 | else if (type == DEBUG_INSN) |
2449 | type = !force_unique_p ? USE : INSN; | |
48e1416a | 2450 | |
e1ab7874 | 2451 | IDATA_TYPE (id) = type; |
2452 | IDATA_REG_SETS (id) = get_clear_regset_from_pool (); | |
2453 | IDATA_REG_USES (id) = get_clear_regset_from_pool (); | |
2454 | IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); | |
2455 | } | |
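(Editorial summary of the mapping above; not part of the original source.)

    /* GET_CODE (insn)           force_unique_p   resulting IDATA_TYPE
       INSN                      false            SET  (separable, clonable)
       INSN                      true             INSN
       JUMP_INSN, simplejump_p   either           PC
       JUMP_INSN, other          either           JUMP_INSN
       DEBUG_INSN                false            USE
       DEBUG_INSN                true             INSN
       CALL_INSN                 either           CALL_INSN  */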
2456 | ||
2457 | /* Start initializing insn data. */ | |
2458 | static void | |
2459 | deps_init_id_start_insn (insn_t insn) | |
2460 | { | |
2461 | gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); | |
2462 | ||
2463 | setup_id_for_insn (deps_init_id_data.id, insn, | |
2464 | deps_init_id_data.force_unique_p); | |
2465 | deps_init_id_data.where = DEPS_IN_INSN; | |
2466 | } | |
2467 | ||
2468 | /* Start initializing lhs data. */ | |
2469 | static void | |
2470 | deps_init_id_start_lhs (rtx lhs) | |
2471 | { | |
2472 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2473 | gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); | |
2474 | ||
2475 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2476 | { | |
2477 | IDATA_LHS (deps_init_id_data.id) = lhs; | |
2478 | deps_init_id_data.where = DEPS_IN_LHS; | |
2479 | } | |
2480 | } | |
2481 | ||
2482 | /* Finish initializing lhs data. */ | |
2483 | static void | |
2484 | deps_init_id_finish_lhs (void) | |
2485 | { | |
2486 | deps_init_id_data.where = DEPS_IN_INSN; | |
2487 | } | |
2488 | ||
2489 | /* Note a set of REGNO. */ | |
2490 | static void | |
2491 | deps_init_id_note_reg_set (int regno) | |
2492 | { | |
2493 | haifa_note_reg_set (regno); | |
2494 | ||
2495 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2496 | deps_init_id_data.force_use_p = true; | |
2497 | ||
2498 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2499 | SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); | |
2500 | ||
2501 | #ifdef STACK_REGS | |
48e1416a | 2502 | /* Make instructions that set stack registers ineligible for | |
e1ab7874 | 2503 | renaming to avoid issues with find_used_regs. */ |
2504 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2505 | deps_init_id_data.force_use_p = true; | |
2506 | #endif | |
2507 | } | |
2508 | ||
2509 | /* Note a clobber of REGNO. */ | |
2510 | static void | |
2511 | deps_init_id_note_reg_clobber (int regno) | |
2512 | { | |
2513 | haifa_note_reg_clobber (regno); | |
2514 | ||
2515 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2516 | deps_init_id_data.force_use_p = true; | |
2517 | ||
2518 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2519 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); | |
2520 | } | |
2521 | ||
2522 | /* Note a use of REGNO. */ | |
2523 | static void | |
2524 | deps_init_id_note_reg_use (int regno) | |
2525 | { | |
2526 | haifa_note_reg_use (regno); | |
2527 | ||
2528 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2529 | SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); | |
2530 | } | |
2531 | ||
2532 | /* Start initializing rhs data. */ | |
2533 | static void | |
2534 | deps_init_id_start_rhs (rtx rhs) | |
2535 | { | |
2536 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2537 | ||
2538 | /* And there was no sel_deps_reset_to_insn (). */ | |
2539 | if (IDATA_LHS (deps_init_id_data.id) != NULL) | |
2540 | { | |
2541 | IDATA_RHS (deps_init_id_data.id) = rhs; | |
2542 | deps_init_id_data.where = DEPS_IN_RHS; | |
2543 | } | |
2544 | } | |
2545 | ||
2546 | /* Finish initializing rhs data. */ | |
2547 | static void | |
2548 | deps_init_id_finish_rhs (void) | |
2549 | { | |
2550 | gcc_assert (deps_init_id_data.where == DEPS_IN_RHS | |
2551 | || deps_init_id_data.where == DEPS_IN_INSN); | |
2552 | deps_init_id_data.where = DEPS_IN_INSN; | |
2553 | } | |
2554 | ||
2555 | /* Finish initializing insn data. */ | |
2556 | static void | |
2557 | deps_init_id_finish_insn (void) | |
2558 | { | |
2559 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2560 | ||
2561 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2562 | { | |
2563 | rtx lhs = IDATA_LHS (deps_init_id_data.id); | |
2564 | rtx rhs = IDATA_RHS (deps_init_id_data.id); | |
2565 | ||
2566 | if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) | |
2567 | || deps_init_id_data.force_use_p) | |
2568 | { | |
48e1416a | 2569 | /* This should be a USE, as we don't want to schedule its RHS |
e1ab7874 | 2570 | separately. However, we still want to have them recorded |
48e1416a | 2571 | for the purposes of substitution. That's why we don't |
e1ab7874 | 2572 | simply call downgrade_to_use () here. */ |
2573 | gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); | |
2574 | gcc_assert (!lhs == !rhs); | |
2575 | ||
2576 | IDATA_TYPE (deps_init_id_data.id) = USE; | |
2577 | } | |
2578 | } | |
2579 | ||
2580 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2581 | } | |
2582 | ||
2583 | /* This is dependence info used for initializing insn's data. */ | |
2584 | static struct sched_deps_info_def deps_init_id_sched_deps_info; | |
2585 | ||
2586 | /* This initializes most of the static part of the above structure. */ | |
2587 | static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = | |
2588 | { | |
2589 | NULL, | |
2590 | ||
2591 | deps_init_id_start_insn, | |
2592 | deps_init_id_finish_insn, | |
2593 | deps_init_id_start_lhs, | |
2594 | deps_init_id_finish_lhs, | |
2595 | deps_init_id_start_rhs, | |
2596 | deps_init_id_finish_rhs, | |
2597 | deps_init_id_note_reg_set, | |
2598 | deps_init_id_note_reg_clobber, | |
2599 | deps_init_id_note_reg_use, | |
2600 | NULL, /* note_mem_dep */ | |
2601 | NULL, /* note_dep */ | |
2602 | ||
2603 | 0, /* use_cselib */ | |
2604 | 0, /* use_deps_list */ | |
2605 | 0 /* generate_spec_deps */ | |
2606 | }; | |
2607 | ||
2608 | /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, | |
2609 | we don't actually need information about lhs and rhs. */ | |
2610 | static void | |
2611 | setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) | |
2612 | { | |
2613 | rtx pat = PATTERN (insn); | |
48e1416a | 2614 | |
971ba038 | 2615 | if (NONJUMP_INSN_P (insn) |
48e1416a | 2616 | && GET_CODE (pat) == SET |
e1ab7874 | 2617 | && !force_unique_p) |
2618 | { | |
2619 | IDATA_RHS (id) = SET_SRC (pat); | |
2620 | IDATA_LHS (id) = SET_DEST (pat); | |
2621 | } | |
2622 | else | |
2623 | IDATA_LHS (id) = IDATA_RHS (id) = NULL; | |
2624 | } | |
2625 | ||
2626 | /* Possibly downgrade INSN to USE. */ | |
2627 | static void | |
2628 | maybe_downgrade_id_to_use (idata_t id, insn_t insn) | |
2629 | { | |
2630 | bool must_be_use = false; | |
be10bb5a | 2631 | df_ref def; |
e1ab7874 | 2632 | rtx lhs = IDATA_LHS (id); |
2633 | rtx rhs = IDATA_RHS (id); | |
48e1416a | 2634 | |
e1ab7874 | 2635 | /* We downgrade only SETs. */ |
2636 | if (IDATA_TYPE (id) != SET) | |
2637 | return; | |
2638 | ||
2639 | if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) | |
2640 | { | |
2641 | IDATA_TYPE (id) = USE; | |
2642 | return; | |
2643 | } | |
48e1416a | 2644 | |
be10bb5a | 2645 | FOR_EACH_INSN_DEF (def, insn) |
e1ab7874 | 2646 | { |
e1ab7874 | 2647 | if (DF_REF_INSN (def) |
2648 | && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) | |
2649 | && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) | |
2650 | { | |
2651 | must_be_use = true; | |
2652 | break; | |
2653 | } | |
2654 | ||
2655 | #ifdef STACK_REGS | |
48e1416a | 2656 | /* Make instructions that set stack registers ineligible for | |
e1ab7874 | 2657 | renaming to avoid issues with find_used_regs. */ |
2658 | if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) | |
2659 | { | |
2660 | must_be_use = true; | |
2661 | break; | |
2662 | } | |
2663 | #endif | |
48e1416a | 2664 | } |
2665 | ||
e1ab7874 | 2666 | if (must_be_use) |
2667 | IDATA_TYPE (id) = USE; | |
2668 | } | |
2669 | ||
2670 | /* Setup register sets describing INSN in ID. */ | |
2671 | static void | |
2672 | setup_id_reg_sets (idata_t id, insn_t insn) | |
2673 | { | |
be10bb5a | 2674 | struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
2675 | df_ref def, use; | |
e1ab7874 | 2676 | regset tmp = get_clear_regset_from_pool (); |
48e1416a | 2677 | |
be10bb5a | 2678 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
e1ab7874 | 2679 | { |
e1ab7874 | 2680 | unsigned int regno = DF_REF_REGNO (def); |
48e1416a | 2681 | |
e1ab7874 | 2682 | /* Post modifies are treated like clobbers by sched-deps.c. */ |
2683 | if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER | |
2684 | | DF_REF_PRE_POST_MODIFY))) | |
2685 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); | |
2686 | else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) | |
2687 | { | |
2688 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); | |
2689 | ||
2690 | #ifdef STACK_REGS | |
48e1416a | 2691 | /* For stack registers, treat writes to them as writes |
e1ab7874 | 2692 | to the first one to be consistent with sched-deps.c. */ |
2693 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2694 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); | |
2695 | #endif | |
2696 | } | |
2697 | /* Mark special refs that generate read/write def pair. */ | |
2698 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) | |
2699 | || regno == STACK_POINTER_REGNUM) | |
2700 | bitmap_set_bit (tmp, regno); | |
2701 | } | |
48e1416a | 2702 | |
be10bb5a | 2703 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
e1ab7874 | 2704 | { |
e1ab7874 | 2705 | unsigned int regno = DF_REF_REGNO (use); |
2706 | ||
2707 | /* When these refs are met for the first time, skip them, as | |
2708 | these uses are just counterparts of some defs. */ | |
2709 | if (bitmap_bit_p (tmp, regno)) | |
2710 | bitmap_clear_bit (tmp, regno); | |
2711 | else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) | |
2712 | { | |
2713 | SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); | |
2714 | ||
2715 | #ifdef STACK_REGS | |
48e1416a | 2716 | /* For stack registers, treat reads from them as reads from |
e1ab7874 | 2717 | the first one to be consistent with sched-deps.c. */ |
2718 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2719 | SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); | |
2720 | #endif | |
2721 | } | |
2722 | } | |
2723 | ||
2724 | return_regset_to_pool (tmp); | |
2725 | } | |
2726 | ||
2727 | /* Initialize instruction data for INSN in ID using DF's data. */ | |
2728 | static void | |
2729 | init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) | |
2730 | { | |
2731 | gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); | |
2732 | ||
2733 | setup_id_for_insn (id, insn, force_unique_p); | |
2734 | setup_id_lhs_rhs (id, insn, force_unique_p); | |
2735 | ||
2736 | if (INSN_NOP_P (insn)) | |
2737 | return; | |
2738 | ||
2739 | maybe_downgrade_id_to_use (id, insn); | |
2740 | setup_id_reg_sets (id, insn); | |
2741 | } | |
2742 | ||
2743 | /* Initialize instruction data for INSN in ID. */ | |
2744 | static void | |
2745 | deps_init_id (idata_t id, insn_t insn, bool force_unique_p) | |
2746 | { | |
68e419a1 | 2747 | struct deps_desc _dc, *dc = &_dc; |
e1ab7874 | 2748 | |
2749 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2750 | deps_init_id_data.id = id; | |
2751 | deps_init_id_data.force_unique_p = force_unique_p; | |
2752 | deps_init_id_data.force_use_p = false; | |
2753 | ||
d9ab2038 | 2754 | init_deps (dc, false); |
e1ab7874 | 2755 | |
2756 | memcpy (&deps_init_id_sched_deps_info, | |
2757 | &const_deps_init_id_sched_deps_info, | |
2758 | sizeof (deps_init_id_sched_deps_info)); | |
2759 | ||
2760 | if (spec_info != NULL) | |
2761 | deps_init_id_sched_deps_info.generate_spec_deps = 1; | |
2762 | ||
2763 | sched_deps_info = &deps_init_id_sched_deps_info; | |
2764 | ||
2f3c9801 | 2765 | deps_analyze_insn (dc, insn); |
e1ab7874 | 2766 | |
2767 | free_deps (dc); | |
2768 | ||
2769 | deps_init_id_data.id = NULL; | |
2770 | } | |
2771 | ||
2772 | \f | |
52d7e28c | 2773 | struct sched_scan_info_def |
2774 | { | |
2775 | /* This hook notifies scheduler frontend to extend its internal per basic | |
2776 | block data structures. This hook should be called once before a series of | |
2777 | calls to bb_init (). */ | |
2778 | void (*extend_bb) (void); | |
2779 | ||
2780 | /* This hook makes the scheduler frontend initialize its internal data | |
2781 | structures for the passed basic block. */ | |
2782 | void (*init_bb) (basic_block); | |
2783 | ||
2784 | /* This hook notifies scheduler frontend to extend its internal per insn data | |
2785 | structures. This hook should be called once before a series of calls to | |
2786 | insn_init (). */ | |
2787 | void (*extend_insn) (void); | |
2788 | ||
2789 | /* This hook makes the scheduler frontend initialize its internal data | |
2790 | structures for the passed insn. */ | |
2f3c9801 | 2791 | void (*init_insn) (insn_t); |
52d7e28c | 2792 | }; |
2793 | ||
2794 | /* A driver function to add a set of basic blocks (BBS) to the | |
2795 | scheduling region. */ | |
2796 | static void | |
2797 | sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) | |
2798 | { | |
2799 | unsigned i; | |
2800 | basic_block bb; | |
2801 | ||
2802 | if (ssi->extend_bb) | |
2803 | ssi->extend_bb (); | |
2804 | ||
2805 | if (ssi->init_bb) | |
f1f41a6c | 2806 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2807 | ssi->init_bb (bb); |
2808 | ||
2809 | if (ssi->extend_insn) | |
2810 | ssi->extend_insn (); | |
2811 | ||
2812 | if (ssi->init_insn) | |
f1f41a6c | 2813 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2814 | { |
2f3c9801 | 2815 | rtx_insn *insn; |
52d7e28c | 2816 | |
2817 | FOR_BB_INSNS (bb, insn) | |
2818 | ssi->init_insn (insn); | |
2819 | } | |
2820 | } | |
e1ab7874 | 2821 | |
2822 | /* Implement hooks for collecting fundamental insn properties, such as whether | |
2823 | an insn is an ASM or is within a SCHED_GROUP. */ | |
2824 | ||
2825 | /* True when a "one-time init" data for INSN was already inited. */ | |
2826 | static bool | |
2827 | first_time_insn_init (insn_t insn) | |
2828 | { | |
2829 | return INSN_LIVE (insn) == NULL; | |
2830 | } | |
2831 | ||
2832 | /* Hash an entry in a transformed_insns hashtable. */ | |
2833 | static hashval_t | |
2834 | hash_transformed_insns (const void *p) | |
2835 | { | |
2836 | return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2837 | } | |
2838 | ||
2839 | /* Compare the entries in a transformed_insns hashtable. */ | |
2840 | static int | |
2841 | eq_transformed_insns (const void *p, const void *q) | |
2842 | { | |
04d073df | 2843 | rtx_insn *i1 = |
2844 | VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2845 | rtx_insn *i2 = | |
2846 | VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); | |
e1ab7874 | 2847 | |
2848 | if (INSN_UID (i1) == INSN_UID (i2)) | |
2849 | return 1; | |
2850 | return rtx_equal_p (PATTERN (i1), PATTERN (i2)); | |
2851 | } | |
2852 | ||
2853 | /* Free an entry in a transformed_insns hashtable. */ | |
2854 | static void | |
2855 | free_transformed_insns (void *p) | |
2856 | { | |
2857 | struct transformed_insns *pti = (struct transformed_insns *) p; | |
2858 | ||
2859 | vinsn_detach (pti->vinsn_old); | |
2860 | vinsn_detach (pti->vinsn_new); | |
2861 | free (pti); | |
2862 | } | |
2863 | ||
48e1416a | 2864 | /* Init the s_i_d data for INSN which should be inited just once, when |
e1ab7874 | 2865 | we first see the insn. */ |
2866 | static void | |
2867 | init_first_time_insn_data (insn_t insn) | |
2868 | { | |
2869 | /* This should not be set if this is the first time we init data for | |
2870 | insn. */ | |
2871 | gcc_assert (first_time_insn_init (insn)); | |
48e1416a | 2872 | |
e1ab7874 | 2873 | /* These are needed for nops too. */ |
2874 | INSN_LIVE (insn) = get_regset_from_pool (); | |
2875 | INSN_LIVE_VALID_P (insn) = false; | |
d9ab2038 | 2876 | |
e1ab7874 | 2877 | if (!INSN_NOP_P (insn)) |
2878 | { | |
2879 | INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); | |
2880 | INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); | |
48e1416a | 2881 | INSN_TRANSFORMED_INSNS (insn) |
e1ab7874 | 2882 | = htab_create (16, hash_transformed_insns, |
2883 | eq_transformed_insns, free_transformed_insns); | |
d9ab2038 | 2884 | init_deps (&INSN_DEPS_CONTEXT (insn), true); |
e1ab7874 | 2885 | } |
2886 | } | |
2887 | ||
48e1416a | 2888 | /* Free almost all above data for INSN that is scheduled already. |
d9ab2038 | 2889 | Used for extra-large basic blocks. */ |
2890 | void | |
2891 | free_data_for_scheduled_insn (insn_t insn) | |
e1ab7874 | 2892 | { |
2893 | gcc_assert (! first_time_insn_init (insn)); | |
48e1416a | 2894 | |
d9ab2038 | 2895 | if (! INSN_ANALYZED_DEPS (insn)) |
2896 | return; | |
48e1416a | 2897 | |
e1ab7874 | 2898 | BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); |
2899 | BITMAP_FREE (INSN_FOUND_DEPS (insn)); | |
2900 | htab_delete (INSN_TRANSFORMED_INSNS (insn)); | |
48e1416a | 2901 | |
e1ab7874 | 2902 | /* This is allocated only for bookkeeping insns. */ |
2903 | if (INSN_ORIGINATORS (insn)) | |
2904 | BITMAP_FREE (INSN_ORIGINATORS (insn)); | |
2905 | free_deps (&INSN_DEPS_CONTEXT (insn)); | |
d9ab2038 | 2906 | |
2907 | INSN_ANALYZED_DEPS (insn) = NULL; | |
2908 | ||
48e1416a | 2909 | /* Clear the readonly flag so we would ICE when trying to recalculate |
d9ab2038 | 2910 | the deps context (as we believe that it should not happen). */ |
2911 | (&INSN_DEPS_CONTEXT (insn))->readonly = 0; | |
2912 | } | |
2913 | ||
2914 | /* Free the same data as above for INSN. */ | |
2915 | static void | |
2916 | free_first_time_insn_data (insn_t insn) | |
2917 | { | |
2918 | gcc_assert (! first_time_insn_init (insn)); | |
2919 | ||
2920 | free_data_for_scheduled_insn (insn); | |
2921 | return_regset_to_pool (INSN_LIVE (insn)); | |
2922 | INSN_LIVE (insn) = NULL; | |
2923 | INSN_LIVE_VALID_P (insn) = false; | |
e1ab7874 | 2924 | } |
2925 | ||
2926 | /* Initialize region-scope data structures for basic blocks. */ | |
2927 | static void | |
2928 | init_global_and_expr_for_bb (basic_block bb) | |
2929 | { | |
2930 | if (sel_bb_empty_p (bb)) | |
2931 | return; | |
2932 | ||
2933 | invalidate_av_set (bb); | |
2934 | } | |
2935 | ||
2936 | /* Data for global dependency analysis (to initialize CANT_MOVE and | |
2937 | SCHED_GROUP_P). */ | |
2938 | static struct | |
2939 | { | |
2940 | /* Previous insn. */ | |
2941 | insn_t prev_insn; | |
2942 | } init_global_data; | |
2943 | ||
2944 | /* Determine if INSN is in the sched_group, is an asm or should not be | |
2945 | cloned. After that initialize its expr. */ | |
2946 | static void | |
2947 | init_global_and_expr_for_insn (insn_t insn) | |
2948 | { | |
2949 | if (LABEL_P (insn)) | |
2950 | return; | |
2951 | ||
2952 | if (NOTE_INSN_BASIC_BLOCK_P (insn)) | |
2953 | { | |
2f3c9801 | 2954 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2955 | return; |
2956 | } | |
2957 | ||
2958 | gcc_assert (INSN_P (insn)); | |
2959 | ||
2960 | if (SCHED_GROUP_P (insn)) | |
2961 | /* Setup a sched_group. */ | |
2962 | { | |
2963 | insn_t prev_insn = init_global_data.prev_insn; | |
2964 | ||
2965 | if (prev_insn) | |
2966 | INSN_SCHED_NEXT (prev_insn) = insn; | |
2967 | ||
2968 | init_global_data.prev_insn = insn; | |
2969 | } | |
2970 | else | |
2f3c9801 | 2971 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2972 | |
2973 | if (GET_CODE (PATTERN (insn)) == ASM_INPUT | |
2974 | || asm_noperands (PATTERN (insn)) >= 0) | |
2975 | /* Mark INSN as an asm. */ | |
2976 | INSN_ASM_P (insn) = true; | |
2977 | ||
2978 | { | |
2979 | bool force_unique_p; | |
2980 | ds_t spec_done_ds; | |
2981 | ||
982b0787 | 2982 | /* Certain instructions cannot be cloned, and frame related insns and |
2983 | the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of | |
2984 | their block. */ | |
2985 | if (prologue_epilogue_contains (insn)) | |
2986 | { | |
2987 | if (RTX_FRAME_RELATED_P (insn)) | |
2988 | CANT_MOVE (insn) = 1; | |
2989 | else | |
2990 | { | |
2991 | rtx note; | |
2992 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
2993 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE | |
2994 | && ((enum insn_note) INTVAL (XEXP (note, 0)) | |
2995 | == NOTE_INSN_EPILOGUE_BEG)) | |
2996 | { | |
2997 | CANT_MOVE (insn) = 1; | |
2998 | break; | |
2999 | } | |
3000 | } | |
3001 | force_unique_p = true; | |
3002 | } | |
e1ab7874 | 3003 | else |
982b0787 | 3004 | if (CANT_MOVE (insn) |
3005 | || INSN_ASM_P (insn) | |
3006 | || SCHED_GROUP_P (insn) | |
a8d6ade3 | 3007 | || CALL_P (insn) |
982b0787 | 3008 | /* Exception handling insns are always unique. */ |
3009 | || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) | |
3010 | /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */ | |
13434dcb | 3011 | || control_flow_insn_p (insn) |
3012 | || volatile_insn_p (PATTERN (insn)) | |
3013 | || (targetm.cannot_copy_insn_p | |
3014 | && targetm.cannot_copy_insn_p (insn))) | |
982b0787 | 3015 | force_unique_p = true; |
3016 | else | |
3017 | force_unique_p = false; | |
e1ab7874 | 3018 | |
3019 | if (targetm.sched.get_insn_spec_ds) | |
3020 | { | |
3021 | spec_done_ds = targetm.sched.get_insn_spec_ds (insn); | |
3022 | spec_done_ds = ds_get_max_dep_weak (spec_done_ds); | |
3023 | } | |
3024 | else | |
3025 | spec_done_ds = 0; | |
3026 | ||
3027 | /* Initialize INSN's expr. */ | |
3028 | init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, | |
3029 | REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), | |
1e094109 | 3030 | spec_done_ds, 0, 0, vNULL, true, |
f1f41a6c | 3031 | false, false, false, CANT_MOVE (insn)); |
e1ab7874 | 3032 | } |
3033 | ||
3034 | init_first_time_insn_data (insn); | |
3035 | } | |
3036 | ||
3037 | /* Scan the region and initialize instruction data for basic blocks BBS. */ | |
3038 | void | |
3039 | sel_init_global_and_expr (bb_vec_t bbs) | |
3040 | { | |
3041 | /* ??? It would be nice to implement push / pop scheme for sched_infos. */ | |
3042 | const struct sched_scan_info_def ssi = | |
3043 | { | |
3044 | NULL, /* extend_bb */ | |
3045 | init_global_and_expr_for_bb, /* init_bb */ | |
3046 | extend_insn_data, /* extend_insn */ | |
3047 | init_global_and_expr_for_insn /* init_insn */ | |
3048 | }; | |
48e1416a | 3049 | |
52d7e28c | 3050 | sched_scan (&ssi, bbs); |
e1ab7874 | 3051 | } |
3052 | ||
3053 | /* Finalize region-scope data structures for basic blocks. */ | |
3054 | static void | |
3055 | finish_global_and_expr_for_bb (basic_block bb) | |
3056 | { | |
3057 | av_set_clear (&BB_AV_SET (bb)); | |
3058 | BB_AV_LEVEL (bb) = 0; | |
3059 | } | |
3060 | ||
3061 | /* Finalize INSN's data. */ | |
3062 | static void | |
3063 | finish_global_and_expr_insn (insn_t insn) | |
3064 | { | |
3065 | if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) | |
3066 | return; | |
3067 | ||
3068 | gcc_assert (INSN_P (insn)); | |
3069 | ||
3070 | if (INSN_LUID (insn) > 0) | |
3071 | { | |
3072 | free_first_time_insn_data (insn); | |
3073 | INSN_WS_LEVEL (insn) = 0; | |
3074 | CANT_MOVE (insn) = 0; | |
48e1416a | 3075 | |
3076 | /* We can no longer assert this, as vinsns of this insn could be | |
3077 | easily live in other insn's caches. This should be changed to | |
e1ab7874 | 3078 | a counter-like approach among all vinsns. */ |
3079 | gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); | |
3080 | clear_expr (INSN_EXPR (insn)); | |
3081 | } | |
3082 | } | |
3083 | ||
3084 | /* Finalize per instruction data for the whole region. */ | |
3085 | void | |
3086 | sel_finish_global_and_expr (void) | |
3087 | { | |
3088 | { | |
3089 | bb_vec_t bbs; | |
3090 | int i; | |
3091 | ||
f1f41a6c | 3092 | bbs.create (current_nr_blocks); |
e1ab7874 | 3093 | |
3094 | for (i = 0; i < current_nr_blocks; i++) | |
f5a6b05f | 3095 | bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
e1ab7874 | 3096 | |
3097 | /* Clear AV_SETs and INSN_EXPRs. */ | |
3098 | { | |
3099 | const struct sched_scan_info_def ssi = | |
3100 | { | |
3101 | NULL, /* extend_bb */ | |
3102 | finish_global_and_expr_for_bb, /* init_bb */ | |
3103 | NULL, /* extend_insn */ | |
3104 | finish_global_and_expr_insn /* init_insn */ | |
3105 | }; | |
3106 | ||
52d7e28c | 3107 | sched_scan (&ssi, bbs); |
e1ab7874 | 3108 | } |
3109 | ||
f1f41a6c | 3110 | bbs.release (); |
e1ab7874 | 3111 | } |
3112 | ||
3113 | finish_insns (); | |
3114 | } | |
3115 | \f | |
3116 | ||
48e1416a | 3117 | /* In the below hooks, we merely calculate whether or not a dependence |
3118 | exists, and in what part of the insn. However, we will need more data | |
e1ab7874 | 3119 | when we start caching dependence requests. */ | |
3120 | ||
3121 | /* Container to hold information for dependency analysis. */ | |
3122 | static struct | |
3123 | { | |
3124 | deps_t dc; | |
3125 | ||
3126 | /* A variable to track which part of rtx we are scanning in | |
3127 | sched-deps.c: sched_analyze_insn (). */ | |
3128 | deps_where_t where; | |
3129 | ||
3130 | /* Current producer. */ | |
3131 | insn_t pro; | |
3132 | ||
3133 | /* Current consumer. */ | |
3134 | vinsn_t con; | |
3135 | ||
3136 | /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence. | |
3137 | X is from { INSN, LHS, RHS }. */ | |
3138 | ds_t has_dep_p[DEPS_IN_NOWHERE]; | |
3139 | } has_dependence_data; | |
3140 | ||
3141 | /* Start analyzing dependencies of INSN. */ | |
3142 | static void | |
3143 | has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) | |
3144 | { | |
3145 | gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); | |
3146 | ||
3147 | has_dependence_data.where = DEPS_IN_INSN; | |
3148 | } | |
3149 | ||
3150 | /* Finish analyzing dependencies of an insn. */ | |
3151 | static void | |
3152 | has_dependence_finish_insn (void) | |
3153 | { | |
3154 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3155 | ||
3156 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3157 | } | |
3158 | ||
3159 | /* Start analyzing dependencies of LHS. */ | |
3160 | static void | |
3161 | has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) | |
3162 | { | |
3163 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3164 | ||
3165 | if (VINSN_LHS (has_dependence_data.con) != NULL) | |
3166 | has_dependence_data.where = DEPS_IN_LHS; | |
3167 | } | |
3168 | ||
3169 | /* Finish analyzing dependencies of an lhs. */ | |
3170 | static void | |
3171 | has_dependence_finish_lhs (void) | |
3172 | { | |
3173 | has_dependence_data.where = DEPS_IN_INSN; | |
3174 | } | |
3175 | ||
3176 | /* Start analyzing dependencies of RHS. */ | |
3177 | static void | |
3178 | has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) | |
3179 | { | |
3180 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3181 | ||
3182 | if (VINSN_RHS (has_dependence_data.con) != NULL) | |
3183 | has_dependence_data.where = DEPS_IN_RHS; | |
3184 | } | |
3185 | ||
3186 | /* Finish analyzing dependencies of an rhs. */ | |
3187 | static void | |
3188 | has_dependence_finish_rhs (void) | |
3189 | { | |
3190 | gcc_assert (has_dependence_data.where == DEPS_IN_RHS | |
3191 | || has_dependence_data.where == DEPS_IN_INSN); | |
3192 | ||
3193 | has_dependence_data.where = DEPS_IN_INSN; | |
3194 | } | |
3195 | ||
3196 | /* Note a set of REGNO. */ | |
3197 | static void | |
3198 | has_dependence_note_reg_set (int regno) | |
3199 | { | |
3200 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3201 | ||
3202 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3203 | VINSN_INSN_RTX | |
3204 | (has_dependence_data.con))) | |
3205 | { | |
3206 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3207 | ||
3208 | if (reg_last->sets != NULL | |
3209 | || reg_last->clobbers != NULL) | |
3210 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
3211 | ||
a9bfd373 | 3212 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3213 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3214 | } | |
3215 | } | |
3216 | ||
3217 | /* Note a clobber of REGNO. */ | |
3218 | static void | |
3219 | has_dependence_note_reg_clobber (int regno) | |
3220 | { | |
3221 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3222 | ||
3223 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3224 | VINSN_INSN_RTX | |
3225 | (has_dependence_data.con))) | |
3226 | { | |
3227 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3228 | ||
3229 | if (reg_last->sets) | |
3230 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
48e1416a | 3231 | |
a9bfd373 | 3232 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3233 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3234 | } | |
3235 | } | |
3236 | ||
3237 | /* Note a use of REGNO. */ | |
3238 | static void | |
3239 | has_dependence_note_reg_use (int regno) | |
3240 | { | |
3241 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3242 | ||
3243 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3244 | VINSN_INSN_RTX | |
3245 | (has_dependence_data.con))) | |
3246 | { | |
3247 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3248 | ||
3249 | if (reg_last->sets) | |
3250 | *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; | |
3251 | ||
a9bfd373 | 3252 | if (reg_last->clobbers || reg_last->implicit_sets) |
e1ab7874 | 3253 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3254 | ||
b0691607 | 3255 | /* Merge BE_IN_SPEC bits into *DSP when the dependency producer |
3256 | is actually a check insn. We need to do this for any register | |
3257 | read-read dependency with the check unless we track properly | |
3258 | all registers written by BE_IN_SPEC-speculated insns, as | |
3259 | we don't have explicit dependence lists. See PR 53975. */ | |
e1ab7874 | 3260 | if (reg_last->uses) |
3261 | { | |
3262 | ds_t pro_spec_checked_ds; | |
3263 | ||
3264 | pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); | |
3265 | pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); | |
3266 | ||
b0691607 | 3267 | if (pro_spec_checked_ds != 0) |
e1ab7874 | 3268 | *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, |
3269 | NULL_RTX, NULL_RTX); | |
3270 | } | |
3271 | } | |
3272 | } | |
3273 | ||
3274 | /* Note a memory dependence. */ | |
3275 | static void | |
3276 | has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, | |
3277 | rtx pending_mem ATTRIBUTE_UNUSED, | |
3278 | insn_t pending_insn ATTRIBUTE_UNUSED, | |
3279 | ds_t ds ATTRIBUTE_UNUSED) | |
3280 | { | |
3281 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3282 | VINSN_INSN_RTX (has_dependence_data.con))) | |
3283 | { | |
3284 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3285 | ||
3286 | *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); | |
3287 | } | |
3288 | } | |
3289 | ||
3290 | /* Note a dependence. */ | |
3291 | static void | |
3292 | has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED, | |
3293 | ds_t ds ATTRIBUTE_UNUSED) | |
3294 | { | |
3295 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3296 | VINSN_INSN_RTX (has_dependence_data.con))) | |
3297 | { | |
3298 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3299 | ||
3300 | *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); | |
3301 | } | |
3302 | } | |
3303 | ||
3304 | /* Mark the insn as having a hard dependence that prevents speculation. */ | |
3305 | void | |
3306 | sel_mark_hard_insn (rtx insn) | |
3307 | { | |
3308 | int i; | |
3309 | ||
3310 | /* Only work when we're in has_dependence_p mode. | |
3311 | ??? This is a hack, this should actually be a hook. */ | |
3312 | if (!has_dependence_data.dc || !has_dependence_data.pro) | |
3313 | return; | |
3314 | ||
3315 | gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); | |
3316 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3317 | ||
3318 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3319 | has_dependence_data.has_dep_p[i] &= ~SPECULATIVE; | |
3320 | } | |
3321 | ||
3322 | /* This structure holds the hooks for the dependency analysis used when | |
3323 | actually processing dependencies in the scheduler. */ | |
3324 | static struct sched_deps_info_def has_dependence_sched_deps_info; | |
3325 | ||
3326 | /* This initializes most of the fields of the above structure. */ | |
3327 | static const struct sched_deps_info_def const_has_dependence_sched_deps_info = | |
3328 | { | |
3329 | NULL, | |
3330 | ||
3331 | has_dependence_start_insn, | |
3332 | has_dependence_finish_insn, | |
3333 | has_dependence_start_lhs, | |
3334 | has_dependence_finish_lhs, | |
3335 | has_dependence_start_rhs, | |
3336 | has_dependence_finish_rhs, | |
3337 | has_dependence_note_reg_set, | |
3338 | has_dependence_note_reg_clobber, | |
3339 | has_dependence_note_reg_use, | |
3340 | has_dependence_note_mem_dep, | |
3341 | has_dependence_note_dep, | |
3342 | ||
3343 | 0, /* use_cselib */ | |
3344 | 0, /* use_deps_list */ | |
3345 | 0 /* generate_spec_deps */ | |
3346 | }; | |
3347 | ||
3348 | /* Initialize has_dependence_sched_deps_info with extra spec field. */ | |
3349 | static void | |
3350 | setup_has_dependence_sched_deps_info (void) | |
3351 | { | |
3352 | memcpy (&has_dependence_sched_deps_info, | |
3353 | &const_has_dependence_sched_deps_info, | |
3354 | sizeof (has_dependence_sched_deps_info)); | |
3355 | ||
3356 | if (spec_info != NULL) | |
3357 | has_dependence_sched_deps_info.generate_spec_deps = 1; | |
3358 | ||
3359 | sched_deps_info = &has_dependence_sched_deps_info; | |
3360 | } | |
3361 | ||
3362 | /* Remove all dependences found and recorded in has_dependence_data array. */ | |
3363 | void | |
3364 | sel_clear_has_dependence (void) | |
3365 | { | |
3366 | int i; | |
3367 | ||
3368 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3369 | has_dependence_data.has_dep_p[i] = 0; | |
3370 | } | |
3371 | ||
3372 | /* Return nonzero if EXPR is dependent upon PRED. Return the pointer | |
3373 | to the dependence information array in HAS_DEP_PP. */ | |
3374 | ds_t | |
3375 | has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp) | |
3376 | { | |
3377 | int i; | |
3378 | ds_t ds; | |
68e419a1 | 3379 | struct deps_desc *dc; |
e1ab7874 | 3380 | |
3381 | if (INSN_SIMPLEJUMP_P (pred)) | |
3382 | /* Unconditional jump is just a transfer of control flow. | |
3383 | Ignore it. */ | |
3384 | return false; | |
3385 | ||
3386 | dc = &INSN_DEPS_CONTEXT (pred); | |
d9ab2038 | 3387 | |
3388 | /* We init this field lazily. */ | |
3389 | if (dc->reg_last == NULL) | |
3390 | init_deps_reg_last (dc); | |
48e1416a | 3391 | |
e1ab7874 | 3392 | if (!dc->readonly) |
3393 | { | |
3394 | has_dependence_data.pro = NULL; | |
3395 | /* Initialize empty dep context with information about PRED. */ | |
3396 | advance_deps_context (dc, pred); | |
3397 | dc->readonly = 1; | |
3398 | } | |
3399 | ||
3400 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3401 | has_dependence_data.pro = pred; | |
3402 | has_dependence_data.con = EXPR_VINSN (expr); | |
3403 | has_dependence_data.dc = dc; | |
3404 | ||
3405 | sel_clear_has_dependence (); | |
3406 | ||
3407 | /* Now catch all dependencies that would be generated between PRED and | |
3408 | INSN. */ | |
3409 | setup_has_dependence_sched_deps_info (); | |
3410 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3411 | has_dependence_data.dc = NULL; | |
3412 | ||
3413 | /* When a barrier was found, set DEPS_IN_INSN bits. */ | |
3414 | if (dc->last_reg_pending_barrier == TRUE_BARRIER) | |
3415 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE; | |
3416 | else if (dc->last_reg_pending_barrier == MOVE_BARRIER) | |
3417 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
3418 | ||
3419 | /* Do not allow stores to memory to move through checks. Currently | |
3420 | we don't move this to sched-deps.c as the check doesn't have | |
48e1416a | 3421 | obvious places to which this dependence can be attached. |
e1ab7874 | 3422 | FIXME: this should go to a hook. */ |
3423 | if (EXPR_LHS (expr) | |
3424 | && MEM_P (EXPR_LHS (expr)) | |
3425 | && sel_insn_is_speculation_check (pred)) | |
3426 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
48e1416a | 3427 | |
e1ab7874 | 3428 | *has_dep_pp = has_dependence_data.has_dep_p; |
3429 | ds = 0; | |
3430 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3431 | ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i], | |
3432 | NULL_RTX, NULL_RTX); | |
3433 | ||
3434 | return ds; | |
3435 | } | |
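A minimal usage sketch of has_dependence_p (hypothetical caller, not from the original sources): the returned ds_t is the merge of all parts, while the array returned through HAS_DEP_PP lets the caller distinguish dependences on the whole insn, its LHS, and its RHS.

/* Hypothetical caller, shown only for illustration.  */
static bool
example_hard_dependence_p (expr_t expr, insn_t pred)
{
  ds_t *has_dep_p;
  ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

  if (full_ds == 0)
    return false;  /* No dependence between PRED and EXPR at all.  */

  /* A dependence recorded for the whole insn or for the LHS cannot be
     avoided by substituting the RHS, so treat it as hard.  */
  return has_dep_p[DEPS_IN_INSN] != 0 || has_dep_p[DEPS_IN_LHS] != 0;
}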
3436 | \f | |
3437 | ||
48e1416a | 3438 | /* Dependence hooks implementation that checks dependence latency constraints |
3439 | on the insns being scheduled. The entry point for these routines is | |
3440 | the tick_check_p predicate. */ | |
e1ab7874 | 3441 | |
3442 | static struct | |
3443 | { | |
3444 | /* An expr we are currently checking. */ | |
3445 | expr_t expr; | |
3446 | ||
3447 | /* A minimal cycle for its scheduling. */ | |
3448 | int cycle; | |
3449 | ||
3450 | /* Whether we have seen a true dependence while checking. */ | |
3451 | bool seen_true_dep_p; | |
3452 | } tick_check_data; | |
3453 | ||
3454 | /* Update minimal scheduling cycle for tick_check_insn given that it depends | |
3455 | on PRO with status DS and weight DW. */ | |
3456 | static void | |
3457 | tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw) | |
3458 | { | |
3459 | expr_t con_expr = tick_check_data.expr; | |
3460 | insn_t con_insn = EXPR_INSN_RTX (con_expr); | |
3461 | ||
3462 | if (con_insn != pro_insn) | |
3463 | { | |
3464 | enum reg_note dt; | |
3465 | int tick; | |
3466 | ||
3467 | if (/* PROducer was removed from above due to pipelining. */ | |
3468 | !INSN_IN_STREAM_P (pro_insn) | |
3469 | /* Or PROducer was originally on the next iteration regarding the | |
3470 | CONsumer. */ | |
3471 | || (INSN_SCHED_TIMES (pro_insn) | |
3472 | - EXPR_SCHED_TIMES (con_expr)) > 1) | |
3473 | /* Don't count this dependence. */ | |
3474 | return; | |
3475 | ||
3476 | dt = ds_to_dt (ds); | |
3477 | if (dt == REG_DEP_TRUE) | |
3478 | tick_check_data.seen_true_dep_p = true; | |
3479 | ||
3480 | gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0); | |
3481 | ||
3482 | { | |
3483 | dep_def _dep, *dep = &_dep; | |
3484 | ||
3485 | init_dep (dep, pro_insn, con_insn, dt); | |
3486 | ||
3487 | tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw); | |
3488 | } | |
3489 | ||
3490 | /* When there are several kinds of dependencies between pro and con, | |
3491 | only REG_DEP_TRUE should be taken into account. */ | |
3492 | if (tick > tick_check_data.cycle | |
3493 | && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p)) | |
3494 | tick_check_data.cycle = tick; | |
3495 | } | |
3496 | } | |
3497 | ||
3498 | /* An implementation of note_dep hook. */ | |
3499 | static void | |
3500 | tick_check_note_dep (insn_t pro, ds_t ds) | |
3501 | { | |
3502 | tick_check_dep_with_dw (pro, ds, 0); | |
3503 | } | |
3504 | ||
3505 | /* An implementation of note_mem_dep hook. */ | |
3506 | static void | |
3507 | tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) | |
3508 | { | |
3509 | dw_t dw; | |
3510 | ||
3511 | dw = (ds_to_dt (ds) == REG_DEP_TRUE | |
3512 | ? estimate_dep_weak (mem1, mem2) | |
3513 | : 0); | |
3514 | ||
3515 | tick_check_dep_with_dw (pro, ds, dw); | |
3516 | } | |
3517 | ||
3518 | /* This structure contains hooks for dependence analysis used when determining | |
3519 | whether an insn is ready for scheduling. */ | |
3520 | static struct sched_deps_info_def tick_check_sched_deps_info = | |
3521 | { | |
3522 | NULL, | |
3523 | ||
3524 | NULL, | |
3525 | NULL, | |
3526 | NULL, | |
3527 | NULL, | |
3528 | NULL, | |
3529 | NULL, | |
3530 | haifa_note_reg_set, | |
3531 | haifa_note_reg_clobber, | |
3532 | haifa_note_reg_use, | |
3533 | tick_check_note_mem_dep, | |
3534 | tick_check_note_dep, | |
3535 | ||
3536 | 0, 0, 0 | |
3537 | }; | |
3538 | ||
3539 | /* Estimate number of cycles from the current cycle of FENCE until EXPR can be | |
3540 | scheduled. Return 0 if all data from producers in DC is ready. */ | |
3541 | int | |
3542 | tick_check_p (expr_t expr, deps_t dc, fence_t fence) | |
3543 | { | |
3544 | int cycles_left; | |
3545 | /* Initialize variables. */ | |
3546 | tick_check_data.expr = expr; | |
3547 | tick_check_data.cycle = 0; | |
3548 | tick_check_data.seen_true_dep_p = false; | |
3549 | sched_deps_info = &tick_check_sched_deps_info; | |
48e1416a | 3550 | |
e1ab7874 | 3551 | gcc_assert (!dc->readonly); |
3552 | dc->readonly = 1; | |
3553 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3554 | dc->readonly = 0; | |
3555 | ||
3556 | cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); | |
3557 | ||
3558 | return cycles_left >= 0 ? cycles_left : 0; | |
3559 | } | |
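A usage sketch of tick_check_p (hypothetical caller; FENCE_DC is assumed here to be the accessor for the dependence context accumulated on a fence): the return value is the number of cycles still to wait, so zero means the expression's producers are ready.

/* Illustration only: is EXPR ready to issue on FENCE this cycle?  */
static bool
example_expr_ready_p (expr_t expr, fence_t fence)
{
  /* FENCE_DC (fence) is assumed to yield the fence's deps_t context.  */
  return tick_check_p (expr, FENCE_DC (fence), fence) == 0;
}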
3560 | \f | |
3561 | ||
3562 | /* Functions to work with insns. */ | |
3563 | ||
3564 | /* Returns true if LHS of INSN is the same as DEST of an insn | |
3565 | being moved. */ | |
3566 | bool | |
3567 | lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) | |
3568 | { | |
3569 | rtx lhs = INSN_LHS (insn); | |
3570 | ||
3571 | if (lhs == NULL || dest == NULL) | |
3572 | return false; | |
48e1416a | 3573 | |
e1ab7874 | 3574 | return rtx_equal_p (lhs, dest); |
3575 | } | |
3576 | ||
3577 | /* Return s_i_d entry of INSN. Callable from debugger. */ | |
3578 | sel_insn_data_def | |
3579 | insn_sid (insn_t insn) | |
3580 | { | |
3581 | return *SID (insn); | |
3582 | } | |
3583 | ||
3584 | /* True when INSN is a speculative check. We can tell this by looking | |
3585 | at the data structures of the selective scheduler, not by examining | |
3586 | the pattern. */ | |
3587 | bool | |
3588 | sel_insn_is_speculation_check (rtx insn) | |
3589 | { | |
f1f41a6c | 3590 | return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn); |
e1ab7874 | 3591 | } |
3592 | ||
48e1416a | 3593 | /* Extracts machine mode MODE and destination location DST_LOC |
e1ab7874 | 3594 | for given INSN. */ |
3595 | void | |
3754d046 | 3596 | get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode) |
e1ab7874 | 3597 | { |
3598 | rtx pat = PATTERN (insn); | |
3599 | ||
3600 | gcc_assert (dst_loc); | |
3601 | gcc_assert (GET_CODE (pat) == SET); | |
3602 | ||
3603 | *dst_loc = SET_DEST (pat); | |
3604 | ||
3605 | gcc_assert (*dst_loc); | |
3606 | gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); | |
3607 | ||
3608 | if (mode) | |
3609 | *mode = GET_MODE (*dst_loc); | |
3610 | } | |
3611 | ||
48e1416a | 3612 | /* Returns true when moving through JUMP will result in bookkeeping |
e1ab7874 | 3613 | creation. */ |
3614 | bool | |
3615 | bookkeeping_can_be_created_if_moved_through_p (insn_t jump) | |
3616 | { | |
3617 | insn_t succ; | |
3618 | succ_iterator si; | |
3619 | ||
3620 | FOR_EACH_SUCC (succ, si, jump) | |
3621 | if (sel_num_cfg_preds_gt_1 (succ)) | |
3622 | return true; | |
3623 | ||
3624 | return false; | |
3625 | } | |
3626 | ||
3627 | /* Return 'true' if INSN is the only one in its basic block. */ | |
3628 | static bool | |
3629 | insn_is_the_only_one_in_bb_p (insn_t insn) | |
3630 | { | |
3631 | return sel_bb_head_p (insn) && sel_bb_end_p (insn); | |
3632 | } | |
3633 | ||
3634 | #ifdef ENABLE_CHECKING | |
48e1416a | 3635 | /* Check that the region we're scheduling still has at most one |
e1ab7874 | 3636 | backedge. */ |
3637 | static void | |
3638 | verify_backedges (void) | |
3639 | { | |
3640 | if (pipelining_p) | |
3641 | { | |
3642 | int i, n = 0; | |
3643 | edge e; | |
3644 | edge_iterator ei; | |
48e1416a | 3645 | |
e1ab7874 | 3646 | for (i = 0; i < current_nr_blocks; i++) |
f5a6b05f | 3647 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs) |
e1ab7874 | 3648 | if (in_current_region_p (e->dest) |
3649 | && BLOCK_TO_BB (e->dest->index) < i) | |
3650 | n++; | |
48e1416a | 3651 | |
e1ab7874 | 3652 | gcc_assert (n <= 1); |
3653 | } | |
3654 | } | |
3655 | #endif | |
3656 | \f | |
3657 | ||
3658 | /* Functions to work with control flow. */ | |
3659 | ||
93919afc | 3660 | /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks |
3661 | are sorted in topological order (it might have been invalidated by | |
3662 | redirecting an edge). */ | |
3663 | static void | |
3664 | sel_recompute_toporder (void) | |
3665 | { | |
3666 | int i, n, rgn; | |
3667 | int *postorder, n_blocks; | |
3668 | ||
a28770e1 | 3669 | postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun)); |
93919afc | 3670 | n_blocks = post_order_compute (postorder, false, false); |
3671 | ||
3672 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
3673 | for (n = 0, i = n_blocks - 1; i >= 0; i--) | |
3674 | if (CONTAINING_RGN (postorder[i]) == rgn) | |
3675 | { | |
3676 | BLOCK_TO_BB (postorder[i]) = n; | |
3677 | BB_TO_BLOCK (n) = postorder[i]; | |
3678 | n++; | |
3679 | } | |
3680 | ||
3681 | /* Assert that we updated info for all blocks. We may miss some blocks if | |
3682 | this function is called when redirecting an edge made a block | |
3683 | unreachable, but that block is not deleted yet. */ | |
3684 | gcc_assert (n == RGN_NR_BLOCKS (rgn)); | |
3685 | } | |
3686 | ||
e1ab7874 | 3687 | /* Tidy the possibly empty block BB. */ |
81d1ad0f | 3688 | static bool |
6f0e7980 | 3689 | maybe_tidy_empty_bb (basic_block bb) |
e1ab7874 | 3690 | { |
ef4cf572 | 3691 | basic_block succ_bb, pred_bb, note_bb; |
f1f41a6c | 3692 | vec<basic_block> dom_bbs; |
df6266b9 | 3693 | edge e; |
3694 | edge_iterator ei; | |
e1ab7874 | 3695 | bool rescan_p; |
3696 | ||
3697 | /* Keep empty bb only if this block immediately precedes EXIT and | |
61e213e2 | 3698 | has an incoming non-fallthrough edge, or it has no predecessors or |
3699 | successors. Otherwise remove it. */ | |
9845d120 | 3700 | if (!sel_bb_empty_p (bb) |
48e1416a | 3701 | || (single_succ_p (bb) |
34154e27 | 3702 | && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun) |
48e1416a | 3703 | && (!single_pred_p (bb) |
61e213e2 | 3704 | || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))) |
3705 | || EDGE_COUNT (bb->preds) == 0 | |
3706 | || EDGE_COUNT (bb->succs) == 0) | |
e1ab7874 | 3707 | return false; |
3708 | ||
df6266b9 | 3709 | /* Do not attempt to redirect complex edges. */ |
3710 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3711 | if (e->flags & EDGE_COMPLEX) | |
3712 | return false; | |
a62f9dca | 3713 | else if (e->flags & EDGE_FALLTHRU) |
3714 | { | |
3715 | rtx note; | |
3716 | /* If prev bb ends with asm goto, see if any of the | |
3717 | ASM_OPERANDS_LABELs don't point to the fallthru | |
3718 | label. Do not attempt to redirect it in that case. */ | |
3719 | if (JUMP_P (BB_END (e->src)) | |
3720 | && (note = extract_asm_operands (PATTERN (BB_END (e->src))))) | |
3721 | { | |
3722 | int i, n = ASM_OPERANDS_LABEL_LENGTH (note); | |
3723 | ||
3724 | for (i = 0; i < n; ++i) | |
3725 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb)) | |
3726 | return false; | |
3727 | } | |
3728 | } | |
df6266b9 | 3729 | |
e1ab7874 | 3730 | free_data_sets (bb); |
3731 | ||
3732 | /* Do not delete BB if it has more than one successor. | |
3733 | That can occur when we are moving a jump. */ | |
3734 | if (!single_succ_p (bb)) | |
3735 | { | |
3736 | gcc_assert (can_merge_blocks_p (bb->prev_bb, bb)); | |
3737 | sel_merge_blocks (bb->prev_bb, bb); | |
3738 | return true; | |
3739 | } | |
3740 | ||
3741 | succ_bb = single_succ (bb); | |
3742 | rescan_p = true; | |
3743 | pred_bb = NULL; | |
f1f41a6c | 3744 | dom_bbs.create (0); |
e1ab7874 | 3745 | |
ef4cf572 | 3746 | /* Save a pred/succ from the current region to attach the notes to. */ |
3747 | note_bb = NULL; | |
3748 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3749 | if (in_current_region_p (e->src)) | |
3750 | { | |
3751 | note_bb = e->src; | |
3752 | break; | |
3753 | } | |
3754 | if (note_bb == NULL) | |
3755 | note_bb = succ_bb; | |
3756 | ||
e1ab7874 | 3757 | /* Redirect all non-fallthru edges to the next bb. */ |
3758 | while (rescan_p) | |
3759 | { | |
e1ab7874 | 3760 | rescan_p = false; |
3761 | ||
3762 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3763 | { | |
3764 | pred_bb = e->src; | |
3765 | ||
3766 | if (!(e->flags & EDGE_FALLTHRU)) | |
3767 | { | |
6f0e7980 | 3768 | /* We cannot invalidate the computed topological order by moving |
1a5dbaab | 3769 | the edge destination block (E->SUCC) along a fallthru edge. |
3770 | ||
3771 | We will update dominators here only when we get | |
3772 | an unreachable block when redirecting, otherwise | |
3773 | sel_redirect_edge_and_branch will take care of it. */ | |
3774 | if (e->dest != bb | |
3775 | && single_pred_p (e->dest)) | |
f1f41a6c | 3776 | dom_bbs.safe_push (e->dest); |
6f0e7980 | 3777 | sel_redirect_edge_and_branch (e, succ_bb); |
e1ab7874 | 3778 | rescan_p = true; |
3779 | break; | |
3780 | } | |
6f0e7980 | 3781 | /* If the edge is fallthru, but PRED_BB ends in a conditional jump |
3782 | to BB (so there is no non-fallthru edge from PRED_BB to BB), we | |
3783 | still have to adjust it. */ | |
3784 | else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) | |
3785 | { | |
3786 | /* If possible, try to remove the unneeded conditional jump. */ | |
3787 | if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 | |
3788 | && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) | |
3789 | { | |
3790 | if (!sel_remove_insn (BB_END (pred_bb), false, false)) | |
3791 | tidy_fallthru_edge (e); | |
3792 | } | |
3793 | else | |
3794 | sel_redirect_edge_and_branch (e, succ_bb); | |
3795 | rescan_p = true; | |
3796 | break; | |
3797 | } | |
e1ab7874 | 3798 | } |
3799 | } | |
3800 | ||
e1ab7874 | 3801 | if (can_merge_blocks_p (bb->prev_bb, bb)) |
3802 | sel_merge_blocks (bb->prev_bb, bb); | |
3803 | else | |
e1ab7874 | 3804 | { |
0424f393 | 3805 | /* This is a block without fallthru predecessor. Just delete it. */ |
ef4cf572 | 3806 | gcc_assert (note_bb); |
3807 | move_bb_info (note_bb, bb); | |
e1ab7874 | 3808 | remove_empty_bb (bb, true); |
3809 | } | |
3810 | ||
f1f41a6c | 3811 | if (!dom_bbs.is_empty ()) |
1a5dbaab | 3812 | { |
f1f41a6c | 3813 | dom_bbs.safe_push (succ_bb); |
1a5dbaab | 3814 | iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); |
f1f41a6c | 3815 | dom_bbs.release (); |
1a5dbaab | 3816 | } |
3817 | ||
e1ab7874 | 3818 | return true; |
3819 | } | |
3820 | ||
48e1416a | 3821 | /* Tidy the control flow after we have removed original insn from |
e1ab7874 | 3822 | XBB. Return true if we have removed some blocks. When FULL_TIDYING |
3823 | is true, also try to optimize control flow on non-empty blocks. */ | |
3824 | bool | |
3825 | tidy_control_flow (basic_block xbb, bool full_tidying) | |
3826 | { | |
3827 | bool changed = true; | |
9845d120 | 3828 | insn_t first, last; |
48e1416a | 3829 | |
e1ab7874 | 3830 | /* First check whether XBB is empty. */ |
6f0e7980 | 3831 | changed = maybe_tidy_empty_bb (xbb); |
e1ab7874 | 3832 | if (changed || !full_tidying) |
3833 | return changed; | |
48e1416a | 3834 | |
e1ab7874 | 3835 | /* Check if there is an unnecessary jump left after the insn. */ |
49087fba | 3836 | if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) |
e1ab7874 | 3837 | && INSN_SCHED_TIMES (BB_END (xbb)) == 0 |
3838 | && !IN_CURRENT_FENCE_P (BB_END (xbb))) | |
3839 | { | |
3840 | if (sel_remove_insn (BB_END (xbb), false, false)) | |
3841 | return true; | |
3842 | tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); | |
3843 | } | |
3844 | ||
9845d120 | 3845 | first = sel_bb_head (xbb); |
3846 | last = sel_bb_end (xbb); | |
3847 | if (MAY_HAVE_DEBUG_INSNS) | |
3848 | { | |
3849 | if (first != last && DEBUG_INSN_P (first)) | |
3850 | do | |
3851 | first = NEXT_INSN (first); | |
3852 | while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); | |
3853 | ||
3854 | if (first != last && DEBUG_INSN_P (last)) | |
3855 | do | |
3856 | last = PREV_INSN (last); | |
3857 | while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); | |
3858 | } | |
e1ab7874 | 3859 | /* Check if there is an unnecessary jump in the previous basic block, leading |
48e1416a | 3860 | to the next basic block, that is left after removing INSN from the stream. |
3861 | If so, remove that jump and redirect the edge to the current | |
3862 | basic block (where INSN was located before deletion). This way, | |
3863 | when the NOP is deleted several instructions later together with its | |
3864 | basic block, we will not get a jump to the next instruction, which | |
e1ab7874 | 3865 | can be harmful. */ |
9845d120 | 3866 | if (first == last |
e1ab7874 | 3867 | && !sel_bb_empty_p (xbb) |
9845d120 | 3868 | && INSN_NOP_P (last) |
e1ab7874 | 3869 | /* Flow goes fallthru from current block to the next. */ |
3870 | && EDGE_COUNT (xbb->succs) == 1 | |
3871 | && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) | |
3872 | /* When successor is an EXIT block, it may not be the next block. */ | |
34154e27 | 3873 | && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 3874 | /* And unconditional jump in previous basic block leads to |
3875 | next basic block of XBB and this jump can be safely removed. */ | |
3876 | && in_current_region_p (xbb->prev_bb) | |
49087fba | 3877 | && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) |
e1ab7874 | 3878 | && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 |
3879 | /* Also this jump is not at the scheduling boundary. */ | |
3880 | && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) | |
3881 | { | |
93919afc | 3882 | bool recompute_toporder_p; |
e1ab7874 | 3883 | /* Clear data structures of jump - jump itself will be removed |
3884 | by sel_redirect_edge_and_branch. */ | |
3885 | clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); | |
93919afc | 3886 | recompute_toporder_p |
3887 | = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); | |
3888 | ||
e1ab7874 | 3889 | gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); |
3890 | ||
3891 | /* It can turn out that, after removing the unused jump, the basic block | |
3892 | that contained that jump becomes empty too. In such a case | |
3893 | remove it as well. */ | |
3894 | if (sel_bb_empty_p (xbb->prev_bb)) | |
6f0e7980 | 3895 | changed = maybe_tidy_empty_bb (xbb->prev_bb); |
3896 | if (recompute_toporder_p) | |
93919afc | 3897 | sel_recompute_toporder (); |
e1ab7874 | 3898 | } |
7af466ad | 3899 | |
3900 | #ifdef ENABLE_CHECKING | |
3901 | verify_backedges (); | |
1a5dbaab | 3902 | verify_dominators (CDI_DOMINATORS); |
7af466ad | 3903 | #endif |
3904 | ||
e1ab7874 | 3905 | return changed; |
3906 | } | |
3907 | ||
93919afc | 3908 | /* Purge meaningless empty blocks in the middle of a region. */ |
3909 | void | |
3910 | purge_empty_blocks (void) | |
3911 | { | |
a6e634c6 | 3912 | int i; |
93919afc | 3913 | |
a6e634c6 | 3914 | /* Do not attempt to delete the first basic block in the region. */ |
3915 | for (i = 1; i < current_nr_blocks; ) | |
93919afc | 3916 | { |
f5a6b05f | 3917 | basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
93919afc | 3918 | |
6f0e7980 | 3919 | if (maybe_tidy_empty_bb (b)) |
93919afc | 3920 | continue; |
3921 | ||
3922 | i++; | |
3923 | } | |
3924 | } | |
3925 | ||
48e1416a | 3926 | /* Rip INSN off the insn stream. When ONLY_DISCONNECT is true, |
3927 | do not delete the insn's data, because it will be re-emitted later. | |
e1ab7874 | 3928 | Return true if we have removed some blocks afterwards. */ |
3929 | bool | |
3930 | sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) | |
3931 | { | |
3932 | basic_block bb = BLOCK_FOR_INSN (insn); | |
3933 | ||
3934 | gcc_assert (INSN_IN_STREAM_P (insn)); | |
3935 | ||
9845d120 | 3936 | if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) |
3937 | { | |
3938 | expr_t expr; | |
3939 | av_set_iterator i; | |
3940 | ||
3941 | /* When we remove a debug insn that is head of a BB, it remains | |
3942 | in the AV_SET of the block, but it shouldn't. */ | |
3943 | FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) | |
3944 | if (EXPR_INSN_RTX (expr) == insn) | |
3945 | { | |
3946 | av_set_iter_remove (&i); | |
3947 | break; | |
3948 | } | |
3949 | } | |
3950 | ||
e1ab7874 | 3951 | if (only_disconnect) |
93ff53d3 | 3952 | remove_insn (insn); |
e1ab7874 | 3953 | else |
3954 | { | |
93ff53d3 | 3955 | delete_insn (insn); |
e1ab7874 | 3956 | clear_expr (INSN_EXPR (insn)); |
3957 | } | |
3958 | ||
93ff53d3 | 3959 | /* It is necessary to NULL these fields in case we are going to re-insert |
3960 | INSN into the insns stream, as will usually happen in the ONLY_DISCONNECT | |
3961 | case, but also for NOPs that we will return to the nop pool. */ | |
4a57a2e8 | 3962 | SET_PREV_INSN (insn) = NULL_RTX; |
3963 | SET_NEXT_INSN (insn) = NULL_RTX; | |
93ff53d3 | 3964 | set_block_for_insn (insn, NULL); |
e1ab7874 | 3965 | |
3966 | return tidy_control_flow (bb, full_tidying); | |
3967 | } | |
3968 | ||
3969 | /* Estimate number of the insns in BB. */ | |
3970 | static int | |
3971 | sel_estimate_number_of_insns (basic_block bb) | |
3972 | { | |
3973 | int res = 0; | |
3974 | insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); | |
3975 | ||
3976 | for (; insn != next_tail; insn = NEXT_INSN (insn)) | |
9845d120 | 3977 | if (NONDEBUG_INSN_P (insn)) |
e1ab7874 | 3978 | res++; |
3979 | ||
3980 | return res; | |
3981 | } | |
3982 | ||
3983 | /* We don't need separate luids for notes or labels. */ | |
3984 | static int | |
3985 | sel_luid_for_non_insn (rtx x) | |
3986 | { | |
3987 | gcc_assert (NOTE_P (x) || LABEL_P (x)); | |
3988 | ||
3989 | return -1; | |
3990 | } | |
3991 | ||
bdcc104c | 3992 | /* Find the proper seqno for inserting at INSN by looking at its successors. |
3993 | Return -1 if no successors with positive seqno exist. */ | |
e1ab7874 | 3994 | static int |
2f3c9801 | 3995 | get_seqno_by_succs (rtx_insn *insn) |
bdcc104c | 3996 | { |
3997 | basic_block bb = BLOCK_FOR_INSN (insn); | |
2f3c9801 | 3998 | rtx_insn *tmp = insn, *end = BB_END (bb); |
bdcc104c | 3999 | int seqno; |
4000 | insn_t succ = NULL; | |
4001 | succ_iterator si; | |
4002 | ||
4003 | while (tmp != end) | |
4004 | { | |
4005 | tmp = NEXT_INSN (tmp); | |
4006 | if (INSN_P (tmp)) | |
4007 | return INSN_SEQNO (tmp); | |
4008 | } | |
4009 | ||
4010 | seqno = INT_MAX; | |
4011 | ||
4012 | FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) | |
4013 | if (INSN_SEQNO (succ) > 0) | |
4014 | seqno = MIN (seqno, INSN_SEQNO (succ)); | |
4015 | ||
4016 | if (seqno == INT_MAX) | |
4017 | return -1; | |
4018 | ||
4019 | return seqno; | |
4020 | } | |
4021 | ||
8d1881f5 | 4022 | /* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute |
4023 | seqno in corner cases. */ | |
bdcc104c | 4024 | static int |
8d1881f5 | 4025 | get_seqno_for_a_jump (insn_t insn, int old_seqno) |
e1ab7874 | 4026 | { |
4027 | int seqno; | |
4028 | ||
4029 | gcc_assert (INSN_SIMPLEJUMP_P (insn)); | |
4030 | ||
4031 | if (!sel_bb_head_p (insn)) | |
4032 | seqno = INSN_SEQNO (PREV_INSN (insn)); | |
4033 | else | |
4034 | { | |
4035 | basic_block bb = BLOCK_FOR_INSN (insn); | |
4036 | ||
4037 | if (single_pred_p (bb) | |
4038 | && !in_current_region_p (single_pred (bb))) | |
4039 | { | |
4040 | /* We can have preds outside a region when splitting edges | |
48e1416a | 4041 | for pipelining of an outer loop. Use succ instead. |
e1ab7874 | 4042 | There should be only one of them. */ |
4043 | insn_t succ = NULL; | |
4044 | succ_iterator si; | |
4045 | bool first = true; | |
48e1416a | 4046 | |
e1ab7874 | 4047 | gcc_assert (flag_sel_sched_pipelining_outer_loops |
4048 | && current_loop_nest); | |
48e1416a | 4049 | FOR_EACH_SUCC_1 (succ, si, insn, |
e1ab7874 | 4050 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
4051 | { | |
4052 | gcc_assert (first); | |
4053 | first = false; | |
4054 | } | |
4055 | ||
4056 | gcc_assert (succ != NULL); | |
4057 | seqno = INSN_SEQNO (succ); | |
4058 | } | |
4059 | else | |
4060 | { | |
4061 | insn_t *preds; | |
4062 | int n; | |
4063 | ||
4064 | cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); | |
e1ab7874 | 4065 | |
bdcc104c | 4066 | gcc_assert (n > 0); |
4067 | /* For one predecessor, use simple method. */ | |
4068 | if (n == 1) | |
4069 | seqno = INSN_SEQNO (preds[0]); | |
4070 | else | |
4071 | seqno = get_seqno_by_preds (insn); | |
48e1416a | 4072 | |
e1ab7874 | 4073 | free (preds); |
4074 | } | |
4075 | } | |
4076 | ||
bdcc104c | 4077 | /* We were unable to find a good seqno among preds. */ |
4078 | if (seqno < 0) | |
4079 | seqno = get_seqno_by_succs (insn); | |
4080 | ||
8d1881f5 | 4081 | if (seqno < 0) |
4082 | { | |
4083 | /* The only case where we can legally get here is when the only | |
4084 | unscheduled insn was a conditional jump that got removed and turned | |
4085 | into this unconditional one. Initialize from the old seqno | |
4086 | of that jump, which was passed down to here. */ | |
4087 | seqno = old_seqno; | |
4088 | } | |
bdcc104c | 4089 | |
8d1881f5 | 4090 | gcc_assert (seqno >= 0); |
e1ab7874 | 4091 | return seqno; |
4092 | } | |
4093 | ||
961d3eb8 | 4094 | /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors |
4095 | with positive seqno exist. */ | |
e1ab7874 | 4096 | int |
91a55c11 | 4097 | get_seqno_by_preds (rtx_insn *insn) |
e1ab7874 | 4098 | { |
4099 | basic_block bb = BLOCK_FOR_INSN (insn); | |
91a55c11 | 4100 | rtx_insn *tmp = insn, *head = BB_HEAD (bb); |
e1ab7874 | 4101 | insn_t *preds; |
4102 | int n, i, seqno; | |
4103 | ||
4104 | while (tmp != head) | |
bdcc104c | 4105 | { |
e1ab7874 | 4106 | tmp = PREV_INSN (tmp); |
bdcc104c | 4107 | if (INSN_P (tmp)) |
4108 | return INSN_SEQNO (tmp); | |
4109 | } | |
48e1416a | 4110 | |
e1ab7874 | 4111 | cfg_preds (bb, &preds, &n); |
4112 | for (i = 0, seqno = -1; i < n; i++) | |
4113 | seqno = MAX (seqno, INSN_SEQNO (preds[i])); | |
4114 | ||
e1ab7874 | 4115 | return seqno; |
4116 | } | |
4117 | ||
4118 | \f | |
4119 | ||
4120 | /* Extend pass-scope data structures for basic blocks. */ | |
4121 | void | |
4122 | sel_extend_global_bb_info (void) | |
4123 | { | |
fe672ac0 | 4124 | sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4125 | } |
4126 | ||
4127 | /* Extend region-scope data structures for basic blocks. */ | |
4128 | static void | |
4129 | extend_region_bb_info (void) | |
4130 | { | |
fe672ac0 | 4131 | sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4132 | } |
4133 | ||
4134 | /* Extend all data structures to fit for all basic blocks. */ | |
4135 | static void | |
4136 | extend_bb_info (void) | |
4137 | { | |
4138 | sel_extend_global_bb_info (); | |
4139 | extend_region_bb_info (); | |
4140 | } | |
4141 | ||
4142 | /* Finalize pass-scope data structures for basic blocks. */ | |
4143 | void | |
4144 | sel_finish_global_bb_info (void) | |
4145 | { | |
f1f41a6c | 4146 | sel_global_bb_info.release (); |
e1ab7874 | 4147 | } |
4148 | ||
4149 | /* Finalize region-scope data structures for basic blocks. */ | |
4150 | static void | |
4151 | finish_region_bb_info (void) | |
4152 | { | |
f1f41a6c | 4153 | sel_region_bb_info.release (); |
e1ab7874 | 4154 | } |
4155 | \f | |
4156 | ||
4157 | /* Data for each insn in current region. */ | |
1e094109 | 4158 | vec<sel_insn_data_def> s_i_d = vNULL; |
e1ab7874 | 4159 | |
e1ab7874 | 4160 | /* Extend data structures for insns from current region. */ |
4161 | static void | |
4162 | extend_insn_data (void) | |
4163 | { | |
4164 | int reserve; | |
48e1416a | 4165 | |
e1ab7874 | 4166 | sched_extend_target (); |
4167 | sched_deps_init (false); | |
4168 | ||
4169 | /* Extend data structures for insns from current region. */ | |
f1f41a6c | 4170 | reserve = (sched_max_luid + 1 - s_i_d.length ()); |
4171 | if (reserve > 0 && ! s_i_d.space (reserve)) | |
d9ab2038 | 4172 | { |
4173 | int size; | |
4174 | ||
4175 | if (sched_max_luid / 2 > 1024) | |
4176 | size = sched_max_luid + 1024; | |
4177 | else | |
4178 | size = 3 * sched_max_luid / 2; | |
48e1416a | 4179 | |
d9ab2038 | 4180 | |
f1f41a6c | 4181 | s_i_d.safe_grow_cleared (size); |
d9ab2038 | 4182 | } |
e1ab7874 | 4183 | } |
4184 | ||
4185 | /* Finalize data structures for insns from current region. */ | |
4186 | static void | |
4187 | finish_insns (void) | |
4188 | { | |
4189 | unsigned i; | |
4190 | ||
4191 | /* Clear here all dependence contexts that may have been left over from insns | |
4192 | that were removed during scheduling. */ | |
f1f41a6c | 4193 | for (i = 0; i < s_i_d.length (); i++) |
e1ab7874 | 4194 | { |
f1f41a6c | 4195 | sel_insn_data_def *sid_entry = &s_i_d[i]; |
48e1416a | 4196 | |
e1ab7874 | 4197 | if (sid_entry->live) |
4198 | return_regset_to_pool (sid_entry->live); | |
4199 | if (sid_entry->analyzed_deps) | |
4200 | { | |
4201 | BITMAP_FREE (sid_entry->analyzed_deps); | |
4202 | BITMAP_FREE (sid_entry->found_deps); | |
4203 | htab_delete (sid_entry->transformed_insns); | |
4204 | free_deps (&sid_entry->deps_context); | |
4205 | } | |
4206 | if (EXPR_VINSN (&sid_entry->expr)) | |
4207 | { | |
4208 | clear_expr (&sid_entry->expr); | |
48e1416a | 4209 | |
e1ab7874 | 4210 | /* Also, clear CANT_MOVE bit here, because we really don't want it |
4211 | to be passed to the next region. */ | |
4212 | CANT_MOVE_BY_LUID (i) = 0; | |
4213 | } | |
4214 | } | |
48e1416a | 4215 | |
f1f41a6c | 4216 | s_i_d.release (); |
e1ab7874 | 4217 | } |
4218 | ||
4219 | /* A proxy to pass initialization data to init_insn (). */ | |
4220 | static sel_insn_data_def _insn_init_ssid; | |
4221 | static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; | |
4222 | ||
4223 | /* If true create a new vinsn. Otherwise use the one from EXPR. */ | |
4224 | static bool insn_init_create_new_vinsn_p; | |
4225 | ||
4226 | /* Set all necessary data for initialization of the new insn[s]. */ | |
4227 | static expr_t | |
4228 | set_insn_init (expr_t expr, vinsn_t vi, int seqno) | |
4229 | { | |
4230 | expr_t x = &insn_init_ssid->expr; | |
4231 | ||
4232 | copy_expr_onside (x, expr); | |
4233 | if (vi != NULL) | |
4234 | { | |
4235 | insn_init_create_new_vinsn_p = false; | |
4236 | change_vinsn_in_expr (x, vi); | |
4237 | } | |
4238 | else | |
4239 | insn_init_create_new_vinsn_p = true; | |
4240 | ||
4241 | insn_init_ssid->seqno = seqno; | |
4242 | return x; | |
4243 | } | |
4244 | ||
4245 | /* Init data for INSN. */ | |
4246 | static void | |
4247 | init_insn_data (insn_t insn) | |
4248 | { | |
4249 | expr_t expr; | |
4250 | sel_insn_data_t ssid = insn_init_ssid; | |
4251 | ||
4252 | /* The fields mentioned below are special and hence are not being | |
4253 | propagated to the new insns. */ | |
4254 | gcc_assert (!ssid->asm_p && ssid->sched_next == NULL | |
4255 | && !ssid->after_stall_p && ssid->sched_cycle == 0); | |
4256 | gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); | |
4257 | ||
4258 | expr = INSN_EXPR (insn); | |
4259 | copy_expr (expr, &ssid->expr); | |
4260 | prepare_insn_expr (insn, ssid->seqno); | |
4261 | ||
4262 | if (insn_init_create_new_vinsn_p) | |
4263 | change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); | |
48e1416a | 4264 | |
e1ab7874 | 4265 | if (first_time_insn_init (insn)) |
4266 | init_first_time_insn_data (insn); | |
4267 | } | |
4268 | ||
4269 | /* This is used to initialize spurious jumps generated by | |
8d1881f5 | 4270 | sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos |
4271 | in corner cases within get_seqno_for_a_jump. */ | |
e1ab7874 | 4272 | static void |
8d1881f5 | 4273 | init_simplejump_data (insn_t insn, int old_seqno) |
e1ab7874 | 4274 | { |
4275 | init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, | |
f1f41a6c | 4276 | REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, |
1e094109 | 4277 | vNULL, true, false, false, |
e1ab7874 | 4278 | false, true); |
8d1881f5 | 4279 | INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno); |
e1ab7874 | 4280 | init_first_time_insn_data (insn); |
4281 | } | |
4282 | ||
48e1416a | 4283 | /* Perform deferred initialization of insns. This is used to process |
8d1881f5 | 4284 | a new jump that may be created by redirect_edge. OLD_SEQNO is used |
4285 | for initializing simplejumps in init_simplejump_data. */ | |
4286 | static void | |
4287 | sel_init_new_insn (insn_t insn, int flags, int old_seqno) | |
e1ab7874 | 4288 | { |
4289 | /* We create data structures for bb when the first insn is emitted in it. */ | |
4290 | if (INSN_P (insn) | |
4291 | && INSN_IN_STREAM_P (insn) | |
4292 | && insn_is_the_only_one_in_bb_p (insn)) | |
4293 | { | |
4294 | extend_bb_info (); | |
4295 | create_initial_data_sets (BLOCK_FOR_INSN (insn)); | |
4296 | } | |
48e1416a | 4297 | |
e1ab7874 | 4298 | if (flags & INSN_INIT_TODO_LUID) |
52d7e28c | 4299 | { |
4300 | sched_extend_luids (); | |
4301 | sched_init_insn_luid (insn); | |
4302 | } | |
e1ab7874 | 4303 | |
4304 | if (flags & INSN_INIT_TODO_SSID) | |
4305 | { | |
4306 | extend_insn_data (); | |
4307 | init_insn_data (insn); | |
4308 | clear_expr (&insn_init_ssid->expr); | |
4309 | } | |
4310 | ||
4311 | if (flags & INSN_INIT_TODO_SIMPLEJUMP) | |
4312 | { | |
4313 | extend_insn_data (); | |
8d1881f5 | 4314 | init_simplejump_data (insn, old_seqno); |
e1ab7874 | 4315 | } |
48e1416a | 4316 | |
e1ab7874 | 4317 | gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) |
4318 | == CONTAINING_RGN (BB_TO_BLOCK (0))); | |
4319 | } | |
4320 | \f | |
4321 | ||
4322 | /* Functions to init/finish work with lv sets. */ | |
4323 | ||
4324 | /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ | |
4325 | static void | |
4326 | init_lv_set (basic_block bb) | |
4327 | { | |
4328 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
4329 | ||
4330 | BB_LV_SET (bb) = get_regset_from_pool (); | |
48e1416a | 4331 | COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
e1ab7874 | 4332 | BB_LV_SET_VALID_P (bb) = true; |
4333 | } | |
4334 | ||
4335 | /* Copy liveness information to BB from FROM_BB. */ | |
4336 | static void | |
4337 | copy_lv_set_from (basic_block bb, basic_block from_bb) | |
4338 | { | |
4339 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
48e1416a | 4340 | |
e1ab7874 | 4341 | COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); |
4342 | BB_LV_SET_VALID_P (bb) = true; | |
48e1416a | 4343 | } |
e1ab7874 | 4344 | |
4345 | /* Initialize lv set of all bb headers. */ | |
4346 | void | |
4347 | init_lv_sets (void) | |
4348 | { | |
4349 | basic_block bb; | |
4350 | ||
4351 | /* Initialize LV sets. */ | |
fc00614f | 4352 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4353 | init_lv_set (bb); |
4354 | ||
4355 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4356 | init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4357 | } |
4358 | ||
4359 | /* Release the lv set of BB. */ | |
4360 | static void | |
4361 | free_lv_set (basic_block bb) | |
4362 | { | |
4363 | gcc_assert (BB_LV_SET (bb) != NULL); | |
4364 | ||
4365 | return_regset_to_pool (BB_LV_SET (bb)); | |
4366 | BB_LV_SET (bb) = NULL; | |
4367 | BB_LV_SET_VALID_P (bb) = false; | |
4368 | } | |
4369 | ||
4370 | /* Finalize lv sets of all bb headers. */ | |
4371 | void | |
4372 | free_lv_sets (void) | |
4373 | { | |
4374 | basic_block bb; | |
4375 | ||
4376 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4377 | free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4378 | |
4379 | /* Free LV sets. */ | |
fc00614f | 4380 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4381 | if (BB_LV_SET (bb)) |
4382 | free_lv_set (bb); | |
4383 | } | |
4384 | ||
c1c8a3d0 | 4385 | /* Mark AV_SET for BB as invalid, so this set will be updated the next time |
4386 | compute_av() processes BB. This function is called when creating new basic | |
4387 | blocks, as well as for blocks (either new or existing) where new jumps are | |
4388 | created when the control flow is being updated. */ | |
e1ab7874 | 4389 | static void |
4390 | invalidate_av_set (basic_block bb) | |
4391 | { | |
e1ab7874 | 4392 | BB_AV_LEVEL (bb) = -1; |
4393 | } | |
4394 | ||
4395 | /* Create initial data sets for BB (they will be invalid). */ | |
4396 | static void | |
4397 | create_initial_data_sets (basic_block bb) | |
4398 | { | |
4399 | if (BB_LV_SET (bb)) | |
4400 | BB_LV_SET_VALID_P (bb) = false; | |
4401 | else | |
4402 | BB_LV_SET (bb) = get_regset_from_pool (); | |
4403 | invalidate_av_set (bb); | |
4404 | } | |
4405 | ||
4406 | /* Free av set of BB. */ | |
4407 | static void | |
4408 | free_av_set (basic_block bb) | |
4409 | { | |
4410 | av_set_clear (&BB_AV_SET (bb)); | |
4411 | BB_AV_LEVEL (bb) = 0; | |
4412 | } | |
4413 | ||
4414 | /* Free data sets of BB. */ | |
4415 | void | |
4416 | free_data_sets (basic_block bb) | |
4417 | { | |
4418 | free_lv_set (bb); | |
4419 | free_av_set (bb); | |
4420 | } | |
4421 | ||
4422 | /* Exchange lv sets of TO and FROM. */ | |
4423 | static void | |
4424 | exchange_lv_sets (basic_block to, basic_block from) | |
4425 | { | |
4426 | { | |
4427 | regset to_lv_set = BB_LV_SET (to); | |
4428 | ||
4429 | BB_LV_SET (to) = BB_LV_SET (from); | |
4430 | BB_LV_SET (from) = to_lv_set; | |
4431 | } | |
4432 | ||
4433 | { | |
4434 | bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to); | |
4435 | ||
4436 | BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); | |
4437 | BB_LV_SET_VALID_P (from) = to_lv_set_valid_p; | |
4438 | } | |
4439 | } | |
4440 | ||
4441 | ||
4442 | /* Exchange av sets of TO and FROM. */ | |
4443 | static void | |
4444 | exchange_av_sets (basic_block to, basic_block from) | |
4445 | { | |
4446 | { | |
4447 | av_set_t to_av_set = BB_AV_SET (to); | |
4448 | ||
4449 | BB_AV_SET (to) = BB_AV_SET (from); | |
4450 | BB_AV_SET (from) = to_av_set; | |
4451 | } | |
4452 | ||
4453 | { | |
4454 | int to_av_level = BB_AV_LEVEL (to); | |
4455 | ||
4456 | BB_AV_LEVEL (to) = BB_AV_LEVEL (from); | |
4457 | BB_AV_LEVEL (from) = to_av_level; | |
4458 | } | |
4459 | } | |
4460 | ||
4461 | /* Exchange data sets of TO and FROM. */ | |
4462 | void | |
4463 | exchange_data_sets (basic_block to, basic_block from) | |
4464 | { | |
4465 | exchange_lv_sets (to, from); | |
4466 | exchange_av_sets (to, from); | |
4467 | } | |
4468 | ||
4469 | /* Copy data sets of FROM to TO. */ | |
4470 | void | |
4471 | copy_data_sets (basic_block to, basic_block from) | |
4472 | { | |
4473 | gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); | |
4474 | gcc_assert (BB_AV_SET (to) == NULL); | |
4475 | ||
4476 | BB_AV_LEVEL (to) = BB_AV_LEVEL (from); | |
4477 | BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); | |
4478 | ||
4479 | if (BB_AV_SET_VALID_P (from)) | |
4480 | { | |
4481 | BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); | |
4482 | } | |
4483 | if (BB_LV_SET_VALID_P (from)) | |
4484 | { | |
4485 | gcc_assert (BB_LV_SET (to) != NULL); | |
4486 | COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); | |
4487 | } | |
4488 | } | |
4489 | ||
4490 | /* Return an av set for INSN, if any. */ | |
4491 | av_set_t | |
4492 | get_av_set (insn_t insn) | |
4493 | { | |
4494 | av_set_t av_set; | |
4495 | ||
4496 | gcc_assert (AV_SET_VALID_P (insn)); | |
4497 | ||
4498 | if (sel_bb_head_p (insn)) | |
4499 | av_set = BB_AV_SET (BLOCK_FOR_INSN (insn)); | |
4500 | else | |
4501 | av_set = NULL; | |
4502 | ||
4503 | return av_set; | |
4504 | } | |
4505 | ||
4506 | /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */ | |
4507 | int | |
4508 | get_av_level (insn_t insn) | |
4509 | { | |
4510 | int av_level; | |
4511 | ||
4512 | gcc_assert (INSN_P (insn)); | |
4513 | ||
4514 | if (sel_bb_head_p (insn)) | |
4515 | av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn)); | |
4516 | else | |
4517 | av_level = INSN_WS_LEVEL (insn); | |
4518 | ||
4519 | return av_level; | |
4520 | } | |
4521 | ||
4522 | \f | |
4523 | ||
4524 | /* Variables to work with control-flow graph. */ | |
4525 | ||
4526 | /* The basic blocks that have already been processed by sched_data_update (), | |
4527 | but haven't been passed to sel_add_bb () yet. */ | |
f1f41a6c | 4528 | static vec<basic_block> |
1e094109 | 4529 | last_added_blocks = vNULL; |
e1ab7874 | 4530 | |
4531 | /* A pool for allocating successor infos. */ | |
4532 | static struct | |
4533 | { | |
4534 | /* A stack for saving succs_info structures. */ | |
4535 | struct succs_info *stack; | |
4536 | ||
4537 | /* Its size. */ | |
4538 | int size; | |
4539 | ||
4540 | /* Top of the stack. */ | |
4541 | int top; | |
4542 | ||
4543 | /* Maximal value of the top. */ | |
4544 | int max_top; | |
4545 | } succs_info_pool; | |
4546 | ||
4547 | /* Functions to work with control-flow graph. */ | |
4548 | ||
4549 | /* Return the first real insn of BB, or NULL if BB is empty. */ | |
179c282d | 4550 | rtx_insn * |
e1ab7874 | 4551 | sel_bb_head (basic_block bb) |
4552 | { | |
179c282d | 4553 | rtx_insn *head; |
e1ab7874 | 4554 | |
34154e27 | 4555 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
e1ab7874 | 4556 | { |
4557 | gcc_assert (exit_insn != NULL_RTX); | |
4558 | head = exit_insn; | |
4559 | } | |
4560 | else | |
4561 | { | |
4562 | insn_t note; | |
4563 | ||
4564 | note = bb_note (bb); | |
4565 | head = next_nonnote_insn (note); | |
4566 | ||
cabd2128 | 4567 | if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb)) |
179c282d | 4568 | head = NULL; |
e1ab7874 | 4569 | } |
4570 | ||
4571 | return head; | |
4572 | } | |
4573 | ||
4574 | /* Return true if INSN is a basic block header. */ | |
4575 | bool | |
4576 | sel_bb_head_p (insn_t insn) | |
4577 | { | |
4578 | return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn; | |
4579 | } | |
4580 | ||
4581 | /* Return the last insn of BB, or NULL if BB is empty. */ | |
179c282d | 4582 | rtx_insn * |
e1ab7874 | 4583 | sel_bb_end (basic_block bb) |
4584 | { | |
4585 | if (sel_bb_empty_p (bb)) | |
179c282d | 4586 | return NULL; |
e1ab7874 | 4587 | |
34154e27 | 4588 | gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4589 | |
4590 | return BB_END (bb); | |
4591 | } | |
4592 | ||
4593 | /* Return true if INSN is the last insn in its basic block. */ | |
4594 | bool | |
4595 | sel_bb_end_p (insn_t insn) | |
4596 | { | |
4597 | return insn == sel_bb_end (BLOCK_FOR_INSN (insn)); | |
4598 | } | |
4599 | ||
4600 | /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */ | |
4601 | bool | |
4602 | sel_bb_empty_p (basic_block bb) | |
4603 | { | |
4604 | return sel_bb_head (bb) == NULL; | |
4605 | } | |
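The three accessors above combine naturally; the following sketch (illustrative only, the function name is hypothetical) walks the real insns of a block, relying on sel_bb_head returning NULL for a block that holds nothing but its NOTE_INSN_BASIC_BLOCK.

/* Illustration only: visit every real insn of BB in order.  */
static void
example_walk_bb_insns (basic_block bb)
{
  insn_t insn, end;

  if (sel_bb_empty_p (bb))
    return;  /* Only the basic block note is present.  */

  insn = sel_bb_head (bb);
  end = sel_bb_end (bb);
  while (true)
    {
      /* ... process INSN here ...  */
      if (insn == end)
        break;
      insn = NEXT_INSN (insn);
    }
}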
4606 | ||
4607 | /* True when BB belongs to the current scheduling region. */ | |
4608 | bool | |
4609 | in_current_region_p (basic_block bb) | |
4610 | { | |
4611 | if (bb->index < NUM_FIXED_BLOCKS) | |
4612 | return false; | |
4613 | ||
4614 | return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0)); | |
4615 | } | |
4616 | ||
4617 | /* Return the block which is a fallthru bb of a conditional jump JUMP. */ | |
4618 | basic_block | |
93ee8dfb | 4619 | fallthru_bb_of_jump (const rtx_insn *jump) |
e1ab7874 | 4620 | { |
4621 | if (!JUMP_P (jump)) | |
4622 | return NULL; | |
4623 | ||
e1ab7874 | 4624 | if (!any_condjump_p (jump)) |
4625 | return NULL; | |
4626 | ||
bf19734b | 4627 | /* A basic block that ends with a conditional jump may still have only one |
4628 | successor (and be followed by a barrier); we are not interested in such blocks. */ | |
4629 | if (single_succ_p (BLOCK_FOR_INSN (jump))) | |
4630 | return NULL; | |
4631 | ||
e1ab7874 | 4632 | return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; |
4633 | } | |
4634 | ||
4635 | /* Remove all notes from BB. */ | |
4636 | static void | |
4637 | init_bb (basic_block bb) | |
4638 | { | |
4639 | remove_notes (bb_note (bb), BB_END (bb)); | |
e97a173d | 4640 | BB_NOTE_LIST (bb) = note_list; |
e1ab7874 | 4641 | } |
4642 | ||
4643 | void | |
52d7e28c | 4644 | sel_init_bbs (bb_vec_t bbs) |
e1ab7874 | 4645 | { |
4646 | const struct sched_scan_info_def ssi = | |
4647 | { | |
4648 | extend_bb_info, /* extend_bb */ | |
4649 | init_bb, /* init_bb */ | |
4650 | NULL, /* extend_insn */ | |
4651 | NULL /* init_insn */ | |
4652 | }; | |
4653 | ||
52d7e28c | 4654 | sched_scan (&ssi, bbs); |
e1ab7874 | 4655 | } |
4656 | ||
3baa98a0 | 4657 | /* Restore notes for the whole region. */ |
e1ab7874 | 4658 | static void |
3baa98a0 | 4659 | sel_restore_notes (void) |
e1ab7874 | 4660 | { |
4661 | int bb; | |
3baa98a0 | 4662 | insn_t insn; |
e1ab7874 | 4663 | |
4664 | for (bb = 0; bb < current_nr_blocks; bb++) | |
4665 | { | |
4666 | basic_block first, last; | |
4667 | ||
4668 | first = EBB_FIRST_BB (bb); | |
4669 | last = EBB_LAST_BB (bb)->next_bb; | |
4670 | ||
4671 | do | |
4672 | { | |
4673 | note_list = BB_NOTE_LIST (first); | |
4674 | restore_other_notes (NULL, first); | |
e97a173d | 4675 | BB_NOTE_LIST (first) = NULL; |
e1ab7874 | 4676 | |
3baa98a0 | 4677 | FOR_BB_INSNS (first, insn) |
4678 | if (NONDEBUG_INSN_P (insn)) | |
4679 | reemit_notes (insn); | |
4680 | ||
e1ab7874 | 4681 | first = first->next_bb; |
4682 | } | |
4683 | while (first != last); | |
4684 | } | |
4685 | } | |
4686 | ||
4687 | /* Free per-bb data structures. */ | |
4688 | void | |
4689 | sel_finish_bbs (void) | |
4690 | { | |
3baa98a0 | 4691 | sel_restore_notes (); |
e1ab7874 | 4692 | |
4693 | /* Remove current loop preheader from this loop. */ | |
4694 | if (current_loop_nest) | |
4695 | sel_remove_loop_preheader (); | |
4696 | ||
4697 | finish_region_bb_info (); | |
4698 | } | |
4699 | ||
4700 | /* Return true if INSN has a single successor of type FLAGS. */ | |
4701 | bool | |
4702 | sel_insn_has_single_succ_p (insn_t insn, int flags) | |
4703 | { | |
4704 | insn_t succ; | |
4705 | succ_iterator si; | |
4706 | bool first_p = true; | |
4707 | ||
4708 | FOR_EACH_SUCC_1 (succ, si, insn, flags) | |
4709 | { | |
4710 | if (first_p) | |
4711 | first_p = false; | |
4712 | else | |
4713 | return false; | |
4714 | } | |
4715 | ||
4716 | return true; | |
4717 | } | |
4718 | ||
4719 | /* Allocate successor's info. */ | |
4720 | static struct succs_info * | |
4721 | alloc_succs_info (void) | |
4722 | { | |
4723 | if (succs_info_pool.top == succs_info_pool.max_top) | |
4724 | { | |
4725 | int i; | |
48e1416a | 4726 | |
e1ab7874 | 4727 | if (++succs_info_pool.max_top >= succs_info_pool.size) |
4728 | gcc_unreachable (); | |
4729 | ||
4730 | i = ++succs_info_pool.top; | |
f1f41a6c | 4731 | succs_info_pool.stack[i].succs_ok.create (10); |
4732 | succs_info_pool.stack[i].succs_other.create (10); | |
4733 | succs_info_pool.stack[i].probs_ok.create (10); | |
e1ab7874 | 4734 | } |
4735 | else | |
4736 | succs_info_pool.top++; | |
4737 | ||
4738 | return &succs_info_pool.stack[succs_info_pool.top]; | |
4739 | } | |
4740 | ||
4741 | /* Free successor's info. */ | |
4742 | void | |
4743 | free_succs_info (struct succs_info * sinfo) | |
4744 | { | |
48e1416a | 4745 | gcc_assert (succs_info_pool.top >= 0 |
e1ab7874 | 4746 | && &succs_info_pool.stack[succs_info_pool.top] == sinfo); |
4747 | succs_info_pool.top--; | |
4748 | ||
4749 | /* Clear stale info. */ | |
f1f41a6c | 4750 | sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ()); |
4751 | sinfo->succs_other.block_remove (0, sinfo->succs_other.length ()); | |
4752 | sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ()); | |
e1ab7874 | 4753 | sinfo->all_prob = 0; |
4754 | sinfo->succs_ok_n = 0; | |
4755 | sinfo->all_succs_n = 0; | |
4756 | } | |
4757 | ||
48e1416a | 4758 | /* Compute successor info for INSN. FLAGS are the flags passed |
e1ab7874 | 4759 | to the FOR_EACH_SUCC_1 iterator. */ |
4760 | struct succs_info * | |
4761 | compute_succs_info (insn_t insn, short flags) | |
4762 | { | |
4763 | succ_iterator si; | |
4764 | insn_t succ; | |
4765 | struct succs_info *sinfo = alloc_succs_info (); | |
4766 | ||
4767 | /* Traverse *all* successors and decide what to do with each. */ | |
4768 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) | |
4769 | { | |
4770 | /* FIXME: this doesn't work for skipping to loop exits, as we don't | |
4771 | perform code motion through inner loops. */ | |
4772 | short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; | |
4773 | ||
4774 | if (current_flags & flags) | |
4775 | { | |
f1f41a6c | 4776 | sinfo->succs_ok.safe_push (succ); |
4777 | sinfo->probs_ok.safe_push ( | |
4778 | /* FIXME: Improve calculation when skipping | |
4779 | inner loop to exits. */ | |
4780 | si.bb_end ? si.e1->probability : REG_BR_PROB_BASE); | |
e1ab7874 | 4781 | sinfo->succs_ok_n++; |
4782 | } | |
4783 | else | |
f1f41a6c | 4784 | sinfo->succs_other.safe_push (succ); |
e1ab7874 | 4785 | |
4786 | /* Compute all_prob. */ | |
4787 | if (!si.bb_end) | |
4788 | sinfo->all_prob = REG_BR_PROB_BASE; | |
4789 | else | |
4790 | sinfo->all_prob += si.e1->probability; | |
4791 | ||
4792 | sinfo->all_succs_n++; | |
4793 | } | |
4794 | ||
4795 | return sinfo; | |
4796 | } | |
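/* A sketch of the intended usage, inferred from the pool code above: a
   caller obtains the info, walks the collected vectors, and releases the
   structure in LIFO order (free_succs_info asserts that its argument is
   the current top of the pool stack):

       struct succs_info *sinfo = compute_succs_info (insn, SUCCS_ALL);
       ... inspect sinfo->succs_ok / sinfo->probs_ok / sinfo->succs_other ...
       free_succs_info (sinfo);  */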
4797 | ||
48e1416a | 4798 | /* Return the predecessors of BB in PREDS and their number in N. |
e1ab7874 | 4799 | Empty blocks are skipped. SIZE is used to allocate PREDS. */ |
4800 | static void | |
4801 | cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) | |
4802 | { | |
4803 | edge e; | |
4804 | edge_iterator ei; | |
4805 | ||
4806 | gcc_assert (BLOCK_TO_BB (bb->index) != 0); | |
4807 | ||
4808 | FOR_EACH_EDGE (e, ei, bb->preds) | |
4809 | { | |
4810 | basic_block pred_bb = e->src; | |
4811 | insn_t bb_end = BB_END (pred_bb); | |
4812 | ||
f1ec9c64 | 4813 | if (!in_current_region_p (pred_bb)) |
4814 | { | |
4815 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
4816 | && current_loop_nest); | |
4817 | continue; | |
4818 | } | |
e1ab7874 | 4819 | |
4820 | if (sel_bb_empty_p (pred_bb)) | |
4821 | cfg_preds_1 (pred_bb, preds, n, size); | |
4822 | else | |
4823 | { | |
4824 | if (*n == *size) | |
48e1416a | 4825 | *preds = XRESIZEVEC (insn_t, *preds, |
e1ab7874 | 4826 | (*size = 2 * *size + 1)); |
4827 | (*preds)[(*n)++] = bb_end; | |
4828 | } | |
4829 | } | |
4830 | ||
f1ec9c64 | 4831 | gcc_assert (*n != 0 |
4832 | || (flag_sel_sched_pipelining_outer_loops | |
4833 | && current_loop_nest)); | |
e1ab7874 | 4834 | } |
4835 | ||
48e1416a | 4836 | /* Find all predecessors of BB and record them in PREDS and their number |
4837 | in N. Empty blocks are skipped, and only normal (forward in-region) | |
e1ab7874 | 4838 | edges are processed. */ |
4839 | static void | |
4840 | cfg_preds (basic_block bb, insn_t **preds, int *n) | |
4841 | { | |
4842 | int size = 0; | |
4843 | ||
4844 | *preds = NULL; | |
4845 | *n = 0; | |
4846 | cfg_preds_1 (bb, preds, n, &size); | |
4847 | } | |
4848 | ||
4849 | /* Returns true if we are moving INSN through a join point. */ | |
4850 | bool | |
4851 | sel_num_cfg_preds_gt_1 (insn_t insn) | |
4852 | { | |
4853 | basic_block bb; | |
4854 | ||
4855 | if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0) | |
4856 | return false; | |
4857 | ||
4858 | bb = BLOCK_FOR_INSN (insn); | |
4859 | ||
4860 | while (1) | |
4861 | { | |
4862 | if (EDGE_COUNT (bb->preds) > 1) | |
4863 | return true; | |
4864 | ||
4865 | gcc_assert (EDGE_PRED (bb, 0)->dest == bb); | |
4866 | bb = EDGE_PRED (bb, 0)->src; | |
4867 | ||
4868 | if (!sel_bb_empty_p (bb)) | |
4869 | break; | |
4870 | } | |
4871 | ||
4872 | return false; | |
4873 | } | |
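/* Note that the loop above walks upwards through empty single-pred blocks,
   so a join point separated from INSN only by empty blocks is still
   detected.  */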
4874 | ||
48e1416a | 4875 | /* Returns true when BB should be the end of an ebb. Adapted from the |
e1ab7874 | 4876 | code in sched-ebb.c. */ |
4877 | bool | |
4878 | bb_ends_ebb_p (basic_block bb) | |
4879 | { | |
4880 | basic_block next_bb = bb_next_bb (bb); | |
4881 | edge e; | |
48e1416a | 4882 | |
34154e27 | 4883 | if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 4884 | || bitmap_bit_p (forced_ebb_heads, next_bb->index) |
4885 | || (LABEL_P (BB_HEAD (next_bb)) | |
4886 | /* NB: LABEL_NUSES () is not maintained outside of jump.c. | |
4887 | Work around that. */ | |
4888 | && !single_pred_p (next_bb))) | |
4889 | return true; | |
4890 | ||
4891 | if (!in_current_region_p (next_bb)) | |
4892 | return true; | |
4893 | ||
7f58c05e | 4894 | e = find_fallthru_edge (bb->succs); |
4895 | if (e) | |
4896 | { | |
4897 | gcc_assert (e->dest == next_bb); | |
4898 | ||
4899 | return false; | |
4900 | } | |
e1ab7874 | 4901 | |
4902 | return true; | |
4903 | } | |
4904 | ||
4905 | /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a | |
4906 | successor of INSN. */ | |
4907 | bool | |
4908 | in_same_ebb_p (insn_t insn, insn_t succ) | |
4909 | { | |
4910 | basic_block ptr = BLOCK_FOR_INSN (insn); | |
4911 | ||
9af5ce0c | 4912 | for (;;) |
e1ab7874 | 4913 | { |
4914 | if (ptr == BLOCK_FOR_INSN (succ)) | |
4915 | return true; | |
48e1416a | 4916 | |
e1ab7874 | 4917 | if (bb_ends_ebb_p (ptr)) |
4918 | return false; | |
4919 | ||
4920 | ptr = bb_next_bb (ptr); | |
4921 | } | |
4922 | ||
4923 | gcc_unreachable (); | |
4924 | return false; | |
4925 | } | |
4926 | ||
4927 | /* Recomputes the reverse topological order for the function and | |
4928 | saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also | |
4929 | modified appropriately. */ | |
4930 | static void | |
4931 | recompute_rev_top_order (void) | |
4932 | { | |
4933 | int *postorder; | |
4934 | int n_blocks, i; | |
4935 | ||
fe672ac0 | 4936 | if (!rev_top_order_index |
4937 | || rev_top_order_index_len < last_basic_block_for_fn (cfun)) | |
e1ab7874 | 4938 | { |
fe672ac0 | 4939 | rev_top_order_index_len = last_basic_block_for_fn (cfun); |
e1ab7874 | 4940 | rev_top_order_index = XRESIZEVEC (int, rev_top_order_index, |
4941 | rev_top_order_index_len); | |
4942 | } | |
4943 | ||
a28770e1 | 4944 | postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
e1ab7874 | 4945 | |
4946 | n_blocks = post_order_compute (postorder, true, false); | |
a28770e1 | 4947 | gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks); |
e1ab7874 | 4948 | |
4949 | /* Build reverse function: for each basic block with BB->INDEX == K | |
4950 | rev_top_order_index[K] is its reverse topological sort number. */ | |
4951 | for (i = 0; i < n_blocks; i++) | |
4952 | { | |
4953 | gcc_assert (postorder[i] < rev_top_order_index_len); | |
4954 | rev_top_order_index[postorder[i]] = i; | |
4955 | } | |
4956 | ||
4957 | free (postorder); | |
4958 | } | |
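/* The mapping built above is the inverse of the post order:
   rev_top_order_index[postorder[i]] == i.  Blocks with a greater value
   should go earlier when blocks are ordered topologically, which is what
   bb_top_order_comparator below relies on.  */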
4959 | ||
4960 | /* Clear all flags from insns in BB that could spoil its rescheduling. */ | |
4961 | void | |
4962 | clear_outdated_rtx_info (basic_block bb) | |
4963 | { | |
91a55c11 | 4964 | rtx_insn *insn; |
e1ab7874 | 4965 | |
4966 | FOR_BB_INSNS (bb, insn) | |
4967 | if (INSN_P (insn)) | |
4968 | { | |
4969 | SCHED_GROUP_P (insn) = 0; | |
4970 | INSN_AFTER_STALL_P (insn) = 0; | |
4971 | INSN_SCHED_TIMES (insn) = 0; | |
4972 | EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0; | |
4973 | ||
4974 | /* We cannot use the changed caches, as previously we could ignore | |
48e1416a | 4975 | the LHS dependence due to enabled renaming and transform |
e1ab7874 | 4976 | the expression, and currently we'll be unable to do this. */ |
4977 | htab_empty (INSN_TRANSFORMED_INSNS (insn)); | |
4978 | } | |
4979 | } | |
4980 | ||
4981 | /* Add BB_NOTE to the pool of available basic block notes. */ | |
4982 | static void | |
4983 | return_bb_to_pool (basic_block bb) | |
4984 | { | |
4985 | rtx note = bb_note (bb); | |
4986 | ||
4987 | gcc_assert (NOTE_BASIC_BLOCK (note) == bb | |
4988 | && bb->aux == NULL); | |
4989 | ||
4990 | /* It turns out that current cfg infrastructure does not support | |
4991 | reuse of basic blocks. Don't bother for now. */ | |
f1f41a6c | 4992 | /*bb_note_pool.safe_push (note);*/ |
e1ab7874 | 4993 | } |
4994 | ||
4995 | /* Get a bb_note from pool or return NULL_RTX if pool is empty. */ | |
cef3d8ad | 4996 | static rtx_note * |
e1ab7874 | 4997 | get_bb_note_from_pool (void) |
4998 | { | |
f1f41a6c | 4999 | if (bb_note_pool.is_empty ()) |
cef3d8ad | 5000 | return NULL; |
e1ab7874 | 5001 | else |
5002 | { | |
cef3d8ad | 5003 | rtx_note *note = bb_note_pool.pop (); |
e1ab7874 | 5004 | |
4a57a2e8 | 5005 | SET_PREV_INSN (note) = NULL_RTX; |
5006 | SET_NEXT_INSN (note) = NULL_RTX; | |
e1ab7874 | 5007 | |
5008 | return note; | |
5009 | } | |
5010 | } | |
5011 | ||
5012 | /* Free bb_note_pool. */ | |
5013 | void | |
5014 | free_bb_note_pool (void) | |
5015 | { | |
f1f41a6c | 5016 | bb_note_pool.release (); |
e1ab7874 | 5017 | } |
5018 | ||
5019 | /* Setup scheduler pool and successor structure. */ | |
5020 | void | |
5021 | alloc_sched_pools (void) | |
5022 | { | |
5023 | int succs_size; | |
5024 | ||
5025 | succs_size = MAX_WS + 1; | |
48e1416a | 5026 | succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); |
e1ab7874 | 5027 | succs_info_pool.size = succs_size; |
5028 | succs_info_pool.top = -1; | |
5029 | succs_info_pool.max_top = -1; | |
5030 | ||
48e1416a | 5031 | sched_lists_pool = create_alloc_pool ("sel-sched-lists", |
e1ab7874 | 5032 | sizeof (struct _list_node), 500); |
5033 | } | |
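/* The successor-info pool set up above is a preallocated stack of
   MAX_WS + 1 entries; alloc_succs_info and free_succs_info push and pop
   its entries, and gcc_unreachable () fires if the nesting ever exceeds
   that size (presumably MAX_WS, the maximal lookahead window, bounds the
   nesting depth -- an assumption, not something asserted here).  */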
5034 | ||
5035 | /* Free the pools. */ | |
5036 | void | |
5037 | free_sched_pools (void) | |
5038 | { | |
5039 | int i; | |
48e1416a | 5040 | |
e1ab7874 | 5041 | free_alloc_pool (sched_lists_pool); |
5042 | gcc_assert (succs_info_pool.top == -1); | |
862c1934 | 5043 | for (i = 0; i <= succs_info_pool.max_top; i++) |
e1ab7874 | 5044 | { |
f1f41a6c | 5045 | succs_info_pool.stack[i].succs_ok.release (); |
5046 | succs_info_pool.stack[i].succs_other.release (); | |
5047 | succs_info_pool.stack[i].probs_ok.release (); | |
e1ab7874 | 5048 | } |
5049 | free (succs_info_pool.stack); | |
5050 | } | |
5051 | \f | |
5052 | ||
48e1416a | 5053 | /* Returns a position in RGN where BB can be inserted retaining |
e1ab7874 | 5054 | topological order. */ |
5055 | static int | |
5056 | find_place_to_insert_bb (basic_block bb, int rgn) | |
5057 | { | |
5058 | bool has_preds_outside_rgn = false; | |
5059 | edge e; | |
5060 | edge_iterator ei; | |
48e1416a | 5061 | |
e1ab7874 | 5062 | /* Find whether we have preds outside the region. */ |
5063 | FOR_EACH_EDGE (e, ei, bb->preds) | |
5064 | if (!in_current_region_p (e->src)) | |
5065 | { | |
5066 | has_preds_outside_rgn = true; | |
5067 | break; | |
5068 | } | |
48e1416a | 5069 | |
e1ab7874 | 5070 | /* Recompute the top order -- needed when we have > 1 pred |
5071 | and in case we don't have preds outside. */ | |
5072 | if (flag_sel_sched_pipelining_outer_loops | |
5073 | && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1)) | |
5074 | { | |
5075 | int i, bbi = bb->index, cur_bbi; | |
5076 | ||
5077 | recompute_rev_top_order (); | |
5078 | for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--) | |
5079 | { | |
5080 | cur_bbi = BB_TO_BLOCK (i); | |
48e1416a | 5081 | if (rev_top_order_index[bbi] |
e1ab7874 | 5082 | < rev_top_order_index[cur_bbi]) |
5083 | break; | |
5084 | } | |
48e1416a | 5085 | |
9d75589a | 5086 | /* The loop went one block past the right one, so increase i by one;
e1ab7874 | 5087 | the caller will also add one to the result (bbi += 1), so decrease i to compensate. */
5088 | return (i + 1) - 1; | |
5089 | } | |
5090 | else if (has_preds_outside_rgn) | |
5091 | { | |
5092 | /* This is the case when we generate an extra empty block | |
5093 | to serve as region head during pipelining. */ | |
5094 | e = EDGE_SUCC (bb, 0); | |
5095 | gcc_assert (EDGE_COUNT (bb->succs) == 1 | |
5096 | && in_current_region_p (EDGE_SUCC (bb, 0)->dest) | |
5097 | && (BLOCK_TO_BB (e->dest->index) == 0)); | |
5098 | return -1; | |
5099 | } | |
5100 | ||
5101 | /* We don't have preds outside the region. We should have | |
5102 | a single pred, because the multiple preds case comes from | |
5103 | the pipelining of outer loops, and that is handled above. | |
5104 | Just take the bbi of this single pred. */ | |
5105 | if (EDGE_COUNT (bb->succs) > 0) | |
5106 | { | |
5107 | int pred_bbi; | |
48e1416a | 5108 | |
e1ab7874 | 5109 | gcc_assert (EDGE_COUNT (bb->preds) == 1); |
48e1416a | 5110 | |
e1ab7874 | 5111 | pred_bbi = EDGE_PRED (bb, 0)->src->index; |
5112 | return BLOCK_TO_BB (pred_bbi); | |
5113 | } | |
5114 | else | |
5115 | /* BB has no successors. It is safe to put it at the end. */ | |
5116 | return current_nr_blocks - 1; | |
5117 | } | |
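/* Summarizing the cases above: with outer-loop pipelining and either
   multiple preds or preds outside the region, the place is taken from the
   recomputed reverse topological order; an extra empty region head gets
   -1; otherwise the block goes right after its single predecessor, or
   last when it has no successors.  */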
5118 | ||
5119 | /* Deletes an empty basic block freeing its data. */ | |
5120 | static void | |
5121 | delete_and_free_basic_block (basic_block bb) | |
5122 | { | |
5123 | gcc_assert (sel_bb_empty_p (bb)); | |
5124 | ||
5125 | if (BB_LV_SET (bb)) | |
5126 | free_lv_set (bb); | |
5127 | ||
5128 | bitmap_clear_bit (blocks_to_reschedule, bb->index); | |
5129 | ||
48e1416a | 5130 | /* Can't assert av_set properties because we use sel_remove_bb
5131 | when removing loop preheader from the region. At the point of | |
e1ab7874 | 5132 | removing the preheader we already have deallocated sel_region_bb_info. */ |
5133 | gcc_assert (BB_LV_SET (bb) == NULL | |
5134 | && !BB_LV_SET_VALID_P (bb) | |
5135 | && BB_AV_LEVEL (bb) == 0 | |
5136 | && BB_AV_SET (bb) == NULL); | |
48e1416a | 5137 | |
e1ab7874 | 5138 | delete_basic_block (bb); |
5139 | } | |
5140 | ||
5141 | /* Add BB to the current region and update the region data. */ | |
5142 | static void | |
5143 | add_block_to_current_region (basic_block bb) | |
5144 | { | |
5145 | int i, pos, bbi = -2, rgn; | |
5146 | ||
5147 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5148 | bbi = find_place_to_insert_bb (bb, rgn); | |
5149 | bbi += 1; | |
5150 | pos = RGN_BLOCKS (rgn) + bbi; | |
5151 | ||
5152 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5153 | && ebb_head[bbi] == pos); | |
48e1416a | 5154 | |
e1ab7874 | 5155 | /* Make a place for the new block. */ |
5156 | extend_regions (); | |
5157 | ||
5158 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5159 | BLOCK_TO_BB (rgn_bb_table[i])++; | |
48e1416a | 5160 | |
e1ab7874 | 5161 | memmove (rgn_bb_table + pos + 1, |
5162 | rgn_bb_table + pos, | |
5163 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5164 | ||
5165 | /* Initialize data for BB. */ | |
5166 | rgn_bb_table[pos] = bb->index; | |
5167 | BLOCK_TO_BB (bb->index) = bbi; | |
5168 | CONTAINING_RGN (bb->index) = rgn; | |
5169 | ||
5170 | RGN_NR_BLOCKS (rgn)++; | |
48e1416a | 5171 | |
e1ab7874 | 5172 | for (i = rgn + 1; i <= nr_regions; i++) |
5173 | RGN_BLOCKS (i)++; | |
5174 | } | |
5175 | ||
5176 | /* Remove BB from the current region and update the region data. */ | |
5177 | static void | |
5178 | remove_bb_from_region (basic_block bb) | |
5179 | { | |
5180 | int i, pos, bbi = -2, rgn; | |
5181 | ||
5182 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5183 | bbi = BLOCK_TO_BB (bb->index); | |
5184 | pos = RGN_BLOCKS (rgn) + bbi; | |
5185 | ||
5186 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5187 | && ebb_head[bbi] == pos); | |
5188 | ||
5189 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5190 | BLOCK_TO_BB (rgn_bb_table[i])--; | |
5191 | ||
5192 | memmove (rgn_bb_table + pos, | |
5193 | rgn_bb_table + pos + 1, | |
5194 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5195 | ||
5196 | RGN_NR_BLOCKS (rgn)--; | |
5197 | for (i = rgn + 1; i <= nr_regions; i++) | |
5198 | RGN_BLOCKS (i)--; | |
5199 | } | |
5200 | ||
48e1416a | 5201 | /* Add BB to the current region and update all data. If BB is NULL, add all |
e1ab7874 | 5202 | blocks from last_added_blocks vector. */ |
5203 | static void | |
5204 | sel_add_bb (basic_block bb) | |
5205 | { | |
5206 | /* Extend luids so that new notes will receive zero luids. */ | |
52d7e28c | 5207 | sched_extend_luids (); |
e1ab7874 | 5208 | sched_init_bbs (); |
52d7e28c | 5209 | sel_init_bbs (last_added_blocks); |
e1ab7874 | 5210 | |
48e1416a | 5211 | /* When bb is passed explicitly, the vector should contain |
e1ab7874 | 5212 | a single element equal to bb; otherwise, the vector
5213 | should not be NULL. */ | |
f1f41a6c | 5214 | gcc_assert (last_added_blocks.exists ()); |
48e1416a | 5215 | |
e1ab7874 | 5216 | if (bb != NULL) |
5217 | { | |
f1f41a6c | 5218 | gcc_assert (last_added_blocks.length () == 1 |
5219 | && last_added_blocks[0] == bb); | |
e1ab7874 | 5220 | add_block_to_current_region (bb); |
5221 | ||
5222 | /* We associate creating/deleting data sets with the first insn | |
5223 | appearing / disappearing in the bb. */ | |
5224 | if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL) | |
5225 | create_initial_data_sets (bb); | |
48e1416a | 5226 | |
f1f41a6c | 5227 | last_added_blocks.release (); |
e1ab7874 | 5228 | } |
5229 | else | |
5230 | /* BB is NULL - process LAST_ADDED_BLOCKS instead. */ | |
5231 | { | |
5232 | int i; | |
5233 | basic_block temp_bb = NULL; | |
5234 | ||
48e1416a | 5235 | for (i = 0; |
f1f41a6c | 5236 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5237 | { |
5238 | add_block_to_current_region (bb); | |
5239 | temp_bb = bb; | |
5240 | } | |
5241 | ||
48e1416a | 5242 | /* We need to fetch at least one bb so we know the region |
e1ab7874 | 5243 | to update. */ |
5244 | gcc_assert (temp_bb != NULL); | |
5245 | bb = temp_bb; | |
5246 | ||
f1f41a6c | 5247 | last_added_blocks.release (); |
e1ab7874 | 5248 | } |
5249 | ||
5250 | rgn_setup_region (CONTAINING_RGN (bb->index)); | |
5251 | } | |
5252 | ||
48e1416a | 5253 | /* Remove BB from the current region and update all data. |
e1ab7874 | 5254 | If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */ | |
5255 | static void | |
5256 | sel_remove_bb (basic_block bb, bool remove_from_cfg_p) | |
5257 | { | |
0424f393 | 5258 | unsigned idx = bb->index; |
5259 | ||
e1ab7874 | 5260 | gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX); |
48e1416a | 5261 | |
e1ab7874 | 5262 | remove_bb_from_region (bb); |
5263 | return_bb_to_pool (bb); | |
0424f393 | 5264 | bitmap_clear_bit (blocks_to_reschedule, idx); |
48e1416a | 5265 | |
e1ab7874 | 5266 | if (remove_from_cfg_p) |
1a5dbaab | 5267 | { |
5268 | basic_block succ = single_succ (bb); | |
5269 | delete_and_free_basic_block (bb); | |
5270 | set_immediate_dominator (CDI_DOMINATORS, succ, | |
5271 | recompute_dominator (CDI_DOMINATORS, succ)); | |
5272 | } | |
e1ab7874 | 5273 | |
0424f393 | 5274 | rgn_setup_region (CONTAINING_RGN (idx)); |
e1ab7874 | 5275 | } |
5276 | ||
5277 | /* Concatenate info of EMPTY_BB to info of MERGE_BB. */ | |
5278 | static void | |
5279 | move_bb_info (basic_block merge_bb, basic_block empty_bb) | |
5280 | { | |
ef4cf572 | 5281 | if (in_current_region_p (merge_bb)) |
5282 | concat_note_lists (BB_NOTE_LIST (empty_bb), | |
e97a173d | 5283 | &BB_NOTE_LIST (merge_bb)); |
5284 | BB_NOTE_LIST (empty_bb) = NULL; | |
e1ab7874 | 5285 | |
5286 | } | |
5287 | ||
e1ab7874 | 5288 | /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from |
5289 | region, but keep it in CFG. */ | |
5290 | static void | |
5291 | remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p) | |
5292 | { | |
5293 | /* The block should contain just a note or a label. | |
5294 | We try to check whether it is unused below. */ | |
5295 | gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb) | |
5296 | || LABEL_P (BB_HEAD (empty_bb))); | |
5297 | ||
5298 | /* If basic block has predecessors or successors, redirect them. */ | |
5299 | if (remove_from_cfg_p | |
5300 | && (EDGE_COUNT (empty_bb->preds) > 0 | |
5301 | || EDGE_COUNT (empty_bb->succs) > 0)) | |
5302 | { | |
5303 | basic_block pred; | |
5304 | basic_block succ; | |
5305 | ||
5306 | /* We need to init PRED and SUCC before redirecting edges. */ | |
5307 | if (EDGE_COUNT (empty_bb->preds) > 0) | |
5308 | { | |
5309 | edge e; | |
5310 | ||
5311 | gcc_assert (EDGE_COUNT (empty_bb->preds) == 1); | |
5312 | ||
5313 | e = EDGE_PRED (empty_bb, 0); | |
5314 | gcc_assert (e->src == empty_bb->prev_bb | |
5315 | && (e->flags & EDGE_FALLTHRU)); | |
5316 | ||
5317 | pred = empty_bb->prev_bb; | |
5318 | } | |
5319 | else | |
5320 | pred = NULL; | |
5321 | ||
5322 | if (EDGE_COUNT (empty_bb->succs) > 0) | |
5323 | { | |
5324 | /* We do not check fallthruness here as above, because | |
5325 | after removing a jump the edge may actually not be fallthru. */ | |
5326 | gcc_assert (EDGE_COUNT (empty_bb->succs) == 1); | |
5327 | succ = EDGE_SUCC (empty_bb, 0)->dest; | |
5328 | } | |
5329 | else | |
5330 | succ = NULL; | |
5331 | ||
5332 | if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL) | |
5333 | { | |
5334 | edge e = EDGE_PRED (empty_bb, 0); | |
5335 | ||
5336 | if (e->flags & EDGE_FALLTHRU) | |
5337 | redirect_edge_succ_nodup (e, succ); | |
5338 | else | |
5339 | sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ); | |
5340 | } | |
5341 | ||
5342 | if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL) | |
5343 | { | |
5344 | edge e = EDGE_SUCC (empty_bb, 0); | |
5345 | ||
5346 | if (find_edge (pred, e->dest) == NULL) | |
5347 | redirect_edge_pred (e, pred); | |
5348 | } | |
5349 | } | |
5350 | ||
5351 | /* Finish removing. */ | |
5352 | sel_remove_bb (empty_bb, remove_from_cfg_p); | |
5353 | } | |
5354 | ||
48e1416a | 5355 | /* An implementation of create_basic_block hook, which additionally updates |
e1ab7874 | 5356 | per-bb data structures. */ |
5357 | static basic_block | |
5358 | sel_create_basic_block (void *headp, void *endp, basic_block after) | |
5359 | { | |
5360 | basic_block new_bb; | |
cef3d8ad | 5361 | rtx_note *new_bb_note; |
48e1416a | 5362 | |
5363 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
f1f41a6c | 5364 | || !last_added_blocks.exists ()); |
e1ab7874 | 5365 | |
5366 | new_bb_note = get_bb_note_from_pool (); | |
5367 | ||
5368 | if (new_bb_note == NULL_RTX) | |
5369 | new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after); | |
5370 | else | |
5371 | { | |
3c3f97b4 | 5372 | new_bb = create_basic_block_structure ((rtx_insn *) headp, |
5373 | (rtx_insn *) endp, | |
e1ab7874 | 5374 | new_bb_note, after); |
5375 | new_bb->aux = NULL; | |
5376 | } | |
5377 | ||
f1f41a6c | 5378 | last_added_blocks.safe_push (new_bb); |
e1ab7874 | 5379 | |
5380 | return new_bb; | |
5381 | } | |
5382 | ||
5383 | /* Implement sched_init_only_bb (). */ | |
5384 | static void | |
5385 | sel_init_only_bb (basic_block bb, basic_block after) | |
5386 | { | |
5387 | gcc_assert (after == NULL); | |
5388 | ||
5389 | extend_regions (); | |
5390 | rgn_make_new_region_out_of_new_block (bb); | |
5391 | } | |
5392 | ||
5393 | /* Update the latch when we've split or merged it from FROM block to TO. | |
5394 | This should be checked for all outer loops, too. */ | |
5395 | static void | |
5396 | change_loops_latches (basic_block from, basic_block to) | |
5397 | { | |
5398 | gcc_assert (from != to); | |
5399 | ||
5400 | if (current_loop_nest) | |
5401 | { | |
5402 | struct loop *loop; | |
5403 | ||
5404 | for (loop = current_loop_nest; loop; loop = loop_outer (loop)) | |
5405 | if (considered_for_pipelining_p (loop) && loop->latch == from) | |
5406 | { | |
5407 | gcc_assert (loop == current_loop_nest); | |
5408 | loop->latch = to; | |
5409 | gcc_assert (loop_latch_edge (loop)); | |
5410 | } | |
5411 | } | |
5412 | } | |
5413 | ||
48e1416a | 5414 | /* Splits BB into two basic blocks, adding it to the region and extending
e1ab7874 | 5415 | per-bb data structures. Returns the newly created bb. */ |
5416 | static basic_block | |
5417 | sel_split_block (basic_block bb, rtx after) | |
5418 | { | |
5419 | basic_block new_bb; | |
5420 | insn_t insn; | |
5421 | ||
5422 | new_bb = sched_split_block_1 (bb, after); | |
5423 | sel_add_bb (new_bb); | |
5424 | ||
5425 | /* This should be called after sel_add_bb, because this uses | |
48e1416a | 5426 | CONTAINING_RGN for the new block, which is not yet initialized. |
e1ab7874 | 5427 | FIXME: this function may be a no-op now. */ |
5428 | change_loops_latches (bb, new_bb); | |
5429 | ||
5430 | /* Update ORIG_BB_INDEX for insns moved into the new block. */ | |
5431 | FOR_BB_INSNS (new_bb, insn) | |
5432 | if (INSN_P (insn)) | |
5433 | EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index; | |
5434 | ||
5435 | if (sel_bb_empty_p (bb)) | |
5436 | { | |
5437 | gcc_assert (!sel_bb_empty_p (new_bb)); | |
5438 | ||
5439 | /* NEW_BB has data sets that need to be updated and BB holds | |
5440 | data sets that should be removed. Exchange these data sets | |
5441 | so that we won't lose BB's valid data sets. */ | |
5442 | exchange_data_sets (new_bb, bb); | |
5443 | free_data_sets (bb); | |
5444 | } | |
5445 | ||
5446 | if (!sel_bb_empty_p (new_bb) | |
5447 | && bitmap_bit_p (blocks_to_reschedule, bb->index)) | |
5448 | bitmap_set_bit (blocks_to_reschedule, new_bb->index); | |
5449 | ||
5450 | return new_bb; | |
5451 | } | |
5452 | ||
5453 | /* If BB ends with a jump insn whose ID is bigger than PREV_MAX_UID, return it. | |
5454 | Otherwise returns NULL. */ | |
04d073df | 5455 | static rtx_insn * |
e1ab7874 | 5456 | check_for_new_jump (basic_block bb, int prev_max_uid) |
5457 | { | |
04d073df | 5458 | rtx_insn *end; |
e1ab7874 | 5459 | |
5460 | end = sel_bb_end (bb); | |
5461 | if (end && INSN_UID (end) >= prev_max_uid) | |
5462 | return end; | |
5463 | return NULL; | |
5464 | } | |
5465 | ||
48e1416a | 5466 | /* Look for a new jump either in the FROM_BB block or in the newly created JUMP_BB block.
e1ab7874 | 5467 | New means having UID at least equal to PREV_MAX_UID. */ |
04d073df | 5468 | static rtx_insn * |
e1ab7874 | 5469 | find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid) |
5470 | { | |
04d073df | 5471 | rtx_insn *jump; |
e1ab7874 | 5472 | |
5473 | /* Return immediately if no new insns were emitted. */ | |
5474 | if (get_max_uid () == prev_max_uid) | |
5475 | return NULL; | |
48e1416a | 5476 | |
e1ab7874 | 5477 | /* Now check both blocks for new jumps. There will only ever be one. */ | |
5478 | if ((jump = check_for_new_jump (from, prev_max_uid))) | |
5479 | return jump; | |
5480 | ||
5481 | if (jump_bb != NULL | |
5482 | && (jump = check_for_new_jump (jump_bb, prev_max_uid))) | |
5483 | return jump; | |
5484 | return NULL; | |
5485 | } | |
5486 | ||
5487 | /* Splits E and adds the newly created basic block to the current region. | |
5488 | Returns this basic block. */ | |
5489 | basic_block | |
5490 | sel_split_edge (edge e) | |
5491 | { | |
5492 | basic_block new_bb, src, other_bb = NULL; | |
5493 | int prev_max_uid; | |
04d073df | 5494 | rtx_insn *jump; |
e1ab7874 | 5495 | |
5496 | src = e->src; | |
5497 | prev_max_uid = get_max_uid (); | |
5498 | new_bb = split_edge (e); | |
5499 | ||
48e1416a | 5500 | if (flag_sel_sched_pipelining_outer_loops |
e1ab7874 | 5501 | && current_loop_nest) |
5502 | { | |
5503 | int i; | |
5504 | basic_block bb; | |
5505 | ||
48e1416a | 5506 | /* Some of the basic blocks might not have been added to the loop. |
e1ab7874 | 5507 | Add them here, until this is fixed in force_fallthru. */ |
48e1416a | 5508 | for (i = 0; |
f1f41a6c | 5509 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5510 | if (!bb->loop_father) |
5511 | { | |
5512 | add_bb_to_loop (bb, e->dest->loop_father); | |
5513 | ||
5514 | gcc_assert (!other_bb && (new_bb->index != bb->index)); | |
5515 | other_bb = bb; | |
5516 | } | |
5517 | } | |
5518 | ||
5519 | /* Add all last_added_blocks to the region. */ | |
5520 | sel_add_bb (NULL); | |
5521 | ||
5522 | jump = find_new_jump (src, new_bb, prev_max_uid); | |
5523 | if (jump) | |
5524 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5525 | ||
5526 | /* Put the correct lv set on this block. */ | |
5527 | if (other_bb && !sel_bb_empty_p (other_bb)) | |
5528 | compute_live (sel_bb_head (other_bb)); | |
5529 | ||
5530 | return new_bb; | |
5531 | } | |
5532 | ||
5533 | /* Implement sched_create_empty_bb (). */ | |
5534 | static basic_block | |
5535 | sel_create_empty_bb (basic_block after) | |
5536 | { | |
5537 | basic_block new_bb; | |
5538 | ||
5539 | new_bb = sched_create_empty_bb_1 (after); | |
5540 | ||
5541 | /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit | |
5542 | later. */ | |
f1f41a6c | 5543 | gcc_assert (last_added_blocks.length () == 1 |
5544 | && last_added_blocks[0] == new_bb); | |
e1ab7874 | 5545 | |
f1f41a6c | 5546 | last_added_blocks.release (); |
e1ab7874 | 5547 | return new_bb; |
5548 | } | |
5549 | ||
5550 | /* Implement sched_create_recovery_block. ORIG_INSN is where the block | |
5551 | will be split to insert a check. */ | |
5552 | basic_block | |
5553 | sel_create_recovery_block (insn_t orig_insn) | |
5554 | { | |
5555 | basic_block first_bb, second_bb, recovery_block; | |
5556 | basic_block before_recovery = NULL; | |
04d073df | 5557 | rtx_insn *jump; |
e1ab7874 | 5558 | |
5559 | first_bb = BLOCK_FOR_INSN (orig_insn); | |
5560 | if (sel_bb_end_p (orig_insn)) | |
5561 | { | |
5562 | /* Avoid introducing an empty block while splitting. */ | |
5563 | gcc_assert (single_succ_p (first_bb)); | |
5564 | second_bb = single_succ (first_bb); | |
5565 | } | |
5566 | else | |
5567 | second_bb = sched_split_block (first_bb, orig_insn); | |
5568 | ||
5569 | recovery_block = sched_create_recovery_block (&before_recovery); | |
5570 | if (before_recovery) | |
34154e27 | 5571 | copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5572 | |
5573 | gcc_assert (sel_bb_empty_p (recovery_block)); | |
5574 | sched_create_recovery_edges (first_bb, recovery_block, second_bb); | |
5575 | if (current_loops != NULL) | |
5576 | add_bb_to_loop (recovery_block, first_bb->loop_father); | |
48e1416a | 5577 | |
e1ab7874 | 5578 | sel_add_bb (recovery_block); |
48e1416a | 5579 | |
e1ab7874 | 5580 | jump = BB_END (recovery_block); |
5581 | gcc_assert (sel_bb_head (recovery_block) == jump); | |
5582 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5583 | ||
5584 | return recovery_block; | |
5585 | } | |
5586 | ||
5587 | /* Merge basic block B into basic block A. */ | |
0424f393 | 5588 | static void |
e1ab7874 | 5589 | sel_merge_blocks (basic_block a, basic_block b) |
5590 | { | |
0424f393 | 5591 | gcc_assert (sel_bb_empty_p (b) |
5592 | && EDGE_COUNT (b->preds) == 1 | |
5593 | && EDGE_PRED (b, 0)->src == b->prev_bb); | |
e1ab7874 | 5594 | |
0424f393 | 5595 | move_bb_info (b->prev_bb, b); |
5596 | remove_empty_bb (b, false); | |
5597 | merge_blocks (a, b); | |
e1ab7874 | 5598 | change_loops_latches (b, a); |
5599 | } | |
5600 | ||
5601 | /* A wrapper for redirect_edge_and_branch_force, which also initializes | |
8d1881f5 | 5602 | data structures for the possibly created bb and insns. */
e1ab7874 | 5603 | void |
5604 | sel_redirect_edge_and_branch_force (edge e, basic_block to) | |
5605 | { | |
1a5dbaab | 5606 | basic_block jump_bb, src, orig_dest = e->dest; |
e1ab7874 | 5607 | int prev_max_uid; |
04d073df | 5608 | rtx_insn *jump; |
8d1881f5 | 5609 | int old_seqno = -1; |
48e1416a | 5610 | |
1a5dbaab | 5611 | /* This function is now used only for bookkeeping code creation, where |
5612 | we'll never get the single pred of orig_dest block and thus will not | |
5613 | hit unreachable blocks when updating dominator info. */ | |
5614 | gcc_assert (!sel_bb_empty_p (e->src) | |
5615 | && !single_pred_p (orig_dest)); | |
e1ab7874 | 5616 | src = e->src; |
5617 | prev_max_uid = get_max_uid (); | |
8d1881f5 | 5618 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5619 | when the conditional jump being redirected may become unconditional. */ | |
5620 | if (any_condjump_p (BB_END (src)) | |
5621 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5622 | old_seqno = INSN_SEQNO (BB_END (src)); | |
e1ab7874 | 5623 | |
8d1881f5 | 5624 | jump_bb = redirect_edge_and_branch_force (e, to); |
e1ab7874 | 5625 | if (jump_bb != NULL) |
5626 | sel_add_bb (jump_bb); | |
5627 | ||
5628 | /* As of now, this function cannot be used in a way that spoils the loop | |
5629 | structure, thus we don't bother updating anything. But check it to be sure. */ | |
5630 | if (current_loop_nest | |
5631 | && pipelining_p) | |
5632 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
48e1416a | 5633 | |
e1ab7874 | 5634 | jump = find_new_jump (src, jump_bb, prev_max_uid); |
5635 | if (jump) | |
8d1881f5 | 5636 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, |
5637 | old_seqno); | |
1a5dbaab | 5638 | set_immediate_dominator (CDI_DOMINATORS, to, |
5639 | recompute_dominator (CDI_DOMINATORS, to)); | |
5640 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5641 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
e1ab7874 | 5642 | } |
5643 | ||
93919afc | 5644 | /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by |
5645 | redirected edge are in reverse topological order. */ | |
5646 | bool | |
e1ab7874 | 5647 | sel_redirect_edge_and_branch (edge e, basic_block to) |
5648 | { | |
5649 | bool latch_edge_p; | |
1a5dbaab | 5650 | basic_block src, orig_dest = e->dest; |
e1ab7874 | 5651 | int prev_max_uid; |
04d073df | 5652 | rtx_insn *jump; |
df6266b9 | 5653 | edge redirected; |
93919afc | 5654 | bool recompute_toporder_p = false; |
1a5dbaab | 5655 | bool maybe_unreachable = single_pred_p (orig_dest); |
8d1881f5 | 5656 | int old_seqno = -1; |
e1ab7874 | 5657 | |
5658 | latch_edge_p = (pipelining_p | |
5659 | && current_loop_nest | |
5660 | && e == loop_latch_edge (current_loop_nest)); | |
5661 | ||
5662 | src = e->src; | |
5663 | prev_max_uid = get_max_uid (); | |
df6266b9 | 5664 | |
8d1881f5 | 5665 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5666 | when the conditional jump being redirected may become unconditional. */ | |
5667 | if (any_condjump_p (BB_END (src)) | |
5668 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5669 | old_seqno = INSN_SEQNO (BB_END (src)); | |
5670 | ||
df6266b9 | 5671 | redirected = redirect_edge_and_branch (e, to); |
5672 | ||
f1f41a6c | 5673 | gcc_assert (redirected && !last_added_blocks.exists ()); |
e1ab7874 | 5674 | |
5675 | /* When we've redirected a latch edge, update the header. */ | |
5676 | if (latch_edge_p) | |
5677 | { | |
5678 | current_loop_nest->header = to; | |
5679 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
5680 | } | |
5681 | ||
93919afc | 5682 | /* In rare situations, the topological relation between the blocks connected |
5683 | by the redirected edge can change (see PR42245 for an example). Update | |
5684 | block_to_bb/bb_to_block. */ | |
5685 | if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index) | |
5686 | && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index)) | |
5687 | recompute_toporder_p = true; | |
5688 | ||
e1ab7874 | 5689 | jump = find_new_jump (src, NULL, prev_max_uid); |
5690 | if (jump) | |
8d1881f5 | 5691 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno); |
93919afc | 5692 | |
1a5dbaab | 5693 | /* Only update dominator info when we don't have unreachable blocks. |
5694 | Otherwise we'll update in maybe_tidy_empty_bb. */ | |
5695 | if (!maybe_unreachable) | |
5696 | { | |
5697 | set_immediate_dominator (CDI_DOMINATORS, to, | |
5698 | recompute_dominator (CDI_DOMINATORS, to)); | |
5699 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5700 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
5701 | } | |
93919afc | 5702 | return recompute_toporder_p; |
e1ab7874 | 5703 | } |
5704 | ||
5705 | /* This variable holds the cfg hooks used by the selective scheduler. */ | |
5706 | static struct cfg_hooks sel_cfg_hooks; | |
5707 | ||
5708 | /* Register sel-sched cfg hooks. */ | |
5709 | void | |
5710 | sel_register_cfg_hooks (void) | |
5711 | { | |
5712 | sched_split_block = sel_split_block; | |
5713 | ||
5714 | orig_cfg_hooks = get_cfg_hooks (); | |
5715 | sel_cfg_hooks = orig_cfg_hooks; | |
5716 | ||
5717 | sel_cfg_hooks.create_basic_block = sel_create_basic_block; | |
5718 | ||
5719 | set_cfg_hooks (sel_cfg_hooks); | |
5720 | ||
5721 | sched_init_only_bb = sel_init_only_bb; | |
5722 | sched_split_block = sel_split_block; | |
5723 | sched_create_empty_bb = sel_create_empty_bb; | |
5724 | } | |
5725 | ||
5726 | /* Unregister sel-sched cfg hooks. */ | |
5727 | void | |
5728 | sel_unregister_cfg_hooks (void) | |
5729 | { | |
5730 | sched_create_empty_bb = NULL; | |
5731 | sched_split_block = NULL; | |
5732 | sched_init_only_bb = NULL; | |
5733 | ||
5734 | set_cfg_hooks (orig_cfg_hooks); | |
5735 | } | |
5736 | \f | |
5737 | ||
5738 | /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, | |
5739 | LABEL is where this jump should be directed. */ | |
3aaa3eec | 5740 | rtx_insn * |
e1ab7874 | 5741 | create_insn_rtx_from_pattern (rtx pattern, rtx label) |
5742 | { | |
3aaa3eec | 5743 | rtx_insn *insn_rtx; |
e1ab7874 | 5744 | |
5745 | gcc_assert (!INSN_P (pattern)); | |
5746 | ||
5747 | start_sequence (); | |
5748 | ||
5749 | if (label == NULL_RTX) | |
5750 | insn_rtx = emit_insn (pattern); | |
9845d120 | 5751 | else if (DEBUG_INSN_P (label)) |
5752 | insn_rtx = emit_debug_insn (pattern); | |
e1ab7874 | 5753 | else |
5754 | { | |
5755 | insn_rtx = emit_jump_insn (pattern); | |
5756 | JUMP_LABEL (insn_rtx) = label; | |
5757 | ++LABEL_NUSES (label); | |
5758 | } | |
5759 | ||
5760 | end_sequence (); | |
5761 | ||
52d7e28c | 5762 | sched_extend_luids (); |
e1ab7874 | 5763 | sched_extend_target (); |
5764 | sched_deps_init (false); | |
5765 | ||
5766 | /* Initialize INSN_CODE now. */ | |
5767 | recog_memoized (insn_rtx); | |
5768 | return insn_rtx; | |
5769 | } | |
5770 | ||
5771 | /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn | |
5772 | must not be clonable. */ | |
5773 | vinsn_t | |
2f3c9801 | 5774 | create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p) |
e1ab7874 | 5775 | { |
5776 | gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx)); | |
5777 | ||
5778 | /* If VINSN_TYPE is not USE, retain its uniqueness. */ | |
5779 | return vinsn_create (insn_rtx, force_unique_p); | |
5780 | } | |
5781 | ||
5782 | /* Create a copy of INSN_RTX. */ | |
3aaa3eec | 5783 | rtx_insn * |
e1ab7874 | 5784 | create_copy_of_insn_rtx (rtx insn_rtx) |
5785 | { | |
3aaa3eec | 5786 | rtx_insn *res; |
5787 | rtx link; | |
e1ab7874 | 5788 | |
9845d120 | 5789 | if (DEBUG_INSN_P (insn_rtx)) |
5790 | return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5791 | insn_rtx); | |
5792 | ||
e1ab7874 | 5793 | gcc_assert (NONJUMP_INSN_P (insn_rtx)); |
5794 | ||
5795 | res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5796 | NULL_RTX); | |
114c1eb1 | 5797 | |
5798 | /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND | |
5799 | since mark_jump_label will make them. REG_LABEL_TARGETs are created | |
5800 | there too, but are supposed to be sticky, so we copy them. */ | |
5801 | for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1)) | |
5802 | if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND | |
5803 | && REG_NOTE_KIND (link) != REG_EQUAL | |
5804 | && REG_NOTE_KIND (link) != REG_EQUIV) | |
5805 | { | |
5806 | if (GET_CODE (link) == EXPR_LIST) | |
5807 | add_reg_note (res, REG_NOTE_KIND (link), | |
5808 | copy_insn_1 (XEXP (link, 0))); | |
5809 | else | |
5810 | add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0)); | |
5811 | } | |
5812 | ||
e1ab7874 | 5813 | return res; |
5814 | } | |
5815 | ||
5816 | /* Change vinsn field of EXPR to hold NEW_VINSN. */ | |
5817 | void | |
5818 | change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) | |
5819 | { | |
5820 | vinsn_detach (EXPR_VINSN (expr)); | |
5821 | ||
5822 | EXPR_VINSN (expr) = new_vinsn; | |
5823 | vinsn_attach (new_vinsn); | |
5824 | } | |
5825 | ||
5826 | /* Helpers for global init. */ | |
5827 | /* This structure is used so that we can call the existing bundling mechanism | |
5828 | and calculate insn priorities. */ | |
48e1416a | 5829 | static struct haifa_sched_info sched_sel_haifa_sched_info = |
e1ab7874 | 5830 | { |
5831 | NULL, /* init_ready_list */ | |
5832 | NULL, /* can_schedule_ready_p */ | |
5833 | NULL, /* schedule_more_p */ | |
5834 | NULL, /* new_ready */ | |
5835 | NULL, /* rgn_rank */ | |
5836 | sel_print_insn, /* rgn_print_insn */ | |
5837 | contributes_to_priority, | |
4db82bc9 | 5838 | NULL, /* insn_finishes_block_p */ |
e1ab7874 | 5839 | |
5840 | NULL, NULL, | |
5841 | NULL, NULL, | |
5842 | 0, 0, | |
5843 | ||
5844 | NULL, /* add_remove_insn */ | |
5845 | NULL, /* begin_schedule_ready */ | |
d2412f57 | 5846 | NULL, /* begin_move_insn */ |
e1ab7874 | 5847 | NULL, /* advance_target_bb */ |
e2f4a6ff | 5848 | |
5849 | NULL, | |
5850 | NULL, | |
5851 | ||
e1ab7874 | 5852 | SEL_SCHED | NEW_BBS |
5853 | }; | |
5854 | ||
5855 | /* Setup special insns used in the scheduler. */ | |
48e1416a | 5856 | void |
e1ab7874 | 5857 | setup_nop_and_exit_insns (void) |
5858 | { | |
5859 | gcc_assert (nop_pattern == NULL_RTX | |
5860 | && exit_insn == NULL_RTX); | |
5861 | ||
bc9cb5ed | 5862 | nop_pattern = constm1_rtx; |
e1ab7874 | 5863 | |
5864 | start_sequence (); | |
5865 | emit_insn (nop_pattern); | |
5866 | exit_insn = get_insns (); | |
5867 | end_sequence (); | |
34154e27 | 5868 | set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5869 | } |
5870 | ||
5871 | /* Free special insns used in the scheduler. */ | |
5872 | void | |
5873 | free_nop_and_exit_insns (void) | |
5874 | { | |
179c282d | 5875 | exit_insn = NULL; |
e1ab7874 | 5876 | nop_pattern = NULL_RTX; |
5877 | } | |
5878 | ||
5879 | /* Setup a special vinsn used in new insns initialization. */ | |
5880 | void | |
5881 | setup_nop_vinsn (void) | |
5882 | { | |
5883 | nop_vinsn = vinsn_create (exit_insn, false); | |
5884 | vinsn_attach (nop_vinsn); | |
5885 | } | |
5886 | ||
5887 | /* Free a special vinsn used in new insns initialization. */ | |
5888 | void | |
5889 | free_nop_vinsn (void) | |
5890 | { | |
5891 | gcc_assert (VINSN_COUNT (nop_vinsn) == 1); | |
5892 | vinsn_detach (nop_vinsn); | |
5893 | nop_vinsn = NULL; | |
5894 | } | |
5895 | ||
5896 | /* Call a set_sched_flags hook. */ | |
5897 | void | |
5898 | sel_set_sched_flags (void) | |
5899 | { | |
48e1416a | 5900 | /* ??? This means that set_sched_flags was called, and we decided to
e1ab7874 | 5901 | support speculation. However, set_sched_flags also modifies flags |
48e1416a | 5902 | on current_sched_info, doing this only at global init. And we |
e1ab7874 | 5903 | sometimes change c_s_i later. So put the correct flags again. */ |
5904 | if (spec_info && targetm.sched.set_sched_flags) | |
5905 | targetm.sched.set_sched_flags (spec_info); | |
5906 | } | |
5907 | ||
5908 | /* Setup pointers to global sched info structures. */ | |
5909 | void | |
5910 | sel_setup_sched_infos (void) | |
5911 | { | |
5912 | rgn_setup_common_sched_info (); | |
5913 | ||
5914 | memcpy (&sel_common_sched_info, common_sched_info, | |
5915 | sizeof (sel_common_sched_info)); | |
5916 | ||
5917 | sel_common_sched_info.fix_recovery_cfg = NULL; | |
5918 | sel_common_sched_info.add_block = NULL; | |
5919 | sel_common_sched_info.estimate_number_of_insns | |
5920 | = sel_estimate_number_of_insns; | |
5921 | sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn; | |
5922 | sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS; | |
5923 | ||
5924 | common_sched_info = &sel_common_sched_info; | |
5925 | ||
5926 | current_sched_info = &sched_sel_haifa_sched_info; | |
48e1416a | 5927 | current_sched_info->sched_max_insns_priority = |
e1ab7874 | 5928 | get_rgn_sched_max_insns_priority (); |
48e1416a | 5929 | |
e1ab7874 | 5930 | sel_set_sched_flags (); |
5931 | } | |
5932 | \f | |
5933 | ||
5934 | /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX, | |
5935 | and *BB_ORD_INDEX is incremented afterwards. */ | |
5936 | static void | |
5937 | sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn) | |
5938 | { | |
5939 | RGN_NR_BLOCKS (rgn) += 1; | |
5940 | RGN_DONT_CALC_DEPS (rgn) = 0; | |
5941 | RGN_HAS_REAL_EBB (rgn) = 0; | |
5942 | CONTAINING_RGN (bb->index) = rgn; | |
5943 | BLOCK_TO_BB (bb->index) = *bb_ord_index; | |
5944 | rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index; | |
5945 | (*bb_ord_index)++; | |
5946 | ||
5947 | /* FIXME: it is true only when not scheduling ebbs. */ | |
5948 | RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn); | |
5949 | } | |
5950 | ||
5951 | /* Functions to support pipelining of outer loops. */ | |
5952 | ||
5953 | /* Creates a new empty region and returns its number. */ | |
5954 | static int | |
5955 | sel_create_new_region (void) | |
5956 | { | |
5957 | int new_rgn_number = nr_regions; | |
5958 | ||
5959 | RGN_NR_BLOCKS (new_rgn_number) = 0; | |
5960 | ||
5961 | /* FIXME: This will work only when EBBs are not created. */ | |
5962 | if (new_rgn_number != 0) | |
48e1416a | 5963 | RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + |
e1ab7874 | 5964 | RGN_NR_BLOCKS (new_rgn_number - 1); |
5965 | else | |
5966 | RGN_BLOCKS (new_rgn_number) = 0; | |
5967 | ||
5968 | /* Set the blocks of the next region so the other functions may | |
5969 | calculate the number of blocks in the region. */ | |
48e1416a | 5970 | RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + |
e1ab7874 | 5971 | RGN_NR_BLOCKS (new_rgn_number); |
5972 | ||
5973 | nr_regions++; | |
5974 | ||
5975 | return new_rgn_number; | |
5976 | } | |
5977 | ||
5978 | /* If X has a smaller topological sort number than Y, returns -1; | |
5979 | if greater, returns 1. */ | |
5980 | static int | |
5981 | bb_top_order_comparator (const void *x, const void *y) | |
5982 | { | |
5983 | basic_block bb1 = *(const basic_block *) x; | |
5984 | basic_block bb2 = *(const basic_block *) y; | |
5985 | ||
48e1416a | 5986 | gcc_assert (bb1 == bb2 |
5987 | || rev_top_order_index[bb1->index] | |
e1ab7874 | 5988 | != rev_top_order_index[bb2->index]); |
5989 | ||
5990 | /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so | |
5991 | bbs with greater number should go earlier. */ | |
5992 | if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index]) | |
5993 | return -1; | |
5994 | else | |
5995 | return 1; | |
5996 | } | |
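/* This comparator is handed to get_loop_body_in_custom_order in
   make_region_from_loop below, so that the loop body is laid out in
   topological order before being turned into a region.  */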
5997 | ||
48e1416a | 5998 | /* Create a region for LOOP and return its number. If we don't want |
e1ab7874 | 5999 | to pipeline LOOP, return -1. */ |
6000 | static int | |
6001 | make_region_from_loop (struct loop *loop) | |
6002 | { | |
6003 | unsigned int i; | |
6004 | int new_rgn_number = -1; | |
6005 | struct loop *inner; | |
6006 | ||
6007 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6008 | int bb_ord_index = 0; | |
6009 | basic_block *loop_blocks; | |
6010 | basic_block preheader_block; | |
6011 | ||
48e1416a | 6012 | if (loop->num_nodes |
e1ab7874 | 6013 | > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS)) |
6014 | return -1; | |
48e1416a | 6015 | |
e1ab7874 | 6016 | /* Don't pipeline loops whose latch belongs to some of its inner loops. */ |
6017 | for (inner = loop->inner; inner; inner = inner->inner) | |
6018 | if (flow_bb_inside_loop_p (inner, loop->latch)) | |
6019 | return -1; | |
6020 | ||
6021 | loop->ninsns = num_loop_insns (loop); | |
6022 | if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS)) | |
6023 | return -1; | |
6024 | ||
6025 | loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator); | |
6026 | ||
6027 | for (i = 0; i < loop->num_nodes; i++) | |
6028 | if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP) | |
6029 | { | |
6030 | free (loop_blocks); | |
6031 | return -1; | |
6032 | } | |
6033 | ||
6034 | preheader_block = loop_preheader_edge (loop)->src; | |
6035 | gcc_assert (preheader_block); | |
6036 | gcc_assert (loop_blocks[0] == loop->header); | |
6037 | ||
6038 | new_rgn_number = sel_create_new_region (); | |
6039 | ||
6040 | sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number); | |
08b7917c | 6041 | bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index); |
e1ab7874 | 6042 | |
6043 | for (i = 0; i < loop->num_nodes; i++) | |
6044 | { | |
6045 | /* Add only those blocks that haven't been scheduled in the inner loop. | |
6046 | The exception is the basic blocks with bookkeeping code - they should | |
48e1416a | 6047 | be added to the region (and they actually don't belong to the loop |
e1ab7874 | 6048 | body, but to the region containing that loop body). */ |
6049 | ||
6050 | gcc_assert (new_rgn_number >= 0); | |
6051 | ||
08b7917c | 6052 | if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index)) |
e1ab7874 | 6053 | { |
48e1416a | 6054 | sel_add_block_to_region (loop_blocks[i], &bb_ord_index, |
e1ab7874 | 6055 | new_rgn_number); |
08b7917c | 6056 | bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index); |
e1ab7874 | 6057 | } |
6058 | } | |
6059 | ||
6060 | free (loop_blocks); | |
6061 | MARK_LOOP_FOR_PIPELINING (loop); | |
6062 | ||
6063 | return new_rgn_number; | |
6064 | } | |
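/* The resulting region thus starts with the loop preheader, followed by
   the loop body in topological order, skipping blocks that were already
   placed into inner-loop regions.  */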
6065 | ||
6066 | /* Create a new region from preheader blocks LOOP_BLOCKS. */ | |
6067 | void | |
f1f41a6c | 6068 | make_region_from_loop_preheader (vec<basic_block> *&loop_blocks) |
e1ab7874 | 6069 | { |
6070 | unsigned int i; | |
6071 | int new_rgn_number = -1; | |
6072 | basic_block bb; | |
6073 | ||
6074 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6075 | int bb_ord_index = 0; | |
6076 | ||
6077 | new_rgn_number = sel_create_new_region (); | |
6078 | ||
f1f41a6c | 6079 | FOR_EACH_VEC_ELT (*loop_blocks, i, bb) |
e1ab7874 | 6080 | { |
6081 | gcc_assert (new_rgn_number >= 0); | |
6082 | ||
6083 | sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number); | |
6084 | } | |
6085 | ||
f1f41a6c | 6086 | vec_free (loop_blocks); |
e1ab7874 | 6087 | } |
6088 | ||
6089 | ||
6090 | /* Create region(s) from loop nest LOOP, such that inner loops will be | |
48e1416a | 6091 | pipelined before outer loops. Returns true when a region for LOOP |
e1ab7874 | 6092 | is created. */ |
6093 | static bool | |
6094 | make_regions_from_loop_nest (struct loop *loop) | |
48e1416a | 6095 | { |
e1ab7874 | 6096 | struct loop *cur_loop; |
6097 | int rgn_number; | |
6098 | ||
6099 | /* Traverse all inner nodes of the loop. */ | |
6100 | for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next) | |
08b7917c | 6101 | if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index)) |
e1ab7874 | 6102 | return false; |
6103 | ||
6104 | /* At this moment all regular inner loops should have been pipelined. | |
6105 | Try to create a region from this loop. */ | |
6106 | rgn_number = make_region_from_loop (loop); | |
6107 | ||
6108 | if (rgn_number < 0) | |
6109 | return false; | |
6110 | ||
f1f41a6c | 6111 | loop_nests.safe_push (loop); |
e1ab7874 | 6112 | return true; |
6113 | } | |
6114 | ||
6115 | /* Initialize the data structures needed for pipelining. */ | |
6116 | void | |
6117 | sel_init_pipelining (void) | |
6118 | { | |
6119 | /* Collect loop information to be used in outer loops pipelining. */ | |
6120 | loop_optimizer_init (LOOPS_HAVE_PREHEADERS | |
6121 | | LOOPS_HAVE_FALLTHRU_PREHEADERS | |
6122 | | LOOPS_HAVE_RECORDED_EXITS | |
6123 | | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS); | |
6124 | current_loop_nest = NULL; | |
6125 | ||
fe672ac0 | 6126 | bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 6127 | bitmap_clear (bbs_in_loop_rgns); |
e1ab7874 | 6128 | |
6129 | recompute_rev_top_order (); | |
6130 | } | |
6131 | ||
6132 | /* Returns a struct loop for region RGN. */ | |
6133 | loop_p | |
6134 | get_loop_nest_for_rgn (unsigned int rgn) | |
6135 | { | |
6136 | /* Regions created with extend_rgns don't have corresponding loop nests, | |
6137 | because they don't represent loops. */ | |
f1f41a6c | 6138 | if (rgn < loop_nests.length ()) |
6139 | return loop_nests[rgn]; | |
e1ab7874 | 6140 | else |
6141 | return NULL; | |
6142 | } | |
6143 | ||
6144 | /* True when LOOP was included into pipelining regions. */ | |
6145 | bool | |
6146 | considered_for_pipelining_p (struct loop *loop) | |
6147 | { | |
6148 | if (loop_depth (loop) == 0) | |
6149 | return false; | |
6150 | ||
48e1416a | 6151 | /* Now, the loop could be too large or irreducible. Check whether its |
6152 | region is in LOOP_NESTS. | |
6153 | We determine the region number of LOOP as the region number of its | |
6154 | latch. We can't use the header here, because this header could be | |
e1ab7874 | 6155 | a just-removed preheader and would give us the wrong region number.
6156 | Latch can't be used because it could be in the inner loop too. */ | |
a2d56a0e | 6157 | if (LOOP_MARKED_FOR_PIPELINING_P (loop)) |
e1ab7874 | 6158 | { |
6159 | int rgn = CONTAINING_RGN (loop->latch->index); | |
6160 | ||
f1f41a6c | 6161 | gcc_assert ((unsigned) rgn < loop_nests.length ()); |
e1ab7874 | 6162 | return true; |
6163 | } | |
48e1416a | 6164 | |
e1ab7874 | 6165 | return false; |
6166 | } | |
6167 | ||
48e1416a | 6168 | /* Makes regions from the rest of the blocks, after loops are chosen |
e1ab7874 | 6169 | for pipelining. */ |
6170 | static void | |
6171 | make_regions_from_the_rest (void) | |
6172 | { | |
6173 | int cur_rgn_blocks; | |
6174 | int *loop_hdr; | |
6175 | int i; | |
6176 | ||
6177 | basic_block bb; | |
6178 | edge e; | |
6179 | edge_iterator ei; | |
6180 | int *degree; | |
e1ab7874 | 6181 | |
6182 | /* Index in rgn_bb_table where to start allocating new regions. */ | |
6183 | cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0; | |
e1ab7874 | 6184 | |
48e1416a | 6185 | /* Make regions from all the remaining basic blocks - those that don't belong to
e1ab7874 | 6186 | any loop or belong to irreducible loops. Prepare the data structures |
6187 | for extend_rgns. */ | |
6188 | ||
6189 | /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop, | |
6190 | LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same | |
6191 | loop. */ | |
fe672ac0 | 6192 | loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
6193 | degree = XCNEWVEC (int, last_basic_block_for_fn (cfun)); | |
e1ab7874 | 6194 | |
6195 | ||
6196 | /* For each basic block that belongs to some loop assign the number | |
6197 | of innermost loop it belongs to. */ | |
fe672ac0 | 6198 | for (i = 0; i < last_basic_block_for_fn (cfun); i++) |
e1ab7874 | 6199 | loop_hdr[i] = -1; |
6200 | ||
fc00614f | 6201 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6202 | { |
9c26ddef | 6203 | if (bb->loop_father && bb->loop_father->num != 0 |
e1ab7874 | 6204 | && !(bb->flags & BB_IRREDUCIBLE_LOOP)) |
6205 | loop_hdr[bb->index] = bb->loop_father->num; | |
6206 | } | |
6207 | ||
48e1416a | 6208 | /* For each basic block, the degree is calculated as the number of incoming
e1ab7874 | 6209 | edges coming from bbs that are not yet scheduled.
6210 | The basic blocks that are scheduled have degree value of zero. */ | |
fc00614f | 6211 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6212 | { |
6213 | degree[bb->index] = 0; | |
6214 | ||
08b7917c | 6215 | if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index)) |
e1ab7874 | 6216 | { |
6217 | FOR_EACH_EDGE (e, ei, bb->preds) | |
08b7917c | 6218 | if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index)) |
e1ab7874 | 6219 | degree[bb->index]++; |
6220 | } | |
6221 | else | |
6222 | degree[bb->index] = -1; | |
6223 | } | |
6224 | ||
6225 | extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr); | |
6226 | ||
6227 | /* Any block that did not end up in a region is placed into a region | |
6228 | by itself. */ | |
fc00614f | 6229 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6230 | if (degree[bb->index] >= 0) |
6231 | { | |
6232 | rgn_bb_table[cur_rgn_blocks] = bb->index; | |
6233 | RGN_NR_BLOCKS (nr_regions) = 1; | |
6234 | RGN_BLOCKS (nr_regions) = cur_rgn_blocks++; | |
6235 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
6236 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
6237 | CONTAINING_RGN (bb->index) = nr_regions++; | |
6238 | BLOCK_TO_BB (bb->index) = 0; | |
6239 | } | |
6240 | ||
6241 | free (degree); | |
6242 | free (loop_hdr); | |
6243 | } | |
6244 | ||
6245 | /* Free data structures used in pipelining of loops. */ | |
6246 | void sel_finish_pipelining (void) | |
6247 | { | |
e1ab7874 | 6248 | struct loop *loop; |
6249 | ||
6250 | /* Release aux fields so we don't free them later by mistake. */ | |
f21d4d00 | 6251 | FOR_EACH_LOOP (loop, 0) |
e1ab7874 | 6252 | loop->aux = NULL; |
6253 | ||
6254 | loop_optimizer_finalize (); | |
6255 | ||
f1f41a6c | 6256 | loop_nests.release (); |
e1ab7874 | 6257 | |
6258 | free (rev_top_order_index); | |
6259 | rev_top_order_index = NULL; | |
6260 | } | |
6261 | ||
48e1416a | 6262 | /* This function replaces find_rgns when
e1ab7874 | 6263 | FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */ |
48e1416a | 6264 | void |
e1ab7874 | 6265 | sel_find_rgns (void) |
6266 | { | |
6267 | sel_init_pipelining (); | |
6268 | extend_regions (); | |
6269 | ||
6270 | if (current_loops) | |
6271 | { | |
6272 | loop_p loop; | |
e1ab7874 | 6273 | |
f21d4d00 | 6274 | FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops |
6275 | ? LI_FROM_INNERMOST | |
6276 | : LI_ONLY_INNERMOST)) | |
e1ab7874 | 6277 | make_regions_from_loop_nest (loop); |
6278 | } | |
6279 | ||
6280 | /* Make regions from all the remaining basic blocks and schedule them.
48e1416a | 6281 | These blocks include blocks that don't belong to any loop or belong |
e1ab7874 | 6282 | to irreducible loops. */ |
6283 | make_regions_from_the_rest (); | |
6284 | ||
6285 | /* We don't need bbs_in_loop_rgns anymore. */ | |
6286 | sbitmap_free (bbs_in_loop_rgns); | |
6287 | bbs_in_loop_rgns = NULL; | |
6288 | } | |
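To illustrate the iteration choice in sel_find_rgns above (walk all loops from the innermost outward when outer-loop pipelining is enabled, otherwise visit only innermost loops), here is a rough stand-alone sketch over an invented loop tree. The parent array and is_innermost helper are illustrative assumptions, not GCC's loop iterator API.

#include <stdbool.h>
#include <stdio.h>

#define N_LOOPS 4

/* parent[i] == -1 marks an outermost loop; in this toy tree children
   always have larger indices than their parents.  */
static const int parent[N_LOOPS] = { -1, 0, 1, 0 };

static bool
is_innermost (int loop)
{
  for (int i = 0; i < N_LOOPS; i++)
    if (parent[i] == loop)
      return false;
  return true;
}

int
main (void)
{
  bool pipeline_outer_loops = true;

  /* Walking in reverse index order visits inner loops before their
     parents, which models the "from innermost" traversal.  */
  for (int loop = N_LOOPS - 1; loop >= 0; loop--)
    if (pipeline_outer_loops || is_innermost (loop))
      printf ("make region from loop %d\n", loop);
  return 0;
}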
6289 | ||
b73edd22 | 6290 | /* Add the preheader blocks from the previous loop to the current region,
6291 | taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS.
e1ab7874 | 6292 | This function is only used with -fsel-sched-pipelining-outer-loops. */ |
6293 | void | |
b73edd22 | 6294 | sel_add_loop_preheaders (bb_vec_t *bbs) |
e1ab7874 | 6295 | { |
6296 | int i; | |
6297 | basic_block bb; | |
f1f41a6c | 6298 | vec<basic_block> *preheader_blocks |
e1ab7874 | 6299 | = LOOP_PREHEADER_BLOCKS (current_loop_nest); |
6300 | ||
f1f41a6c | 6301 | if (!preheader_blocks) |
6302 | return; | |
6303 | ||
6304 | for (i = 0; preheader_blocks->iterate (i, &bb); i++) | |
a2d56a0e | 6305 | { |
f1f41a6c | 6306 | bbs->safe_push (bb); |
6307 | last_added_blocks.safe_push (bb); | |
e1ab7874 | 6308 | sel_add_bb (bb); |
a2d56a0e | 6309 | } |
e1ab7874 | 6310 | |
f1f41a6c | 6311 | vec_free (preheader_blocks); |
e1ab7874 | 6312 | } |
6313 | ||
48e1416a | 6314 | /* While pipelining outer loops, returns TRUE if BB is a loop preheader. |
6315 | Please note that the function should also work when pipelining_p is | |
6316 | false, because it is used when deciding whether we should or should | |
e1ab7874 | 6317 | not reschedule pipelined code. */ |
6318 | bool | |
6319 | sel_is_loop_preheader_p (basic_block bb) | |
6320 | { | |
6321 | if (current_loop_nest) | |
6322 | { | |
6323 | struct loop *outer; | |
6324 | ||
6325 | if (preheader_removed) | |
6326 | return false; | |
6327 | ||
6328 | /* Preheader is the first block in the region. */ | |
6329 | if (BLOCK_TO_BB (bb->index) == 0) | |
6330 | return true; | |
6331 | ||
6332 | /* We used to find a preheader with the topological information. | |
6333 | Check that the above code is equivalent to what we did before. */ | |
6334 | ||
6335 | if (in_current_region_p (current_loop_nest->header)) | |
48e1416a | 6336 | gcc_assert (!(BLOCK_TO_BB (bb->index) |
e1ab7874 | 6337 | < BLOCK_TO_BB (current_loop_nest->header->index))); |
6338 | ||
6339 | /* Check that BB is never the latch block of an outer loop that is
6340 | being considered for pipelining. */
6341 | for (outer = loop_outer (current_loop_nest); | |
6342 | outer; | |
6343 | outer = loop_outer (outer)) | |
6344 | if (considered_for_pipelining_p (outer) && outer->latch == bb) | |
6345 | gcc_unreachable (); | |
6346 | } | |
6347 | ||
6348 | return false; | |
6349 | } | |
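The core of sel_is_loop_preheader_p is positional: inside a loop region, the preheader is the block mapped to position 0 by BLOCK_TO_BB. A toy version of just that check follows; block_to_bb is a plain array invented for the example, not the scheduler's real mapping.

#include <stdbool.h>
#include <stdio.h>

#define N_BLOCKS 5

/* Position of each block inside its region; the preheader sits at 0.  */
static const int block_to_bb[N_BLOCKS] = { 0, 1, 2, 3, 4 };

static bool
toy_is_loop_preheader_p (int bb_index)
{
  return block_to_bb[bb_index] == 0;
}

int
main (void)
{
  printf ("bb 0 is preheader: %d\n", toy_is_loop_preheader_p (0));
  printf ("bb 3 is preheader: %d\n", toy_is_loop_preheader_p (3));
  return 0;
}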
6350 | ||
49087fba | 6351 | /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and |
6352 | can be removed, making the corresponding edge fallthrough (assuming that | |
6353 | all basic blocks between JUMP_BB and DEST_BB are empty). */ | |
6354 | static bool | |
6355 | bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb) | |
e1ab7874 | 6356 | { |
4b816303 | 6357 | if (!onlyjump_p (BB_END (jump_bb)) |
6358 | || tablejump_p (BB_END (jump_bb), NULL, NULL)) | |
e1ab7874 | 6359 | return false; |
6360 | ||
48e1416a | 6361 | /* Bail out if there are several outgoing edges, an abnormal or crossing
e1ab7874 | 6362 | edge, or the destination of the jump is not DEST_BB. */
6363 | if (EDGE_COUNT (jump_bb->succs) != 1 | |
49087fba | 6364 | || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING) |
e1ab7874 | 6365 | || EDGE_SUCC (jump_bb, 0)->dest != dest_bb) |
6366 | return false; | |
6367 | ||
6368 | /* None of the above, so the jump can be removed. */
6369 | return true; | |
6370 | } | |
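For readers less familiar with the CFG flags involved, here is a stand-alone rephrasing of the edge-related part of bb_has_removable_jump_to_p on invented toy_bb/toy_edge types (it deliberately omits the onlyjump_p/tablejump_p check on the jump instruction itself). This is a sketch of the condition, not GCC's data structures.

#include <stdbool.h>

enum { TOY_EDGE_ABNORMAL = 1, TOY_EDGE_CROSSING = 2 };

struct toy_edge { int dest_index; unsigned flags; };
struct toy_bb { struct toy_edge succs[4]; int n_succs; };

static bool
toy_bb_has_removable_jump_to_p (const struct toy_bb *jump_bb, int dest_index)
{
  /* Several outgoing edges mean the jump is not simply removable.  */
  if (jump_bb->n_succs != 1)
    return false;

  /* Abnormal or partition-crossing edges must be kept.  */
  if (jump_bb->succs[0].flags & (TOY_EDGE_ABNORMAL | TOY_EDGE_CROSSING))
    return false;

  /* The single edge must go straight to the requested block.  */
  return jump_bb->succs[0].dest_index == dest_index;
}

int
main (void)
{
  struct toy_bb bb = { { { 7, 0 } }, 1 };
  return toy_bb_has_removable_jump_to_p (&bb, 7) ? 0 : 1;
}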
6371 | ||
6372 | /* Removes the loop preheader blocks from the current region and saves them in
48e1416a | 6373 | PREHEADER_BLOCKS of the father loop, so they will be added later to |
e1ab7874 | 6374 | the region that represents an outer loop. */
6375 | static void | |
6376 | sel_remove_loop_preheader (void) | |
6377 | { | |
6378 | int i, old_len; | |
6379 | int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
6380 | basic_block bb; | |
6381 | bool all_empty_p = true; | |
f1f41a6c | 6382 | vec<basic_block> *preheader_blocks |
e1ab7874 | 6383 | = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest)); |
6384 | ||
f1f41a6c | 6385 | vec_check_alloc (preheader_blocks, 0); |
6386 | ||
e1ab7874 | 6387 | gcc_assert (current_loop_nest); |
f1f41a6c | 6388 | old_len = preheader_blocks->length (); |
e1ab7874 | 6389 | |
6390 | /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */ | |
6391 | for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++) | |
6392 | { | |
f5a6b05f | 6393 | bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
e1ab7874 | 6394 | |
48e1416a | 6395 | /* If the basic block belongs to the region, but doesn't belong to the
e1ab7874 | 6396 | corresponding loop, then it should be a preheader. */
6397 | if (sel_is_loop_preheader_p (bb)) | |
6398 | { | |
f1f41a6c | 6399 | preheader_blocks->safe_push (bb); |
e1ab7874 | 6400 | if (BB_END (bb) != bb_note (bb)) |
6401 | all_empty_p = false; | |
6402 | } | |
6403 | } | |
48e1416a | 6404 | |
e1ab7874 | 6405 | /* Remove these blocks only after iterating over the whole region. */ |
f1f41a6c | 6406 | for (i = preheader_blocks->length () - 1; i >= old_len; i--) |
e1ab7874 | 6407 | { |
f1f41a6c | 6408 | bb = (*preheader_blocks)[i]; |
e1ab7874 | 6409 | sel_remove_bb (bb, false); |
6410 | } | |
6411 | ||
6412 | if (!considered_for_pipelining_p (loop_outer (current_loop_nest))) | |
6413 | { | |
6414 | if (!all_empty_p) | |
6415 | /* Immediately create new region from preheader. */ | |
f1f41a6c | 6416 | make_region_from_loop_preheader (preheader_blocks); |
e1ab7874 | 6417 | else |
6418 | { | |
6419 | /* If all preheader blocks are empty, don't create a new empty region.
6420 | Instead, remove them completely. */ | |
f1f41a6c | 6421 | FOR_EACH_VEC_ELT (*preheader_blocks, i, bb) |
e1ab7874 | 6422 | { |
6423 | edge e; | |
6424 | edge_iterator ei; | |
6425 | basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb; | |
6426 | ||
6427 | /* Redirect all incoming edges to next basic block. */ | |
6428 | for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) | |
6429 | { | |
6430 | if (! (e->flags & EDGE_FALLTHRU)) | |
6431 | redirect_edge_and_branch (e, bb->next_bb); | |
6432 | else | |
6433 | redirect_edge_succ (e, bb->next_bb); | |
6434 | } | |
6435 | gcc_assert (BB_NOTE_LIST (bb) == NULL); | |
6436 | delete_and_free_basic_block (bb); | |
6437 | ||
48e1416a | 6438 | /* Check if after deleting the preheader there is an unconditional
6439 | jump in PREV_BB that leads to the next basic block NEXT_BB. | |
6440 | If so, delete this jump and clear the data sets of its
e1ab7874 | 6441 | basic block if it becomes empty. */ |
6442 | if (next_bb->prev_bb == prev_bb | |
34154e27 | 6443 | && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) |
49087fba | 6444 | && bb_has_removable_jump_to_p (prev_bb, next_bb)) |
e1ab7874 | 6445 | { |
6446 | redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb); | |
6447 | if (BB_END (prev_bb) == bb_note (prev_bb)) | |
6448 | free_data_sets (prev_bb); | |
6449 | } | |
1a5dbaab | 6450 | |
6451 | set_immediate_dominator (CDI_DOMINATORS, next_bb, | |
6452 | recompute_dominator (CDI_DOMINATORS, | |
6453 | next_bb)); | |
e1ab7874 | 6454 | } |
6455 | } | |
f1f41a6c | 6456 | vec_free (preheader_blocks); |
e1ab7874 | 6457 | } |
6458 | else | |
6459 | /* Store preheader within the father's loop structure. */ | |
6460 | SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest), | |
6461 | preheader_blocks); | |
6462 | } | |
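Finally, the empty-preheader removal above can be summarized as: redirect every edge entering the empty block to the block that follows it, then delete the block. A compact model of that step on an invented toy_bb structure (a single successor pointer per block, again not GCC's CFG types) might look like this:

#include <stdlib.h>

struct toy_bb
{
  int index;
  struct toy_bb *succ;        /* Single outgoing edge.  */
  struct toy_bb *preds[4];    /* Blocks whose SUCC currently points here.  */
  int n_preds;
};

/* Redirect every predecessor of BB to BB's successor, then delete BB.  */
static void
toy_remove_empty_block (struct toy_bb *bb)
{
  for (int i = 0; i < bb->n_preds; i++)
    bb->preds[i]->succ = bb->succ;
  free (bb);
}

int
main (void)
{
  struct toy_bb *a = calloc (1, sizeof *a);
  struct toy_bb *empty = calloc (1, sizeof *empty);
  struct toy_bb *b = calloc (1, sizeof *b);

  a->succ = empty;                  /* a -> empty -> b  */
  empty->succ = b;
  empty->preds[0] = a;
  empty->n_preds = 1;

  toy_remove_empty_block (empty);   /* Now a -> b.  */

  int ok = (a->succ == b);
  free (a);
  free (b);
  return ok ? 0 : 1;
}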
7c5928c3 | 6463 | |
e1ab7874 | 6464 | #endif |