/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "regset.h"
#include "cfgloop.h"
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for the whole scheduling pass.  */
vec<sel_global_bb_info_def> sel_global_bb_info;

/* A vector holding bb info.  */
vec<sel_region_bb_info_def> sel_region_bb_info;

/* A pool for allocating all lists.  */
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static vec<loop_p> loop_nests;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing the create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;
\f

/* Array containing the reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer (the number of regsets currently on the stack).  */
  int n;

  /* Its allocated size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to the pool.  */
  regset *vv;

  /* The pointer of the VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer (the number of nops currently in the pool).  */
  int n;

  /* Its allocated size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static vec<rtx_note *> bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
rtx_insn *exit_insn = NULL;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;
\f

/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (vec<expr_history_def> &);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int, int = -1);
static void finish_insns (void);
\f
/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list note pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find a fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add a new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}
\f

/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then make TC as if it were at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  The meaning of CLEAN_P is the
   same as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
\f
/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for a
   dependence context.  It accumulates information about processed insns to
   decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for a dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize a dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
\f

/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize a DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free a DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
\f

/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  vec_free (FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {

      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 issue_rate, /* issue_more */
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merge two fences (filling the fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx_insn *last_scheduled_insn,
              vec<rtx_insn *, va_gc> *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to) - reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other -
         right through it; in such a case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      vec_free (executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        FENCE_EXECUTING_INSNS (f)->block_remove (0,
                                  FENCE_EXECUTING_INSNS (f)->length ());
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find the fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to the basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
          FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* Would be weird if the same insn is a successor of several
               fallthrough edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
            FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find the edge of the first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find the edge of the second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }

      /* Check if we can choose the most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          vec_free (executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            FENCE_EXECUTING_INSNS (f)->block_remove (0,
                                      FENCE_EXECUTING_INSNS (f)->length ());
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            vec_free (FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            vec_free (executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc,
               rtx_insn *last_scheduled_insn,
               vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx_insn *sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
               bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int * new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}
\f

/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}

/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  uintptr_t r1 = (uintptr_t) *((const regset *) x);
  uintptr_t r2 = (uintptr_t) *((const regset *) xx);
  if (r1 > r2)
    return 1;
  else if (r1 < r2)
    return -1;
  gcc_unreachable ();
}

/* Free the regset pool possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
  if (flag_checking)
    {
      regset *v = regset_pool.v;
      int i = 0;
      int n = regset_pool.n;

      regset *vv = regset_pool.vv;
      int ii = 0;
      int nn = regset_pool.nn;

      int diff = 0;

      gcc_assert (n <= nn);

      /* Sort both vectors so it will be possible to compare them.  */
      qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
      qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

      while (ii < nn)
        {
          if (v[i] == vv[ii])
            i++;
          else
            /* VV[II] was lost.  */
            diff++;

          ii++;
        }

      gcc_assert (diff == regset_pool.diff);
    }

  /* If not true - we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}
\f

/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from the pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  rtx nop_pat;
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop_pat = nop_pool.v[--nop_pool.n];
  else
    nop_pat = nop_pattern;

  nop = emit_insn_before (nop_pat, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  /* We'll recycle this nop.  */
  nop->set_undeleted ();

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
\f

/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case some needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, the new rtx X and
   new mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, machine_mode* nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are OK to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule constants as rhs: there is no point in using a register
     if a constant can be used.  Moreover, scheduling a constant as rhs may
     lead to a mode mismatch, because constants don't have modes but they
     could be merged from branches where the same constant is used in
     different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from the INSN.  Use UNIQUE_P for determining the correct
   VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx_insn *copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}

/* Returns TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx_insn *insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_sched_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}
\f

/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling the region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);

  /* The insn may come from the transformation cache, which may hold already
     deleted insns, so mark it as not deleted.  */
  insn->set_undeleted ();

  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  SET_PREV_INSN (insn) = after;
  SET_NEXT_INSN (insn) = next;

  SET_NEXT_INSN (after) = insn;
  SET_PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}

\f
1438 | /* Functions to work with right-hand sides. */ | |
1439 | ||
48e1416a | 1440 | /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector |
e1ab7874 | 1441 | VECT and return true when found. Use NEW_VINSN for comparison only when |
48e1416a | 1442 | COMPARE_VINSNS is true. Write to INDP the index on which |
1443 | the search has stopped, such that inserting the new element at INDP will | |
e1ab7874 | 1444 | retain VECT's sort order. */ |
1445 | static bool | |
f1f41a6c | 1446 | find_in_history_vect_1 (vec<expr_history_def> vect, |
48e1416a | 1447 | unsigned uid, vinsn_t new_vinsn, |
e1ab7874 | 1448 | bool compare_vinsns, int *indp) |
1449 | { | |
1450 | expr_history_def *arr; | |
f1f41a6c | 1451 | int i, j, len = vect.length (); |
e1ab7874 | 1452 | |
1453 | if (len == 0) | |
1454 | { | |
1455 | *indp = 0; | |
1456 | return false; | |
1457 | } | |
1458 | ||
f1f41a6c | 1459 | arr = vect.address (); |
e1ab7874 | 1460 | i = 0, j = len - 1; |
1461 | ||
1462 | while (i <= j) | |
1463 | { | |
1464 | unsigned auid = arr[i].uid; | |
48e1416a | 1465 | vinsn_t avinsn = arr[i].new_expr_vinsn; |
e1ab7874 | 1466 | |
1467 | if (auid == uid | |
48e1416a | 1468 | /* When undoing transformation on a bookkeeping copy, the new vinsn |
1469 | may not be exactly equal to the one that is saved in the vector. | |
e1ab7874 | 1470 | This is because the insn whose copy we're checking was possibly |
1471 | substituted itself. */ | |
48e1416a | 1472 | && (! compare_vinsns |
e1ab7874 | 1473 | || vinsn_equal_p (avinsn, new_vinsn))) |
1474 | { | |
1475 | *indp = i; | |
1476 | return true; | |
1477 | } | |
1478 | else if (auid > uid) | |
1479 | break; | |
1480 | i++; | |
1481 | } | |
1482 | ||
1483 | *indp = i; | |
1484 | return false; | |
1485 | } | |
1486 | ||
48e1416a | 1487 | /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return |
1488 | the position found or -1, if no such value is in vector. | |
e1ab7874 | 1489 | Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */ |
1490 | int | |
f1f41a6c | 1491 | find_in_history_vect (vec<expr_history_def> vect, rtx insn, |
e1ab7874 | 1492 | vinsn_t new_vinsn, bool originators_p) |
1493 | { | |
1494 | int ind; | |
1495 | ||
48e1416a | 1496 | if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, |
e1ab7874 | 1497 | false, &ind)) |
1498 | return ind; | |
1499 | ||
1500 | if (INSN_ORIGINATORS (insn) && originators_p) | |
1501 | { | |
1502 | unsigned uid; | |
1503 | bitmap_iterator bi; | |
1504 | ||
1505 | EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi) | |
1506 | if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind)) | |
1507 | return ind; | |
1508 | } | |
48e1416a | 1509 | |
e1ab7874 | 1510 | return -1; |
1511 | } | |
1512 | ||
48e1416a | 1513 | /* Insert new element in a sorted history vector pointed to by PVECT, |
1514 | if it is not there already. The element is searched using | |
e1ab7874 | 1515 | UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save |
1516 | the history of a transformation. */ | |
1517 | void | |
f1f41a6c | 1518 | insert_in_history_vect (vec<expr_history_def> *pvect, |
e1ab7874 | 1519 | unsigned uid, enum local_trans_type type, |
48e1416a | 1520 | vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, |
e1ab7874 | 1521 | ds_t spec_ds) |
1522 | { | |
f1f41a6c | 1523 | vec<expr_history_def> vect = *pvect; |
e1ab7874 | 1524 | expr_history_def temp; |
1525 | bool res; | |
1526 | int ind; | |
1527 | ||
1528 | res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind); | |
1529 | ||
1530 | if (res) | |
1531 | { | |
f1f41a6c | 1532 | expr_history_def *phist = &vect[ind]; |
e1ab7874 | 1533 | |
48e1416a | 1534 | /* It is possible that speculation types of expressions that were |
e1ab7874 | 1535 | propagated through different paths will be different here. In this |
1536 | case, merge the status to get the correct check later. */ | |
1537 | if (phist->spec_ds != spec_ds) | |
1538 | phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds); | |
1539 | return; | |
1540 | } | |
48e1416a | 1541 | |
e1ab7874 | 1542 | temp.uid = uid; |
1543 | temp.old_expr_vinsn = old_expr_vinsn; | |
48e1416a | 1544 | temp.new_expr_vinsn = new_expr_vinsn; |
e1ab7874 | 1545 | temp.spec_ds = spec_ds; |
1546 | temp.type = type; | |
1547 | ||
1548 | vinsn_attach (old_expr_vinsn); | |
1549 | vinsn_attach (new_expr_vinsn); | |
f1f41a6c | 1550 | vect.safe_insert (ind, temp); |
e1ab7874 | 1551 | *pvect = vect; |
1552 | } | |
1553 | ||
1554 | /* Free history vector PVECT. */ | |
1555 | static void | |
f1f41a6c | 1556 | free_history_vect (vec<expr_history_def> &pvect) |
e1ab7874 | 1557 | { |
1558 | unsigned i; | |
1559 | expr_history_def *phist; | |
1560 | ||
f1f41a6c | 1561 | if (! pvect.exists ()) |
e1ab7874 | 1562 | return; |
48e1416a | 1563 | |
f1f41a6c | 1564 | for (i = 0; pvect.iterate (i, &phist); i++) |
e1ab7874 | 1565 | { |
1566 | vinsn_detach (phist->old_expr_vinsn); | |
1567 | vinsn_detach (phist->new_expr_vinsn); | |
1568 | } | |
48e1416a | 1569 | |
f1f41a6c | 1570 | pvect.release (); |
e1ab7874 | 1571 | } |
1572 | ||
c53624fb | 1573 | /* Merge vector FROM to PVECT. */ |
1574 | static void | |
f1f41a6c | 1575 | merge_history_vect (vec<expr_history_def> *pvect, |
1576 | vec<expr_history_def> from) | |
c53624fb | 1577 | { |
1578 | expr_history_def *phist; | |
1579 | int i; | |
1580 | ||
1581 | /* We keep this vector sorted. */ | |
f1f41a6c | 1582 | for (i = 0; from.iterate (i, &phist); i++) |
c53624fb | 1583 | insert_in_history_vect (pvect, phist->uid, phist->type, |
1584 | phist->old_expr_vinsn, phist->new_expr_vinsn, | |
1585 | phist->spec_ds); | |
1586 | } | |
e1ab7874 | 1587 | |
1588 | /* Compare two vinsns as rhses if possible and as vinsns otherwise. */ | |
1589 | bool | |
1590 | vinsn_equal_p (vinsn_t x, vinsn_t y) | |
1591 | { | |
1592 | rtx_equal_p_callback_function repcf; | |
1593 | ||
1594 | if (x == y) | |
1595 | return true; | |
1596 | ||
1597 | if (VINSN_TYPE (x) != VINSN_TYPE (y)) | |
1598 | return false; | |
1599 | ||
1600 | if (VINSN_HASH (x) != VINSN_HASH (y)) | |
1601 | return false; | |
1602 | ||
1603 | repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL; | |
48e1416a | 1604 | if (VINSN_SEPARABLE_P (x)) |
e1ab7874 | 1605 | { |
1606 | /* Compare RHSes of VINSNs. */ | |
1607 | gcc_assert (VINSN_RHS (x)); | |
1608 | gcc_assert (VINSN_RHS (y)); | |
1609 | ||
1610 | return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf); | |
1611 | } | |
1612 | ||
1613 | return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf); | |
1614 | } | |
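/* For example, the separable insns "r1 = r2 + r3" and "r4 = r2 + r3"
   can compare equal here, since for separable vinsns only the RHSes are
   compared; this is what lets an expression be recognized in an av set
   even after its destination was renamed.  The cheap VINSN_HASH check
   above filters out most non-matches before the rtx_equal_p_cb call.  */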
1615 | \f | |
1616 | ||
1617 | /* Functions for working with expressions. */ | |
1618 | ||
1619 | /* Initialize EXPR. */ | |
1620 | static void | |
1621 | init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority, | |
1622 | int sched_times, int orig_bb_index, ds_t spec_done_ds, | |
1623 | ds_t spec_to_check_ds, int orig_sched_cycle, | |
f1f41a6c | 1624 | vec<expr_history_def> history, |
1625 | signed char target_available, | |
e1ab7874 | 1626 | bool was_substituted, bool was_renamed, bool needs_spec_check_p, |
1627 | bool cant_move) | |
1628 | { | |
1629 | vinsn_attach (vi); | |
1630 | ||
1631 | EXPR_VINSN (expr) = vi; | |
1632 | EXPR_SPEC (expr) = spec; | |
1633 | EXPR_USEFULNESS (expr) = use; | |
1634 | EXPR_PRIORITY (expr) = priority; | |
1635 | EXPR_PRIORITY_ADJ (expr) = 0; | |
1636 | EXPR_SCHED_TIMES (expr) = sched_times; | |
1637 | EXPR_ORIG_BB_INDEX (expr) = orig_bb_index; | |
1638 | EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle; | |
1639 | EXPR_SPEC_DONE_DS (expr) = spec_done_ds; | |
1640 | EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds; | |
1641 | ||
f1f41a6c | 1642 | if (history.exists ()) |
e1ab7874 | 1643 | EXPR_HISTORY_OF_CHANGES (expr) = history; |
1644 | else | |
f1f41a6c | 1645 | EXPR_HISTORY_OF_CHANGES (expr).create (0); |
e1ab7874 | 1646 | |
1647 | EXPR_TARGET_AVAILABLE (expr) = target_available; | |
1648 | EXPR_WAS_SUBSTITUTED (expr) = was_substituted; | |
1649 | EXPR_WAS_RENAMED (expr) = was_renamed; | |
1650 | EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p; | |
1651 | EXPR_CANT_MOVE (expr) = cant_move; | |
1652 | } | |
1653 | ||
1654 | /* Make a copy of the expr FROM into the expr TO. */ | |
1655 | void | |
1656 | copy_expr (expr_t to, expr_t from) | |
1657 | { | |
1e094109 | 1658 | vec<expr_history_def> temp = vNULL; |
e1ab7874 | 1659 | |
f1f41a6c | 1660 | if (EXPR_HISTORY_OF_CHANGES (from).exists ()) |
e1ab7874 | 1661 | { |
1662 | unsigned i; | |
1663 | expr_history_def *phist; | |
1664 | ||
f1f41a6c | 1665 | temp = EXPR_HISTORY_OF_CHANGES (from).copy (); |
48e1416a | 1666 | for (i = 0; |
f1f41a6c | 1667 | temp.iterate (i, &phist); |
e1ab7874 | 1668 | i++) |
1669 | { | |
1670 | vinsn_attach (phist->old_expr_vinsn); | |
1671 | vinsn_attach (phist->new_expr_vinsn); | |
1672 | } | |
1673 | } | |
1674 | ||
48e1416a | 1675 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), |
e1ab7874 | 1676 | EXPR_USEFULNESS (from), EXPR_PRIORITY (from), |
1677 | EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from), | |
48e1416a | 1678 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), |
e1ab7874 | 1679 | EXPR_ORIG_SCHED_CYCLE (from), temp, |
48e1416a | 1680 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
e1ab7874 | 1681 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
1682 | EXPR_CANT_MOVE (from)); | |
1683 | } | |
1684 | ||
48e1416a | 1685 | /* Same, but the final expr will never be in av sets, so don't copy | |
e1ab7874 | 1686 | "uninteresting" data such as bitmap cache. */ |
1687 | void | |
1688 | copy_expr_onside (expr_t to, expr_t from) | |
1689 | { | |
1690 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from), | |
1691 | EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0, | |
f1f41a6c | 1692 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, |
1e094109 | 1693 | vNULL, |
e1ab7874 | 1694 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
1695 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), | |
1696 | EXPR_CANT_MOVE (from)); | |
1697 | } | |
1698 | ||
1699 | /* Prepare the expr of INSN for scheduling. Used when moving insn and when | |
1700 | initializing new insns. */ | |
1701 | static void | |
1702 | prepare_insn_expr (insn_t insn, int seqno) | |
1703 | { | |
1704 | expr_t expr = INSN_EXPR (insn); | |
1705 | ds_t ds; | |
48e1416a | 1706 | |
e1ab7874 | 1707 | INSN_SEQNO (insn) = seqno; |
1708 | EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn); | |
1709 | EXPR_SPEC (expr) = 0; | |
1710 | EXPR_ORIG_SCHED_CYCLE (expr) = 0; | |
1711 | EXPR_WAS_SUBSTITUTED (expr) = 0; | |
1712 | EXPR_WAS_RENAMED (expr) = 0; | |
1713 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
1714 | INSN_LIVE_VALID_P (insn) = false; | |
1715 | ||
1716 | /* ??? If this expression is speculative, make its dependence | |
1717 | as weak as possible. We can filter this expression later | |
1718 | in process_spec_exprs, because we do not distinguish | |
1719 | between the status we got during compute_av_set and the | |
1720 | existing status. To be fixed. */ | |
1721 | ds = EXPR_SPEC_DONE_DS (expr); | |
1722 | if (ds) | |
1723 | EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds); | |
1724 | ||
f1f41a6c | 1725 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1726 | } |
1727 | ||
1728 | /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1729 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1730 | a split point. */ |
1731 | static void | |
1732 | update_target_availability (expr_t to, expr_t from, insn_t split_point) | |
1733 | { | |
48e1416a | 1734 | if (EXPR_TARGET_AVAILABLE (to) < 0 |
e1ab7874 | 1735 | || EXPR_TARGET_AVAILABLE (from) < 0) |
1736 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1737 | else | |
1738 | { | |
1739 | /* We try to detect the case when one of the expressions | |
1740 | can only be reached through another one. In this case, | |
1741 | we can do better. */ | |
1742 | if (split_point == NULL) | |
1743 | { | |
1744 | int toind, fromind; | |
1745 | ||
1746 | toind = EXPR_ORIG_BB_INDEX (to); | |
1747 | fromind = EXPR_ORIG_BB_INDEX (from); | |
48e1416a | 1748 | |
e1ab7874 | 1749 | if (toind && toind == fromind) |
48e1416a | 1750 | /* Do nothing -- everything is done in |
e1ab7874 | 1751 | merge_with_other_exprs. */ |
1752 | ; | |
1753 | else | |
1754 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1755 | } | |
d6726470 | 1756 | else if (EXPR_TARGET_AVAILABLE (from) == 0 |
1757 | && EXPR_LHS (from) | |
1758 | && REG_P (EXPR_LHS (from)) | |
1759 | && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from))) | |
1760 | EXPR_TARGET_AVAILABLE (to) = -1; | |
e1ab7874 | 1761 | else |
1762 | EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from); | |
1763 | } | |
1764 | } | |
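/* An illustrative summary of the merge above: EXPR_TARGET_AVAILABLE is
   a tri-state flag (1 -- available, 0 -- unavailable, -1 -- unknown).
   If either side is -1, the result is -1.  Without a split point, the
   result also becomes -1 unless both exprs originate from the same
   known block, in which case TO's value is kept.  With a split point,
   the values are ANDed, except that FROM == 0 with a different
   destination register forces -1, because availability was tracked
   for another register.  */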
1765 | ||
1766 | /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1767 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1768 | a split point. */ |
1769 | static void | |
1770 | update_speculative_bits (expr_t to, expr_t from, insn_t split_point) | |
1771 | { | |
1772 | ds_t old_to_ds, old_from_ds; | |
1773 | ||
1774 | old_to_ds = EXPR_SPEC_DONE_DS (to); | |
1775 | old_from_ds = EXPR_SPEC_DONE_DS (from); | |
48e1416a | 1776 | |
e1ab7874 | 1777 | EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds); |
1778 | EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from); | |
1779 | EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from); | |
1780 | ||
1781 | /* When merging e.g. control & data speculative exprs, or a control | |
48e1416a | 1782 | speculative with a control&data speculative one, we really have |
e1ab7874 | 1783 | to change vinsn too. Also, when speculative status is changed, |
1784 | we also need to record this as a transformation in expr's history. */ | |
1785 | if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE)) | |
1786 | { | |
1787 | old_to_ds = ds_get_speculation_types (old_to_ds); | |
1788 | old_from_ds = ds_get_speculation_types (old_from_ds); | |
48e1416a | 1789 | |
e1ab7874 | 1790 | if (old_to_ds != old_from_ds) |
1791 | { | |
1792 | ds_t record_ds; | |
48e1416a | 1793 | |
1794 | /* When both expressions are speculative, we need to change | |
e1ab7874 | 1795 | the vinsn first. */ |
1796 | if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE)) | |
1797 | { | |
1798 | int res; | |
48e1416a | 1799 | |
e1ab7874 | 1800 | res = speculate_expr (to, EXPR_SPEC_DONE_DS (to)); |
1801 | gcc_assert (res >= 0); | |
1802 | } | |
1803 | ||
1804 | if (split_point != NULL) | |
1805 | { | |
1806 | /* Record the change with proper status. */ | |
1807 | record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE; | |
1808 | record_ds &= ~(old_to_ds & SPECULATIVE); | |
1809 | record_ds &= ~(old_from_ds & SPECULATIVE); | |
48e1416a | 1810 | |
1811 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), | |
1812 | INSN_UID (split_point), TRANS_SPECULATION, | |
e1ab7874 | 1813 | EXPR_VINSN (from), EXPR_VINSN (to), |
1814 | record_ds); | |
1815 | } | |
1816 | } | |
1817 | } | |
1818 | } | |
1819 | ||
1820 | ||
1821 | /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL, | |
1822 | this is done along different paths. */ | |
1823 | void | |
1824 | merge_expr_data (expr_t to, expr_t from, insn_t split_point) | |
1825 | { | |
32bbc704 | 1826 | /* Choose the maximum of the specs of merged exprs. This is required |
1827 | for correctness of bookkeeping. */ | |
1828 | if (EXPR_SPEC (to) < EXPR_SPEC (from)) | |
e1ab7874 | 1829 | EXPR_SPEC (to) = EXPR_SPEC (from); |
1830 | ||
1831 | if (split_point) | |
1832 | EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from); | |
1833 | else | |
48e1416a | 1834 | EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), |
e1ab7874 | 1835 | EXPR_USEFULNESS (from)); |
1836 | ||
1837 | if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from)) | |
1838 | EXPR_PRIORITY (to) = EXPR_PRIORITY (from); | |
1839 | ||
d7a270ab | 1840 | /* We merge sched-times half-way to the larger value to avoid the endless |
1841 | pipelining of unneeded insns. The average seems to be a good compromise | |
1842 | between pipelining opportunities and avoiding extra work. */ | |
1843 | if (EXPR_SCHED_TIMES (to) != EXPR_SCHED_TIMES (from)) | |
1844 | EXPR_SCHED_TIMES (to) = ((EXPR_SCHED_TIMES (from) + EXPR_SCHED_TIMES (to) | |
1845 | + 1) / 2); | |
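  /* For example, merging an expr scheduled twice (sched_times == 2)
     with an unscheduled copy (sched_times == 0) yields
     (2 + 0 + 1) / 2 == 1, so the result counts as scheduled once.  */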
e1ab7874 | 1846 | |
1847 | if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from)) | |
1848 | EXPR_ORIG_BB_INDEX (to) = 0; | |
1849 | ||
48e1416a | 1850 | EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), |
e1ab7874 | 1851 | EXPR_ORIG_SCHED_CYCLE (from)); |
1852 | ||
e1ab7874 | 1853 | EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from); |
1854 | EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from); | |
1855 | EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from); | |
1856 | ||
c53624fb | 1857 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
1858 | EXPR_HISTORY_OF_CHANGES (from)); | |
e1ab7874 | 1859 | update_target_availability (to, from, split_point); |
1860 | update_speculative_bits (to, from, split_point); | |
1861 | } | |
1862 | ||
1863 | /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal | |
48e1416a | 1864 | in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions |
e1ab7874 | 1865 | are merged from different successors at a split point. */ |
1866 | void | |
1867 | merge_expr (expr_t to, expr_t from, insn_t split_point) | |
1868 | { | |
1869 | vinsn_t to_vi = EXPR_VINSN (to); | |
1870 | vinsn_t from_vi = EXPR_VINSN (from); | |
1871 | ||
1872 | gcc_assert (vinsn_equal_p (to_vi, from_vi)); | |
1873 | ||
1874 | /* Make sure that speculative pattern is propagated into exprs that | |
1875 | have non-speculative one. This will provide us with consistent | |
1876 | speculative bits and speculative patterns inside expr. */ | |
8d811ff9 | 1877 | if (EXPR_SPEC_DONE_DS (to) == 0 |
1878 | && (EXPR_SPEC_DONE_DS (from) != 0 | |
1879 | /* Do likewise for volatile insns, so that we always retain | |
1880 | the may_trap_p bit on the resulting expression. However, | |
1881 | avoid propagating the trapping bit into the instructions | |
1882 | already speculated. This would result in replacing the | |
1883 | speculative pattern with the non-speculative one and breaking | |
1884 | the speculation support. */ | |
1885 | || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to)) | |
1886 | && VINSN_MAY_TRAP_P (EXPR_VINSN (from))))) | |
e1ab7874 | 1887 | change_vinsn_in_expr (to, EXPR_VINSN (from)); |
1888 | ||
1889 | merge_expr_data (to, from, split_point); | |
1890 | gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE); | |
1891 | } | |
1892 | ||
1893 | /* Clear the information of this EXPR. */ | |
1894 | void | |
1895 | clear_expr (expr_t expr) | |
1896 | { | |
48e1416a | 1897 | |
e1ab7874 | 1898 | vinsn_detach (EXPR_VINSN (expr)); |
1899 | EXPR_VINSN (expr) = NULL; | |
1900 | ||
f1f41a6c | 1901 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1902 | } |
1903 | ||
1904 | /* For a given LV_SET, mark EXPR having unavailable target register. */ | |
1905 | static void | |
1906 | set_unavailable_target_for_expr (expr_t expr, regset lv_set) | |
1907 | { | |
1908 | if (EXPR_SEPARABLE_P (expr)) | |
1909 | { | |
1910 | if (REG_P (EXPR_LHS (expr)) | |
1f53e226 | 1911 | && register_unavailable_p (lv_set, EXPR_LHS (expr))) |
e1ab7874 | 1912 | { |
48e1416a | 1913 | /* If it's an insn like r1 = use (r1, ...), and it exists in |
1914 | different forms in each of the av_sets being merged, we can't say | |
1915 | whether the original destination register is available or not. | |
1916 | However, this still works if the destination register is not used | |
e1ab7874 | 1917 | in the original expression: if the branch at which LV_SET we're |
1918 | looking here is not actually 'other branch' in sense that same | |
48e1416a | 1919 | expression is available through it (but it can't be determined |
e1ab7874 | 1920 | at computation stage because of transformations on one of the |
48e1416a | 1921 | branches), it still won't affect the availability. |
1922 | Liveness of a register somewhere on a code motion path means | |
1923 | it's either read somewhere on a code motion path, live on | |
e1ab7874 | 1924 | 'other' branch, live at the point immediately following |
1925 | the original operation, or is read by the original operation. | |
1926 | The latter case is filtered out in the condition below. | |
1927 | It still doesn't cover the case when register is defined and used | |
1928 | somewhere within the code motion path, and in this case we could | |
1929 | miss a unifying code motion along both branches using a renamed | |
1930 | register, but it won't affect code correctness since upon | |
1931 | an actual code motion bookkeeping code would be generated. */ | |
1f53e226 | 1932 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1933 | EXPR_LHS (expr))) | |
e1ab7874 | 1934 | EXPR_TARGET_AVAILABLE (expr) = -1; |
1935 | else | |
1936 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1937 | } | |
1938 | } | |
1939 | else | |
1940 | { | |
1941 | unsigned regno; | |
1942 | reg_set_iterator rsi; | |
48e1416a | 1943 | |
1944 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), | |
e1ab7874 | 1945 | 0, regno, rsi) |
1946 | if (bitmap_bit_p (lv_set, regno)) | |
1947 | { | |
1948 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1949 | break; | |
1950 | } | |
1951 | ||
1952 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)), | |
1953 | 0, regno, rsi) | |
1954 | if (bitmap_bit_p (lv_set, regno)) | |
1955 | { | |
1956 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1957 | break; | |
1958 | } | |
1959 | } | |
1960 | } | |
1961 | ||
48e1416a | 1962 | /* Try to make EXPR speculative. Return 1 when EXPR's pattern |
e1ab7874 | 1963 | or dependence status have changed, 2 when also the target register |
1964 | became unavailable, 0 if nothing had to be changed. */ | |
1965 | int | |
1966 | speculate_expr (expr_t expr, ds_t ds) | |
1967 | { | |
1968 | int res; | |
04d073df | 1969 | rtx_insn *orig_insn_rtx; |
e1ab7874 | 1970 | rtx spec_pat; |
1971 | ds_t target_ds, current_ds; | |
1972 | ||
1973 | /* Obtain the status we need to put on EXPR. */ | |
1974 | target_ds = (ds & SPECULATIVE); | |
1975 | current_ds = EXPR_SPEC_DONE_DS (expr); | |
1976 | ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX); | |
1977 | ||
1978 | orig_insn_rtx = EXPR_INSN_RTX (expr); | |
1979 | ||
1980 | res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat); | |
1981 | ||
1982 | switch (res) | |
1983 | { | |
1984 | case 0: | |
1985 | EXPR_SPEC_DONE_DS (expr) = ds; | |
1986 | return current_ds != ds ? 1 : 0; | |
48e1416a | 1987 | |
e1ab7874 | 1988 | case 1: |
1989 | { | |
04d073df | 1990 | rtx_insn *spec_insn_rtx = |
1991 | create_insn_rtx_from_pattern (spec_pat, NULL_RTX); | |
e1ab7874 | 1992 | vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false); |
1993 | ||
1994 | change_vinsn_in_expr (expr, spec_vinsn); | |
1995 | EXPR_SPEC_DONE_DS (expr) = ds; | |
1996 | EXPR_NEEDS_SPEC_CHECK_P (expr) = true; | |
1997 | ||
48e1416a | 1998 | /* Do not allow clobbering the address register of speculative |
e1ab7874 | 1999 | insns. */ |
1f53e226 | 2000 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
2001 | expr_dest_reg (expr))) | |
e1ab7874 | 2002 | { |
2003 | EXPR_TARGET_AVAILABLE (expr) = false; | |
2004 | return 2; | |
2005 | } | |
2006 | ||
2007 | return 1; | |
2008 | } | |
2009 | ||
2010 | case -1: | |
2011 | return -1; | |
2012 | ||
2013 | default: | |
2014 | gcc_unreachable (); | |
2015 | return -1; | |
2016 | } | |
2017 | } | |
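/* A hedged usage sketch of the return protocol above (EXPR and DS stand
   for any expression and requested speculation status):

     res = speculate_expr (expr, ds);
     if (res == -1)
       ;  -- speculation failed; the caller drops EXPR
     else if (res == 2)
       ;  -- pattern changed and the target register became unavailable
     else if (res == 1)
       ;  -- pattern or dependence status changed, EXPR is still usable
     else
       ;  -- res == 0, nothing had to be changed

   This mirrors roughly how callers such as process_spec_exprs in
   sel-sched.c consume the result.  */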
2018 | ||
2019 | /* Return a destination register, if any, of EXPR. */ | |
2020 | rtx | |
2021 | expr_dest_reg (expr_t expr) | |
2022 | { | |
2023 | rtx dest = VINSN_LHS (EXPR_VINSN (expr)); | |
2024 | ||
2025 | if (dest != NULL_RTX && REG_P (dest)) | |
2026 | return dest; | |
2027 | ||
2028 | return NULL_RTX; | |
2029 | } | |
2030 | ||
2031 | /* Returns the REGNO of EXPR's destination. */ | |
2032 | unsigned | |
2033 | expr_dest_regno (expr_t expr) | |
2034 | { | |
2035 | rtx dest = expr_dest_reg (expr); | |
2036 | ||
2037 | gcc_assert (dest != NULL_RTX); | |
2038 | return REGNO (dest); | |
2039 | } | |
2040 | ||
48e1416a | 2041 | /* For a given LV_SET, mark all expressions in JOIN_SET, but not present in |
e1ab7874 | 2042 | AV_SET having unavailable target register. */ |
2043 | void | |
2044 | mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) | |
2045 | { | |
2046 | expr_t expr; | |
2047 | av_set_iterator avi; | |
2048 | ||
2049 | FOR_EACH_EXPR (expr, avi, join_set) | |
2050 | if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) | |
2051 | set_unavailable_target_for_expr (expr, lv_set); | |
2052 | } | |
2053 | \f | |
2054 | ||
1f53e226 | 2055 | /* Returns true if REG is (at least partially) present in REGS. */ | |
2056 | bool | |
2057 | register_unavailable_p (regset regs, rtx reg) | |
2058 | { | |
2059 | unsigned regno, end_regno; | |
2060 | ||
2061 | regno = REGNO (reg); | |
2062 | if (bitmap_bit_p (regs, regno)) | |
2063 | return true; | |
2064 | ||
2065 | end_regno = END_REGNO (reg); | |
2066 | ||
2067 | while (++regno < end_regno) | |
2068 | if (bitmap_bit_p (regs, regno)) | |
2069 | return true; | |
2070 | ||
2071 | return false; | |
2072 | } | |
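/* Example: for a multi-word hard register, say (reg:DI 4) occupying two
   word-sized hard regs, REGNO == 4 and END_REGNO == 6 (exclusive), so
   bits 4 and 5 of REGS are tested, and liveness of either one makes the
   whole register unavailable.  */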
2073 | ||
e1ab7874 | 2074 | /* Av set functions. */ |
2075 | ||
2076 | /* Add a new element to av set SETP. | |
2077 | Return the element added. */ | |
2078 | static av_set_t | |
2079 | av_set_add_element (av_set_t *setp) | |
2080 | { | |
2081 | /* Insert at the beginning of the list. */ | |
2082 | _list_add (setp); | |
2083 | return *setp; | |
2084 | } | |
2085 | ||
2086 | /* Add EXPR to SETP. */ | |
2087 | void | |
2088 | av_set_add (av_set_t *setp, expr_t expr) | |
2089 | { | |
2090 | av_set_t elem; | |
48e1416a | 2091 | |
e1ab7874 | 2092 | gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); |
2093 | elem = av_set_add_element (setp); | |
2094 | copy_expr (_AV_SET_EXPR (elem), expr); | |
2095 | } | |
2096 | ||
2097 | /* Same, but do not copy EXPR. */ | |
2098 | static void | |
2099 | av_set_add_nocopy (av_set_t *setp, expr_t expr) | |
2100 | { | |
2101 | av_set_t elem; | |
2102 | ||
2103 | elem = av_set_add_element (setp); | |
2104 | *_AV_SET_EXPR (elem) = *expr; | |
2105 | } | |
2106 | ||
2107 | /* Remove expr pointed to by IP from the av_set. */ | |
2108 | void | |
2109 | av_set_iter_remove (av_set_iterator *ip) | |
2110 | { | |
2111 | clear_expr (_AV_SET_EXPR (*ip->lp)); | |
2112 | _list_iter_remove (ip); | |
2113 | } | |
2114 | ||
2115 | /* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in | |
2116 | the sense of vinsn_equal_p. Return NULL if no such expr was | |
2117 | found in SET. */ | |
2118 | expr_t | |
2119 | av_set_lookup (av_set_t set, vinsn_t sought_vinsn) | |
2120 | { | |
2121 | expr_t expr; | |
2122 | av_set_iterator i; | |
2123 | ||
2124 | FOR_EACH_EXPR (expr, i, set) | |
2125 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2126 | return expr; | |
2127 | return NULL; | |
2128 | } | |
2129 | ||
2130 | /* Same, but also remove the EXPR found. */ | |
2131 | static expr_t | |
2132 | av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) | |
2133 | { | |
2134 | expr_t expr; | |
2135 | av_set_iterator i; | |
2136 | ||
2137 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2138 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2139 | { | |
2140 | _list_iter_remove_nofree (&i); | |
2141 | return expr; | |
2142 | } | |
2143 | return NULL; | |
2144 | } | |
2145 | ||
2146 | /* Search for an expr in SET such that it's equivalent to EXPR in the | |
2147 | sense of vinsn_equal_p on their vinsns, but not EXPR itself. | |
2148 | Returns NULL if no such expr was found in SET. */ | |
2149 | static expr_t | |
2150 | av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) | |
2151 | { | |
2152 | expr_t cur_expr; | |
2153 | av_set_iterator i; | |
2154 | ||
2155 | FOR_EACH_EXPR (cur_expr, i, set) | |
2156 | { | |
2157 | if (cur_expr == expr) | |
2158 | continue; | |
2159 | if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) | |
2160 | return cur_expr; | |
2161 | } | |
2162 | ||
2163 | return NULL; | |
2164 | } | |
2165 | ||
2166 | /* If an expression equivalent to EXPR is already in AVP, merge the two and remove the one pointed to by IP. */ | |
2167 | expr_t | |
2168 | merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) | |
2169 | { | |
2170 | expr_t expr2; | |
2171 | ||
2172 | expr2 = av_set_lookup_other_equiv_expr (*avp, expr); | |
2173 | if (expr2 != NULL) | |
2174 | { | |
2175 | /* Reset target availability on merge, since taking it only from one | |
2176 | of the exprs would be unreliable when they come from different code. | |
2177 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2178 | EXPR_USEFULNESS (expr2) = 0; | |
2179 | ||
2180 | merge_expr (expr2, expr, NULL); | |
48e1416a | 2181 | |
e1ab7874 | 2182 | /* Fix usefulness as it should be now REG_BR_PROB_BASE. */ |
2183 | EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; | |
48e1416a | 2184 | |
e1ab7874 | 2185 | av_set_iter_remove (ip); |
2186 | return expr2; | |
2187 | } | |
2188 | ||
2189 | return expr; | |
2190 | } | |
2191 | ||
2192 | /* Return true if there is an expr that correlates to VI in SET. */ | |
2193 | bool | |
2194 | av_set_is_in_p (av_set_t set, vinsn_t vi) | |
2195 | { | |
2196 | return av_set_lookup (set, vi) != NULL; | |
2197 | } | |
2198 | ||
2199 | /* Return a copy of SET. */ | |
2200 | av_set_t | |
2201 | av_set_copy (av_set_t set) | |
2202 | { | |
2203 | expr_t expr; | |
2204 | av_set_iterator i; | |
2205 | av_set_t res = NULL; | |
2206 | ||
2207 | FOR_EACH_EXPR (expr, i, set) | |
2208 | av_set_add (&res, expr); | |
2209 | ||
2210 | return res; | |
2211 | } | |
2212 | ||
2213 | /* Join two av sets that do not have common elements by attaching the second | |
2214 | set (pointed to by FROMP) to the end of the first set (TO_TAILP must point to | |
2215 | _AV_SET_NEXT of first set's last element). */ | |
2216 | static void | |
2217 | join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) | |
2218 | { | |
2219 | gcc_assert (*to_tailp == NULL); | |
2220 | *to_tailp = *fromp; | |
2221 | *fromp = NULL; | |
2222 | } | |
2223 | ||
2224 | /* Makes the set pointed to by TOP the union of TOP and FROMP. Clear av_set | |
2225 | pointed to by FROMP afterwards. */ | |
2226 | void | |
2227 | av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) | |
2228 | { | |
2229 | expr_t expr1; | |
2230 | av_set_iterator i; | |
2231 | ||
2232 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2233 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2234 | { | |
2235 | expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); | |
2236 | ||
2237 | if (expr2) | |
2238 | { | |
2239 | merge_expr (expr2, expr1, insn); | |
2240 | av_set_iter_remove (&i); | |
2241 | } | |
2242 | } | |
2243 | ||
2244 | join_distinct_sets (i.lp, fromp); | |
2245 | } | |
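/* Example: with *TOP == {A, B} and *FROMP == {B', C}, where B and B' are
   equal in the sense of vinsn_equal_p, B is merged into B' and removed
   from TOP, and the remaining FROMP list is spliced onto TOP's tail,
   leaving *TOP == {A, B'', C} (B'' being the merge) and *FROMP == NULL.  */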
2246 | ||
48e1416a | 2247 | /* Same as above, but also update availability of target register in |
e1ab7874 | 2248 | TOP judging by TO_LV_SET and FROM_LV_SET. */ |
2249 | void | |
2250 | av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, | |
2251 | regset from_lv_set, insn_t insn) | |
2252 | { | |
2253 | expr_t expr1; | |
2254 | av_set_iterator i; | |
2255 | av_set_t *to_tailp, in_both_set = NULL; | |
2256 | ||
2257 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2258 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2259 | { | |
2260 | expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); | |
2261 | ||
2262 | if (expr2) | |
2263 | { | |
48e1416a | 2264 | /* It may be that the expressions have different destination |
e1ab7874 | 2265 | registers, in which case we need to check liveness here. */ |
2266 | if (EXPR_SEPARABLE_P (expr1)) | |
2267 | { | |
48e1416a | 2268 | int regno1 = (REG_P (EXPR_LHS (expr1)) |
e1ab7874 | 2269 | ? (int) expr_dest_regno (expr1) : -1); |
48e1416a | 2270 | int regno2 = (REG_P (EXPR_LHS (expr2)) |
e1ab7874 | 2271 | ? (int) expr_dest_regno (expr2) : -1); |
48e1416a | 2272 | |
2273 | /* ??? We don't have a way to check restrictions for | |
e1ab7874 | 2274 | *other* register on the current path, we did it only |
2275 | for the current target register. Give up. */ | |
2276 | if (regno1 != regno2) | |
2277 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2278 | } | |
2279 | else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) | |
2280 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2281 | ||
2282 | merge_expr (expr2, expr1, insn); | |
2283 | av_set_add_nocopy (&in_both_set, expr2); | |
2284 | av_set_iter_remove (&i); | |
2285 | } | |
2286 | else | |
48e1416a | 2287 | /* EXPR1 is present in TOP, but not in FROMP. Check it on |
e1ab7874 | 2288 | FROM_LV_SET. */ |
2289 | set_unavailable_target_for_expr (expr1, from_lv_set); | |
2290 | } | |
2291 | to_tailp = i.lp; | |
2292 | ||
2293 | /* These expressions are not present in TOP. Check liveness | |
2294 | restrictions on TO_LV_SET. */ | |
2295 | FOR_EACH_EXPR (expr1, i, *fromp) | |
2296 | set_unavailable_target_for_expr (expr1, to_lv_set); | |
2297 | ||
2298 | join_distinct_sets (i.lp, &in_both_set); | |
2299 | join_distinct_sets (to_tailp, fromp); | |
2300 | } | |
2301 | ||
2302 | /* Clear av_set pointed to by SETP. */ | |
2303 | void | |
2304 | av_set_clear (av_set_t *setp) | |
2305 | { | |
2306 | expr_t expr; | |
2307 | av_set_iterator i; | |
2308 | ||
2309 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2310 | av_set_iter_remove (&i); | |
2311 | ||
2312 | gcc_assert (*setp == NULL); | |
2313 | } | |
2314 | ||
2315 | /* Leave only one non-speculative element in the SETP. */ | |
2316 | void | |
2317 | av_set_leave_one_nonspec (av_set_t *setp) | |
2318 | { | |
2319 | expr_t expr; | |
2320 | av_set_iterator i; | |
2321 | bool has_one_nonspec = false; | |
2322 | ||
48e1416a | 2323 | /* Keep all speculative exprs, and leave one non-speculative |
e1ab7874 | 2324 | (the first one). */ |
2325 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2326 | { | |
2327 | if (!EXPR_SPEC_DONE_DS (expr)) | |
2328 | { | |
2329 | if (has_one_nonspec) | |
2330 | av_set_iter_remove (&i); | |
2331 | else | |
2332 | has_one_nonspec = true; | |
2333 | } | |
2334 | } | |
2335 | } | |
2336 | ||
2337 | /* Return the N'th element of the SET. */ | |
2338 | expr_t | |
2339 | av_set_element (av_set_t set, int n) | |
2340 | { | |
2341 | expr_t expr; | |
2342 | av_set_iterator i; | |
2343 | ||
2344 | FOR_EACH_EXPR (expr, i, set) | |
2345 | if (n-- == 0) | |
2346 | return expr; | |
2347 | ||
2348 | gcc_unreachable (); | |
2349 | return NULL; | |
2350 | } | |
2351 | ||
2352 | /* Deletes all expressions from AVP that are conditional branches (IFs). */ | |
2353 | void | |
2354 | av_set_substract_cond_branches (av_set_t *avp) | |
2355 | { | |
2356 | av_set_iterator i; | |
2357 | expr_t expr; | |
2358 | ||
2359 | FOR_EACH_EXPR_1 (expr, i, avp) | |
2360 | if (vinsn_cond_branch_p (EXPR_VINSN (expr))) | |
2361 | av_set_iter_remove (&i); | |
2362 | } | |
2363 | ||
48e1416a | 2364 | /* Multiplies the usefulness attribute of each member of av-set AV by | |
e1ab7874 | 2365 | the value PROB / ALL_PROB. */ | |
2366 | void | |
2367 | av_set_split_usefulness (av_set_t av, int prob, int all_prob) | |
2368 | { | |
2369 | av_set_iterator i; | |
2370 | expr_t expr; | |
2371 | ||
2372 | FOR_EACH_EXPR (expr, i, av) | |
48e1416a | 2373 | EXPR_USEFULNESS (expr) = (all_prob |
e1ab7874 | 2374 | ? (EXPR_USEFULNESS (expr) * prob) / all_prob |
2375 | : 0); | |
2376 | } | |
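/* For example, with ALL_PROB == REG_BR_PROB_BASE == 10000 and a successor
   reached with probability 0.3 (PROB == 3000), an expr of usefulness
   10000 is scaled to (10000 * 3000) / 10000 == 3000.  Summing the scaled
   values from all successors in merge_expr_data restores the
   REG_BR_PROB_BASE total that merge_expr asserts.  */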
2377 | ||
2378 | /* Leave in AVP only those expressions which are present in AV, | |
c53624fb | 2379 | merging their histories of changes. */ | |
e1ab7874 | 2380 | void |
c53624fb | 2381 | av_set_code_motion_filter (av_set_t *avp, av_set_t av) |
e1ab7874 | 2382 | { |
2383 | av_set_iterator i; | |
c53624fb | 2384 | expr_t expr, expr2; |
e1ab7874 | 2385 | |
2386 | FOR_EACH_EXPR_1 (expr, i, avp) | |
c53624fb | 2387 | if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) |
e1ab7874 | 2388 | av_set_iter_remove (&i); |
c53624fb | 2389 | else |
2390 | /* When updating av sets in bookkeeping blocks, we can add more insns | |
2391 | there which will be transformed but the upper av sets will not | |
2392 | reflect those transformations. We then fail to undo those | |
2393 | when searching for such insns. So merge the history saved | |
2394 | in the av set of the block we are processing. */ | |
2395 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), | |
2396 | EXPR_HISTORY_OF_CHANGES (expr2)); | |
e1ab7874 | 2397 | } |
2398 | ||
2399 | \f | |
2400 | ||
2401 | /* Dependence hooks to initialize insn data. */ | |
2402 | ||
2403 | /* This is used in hooks callable from dependence analysis when initializing | |
2404 | an instruction's data. */ | |
2405 | static struct | |
2406 | { | |
2407 | /* Where the dependence was found (lhs/rhs). */ | |
2408 | deps_where_t where; | |
2409 | ||
2410 | /* The actual data object to initialize. */ | |
2411 | idata_t id; | |
2412 | ||
2413 | /* True when the insn should not be made clonable. */ | |
2414 | bool force_unique_p; | |
2415 | ||
2416 | /* True when insn should be treated as of type USE, i.e. never renamed. */ | |
2417 | bool force_use_p; | |
2418 | } deps_init_id_data; | |
2419 | ||
2420 | ||
48e1416a | 2421 | /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be |
e1ab7874 | 2422 | clonable. */ |
2423 | static void | |
2424 | setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) | |
2425 | { | |
2426 | int type; | |
48e1416a | 2427 | |
e1ab7874 | 2428 | /* Determine whether INSN could be cloned and return appropriate vinsn type. |
2429 | Clonable insns that can be separated into lhs and rhs have type SET. | |
2430 | Other clonable insns have type USE. */ | |
2431 | type = GET_CODE (insn); | |
2432 | ||
2433 | /* Only regular insns can be cloned. */ | |
2434 | if (type == INSN && !force_unique_p) | |
2435 | type = SET; | |
2436 | else if (type == JUMP_INSN && simplejump_p (insn)) | |
2437 | type = PC; | |
9845d120 | 2438 | else if (type == DEBUG_INSN) |
2439 | type = !force_unique_p ? USE : INSN; | |
48e1416a | 2440 | |
e1ab7874 | 2441 | IDATA_TYPE (id) = type; |
2442 | IDATA_REG_SETS (id) = get_clear_regset_from_pool (); | |
2443 | IDATA_REG_USES (id) = get_clear_regset_from_pool (); | |
2444 | IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); | |
2445 | } | |
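/* Illustrative outcomes of the classification above: a separable insn
   like "r1 = r2 + r3" gets type SET (clonable and renamable); a simple
   unconditional jump gets type PC; asms and other force-unique insns
   keep their rtx code (INSN) as the type; debug insns become USE unless
   forced unique.  */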
2446 | ||
2447 | /* Start initializing insn data. */ | |
2448 | static void | |
2449 | deps_init_id_start_insn (insn_t insn) | |
2450 | { | |
2451 | gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); | |
2452 | ||
2453 | setup_id_for_insn (deps_init_id_data.id, insn, | |
2454 | deps_init_id_data.force_unique_p); | |
2455 | deps_init_id_data.where = DEPS_IN_INSN; | |
2456 | } | |
2457 | ||
2458 | /* Start initializing lhs data. */ | |
2459 | static void | |
2460 | deps_init_id_start_lhs (rtx lhs) | |
2461 | { | |
2462 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2463 | gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); | |
2464 | ||
2465 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2466 | { | |
2467 | IDATA_LHS (deps_init_id_data.id) = lhs; | |
2468 | deps_init_id_data.where = DEPS_IN_LHS; | |
2469 | } | |
2470 | } | |
2471 | ||
2472 | /* Finish initializing lhs data. */ | |
2473 | static void | |
2474 | deps_init_id_finish_lhs (void) | |
2475 | { | |
2476 | deps_init_id_data.where = DEPS_IN_INSN; | |
2477 | } | |
2478 | ||
2479 | /* Note a set of REGNO. */ | |
2480 | static void | |
2481 | deps_init_id_note_reg_set (int regno) | |
2482 | { | |
2483 | haifa_note_reg_set (regno); | |
2484 | ||
2485 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2486 | deps_init_id_data.force_use_p = true; | |
2487 | ||
2488 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2489 | SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); | |
2490 | ||
2491 | #ifdef STACK_REGS | |
48e1416a | 2492 | /* Make instructions that set stack registers ineligible for | |
e1ab7874 | 2493 | renaming to avoid issues with find_used_regs. */ |
2494 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2495 | deps_init_id_data.force_use_p = true; | |
2496 | #endif | |
2497 | } | |
2498 | ||
2499 | /* Note a clobber of REGNO. */ | |
2500 | static void | |
2501 | deps_init_id_note_reg_clobber (int regno) | |
2502 | { | |
2503 | haifa_note_reg_clobber (regno); | |
2504 | ||
2505 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2506 | deps_init_id_data.force_use_p = true; | |
2507 | ||
2508 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2509 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); | |
2510 | } | |
2511 | ||
2512 | /* Note a use of REGNO. */ | |
2513 | static void | |
2514 | deps_init_id_note_reg_use (int regno) | |
2515 | { | |
2516 | haifa_note_reg_use (regno); | |
2517 | ||
2518 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2519 | SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); | |
2520 | } | |
2521 | ||
2522 | /* Start initializing rhs data. */ | |
2523 | static void | |
2524 | deps_init_id_start_rhs (rtx rhs) | |
2525 | { | |
2526 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2527 | ||
2528 | /* And there was no sel_deps_reset_to_insn (). */ | |
2529 | if (IDATA_LHS (deps_init_id_data.id) != NULL) | |
2530 | { | |
2531 | IDATA_RHS (deps_init_id_data.id) = rhs; | |
2532 | deps_init_id_data.where = DEPS_IN_RHS; | |
2533 | } | |
2534 | } | |
2535 | ||
2536 | /* Finish initializing rhs data. */ | |
2537 | static void | |
2538 | deps_init_id_finish_rhs (void) | |
2539 | { | |
2540 | gcc_assert (deps_init_id_data.where == DEPS_IN_RHS | |
2541 | || deps_init_id_data.where == DEPS_IN_INSN); | |
2542 | deps_init_id_data.where = DEPS_IN_INSN; | |
2543 | } | |
2544 | ||
2545 | /* Finish initializing insn data. */ | |
2546 | static void | |
2547 | deps_init_id_finish_insn (void) | |
2548 | { | |
2549 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2550 | ||
2551 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2552 | { | |
2553 | rtx lhs = IDATA_LHS (deps_init_id_data.id); | |
2554 | rtx rhs = IDATA_RHS (deps_init_id_data.id); | |
2555 | ||
2556 | if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) | |
2557 | || deps_init_id_data.force_use_p) | |
2558 | { | |
48e1416a | 2559 | /* This should be a USE, as we don't want to schedule its RHS |
e1ab7874 | 2560 | separately. However, we still want to have them recorded |
48e1416a | 2561 | for the purposes of substitution. That's why we don't |
e1ab7874 | 2562 | simply call downgrade_to_use () here. */ |
2563 | gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); | |
2564 | gcc_assert (!lhs == !rhs); | |
2565 | ||
2566 | IDATA_TYPE (deps_init_id_data.id) = USE; | |
2567 | } | |
2568 | } | |
2569 | ||
2570 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2571 | } | |
2572 | ||
2573 | /* This is dependence info used for initializing insn's data. */ | |
2574 | static struct sched_deps_info_def deps_init_id_sched_deps_info; | |
2575 | ||
2576 | /* This initializes most of the static part of the above structure. */ | |
2577 | static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = | |
2578 | { | |
2579 | NULL, | |
2580 | ||
2581 | deps_init_id_start_insn, | |
2582 | deps_init_id_finish_insn, | |
2583 | deps_init_id_start_lhs, | |
2584 | deps_init_id_finish_lhs, | |
2585 | deps_init_id_start_rhs, | |
2586 | deps_init_id_finish_rhs, | |
2587 | deps_init_id_note_reg_set, | |
2588 | deps_init_id_note_reg_clobber, | |
2589 | deps_init_id_note_reg_use, | |
2590 | NULL, /* note_mem_dep */ | |
2591 | NULL, /* note_dep */ | |
2592 | ||
2593 | 0, /* use_cselib */ | |
2594 | 0, /* use_deps_list */ | |
2595 | 0 /* generate_spec_deps */ | |
2596 | }; | |
2597 | ||
2598 | /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, | |
2599 | we don't actually need information about lhs and rhs. */ | |
2600 | static void | |
2601 | setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) | |
2602 | { | |
2603 | rtx pat = PATTERN (insn); | |
48e1416a | 2604 | |
971ba038 | 2605 | if (NONJUMP_INSN_P (insn) |
48e1416a | 2606 | && GET_CODE (pat) == SET |
e1ab7874 | 2607 | && !force_unique_p) |
2608 | { | |
2609 | IDATA_RHS (id) = SET_SRC (pat); | |
2610 | IDATA_LHS (id) = SET_DEST (pat); | |
2611 | } | |
2612 | else | |
2613 | IDATA_LHS (id) = IDATA_RHS (id) = NULL; | |
2614 | } | |
2615 | ||
2616 | /* Possibly downgrade INSN to USE. */ | |
2617 | static void | |
2618 | maybe_downgrade_id_to_use (idata_t id, insn_t insn) | |
2619 | { | |
2620 | bool must_be_use = false; | |
be10bb5a | 2621 | df_ref def; |
e1ab7874 | 2622 | rtx lhs = IDATA_LHS (id); |
2623 | rtx rhs = IDATA_RHS (id); | |
48e1416a | 2624 | |
e1ab7874 | 2625 | /* We downgrade only SETs. */ |
2626 | if (IDATA_TYPE (id) != SET) | |
2627 | return; | |
2628 | ||
2629 | if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) | |
2630 | { | |
2631 | IDATA_TYPE (id) = USE; | |
2632 | return; | |
2633 | } | |
48e1416a | 2634 | |
be10bb5a | 2635 | FOR_EACH_INSN_DEF (def, insn) |
e1ab7874 | 2636 | { |
e1ab7874 | 2637 | if (DF_REF_INSN (def) |
2638 | && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) | |
2639 | && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) | |
2640 | { | |
2641 | must_be_use = true; | |
2642 | break; | |
2643 | } | |
2644 | ||
2645 | #ifdef STACK_REGS | |
48e1416a | 2646 | /* Make instructions that set stack registers ineligible for | |
e1ab7874 | 2647 | renaming to avoid issues with find_used_regs. */ |
2648 | if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) | |
2649 | { | |
2650 | must_be_use = true; | |
2651 | break; | |
2652 | } | |
2653 | #endif | |
48e1416a | 2654 | } |
2655 | ||
e1ab7874 | 2656 | if (must_be_use) |
2657 | IDATA_TYPE (id) = USE; | |
2658 | } | |
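/* For instance, a load with a post-increment address, "r1 = *(r2++)",
   has a def of r2 flagged DF_REF_PRE_POST_MODIFY whose location appears
   inside the RHS, so the insn is downgraded to USE and is not considered
   for renaming of r1.  */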
2659 | ||
acf58115 | 2660 | /* Setup implicit register clobbers calculated by sched-deps for INSN |
2661 | before reload and save them in ID. */ | |
2662 | static void | |
2663 | setup_id_implicit_regs (idata_t id, insn_t insn) | |
2664 | { | |
2665 | if (reload_completed) | |
2666 | return; | |
2667 | ||
2668 | HARD_REG_SET temp; | |
2669 | unsigned regno; | |
2670 | hard_reg_set_iterator hrsi; | |
2671 | ||
2672 | get_implicit_reg_pending_clobbers (&temp, insn); | |
2673 | EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) | |
2674 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); | |
2675 | } | |
2676 | ||
e1ab7874 | 2677 | /* Setup register sets describing INSN in ID. */ |
2678 | static void | |
2679 | setup_id_reg_sets (idata_t id, insn_t insn) | |
2680 | { | |
be10bb5a | 2681 | struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
2682 | df_ref def, use; | |
e1ab7874 | 2683 | regset tmp = get_clear_regset_from_pool (); |
48e1416a | 2684 | |
be10bb5a | 2685 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
e1ab7874 | 2686 | { |
e1ab7874 | 2687 | unsigned int regno = DF_REF_REGNO (def); |
48e1416a | 2688 | |
e1ab7874 | 2689 | /* Post modifies are treated like clobbers by sched-deps.c. */ |
2690 | if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER | |
2691 | | DF_REF_PRE_POST_MODIFY))) | |
2692 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); | |
2693 | else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) | |
2694 | { | |
2695 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); | |
2696 | ||
2697 | #ifdef STACK_REGS | |
48e1416a | 2698 | /* For stack registers, treat writes to them as writes |
e1ab7874 | 2699 | to the first one to be consistent with sched-deps.c. */ |
2700 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2701 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); | |
2702 | #endif | |
2703 | } | |
2704 | /* Mark special refs that generate read/write def pair. */ | |
2705 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) | |
2706 | || regno == STACK_POINTER_REGNUM) | |
2707 | bitmap_set_bit (tmp, regno); | |
2708 | } | |
48e1416a | 2709 | |
be10bb5a | 2710 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
e1ab7874 | 2711 | { |
e1ab7874 | 2712 | unsigned int regno = DF_REF_REGNO (use); |
2713 | ||
2714 | /* When these refs are met for the first time, skip them, as | |
2715 | these uses are just counterparts of some defs. */ | |
2716 | if (bitmap_bit_p (tmp, regno)) | |
2717 | bitmap_clear_bit (tmp, regno); | |
2718 | else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) | |
2719 | { | |
2720 | SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); | |
2721 | ||
2722 | #ifdef STACK_REGS | |
48e1416a | 2723 | /* For stack registers, treat reads from them as reads from |
e1ab7874 | 2724 | the first one to be consistent with sched-deps.c. */ |
2725 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2726 | SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); | |
2727 | #endif | |
2728 | } | |
2729 | } | |
2730 | ||
acf58115 | 2731 | /* Also get implicit reg clobbers from sched-deps. */ |
2732 | setup_id_implicit_regs (id, insn); | |
2733 | ||
e1ab7874 | 2734 | return_regset_to_pool (tmp); |
2735 | } | |
2736 | ||
2737 | /* Initialize instruction data for INSN in ID using DF's data. */ | |
2738 | static void | |
2739 | init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) | |
2740 | { | |
2741 | gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); | |
2742 | ||
2743 | setup_id_for_insn (id, insn, force_unique_p); | |
2744 | setup_id_lhs_rhs (id, insn, force_unique_p); | |
2745 | ||
2746 | if (INSN_NOP_P (insn)) | |
2747 | return; | |
2748 | ||
2749 | maybe_downgrade_id_to_use (id, insn); | |
2750 | setup_id_reg_sets (id, insn); | |
2751 | } | |
2752 | ||
2753 | /* Initialize instruction data for INSN in ID. */ | |
2754 | static void | |
2755 | deps_init_id (idata_t id, insn_t insn, bool force_unique_p) | |
2756 | { | |
68e419a1 | 2757 | struct deps_desc _dc, *dc = &_dc; |
e1ab7874 | 2758 | |
2759 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2760 | deps_init_id_data.id = id; | |
2761 | deps_init_id_data.force_unique_p = force_unique_p; | |
2762 | deps_init_id_data.force_use_p = false; | |
2763 | ||
d9ab2038 | 2764 | init_deps (dc, false); |
e1ab7874 | 2765 | memcpy (&deps_init_id_sched_deps_info, |
2766 | &const_deps_init_id_sched_deps_info, | |
2767 | sizeof (deps_init_id_sched_deps_info)); | |
e1ab7874 | 2768 | if (spec_info != NULL) |
2769 | deps_init_id_sched_deps_info.generate_spec_deps = 1; | |
e1ab7874 | 2770 | sched_deps_info = &deps_init_id_sched_deps_info; |
2771 | ||
2f3c9801 | 2772 | deps_analyze_insn (dc, insn); |
acf58115 | 2773 | /* Implicit reg clobbers are received from sched-deps separately. */ | |
2774 | setup_id_implicit_regs (id, insn); | |
e1ab7874 | 2775 | |
2776 | free_deps (dc); | |
e1ab7874 | 2777 | deps_init_id_data.id = NULL; |
2778 | } | |
2779 | ||
2780 | \f | |
52d7e28c | 2781 | struct sched_scan_info_def |
2782 | { | |
2783 | /* This hook notifies the scheduler frontend to extend its internal per basic | |
2784 | block data structures. This hook should be called once before a series of | |
2785 | calls to bb_init (). */ | |
2786 | void (*extend_bb) (void); | |
2787 | ||
2788 | /* This hook makes the scheduler frontend initialize its internal data | |
2789 | structures for the passed basic block. */ | |
2790 | void (*init_bb) (basic_block); | |
2791 | ||
2792 | /* This hook notifies the scheduler frontend to extend its internal per insn data | |
2793 | structures. This hook should be called once before a series of calls to | |
2794 | insn_init (). */ | |
2795 | void (*extend_insn) (void); | |
2796 | ||
2797 | /* This hook makes the scheduler frontend initialize its internal data | |
2798 | structures for the passed insn. */ | |
2f3c9801 | 2799 | void (*init_insn) (insn_t); |
52d7e28c | 2800 | }; |
2801 | ||
2802 | /* A driver function to add a set of basic blocks (BBS) to the | |
2803 | scheduling region. */ | |
2804 | static void | |
2805 | sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) | |
2806 | { | |
2807 | unsigned i; | |
2808 | basic_block bb; | |
2809 | ||
2810 | if (ssi->extend_bb) | |
2811 | ssi->extend_bb (); | |
2812 | ||
2813 | if (ssi->init_bb) | |
f1f41a6c | 2814 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2815 | ssi->init_bb (bb); |
2816 | ||
2817 | if (ssi->extend_insn) | |
2818 | ssi->extend_insn (); | |
2819 | ||
2820 | if (ssi->init_insn) | |
f1f41a6c | 2821 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2822 | { |
2f3c9801 | 2823 | rtx_insn *insn; |
52d7e28c | 2824 | |
2825 | FOR_BB_INSNS (bb, insn) | |
2826 | ssi->init_insn (insn); | |
2827 | } | |
2828 | } | |
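/* A minimal caller sketch (sel_init_global_and_expr below is a real
   instance): fill in only the hooks needed, leave the rest NULL, and
   pass the region's basic blocks.  MY_INIT_BB and MY_INIT_INSN are
   hypothetical hook names.

     const struct sched_scan_info_def ssi =
       { NULL, my_init_bb, NULL, my_init_insn };
     sched_scan (&ssi, bbs);
 */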
e1ab7874 | 2829 | |
2830 | /* Implement hooks for collecting fundamental insn properties like if insn is | |
2831 | an ASM or is within a SCHED_GROUP. */ | |
2832 | ||
2833 | /* True when the "one-time init" data for INSN was already inited. */ | |
2834 | static bool | |
2835 | first_time_insn_init (insn_t insn) | |
2836 | { | |
2837 | return INSN_LIVE (insn) == NULL; | |
2838 | } | |
2839 | ||
2840 | /* Hash an entry in a transformed_insns hashtable. */ | |
2841 | static hashval_t | |
2842 | hash_transformed_insns (const void *p) | |
2843 | { | |
2844 | return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2845 | } | |
2846 | ||
2847 | /* Compare the entries in a transformed_insns hashtable. */ | |
2848 | static int | |
2849 | eq_transformed_insns (const void *p, const void *q) | |
2850 | { | |
04d073df | 2851 | rtx_insn *i1 = |
2852 | VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2853 | rtx_insn *i2 = | |
2854 | VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); | |
e1ab7874 | 2855 | |
2856 | if (INSN_UID (i1) == INSN_UID (i2)) | |
2857 | return 1; | |
2858 | return rtx_equal_p (PATTERN (i1), PATTERN (i2)); | |
2859 | } | |
2860 | ||
2861 | /* Free an entry in a transformed_insns hashtable. */ | |
2862 | static void | |
2863 | free_transformed_insns (void *p) | |
2864 | { | |
2865 | struct transformed_insns *pti = (struct transformed_insns *) p; | |
2866 | ||
2867 | vinsn_detach (pti->vinsn_old); | |
2868 | vinsn_detach (pti->vinsn_new); | |
2869 | free (pti); | |
2870 | } | |
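/* These three callbacks are wired into a libiberty hashtable; the actual
   creation happens in init_first_time_insn_data below:

     htab_create (16, hash_transformed_insns,
                  eq_transformed_insns, free_transformed_insns);  */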
2871 | ||
48e1416a | 2872 | /* Init the s_i_d data for INSN which should be inited just once, when |
e1ab7874 | 2873 | we first see the insn. */ |
2874 | static void | |
2875 | init_first_time_insn_data (insn_t insn) | |
2876 | { | |
2877 | /* This should not be set if this is the first time we init data for | |
2878 | insn. */ | |
2879 | gcc_assert (first_time_insn_init (insn)); | |
48e1416a | 2880 | |
e1ab7874 | 2881 | /* These are needed for nops too. */ |
2882 | INSN_LIVE (insn) = get_regset_from_pool (); | |
2883 | INSN_LIVE_VALID_P (insn) = false; | |
d9ab2038 | 2884 | |
e1ab7874 | 2885 | if (!INSN_NOP_P (insn)) |
2886 | { | |
2887 | INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); | |
2888 | INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); | |
48e1416a | 2889 | INSN_TRANSFORMED_INSNS (insn) |
e1ab7874 | 2890 | = htab_create (16, hash_transformed_insns, |
2891 | eq_transformed_insns, free_transformed_insns); | |
d9ab2038 | 2892 | init_deps (&INSN_DEPS_CONTEXT (insn), true); |
e1ab7874 | 2893 | } |
2894 | } | |
2895 | ||
48e1416a | 2896 | /* Free almost all above data for INSN that is scheduled already. |
d9ab2038 | 2897 | Used for extra-large basic blocks. */ |
2898 | void | |
2899 | free_data_for_scheduled_insn (insn_t insn) | |
e1ab7874 | 2900 | { |
2901 | gcc_assert (! first_time_insn_init (insn)); | |
48e1416a | 2902 | |
d9ab2038 | 2903 | if (! INSN_ANALYZED_DEPS (insn)) |
2904 | return; | |
48e1416a | 2905 | |
e1ab7874 | 2906 | BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); |
2907 | BITMAP_FREE (INSN_FOUND_DEPS (insn)); | |
2908 | htab_delete (INSN_TRANSFORMED_INSNS (insn)); | |
48e1416a | 2909 | |
e1ab7874 | 2910 | /* This is allocated only for bookkeeping insns. */ |
2911 | if (INSN_ORIGINATORS (insn)) | |
2912 | BITMAP_FREE (INSN_ORIGINATORS (insn)); | |
2913 | free_deps (&INSN_DEPS_CONTEXT (insn)); | |
d9ab2038 | 2914 | |
2915 | INSN_ANALYZED_DEPS (insn) = NULL; | |
2916 | ||
48e1416a | 2917 | /* Clear the readonly flag so we would ICE when trying to recalculate |
d9ab2038 | 2918 | the deps context (as we believe that it should not happen). */ |
2919 | (&INSN_DEPS_CONTEXT (insn))->readonly = 0; | |
2920 | } | |
2921 | ||
2922 | /* Free the same data as above for INSN. */ | |
2923 | static void | |
2924 | free_first_time_insn_data (insn_t insn) | |
2925 | { | |
2926 | gcc_assert (! first_time_insn_init (insn)); | |
2927 | ||
2928 | free_data_for_scheduled_insn (insn); | |
2929 | return_regset_to_pool (INSN_LIVE (insn)); | |
2930 | INSN_LIVE (insn) = NULL; | |
2931 | INSN_LIVE_VALID_P (insn) = false; | |
e1ab7874 | 2932 | } |
2933 | ||
2934 | /* Initialize region-scope data structures for basic blocks. */ | |
2935 | static void | |
2936 | init_global_and_expr_for_bb (basic_block bb) | |
2937 | { | |
2938 | if (sel_bb_empty_p (bb)) | |
2939 | return; | |
2940 | ||
2941 | invalidate_av_set (bb); | |
2942 | } | |
2943 | ||
2944 | /* Data for global dependency analysis (to initialize CANT_MOVE and | |
2945 | SCHED_GROUP_P). */ | |
2946 | static struct | |
2947 | { | |
2948 | /* Previous insn. */ | |
2949 | insn_t prev_insn; | |
2950 | } init_global_data; | |
2951 | ||
2952 | /* Determine if INSN is in the sched_group, is an asm, or should not be | |
2953 | cloned. After that, initialize its expr. */ | |
2954 | static void | |
2955 | init_global_and_expr_for_insn (insn_t insn) | |
2956 | { | |
2957 | if (LABEL_P (insn)) | |
2958 | return; | |
2959 | ||
2960 | if (NOTE_INSN_BASIC_BLOCK_P (insn)) | |
2961 | { | |
2f3c9801 | 2962 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2963 | return; |
2964 | } | |
2965 | ||
2966 | gcc_assert (INSN_P (insn)); | |
2967 | ||
2968 | if (SCHED_GROUP_P (insn)) | |
2969 | /* Setup a sched_group. */ | |
2970 | { | |
2971 | insn_t prev_insn = init_global_data.prev_insn; | |
2972 | ||
2973 | if (prev_insn) | |
2974 | INSN_SCHED_NEXT (prev_insn) = insn; | |
2975 | ||
2976 | init_global_data.prev_insn = insn; | |
2977 | } | |
2978 | else | |
2f3c9801 | 2979 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2980 | |
2981 | if (GET_CODE (PATTERN (insn)) == ASM_INPUT | |
2982 | || asm_noperands (PATTERN (insn)) >= 0) | |
2983 | /* Mark INSN as an asm. */ | |
2984 | INSN_ASM_P (insn) = true; | |
2985 | ||
2986 | { | |
2987 | bool force_unique_p; | |
2988 | ds_t spec_done_ds; | |
2989 | ||
982b0787 | 2990 | /* Certain instructions cannot be cloned, and frame-related insns and | |
2991 | the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of | |
2992 | their block. */ | |
2993 | if (prologue_epilogue_contains (insn)) | |
2994 | { | |
2995 | if (RTX_FRAME_RELATED_P (insn)) | |
2996 | CANT_MOVE (insn) = 1; | |
2997 | else | |
2998 | { | |
2999 | rtx note; | |
3000 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
3001 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE | |
3002 | && ((enum insn_note) INTVAL (XEXP (note, 0)) | |
3003 | == NOTE_INSN_EPILOGUE_BEG)) | |
3004 | { | |
3005 | CANT_MOVE (insn) = 1; | |
3006 | break; | |
3007 | } | |
3008 | } | |
3009 | force_unique_p = true; | |
3010 | } | |
e1ab7874 | 3011 | else |
982b0787 | 3012 | if (CANT_MOVE (insn) |
3013 | || INSN_ASM_P (insn) | |
3014 | || SCHED_GROUP_P (insn) | |
a8d6ade3 | 3015 | || CALL_P (insn) |
982b0787 | 3016 | /* Exception handling insns are always unique. */ |
3017 | || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) | |
3018 | /* TRAP_IF though have an INSN code is control_flow_insn_p (). */ | |
13434dcb | 3019 | || control_flow_insn_p (insn) |
3020 | || volatile_insn_p (PATTERN (insn)) | |
3021 | || (targetm.cannot_copy_insn_p | |
3022 | && targetm.cannot_copy_insn_p (insn))) | |
982b0787 | 3023 | force_unique_p = true; |
3024 | else | |
3025 | force_unique_p = false; | |
e1ab7874 | 3026 | |
3027 | if (targetm.sched.get_insn_spec_ds) | |
3028 | { | |
3029 | spec_done_ds = targetm.sched.get_insn_spec_ds (insn); | |
3030 | spec_done_ds = ds_get_max_dep_weak (spec_done_ds); | |
3031 | } | |
3032 | else | |
3033 | spec_done_ds = 0; | |
3034 | ||
3035 | /* Initialize INSN's expr. */ | |
3036 | init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, | |
3037 | REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), | |
1e094109 | 3038 | spec_done_ds, 0, 0, vNULL, true, |
f1f41a6c | 3039 | false, false, false, CANT_MOVE (insn)); |
e1ab7874 | 3040 | } |
3041 | ||
3042 | init_first_time_insn_data (insn); | |
3043 | } | |
3044 | ||
3045 | /* Scan the region and initialize instruction data for basic blocks BBS. */ | |
3046 | void | |
3047 | sel_init_global_and_expr (bb_vec_t bbs) | |
3048 | { | |
3049 | /* ??? It would be nice to implement push / pop scheme for sched_infos. */ | |
3050 | const struct sched_scan_info_def ssi = | |
3051 | { | |
3052 | NULL, /* extend_bb */ | |
3053 | init_global_and_expr_for_bb, /* init_bb */ | |
3054 | extend_insn_data, /* extend_insn */ | |
3055 | init_global_and_expr_for_insn /* init_insn */ | |
3056 | }; | |
48e1416a | 3057 | |
52d7e28c | 3058 | sched_scan (&ssi, bbs); |
e1ab7874 | 3059 | } |
3060 | ||
3061 | /* Finalize region-scope data structures for basic blocks. */ | |
3062 | static void | |
3063 | finish_global_and_expr_for_bb (basic_block bb) | |
3064 | { | |
3065 | av_set_clear (&BB_AV_SET (bb)); | |
3066 | BB_AV_LEVEL (bb) = 0; | |
3067 | } | |
3068 | ||
3069 | /* Finalize INSN's data. */ | |
3070 | static void | |
3071 | finish_global_and_expr_insn (insn_t insn) | |
3072 | { | |
3073 | if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) | |
3074 | return; | |
3075 | ||
3076 | gcc_assert (INSN_P (insn)); | |
3077 | ||
3078 | if (INSN_LUID (insn) > 0) | |
3079 | { | |
3080 | free_first_time_insn_data (insn); | |
3081 | INSN_WS_LEVEL (insn) = 0; | |
3082 | CANT_MOVE (insn) = 0; | |
48e1416a | 3083 | |
3084 | /* We can no longer assert this, as vinsns of this insn could be | |
3085 | easily live in other insns' caches. This should be changed to | |
e1ab7874 | 3086 | a counter-like approach among all vinsns. */ |
3087 | gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); | |
3088 | clear_expr (INSN_EXPR (insn)); | |
3089 | } | |
3090 | } | |
3091 | ||
3092 | /* Finalize per instruction data for the whole region. */ | |
3093 | void | |
3094 | sel_finish_global_and_expr (void) | |
3095 | { | |
3096 | { | |
3097 | bb_vec_t bbs; | |
3098 | int i; | |
3099 | ||
f1f41a6c | 3100 | bbs.create (current_nr_blocks); |
e1ab7874 | 3101 | |
3102 | for (i = 0; i < current_nr_blocks; i++) | |
f5a6b05f | 3103 | bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
e1ab7874 | 3104 | |
3105 | /* Clear AV_SETs and INSN_EXPRs. */ | |
3106 | { | |
3107 | const struct sched_scan_info_def ssi = | |
3108 | { | |
3109 | NULL, /* extend_bb */ | |
3110 | finish_global_and_expr_for_bb, /* init_bb */ | |
3111 | NULL, /* extend_insn */ | |
3112 | finish_global_and_expr_insn /* init_insn */ | |
3113 | }; | |
3114 | ||
52d7e28c | 3115 | sched_scan (&ssi, bbs); |
e1ab7874 | 3116 | } |
3117 | ||
f1f41a6c | 3118 | bbs.release (); |
e1ab7874 | 3119 | } |
3120 | ||
3121 | finish_insns (); | |
3122 | } | |
3123 | \f | |
3124 | ||
48e1416a | 3125 | /* In the below hooks, we merely calculate whether or not a dependence |
3126 | exists, and in what part of the insn. However, we will need more data | |
e1ab7874 | 3127 | when we start caching dependence requests. */ | |
3128 | ||
3129 | /* Container to hold information for dependency analysis. */ | |
3130 | static struct | |
3131 | { | |
3132 | deps_t dc; | |
3133 | ||
3134 | /* A variable to track which part of rtx we are scanning in | |
3135 | sched-deps.c: sched_analyze_insn (). */ | |
3136 | deps_where_t where; | |
3137 | ||
3138 | /* Current producer. */ | |
3139 | insn_t pro; | |
3140 | ||
3141 | /* Current consumer. */ | |
3142 | vinsn_t con; | |
3143 | ||
3144 | /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence. | |
3145 | X is from { INSN, LHS, RHS }. */ | |
3146 | ds_t has_dep_p[DEPS_IN_NOWHERE]; | |
3147 | } has_dependence_data; | |
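
/* Roughly, the note_* hooks below cooperate with this container as follows:
   while sched-deps.c: sched_analyze_insn () walks the consumer's pattern,
   WHERE tracks which part is being scanned (DEPS_IN_INSN / DEPS_IN_LHS /
   DEPS_IN_RHS), and each hook ORs the discovered dependence status into
   HAS_DEP_P[WHERE], e.g.

     has_dep_p[DEPS_IN_RHS] |= DEP_TRUE;

   for a read-after-write found while scanning the rhs.  The status bits
   (DEP_TRUE, DEP_OUTPUT, DEP_ANTI, SPECULATIVE) come from sched-int.h.  */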
3148 | ||
3149 | /* Start analyzing dependencies of INSN. */ | |
3150 | static void | |
3151 | has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) | |
3152 | { | |
3153 | gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); | |
3154 | ||
3155 | has_dependence_data.where = DEPS_IN_INSN; | |
3156 | } | |
3157 | ||
3158 | /* Finish analyzing dependencies of an insn. */ | |
3159 | static void | |
3160 | has_dependence_finish_insn (void) | |
3161 | { | |
3162 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3163 | ||
3164 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3165 | } | |
3166 | ||
3167 | /* Start analyzing dependencies of LHS. */ | |
3168 | static void | |
3169 | has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) | |
3170 | { | |
3171 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3172 | ||
3173 | if (VINSN_LHS (has_dependence_data.con) != NULL) | |
3174 | has_dependence_data.where = DEPS_IN_LHS; | |
3175 | } | |
3176 | ||
3177 | /* Finish analyzing dependencies of an lhs. */ | |
3178 | static void | |
3179 | has_dependence_finish_lhs (void) | |
3180 | { | |
3181 | has_dependence_data.where = DEPS_IN_INSN; | |
3182 | } | |
3183 | ||
3184 | /* Start analyzing dependencies of RHS. */ | |
3185 | static void | |
3186 | has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) | |
3187 | { | |
3188 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3189 | ||
3190 | if (VINSN_RHS (has_dependence_data.con) != NULL) | |
3191 | has_dependence_data.where = DEPS_IN_RHS; | |
3192 | } | |
3193 | ||
3194 | /* Finish analyzing dependencies of an rhs. */ | |
3195 | static void | |
3196 | has_dependence_finish_rhs (void) | |
3197 | { | |
3198 | gcc_assert (has_dependence_data.where == DEPS_IN_RHS | |
3199 | || has_dependence_data.where == DEPS_IN_INSN); | |
3200 | ||
3201 | has_dependence_data.where = DEPS_IN_INSN; | |
3202 | } | |
3203 | ||
3204 | /* Note a set of REGNO. */ | |
3205 | static void | |
3206 | has_dependence_note_reg_set (int regno) | |
3207 | { | |
3208 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3209 | ||
3210 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3211 | VINSN_INSN_RTX | |
3212 | (has_dependence_data.con))) | |
3213 | { | |
3214 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3215 | ||
3216 | if (reg_last->sets != NULL | |
3217 | || reg_last->clobbers != NULL) | |
3218 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
3219 | ||
a9bfd373 | 3220 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3221 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3222 | } | |
3223 | } | |
3224 | ||
3225 | /* Note a clobber of REGNO. */ | |
3226 | static void | |
3227 | has_dependence_note_reg_clobber (int regno) | |
3228 | { | |
3229 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3230 | ||
3231 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3232 | VINSN_INSN_RTX | |
3233 | (has_dependence_data.con))) | |
3234 | { | |
3235 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3236 | ||
3237 | if (reg_last->sets) | |
3238 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
48e1416a | 3239 | |
a9bfd373 | 3240 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3241 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3242 | } | |
3243 | } | |
3244 | ||
3245 | /* Note a use of REGNO. */ | |
3246 | static void | |
3247 | has_dependence_note_reg_use (int regno) | |
3248 | { | |
3249 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3250 | ||
3251 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3252 | VINSN_INSN_RTX | |
3253 | (has_dependence_data.con))) | |
3254 | { | |
3255 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3256 | ||
3257 | if (reg_last->sets) | |
3258 | *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; | |
3259 | ||
a9bfd373 | 3260 | if (reg_last->clobbers || reg_last->implicit_sets) |
e1ab7874 | 3261 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3262 | ||
b0691607 | 3263 | /* Merge BE_IN_SPEC bits into *DSP when the dependency producer |
3264 | is actually a check insn. We need to do this for any register | |
3265 | read-read dependency with the check unless we track properly | |
3266 | all registers written by BE_IN_SPEC-speculated insns, as | |
3267 | we don't have explicit dependence lists. See PR 53975. */ | |
e1ab7874 | 3268 | if (reg_last->uses) |
3269 | { | |
3270 | ds_t pro_spec_checked_ds; | |
3271 | ||
3272 | pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); | |
3273 | pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); | |
3274 | ||
b0691607 | 3275 | if (pro_spec_checked_ds != 0) |
e1ab7874 | 3276 | *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, |
3277 | NULL_RTX, NULL_RTX); | |
3278 | } | |
3279 | } | |
3280 | } | |
3281 | ||
3282 | /* Note a memory dependence. */ | |
3283 | static void | |
3284 | has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, | |
3285 | rtx pending_mem ATTRIBUTE_UNUSED, | |
3286 | insn_t pending_insn ATTRIBUTE_UNUSED, | |
3287 | ds_t ds ATTRIBUTE_UNUSED) | |
3288 | { | |
3289 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3290 | VINSN_INSN_RTX (has_dependence_data.con))) | |
3291 | { | |
3292 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3293 | ||
3294 | *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); | |
3295 | } | |
3296 | } | |
3297 | ||
3298 | /* Note a dependence. */ | |
3299 | static void | |
8ffee455 | 3300 | has_dependence_note_dep (insn_t pro, ds_t ds ATTRIBUTE_UNUSED) |
3301 | { | |
3302 | insn_t real_pro = has_dependence_data.pro; | |
3303 | insn_t real_con = VINSN_INSN_RTX (has_dependence_data.con); | |
3304 | ||
3305 | /* We do not allow debug insns to move through others unless they | |
3306 | are at the start of a bb. This movement may create bookkeeping copies | |
3307 | that later would not be able to move up, violating the invariant | |
3308 | that a bookkeeping copy should be movable as the original insn. | |
3309 | Detect that here and allow that movement if we allowed it before | |
3310 | in the first place. */ | |
0aa56820 | 3311 | if (DEBUG_INSN_P (real_con) && !DEBUG_INSN_P (real_pro) |
8ffee455 | 3312 | && INSN_UID (NEXT_INSN (pro)) == INSN_UID (real_con)) |
3313 | return; | |
3314 | ||
3315 | if (!sched_insns_conditions_mutex_p (real_pro, real_con)) | |
e1ab7874 | 3316 | { |
3317 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3318 | ||
3319 | *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); | |
3320 | } | |
3321 | } | |
3322 | ||
3323 | /* Mark the insn as having a hard dependence that prevents speculation. */ | |
3324 | void | |
3325 | sel_mark_hard_insn (rtx insn) | |
3326 | { | |
3327 | int i; | |
3328 | ||
3329 | /* Only work when we're in has_dependence_p mode. | |
3330 | ??? This is a hack; this should actually be a hook. */ | |
3331 | if (!has_dependence_data.dc || !has_dependence_data.pro) | |
3332 | return; | |
3333 | ||
3334 | gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); | |
3335 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3336 | ||
3337 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3338 | has_dependence_data.has_dep_p[i] &= ~SPECULATIVE; | |
3339 | } | |
3340 | ||
3341 | /* This structure holds the hooks for the dependency analysis used when | |
3342 | actually processing dependencies in the scheduler. */ | |
3343 | static struct sched_deps_info_def has_dependence_sched_deps_info; | |
3344 | ||
3345 | /* This initializes most of the fields of the above structure. */ | |
3346 | static const struct sched_deps_info_def const_has_dependence_sched_deps_info = | |
3347 | { | |
3348 | NULL, | |
3349 | ||
3350 | has_dependence_start_insn, | |
3351 | has_dependence_finish_insn, | |
3352 | has_dependence_start_lhs, | |
3353 | has_dependence_finish_lhs, | |
3354 | has_dependence_start_rhs, | |
3355 | has_dependence_finish_rhs, | |
3356 | has_dependence_note_reg_set, | |
3357 | has_dependence_note_reg_clobber, | |
3358 | has_dependence_note_reg_use, | |
3359 | has_dependence_note_mem_dep, | |
3360 | has_dependence_note_dep, | |
3361 | ||
3362 | 0, /* use_cselib */ | |
3363 | 0, /* use_deps_list */ | |
3364 | 0 /* generate_spec_deps */ | |
3365 | }; | |
3366 | ||
3367 | /* Initialize has_dependence_sched_deps_info with extra spec field. */ | |
3368 | static void | |
3369 | setup_has_dependence_sched_deps_info (void) | |
3370 | { | |
3371 | memcpy (&has_dependence_sched_deps_info, | |
3372 | &const_has_dependence_sched_deps_info, | |
3373 | sizeof (has_dependence_sched_deps_info)); | |
3374 | ||
3375 | if (spec_info != NULL) | |
3376 | has_dependence_sched_deps_info.generate_spec_deps = 1; | |
3377 | ||
3378 | sched_deps_info = &has_dependence_sched_deps_info; | |
3379 | } | |
3380 | ||
3381 | /* Remove all dependences found and recorded in has_dependence_data array. */ | |
3382 | void | |
3383 | sel_clear_has_dependence (void) | |
3384 | { | |
3385 | int i; | |
3386 | ||
3387 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3388 | has_dependence_data.has_dep_p[i] = 0; | |
3389 | } | |
3390 | ||
3391 | /* Return nonzero if EXPR is dependent upon PRED. Return the pointer | |
3392 | to the dependence information array in HAS_DEP_PP. */ | |
3393 | ds_t | |
3394 | has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp) | |
3395 | { | |
3396 | int i; | |
3397 | ds_t ds; | |
68e419a1 | 3398 | struct deps_desc *dc; |
e1ab7874 | 3399 | |
3400 | if (INSN_SIMPLEJUMP_P (pred)) | |
3401 | /* An unconditional jump is just a transfer of control flow. | |
3402 | Ignore it. */ | |
3403 | return false; | |
3404 | ||
3405 | dc = &INSN_DEPS_CONTEXT (pred); | |
d9ab2038 | 3406 | |
3407 | /* We init this field lazily. */ | |
3408 | if (dc->reg_last == NULL) | |
3409 | init_deps_reg_last (dc); | |
48e1416a | 3410 | |
e1ab7874 | 3411 | if (!dc->readonly) |
3412 | { | |
3413 | has_dependence_data.pro = NULL; | |
3414 | /* Initialize empty dep context with information about PRED. */ | |
3415 | advance_deps_context (dc, pred); | |
3416 | dc->readonly = 1; | |
3417 | } | |
3418 | ||
3419 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3420 | has_dependence_data.pro = pred; | |
3421 | has_dependence_data.con = EXPR_VINSN (expr); | |
3422 | has_dependence_data.dc = dc; | |
3423 | ||
3424 | sel_clear_has_dependence (); | |
3425 | ||
3426 | /* Now catch all dependencies that would be generated between PRED and | |
3427 | INSN. */ | |
3428 | setup_has_dependence_sched_deps_info (); | |
3429 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3430 | has_dependence_data.dc = NULL; | |
3431 | ||
3432 | /* When a barrier was found, set DEPS_IN_INSN bits. */ | |
3433 | if (dc->last_reg_pending_barrier == TRUE_BARRIER) | |
3434 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE; | |
3435 | else if (dc->last_reg_pending_barrier == MOVE_BARRIER) | |
3436 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
3437 | ||
3438 | /* Do not allow stores to memory to move through checks. Currently | |
3439 | we don't move this to sched-deps.c as the check doesn't have | |
48e1416a | 3440 | obvious places to which this dependence can be attached. |
e1ab7874 | 3441 | FIXME: this should go to a hook. */ |
3442 | if (EXPR_LHS (expr) | |
3443 | && MEM_P (EXPR_LHS (expr)) | |
3444 | && sel_insn_is_speculation_check (pred)) | |
3445 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
48e1416a | 3446 | |
e1ab7874 | 3447 | *has_dep_pp = has_dependence_data.has_dep_p; |
3448 | ds = 0; | |
3449 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3450 | ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i], | |
3451 | NULL_RTX, NULL_RTX); | |
3452 | ||
3453 | return ds; | |
3454 | } | |
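
/* A minimal usage sketch (variable names here are illustrative only; the
   real caller is the code-motion machinery in sel-sched.c):

     ds_t *has_dep_p;
     ds_t full_ds = has_dependence_p (expr, through_insn, &has_dep_p);

     if (full_ds == 0)
       ;  ...no dependence at all, EXPR may move through THROUGH_INSN...
     else if (has_dep_p[DEPS_IN_INSN] == 0 && (full_ds & SPECULATIVE))
       ;  ...only speculative lhs/rhs dependencies remain, so EXPR is a
             candidate for speculative code motion...

   Note that HAS_DEP_P points into the static has_dependence_data above and
   is only valid until the next call.  */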
3455 | \f | |
3456 | ||
48e1416a | 3457 | /* Dependence hooks implementation that checks dependence latency constraints |
3458 | on the insns being scheduled. The entry point for these routines is | |
3459 | tick_check_p predicate. */ | |
e1ab7874 | 3460 | |
3461 | static struct | |
3462 | { | |
3463 | /* An expr we are currently checking. */ | |
3464 | expr_t expr; | |
3465 | ||
3466 | /* A minimal cycle for its scheduling. */ | |
3467 | int cycle; | |
3468 | ||
3469 | /* Whether we have seen a true dependence while checking. */ | |
3470 | bool seen_true_dep_p; | |
3471 | } tick_check_data; | |
3472 | ||
3473 | /* Update the minimal scheduling cycle of the expr being checked, given that | |
3474 | it depends on PRO_INSN with status DS and weight DW. */ | |
3475 | static void | |
3476 | tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw) | |
3477 | { | |
3478 | expr_t con_expr = tick_check_data.expr; | |
3479 | insn_t con_insn = EXPR_INSN_RTX (con_expr); | |
3480 | ||
3481 | if (con_insn != pro_insn) | |
3482 | { | |
3483 | enum reg_note dt; | |
3484 | int tick; | |
3485 | ||
3486 | if (/* PROducer was removed from above due to pipelining. */ | |
3487 | !INSN_IN_STREAM_P (pro_insn) | |
3488 | /* Or PROducer was originally on the next iteration regarding the | |
3489 | CONsumer. */ | |
3490 | || (INSN_SCHED_TIMES (pro_insn) | |
3491 | - EXPR_SCHED_TIMES (con_expr)) > 1) | |
3492 | /* Don't count this dependence. */ | |
3493 | return; | |
3494 | ||
3495 | dt = ds_to_dt (ds); | |
3496 | if (dt == REG_DEP_TRUE) | |
3497 | tick_check_data.seen_true_dep_p = true; | |
3498 | ||
3499 | gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0); | |
3500 | ||
3501 | { | |
3502 | dep_def _dep, *dep = &_dep; | |
3503 | ||
3504 | init_dep (dep, pro_insn, con_insn, dt); | |
3505 | ||
3506 | tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw); | |
3507 | } | |
3508 | ||
3509 | /* When there are several kinds of dependencies between pro and con, | |
3510 | only REG_DEP_TRUE should be taken into account. */ | |
3511 | if (tick > tick_check_data.cycle | |
3512 | && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p)) | |
3513 | tick_check_data.cycle = tick; | |
3514 | } | |
3515 | } | |
3516 | ||
3517 | /* An implementation of note_dep hook. */ | |
3518 | static void | |
3519 | tick_check_note_dep (insn_t pro, ds_t ds) | |
3520 | { | |
3521 | tick_check_dep_with_dw (pro, ds, 0); | |
3522 | } | |
3523 | ||
3524 | /* An implementation of note_mem_dep hook. */ | |
3525 | static void | |
3526 | tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) | |
3527 | { | |
3528 | dw_t dw; | |
3529 | ||
3530 | dw = (ds_to_dt (ds) == REG_DEP_TRUE | |
3531 | ? estimate_dep_weak (mem1, mem2) | |
3532 | : 0); | |
3533 | ||
3534 | tick_check_dep_with_dw (pro, ds, dw); | |
3535 | } | |
3536 | ||
3537 | /* This structure contains hooks for dependence analysis used when determining | |
3538 | whether an insn is ready for scheduling. */ | |
3539 | static struct sched_deps_info_def tick_check_sched_deps_info = | |
3540 | { | |
3541 | NULL, | |
3542 | ||
3543 | NULL, | |
3544 | NULL, | |
3545 | NULL, | |
3546 | NULL, | |
3547 | NULL, | |
3548 | NULL, | |
3549 | haifa_note_reg_set, | |
3550 | haifa_note_reg_clobber, | |
3551 | haifa_note_reg_use, | |
3552 | tick_check_note_mem_dep, | |
3553 | tick_check_note_dep, | |
3554 | ||
3555 | 0, 0, 0 | |
3556 | }; | |
3557 | ||
3558 | /* Estimate number of cycles from the current cycle of FENCE until EXPR can be | |
3559 | scheduled. Return 0 if all data from producers in DC is ready. */ | |
3560 | int | |
3561 | tick_check_p (expr_t expr, deps_t dc, fence_t fence) | |
3562 | { | |
3563 | int cycles_left; | |
3564 | /* Initialize variables. */ | |
3565 | tick_check_data.expr = expr; | |
3566 | tick_check_data.cycle = 0; | |
3567 | tick_check_data.seen_true_dep_p = false; | |
3568 | sched_deps_info = &tick_check_sched_deps_info; | |
48e1416a | 3569 | |
e1ab7874 | 3570 | gcc_assert (!dc->readonly); |
3571 | dc->readonly = 1; | |
3572 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3573 | dc->readonly = 0; | |
3574 | ||
3575 | cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); | |
3576 | ||
3577 | return cycles_left >= 0 ? cycles_left : 0; | |
3578 | } | |
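
/* A minimal usage sketch (illustrative only; the actual caller lives in
   sel-sched.c).  Assuming FENCE_DC yields the dependence context stored in
   the fence:

     int delay = tick_check_p (expr, FENCE_DC (fence), fence);

     if (delay == 0)
       ;  ...all producers have delivered their results, EXPR may be
             scheduled on the current cycle of FENCE...
     else
       ;  ...EXPR must wait at least DELAY more cycles...  */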
3579 | \f | |
3580 | ||
3581 | /* Functions to work with insns. */ | |
3582 | ||
3583 | /* Returns true if LHS of INSN is the same as DEST of an insn | |
3584 | being moved. */ | |
3585 | bool | |
3586 | lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) | |
3587 | { | |
3588 | rtx lhs = INSN_LHS (insn); | |
3589 | ||
3590 | if (lhs == NULL || dest == NULL) | |
3591 | return false; | |
48e1416a | 3592 | |
e1ab7874 | 3593 | return rtx_equal_p (lhs, dest); |
3594 | } | |
3595 | ||
3596 | /* Return s_i_d entry of INSN. Callable from debugger. */ | |
3597 | sel_insn_data_def | |
3598 | insn_sid (insn_t insn) | |
3599 | { | |
3600 | return *SID (insn); | |
3601 | } | |
3602 | ||
3603 | /* True when INSN is a speculative check. We can tell this by looking | |
3604 | at the data structures of the selective scheduler, not by examining | |
3605 | the pattern. */ | |
3606 | bool | |
3607 | sel_insn_is_speculation_check (rtx insn) | |
3608 | { | |
f1f41a6c | 3609 | return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn); |
e1ab7874 | 3610 | } |
3611 | ||
48e1416a | 3612 | /* Extracts machine mode MODE and destination location DST_LOC |
e1ab7874 | 3613 | for given INSN. */ |
3614 | void | |
3754d046 | 3615 | get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode) |
e1ab7874 | 3616 | { |
3617 | rtx pat = PATTERN (insn); | |
3618 | ||
3619 | gcc_assert (dst_loc); | |
3620 | gcc_assert (GET_CODE (pat) == SET); | |
3621 | ||
3622 | *dst_loc = SET_DEST (pat); | |
3623 | ||
3624 | gcc_assert (*dst_loc); | |
3625 | gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); | |
3626 | ||
3627 | if (mode) | |
3628 | *mode = GET_MODE (*dst_loc); | |
3629 | } | |
3630 | ||
48e1416a | 3631 | /* Returns true when moving through JUMP will result in bookkeeping |
e1ab7874 | 3632 | creation. */ |
3633 | bool | |
3634 | bookkeeping_can_be_created_if_moved_through_p (insn_t jump) | |
3635 | { | |
3636 | insn_t succ; | |
3637 | succ_iterator si; | |
3638 | ||
3639 | FOR_EACH_SUCC (succ, si, jump) | |
3640 | if (sel_num_cfg_preds_gt_1 (succ)) | |
3641 | return true; | |
3642 | ||
3643 | return false; | |
3644 | } | |
3645 | ||
3646 | /* Return 'true' if INSN is the only one in its basic block. */ | |
3647 | static bool | |
3648 | insn_is_the_only_one_in_bb_p (insn_t insn) | |
3649 | { | |
3650 | return sel_bb_head_p (insn) && sel_bb_end_p (insn); | |
3651 | } | |
3652 | ||
48e1416a | 3653 | /* Check that the region we're scheduling still has at most one |
e1ab7874 | 3654 | backedge. */ |
3655 | static void | |
3656 | verify_backedges (void) | |
3657 | { | |
3658 | if (pipelining_p) | |
3659 | { | |
3660 | int i, n = 0; | |
3661 | edge e; | |
3662 | edge_iterator ei; | |
48e1416a | 3663 | |
e1ab7874 | 3664 | for (i = 0; i < current_nr_blocks; i++) |
f5a6b05f | 3665 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs) |
e1ab7874 | 3666 | if (in_current_region_p (e->dest) |
3667 | && BLOCK_TO_BB (e->dest->index) < i) | |
3668 | n++; | |
48e1416a | 3669 | |
e1ab7874 | 3670 | gcc_assert (n <= 1); |
3671 | } | |
3672 | } | |
e1ab7874 | 3673 | \f |
3674 | ||
3675 | /* Functions to work with control flow. */ | |
3676 | ||
93919afc | 3677 | /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks |
3678 | are sorted in topological order (it might have been invalidated by | |
3679 | redirecting an edge). */ | |
3680 | static void | |
3681 | sel_recompute_toporder (void) | |
3682 | { | |
3683 | int i, n, rgn; | |
3684 | int *postorder, n_blocks; | |
3685 | ||
a28770e1 | 3686 | postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun)); |
93919afc | 3687 | n_blocks = post_order_compute (postorder, false, false); |
3688 | ||
3689 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
3690 | for (n = 0, i = n_blocks - 1; i >= 0; i--) | |
3691 | if (CONTAINING_RGN (postorder[i]) == rgn) | |
3692 | { | |
3693 | BLOCK_TO_BB (postorder[i]) = n; | |
3694 | BB_TO_BLOCK (n) = postorder[i]; | |
3695 | n++; | |
3696 | } | |
3697 | ||
3698 | /* Assert that we updated info for all blocks. We may miss some blocks if | |
3699 | this function is called after redirecting an edge has made a block | |
3700 | unreachable, but that block has not been deleted yet. */ | |
3701 | gcc_assert (n == RGN_NR_BLOCKS (rgn)); | |
3702 | } | |
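
/* For illustration (block indices are made up): if the region consists of
   blocks 5, 8 and 3, and post_order_compute () finishes them in the order
   3, 8, 5, then walking the postorder backwards assigns
   BLOCK_TO_BB (5) = 0, BLOCK_TO_BB (8) = 1, BLOCK_TO_BB (3) = 2 together
   with the matching BB_TO_BLOCK entries, i.e. a reverse postorder, which is
   a valid topological order for the region.  */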
3703 | ||
e1ab7874 | 3704 | /* Tidy the possibly empty block BB. */ |
81d1ad0f | 3705 | static bool |
6f0e7980 | 3706 | maybe_tidy_empty_bb (basic_block bb) |
e1ab7874 | 3707 | { |
ef4cf572 | 3708 | basic_block succ_bb, pred_bb, note_bb; |
f1f41a6c | 3709 | vec<basic_block> dom_bbs; |
df6266b9 | 3710 | edge e; |
3711 | edge_iterator ei; | |
e1ab7874 | 3712 | bool rescan_p; |
3713 | ||
3714 | /* Keep an empty bb only if this block immediately precedes EXIT and | |
61e213e2 | 3715 | has an incoming non-fallthrough edge, or it has no predecessors or |
3716 | successors. Otherwise remove it. */ | |
9845d120 | 3717 | if (!sel_bb_empty_p (bb) |
48e1416a | 3718 | || (single_succ_p (bb) |
34154e27 | 3719 | && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun) |
48e1416a | 3720 | && (!single_pred_p (bb) |
61e213e2 | 3721 | || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))) |
3722 | || EDGE_COUNT (bb->preds) == 0 | |
3723 | || EDGE_COUNT (bb->succs) == 0) | |
e1ab7874 | 3724 | return false; |
3725 | ||
df6266b9 | 3726 | /* Do not attempt to redirect complex edges. */ |
3727 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3728 | if (e->flags & EDGE_COMPLEX) | |
3729 | return false; | |
a62f9dca | 3730 | else if (e->flags & EDGE_FALLTHRU) |
3731 | { | |
3732 | rtx note; | |
3733 | /* If prev bb ends with asm goto, see if any of the | |
3734 | ASM_OPERANDS_LABELs don't point to the fallthru | |
3735 | label. Do not attempt to redirect it in that case. */ | |
3736 | if (JUMP_P (BB_END (e->src)) | |
3737 | && (note = extract_asm_operands (PATTERN (BB_END (e->src))))) | |
3738 | { | |
3739 | int i, n = ASM_OPERANDS_LABEL_LENGTH (note); | |
3740 | ||
3741 | for (i = 0; i < n; ++i) | |
3742 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb)) | |
3743 | return false; | |
3744 | } | |
3745 | } | |
df6266b9 | 3746 | |
e1ab7874 | 3747 | free_data_sets (bb); |
3748 | ||
3749 | /* Do not delete BB if it has more than one successor. | |
3750 | That can occur when we are moving a jump. */ | |
3751 | if (!single_succ_p (bb)) | |
3752 | { | |
3753 | gcc_assert (can_merge_blocks_p (bb->prev_bb, bb)); | |
3754 | sel_merge_blocks (bb->prev_bb, bb); | |
3755 | return true; | |
3756 | } | |
3757 | ||
3758 | succ_bb = single_succ (bb); | |
3759 | rescan_p = true; | |
3760 | pred_bb = NULL; | |
f1f41a6c | 3761 | dom_bbs.create (0); |
e1ab7874 | 3762 | |
ef4cf572 | 3763 | /* Save a pred/succ from the current region to attach the notes to. */ |
3764 | note_bb = NULL; | |
3765 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3766 | if (in_current_region_p (e->src)) | |
3767 | { | |
3768 | note_bb = e->src; | |
3769 | break; | |
3770 | } | |
3771 | if (note_bb == NULL) | |
3772 | note_bb = succ_bb; | |
3773 | ||
e1ab7874 | 3774 | /* Redirect all non-fallthru edges to the next bb. */ |
3775 | while (rescan_p) | |
3776 | { | |
e1ab7874 | 3777 | rescan_p = false; |
3778 | ||
3779 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3780 | { | |
3781 | pred_bb = e->src; | |
3782 | ||
3783 | if (!(e->flags & EDGE_FALLTHRU)) | |
3784 | { | |
f4d3c071 | 3785 | /* We cannot invalidate computed topological order by moving |
1a5dbaab | 3786 | the edge destination block (E->DEST) along a fallthru edge. |
3787 | ||
3788 | We will update dominators here only when we'll get | |
3789 | an unreachable block when redirecting, otherwise | |
3790 | sel_redirect_edge_and_branch will take care of it. */ | |
3791 | if (e->dest != bb | |
3792 | && single_pred_p (e->dest)) | |
f1f41a6c | 3793 | dom_bbs.safe_push (e->dest); |
6f0e7980 | 3794 | sel_redirect_edge_and_branch (e, succ_bb); |
e1ab7874 | 3795 | rescan_p = true; |
3796 | break; | |
3797 | } | |
6f0e7980 | 3798 | /* If the edge is fallthru, but PRED_BB ends in a conditional jump |
3799 | to BB (so there is no non-fallthru edge from PRED_BB to BB), we | |
3800 | still have to adjust it. */ | |
3801 | else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) | |
3802 | { | |
3803 | /* If possible, try to remove the unneeded conditional jump. */ | |
3804 | if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 | |
3805 | && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) | |
3806 | { | |
3807 | if (!sel_remove_insn (BB_END (pred_bb), false, false)) | |
3808 | tidy_fallthru_edge (e); | |
3809 | } | |
3810 | else | |
3811 | sel_redirect_edge_and_branch (e, succ_bb); | |
3812 | rescan_p = true; | |
3813 | break; | |
3814 | } | |
e1ab7874 | 3815 | } |
3816 | } | |
3817 | ||
e1ab7874 | 3818 | if (can_merge_blocks_p (bb->prev_bb, bb)) |
3819 | sel_merge_blocks (bb->prev_bb, bb); | |
3820 | else | |
e1ab7874 | 3821 | { |
0424f393 | 3822 | /* This is a block without fallthru predecessor. Just delete it. */ |
ef4cf572 | 3823 | gcc_assert (note_bb); |
3824 | move_bb_info (note_bb, bb); | |
e1ab7874 | 3825 | remove_empty_bb (bb, true); |
3826 | } | |
3827 | ||
f1f41a6c | 3828 | if (!dom_bbs.is_empty ()) |
1a5dbaab | 3829 | { |
f1f41a6c | 3830 | dom_bbs.safe_push (succ_bb); |
1a5dbaab | 3831 | iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); |
f1f41a6c | 3832 | dom_bbs.release (); |
1a5dbaab | 3833 | } |
3834 | ||
e1ab7874 | 3835 | return true; |
3836 | } | |
3837 | ||
48e1416a | 3838 | /* Tidy the control flow after we have removed the original insn from |
e1ab7874 | 3839 | XBB. Return true if we have removed some blocks. When FULL_TIDYING |
3840 | is true, also try to optimize control flow on non-empty blocks. */ | |
3841 | bool | |
3842 | tidy_control_flow (basic_block xbb, bool full_tidying) | |
3843 | { | |
3844 | bool changed = true; | |
9845d120 | 3845 | insn_t first, last; |
48e1416a | 3846 | |
e1ab7874 | 3847 | /* First check whether XBB is empty. */ |
6f0e7980 | 3848 | changed = maybe_tidy_empty_bb (xbb); |
e1ab7874 | 3849 | if (changed || !full_tidying) |
3850 | return changed; | |
48e1416a | 3851 | |
e1ab7874 | 3852 | /* Check whether an unnecessary jump remains after the insn was removed. */ |
49087fba | 3853 | if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) |
e1ab7874 | 3854 | && INSN_SCHED_TIMES (BB_END (xbb)) == 0 |
3855 | && !IN_CURRENT_FENCE_P (BB_END (xbb))) | |
3856 | { | |
ccf06fde | 3857 | /* We used to call sel_remove_insn here, which can trigger tidy_control_flow |
3858 | before we fix up the fallthru edge. Correct that ordering by | |
3859 | explicitly doing the latter before the former. */ | |
3860 | clear_expr (INSN_EXPR (BB_END (xbb))); | |
e1ab7874 | 3861 | tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); |
ccf06fde | 3862 | if (tidy_control_flow (xbb, false)) |
3863 | return true; | |
e1ab7874 | 3864 | } |
3865 | ||
9845d120 | 3866 | first = sel_bb_head (xbb); |
3867 | last = sel_bb_end (xbb); | |
3868 | if (MAY_HAVE_DEBUG_INSNS) | |
3869 | { | |
3870 | if (first != last && DEBUG_INSN_P (first)) | |
3871 | do | |
3872 | first = NEXT_INSN (first); | |
3873 | while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); | |
3874 | ||
3875 | if (first != last && DEBUG_INSN_P (last)) | |
3876 | do | |
3877 | last = PREV_INSN (last); | |
3878 | while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); | |
3879 | } | |
e1ab7874 | 3880 | /* Check whether there is an unnecessary jump in the previous basic block |
48e1416a | 3881 | leading to the next basic block left after removing INSN from the stream. |
3882 | If so, remove that jump and redirect the edge to the current | |
3883 | basic block (where INSN was before deletion). This way, | |
3884 | when the NOP is deleted several instructions later together with its | |
3885 | basic block, we will not get a jump to the next instruction, which | |
e1ab7874 | 3886 | can be harmful. */ |
9845d120 | 3887 | if (first == last |
e1ab7874 | 3888 | && !sel_bb_empty_p (xbb) |
9845d120 | 3889 | && INSN_NOP_P (last) |
e1ab7874 | 3890 | /* Flow goes fallthru from current block to the next. */ |
3891 | && EDGE_COUNT (xbb->succs) == 1 | |
3892 | && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) | |
3893 | /* When successor is an EXIT block, it may not be the next block. */ | |
34154e27 | 3894 | && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 3895 | /* And unconditional jump in previous basic block leads to |
3896 | next basic block of XBB and this jump can be safely removed. */ | |
3897 | && in_current_region_p (xbb->prev_bb) | |
49087fba | 3898 | && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) |
e1ab7874 | 3899 | && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 |
3900 | /* Also this jump is not at the scheduling boundary. */ | |
3901 | && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) | |
3902 | { | |
93919afc | 3903 | bool recompute_toporder_p; |
e1ab7874 | 3904 | /* Clear data structures of jump - jump itself will be removed |
3905 | by sel_redirect_edge_and_branch. */ | |
3906 | clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); | |
93919afc | 3907 | recompute_toporder_p |
3908 | = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); | |
3909 | ||
e1ab7874 | 3910 | gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); |
3911 | ||
8ffee455 | 3912 | /* We could have skipped some debug insns which did not get removed with the block, |
3913 | and the seqnos could become incorrect. Fix them up here. */ | |
3914 | if (MAY_HAVE_DEBUG_INSNS && (sel_bb_head (xbb) != first || sel_bb_end (xbb) != last)) | |
3915 | { | |
3916 | if (!sel_bb_empty_p (xbb->prev_bb)) | |
3917 | { | |
3918 | int prev_seqno = INSN_SEQNO (sel_bb_end (xbb->prev_bb)); | |
3919 | if (prev_seqno > INSN_SEQNO (sel_bb_head (xbb))) | |
3920 | for (insn_t insn = sel_bb_head (xbb); insn != first; insn = NEXT_INSN (insn)) | |
3921 | INSN_SEQNO (insn) = prev_seqno + 1; | |
3922 | } | |
3923 | } | |
3924 | ||
e1ab7874 | 3925 | /* It can turn out that after removing unused jump, basic block |
3926 | that contained that jump, becomes empty too. In such case | |
3927 | remove it too. */ | |
3928 | if (sel_bb_empty_p (xbb->prev_bb)) | |
6f0e7980 | 3929 | changed = maybe_tidy_empty_bb (xbb->prev_bb); |
3930 | if (recompute_toporder_p) | |
93919afc | 3931 | sel_recompute_toporder (); |
e1ab7874 | 3932 | } |
7af466ad | 3933 | |
382ecba7 | 3934 | /* TODO: use separate flag for CFG checking. */ |
3935 | if (flag_checking) | |
3936 | { | |
3937 | verify_backedges (); | |
3938 | verify_dominators (CDI_DOMINATORS); | |
3939 | } | |
7af466ad | 3940 | |
e1ab7874 | 3941 | return changed; |
3942 | } | |
3943 | ||
93919afc | 3944 | /* Purge meaningless empty blocks in the middle of a region. */ |
3945 | void | |
3946 | purge_empty_blocks (void) | |
3947 | { | |
a6e634c6 | 3948 | int i; |
93919afc | 3949 | |
a6e634c6 | 3950 | /* Do not attempt to delete the first basic block in the region. */ |
3951 | for (i = 1; i < current_nr_blocks; ) | |
93919afc | 3952 | { |
f5a6b05f | 3953 | basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
93919afc | 3954 | |
6f0e7980 | 3955 | if (maybe_tidy_empty_bb (b)) |
93919afc | 3956 | continue; |
3957 | ||
3958 | i++; | |
3959 | } | |
3960 | } | |
3961 | ||
48e1416a | 3962 | /* Rip INSN off the insn stream. When ONLY_DISCONNECT is true, |
3963 | do not delete the insn's data, because it will be re-emitted later. | |
e1ab7874 | 3964 | Return true if we have removed some blocks afterwards. */ |
3965 | bool | |
3966 | sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) | |
3967 | { | |
3968 | basic_block bb = BLOCK_FOR_INSN (insn); | |
3969 | ||
3970 | gcc_assert (INSN_IN_STREAM_P (insn)); | |
3971 | ||
9845d120 | 3972 | if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) |
3973 | { | |
3974 | expr_t expr; | |
3975 | av_set_iterator i; | |
3976 | ||
3977 | /* When we remove a debug insn that is head of a BB, it remains | |
3978 | in the AV_SET of the block, but it shouldn't. */ | |
3979 | FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) | |
3980 | if (EXPR_INSN_RTX (expr) == insn) | |
3981 | { | |
3982 | av_set_iter_remove (&i); | |
3983 | break; | |
3984 | } | |
3985 | } | |
3986 | ||
e1ab7874 | 3987 | if (only_disconnect) |
93ff53d3 | 3988 | remove_insn (insn); |
e1ab7874 | 3989 | else |
3990 | { | |
93ff53d3 | 3991 | delete_insn (insn); |
e1ab7874 | 3992 | clear_expr (INSN_EXPR (insn)); |
3993 | } | |
3994 | ||
93ff53d3 | 3995 | /* It is necessary to NULL these fields in case we are going to re-insert |
3996 | INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT | |
3997 | case, but also for NOPs that we will return to the nop pool. */ | |
4a57a2e8 | 3998 | SET_PREV_INSN (insn) = NULL_RTX; |
3999 | SET_NEXT_INSN (insn) = NULL_RTX; | |
93ff53d3 | 4000 | set_block_for_insn (insn, NULL); |
e1ab7874 | 4001 | |
4002 | return tidy_control_flow (bb, full_tidying); | |
4003 | } | |
4004 | ||
4005 | /* Estimate number of the insns in BB. */ | |
4006 | static int | |
4007 | sel_estimate_number_of_insns (basic_block bb) | |
4008 | { | |
4009 | int res = 0; | |
4010 | insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); | |
4011 | ||
4012 | for (; insn != next_tail; insn = NEXT_INSN (insn)) | |
9845d120 | 4013 | if (NONDEBUG_INSN_P (insn)) |
e1ab7874 | 4014 | res++; |
4015 | ||
4016 | return res; | |
4017 | } | |
4018 | ||
4019 | /* We don't need separate luids for notes or labels. */ | |
4020 | static int | |
4021 | sel_luid_for_non_insn (rtx x) | |
4022 | { | |
4023 | gcc_assert (NOTE_P (x) || LABEL_P (x)); | |
4024 | ||
4025 | return -1; | |
4026 | } | |
4027 | ||
bdcc104c | 4028 | /* Find the proper seqno for inserting at INSN by successors. |
4029 | Return -1 if no successors with positive seqno exist. */ | |
e1ab7874 | 4030 | static int |
2f3c9801 | 4031 | get_seqno_by_succs (rtx_insn *insn) |
bdcc104c | 4032 | { |
4033 | basic_block bb = BLOCK_FOR_INSN (insn); | |
2f3c9801 | 4034 | rtx_insn *tmp = insn, *end = BB_END (bb); |
bdcc104c | 4035 | int seqno; |
4036 | insn_t succ = NULL; | |
4037 | succ_iterator si; | |
4038 | ||
4039 | while (tmp != end) | |
4040 | { | |
4041 | tmp = NEXT_INSN (tmp); | |
4042 | if (INSN_P (tmp)) | |
4043 | return INSN_SEQNO (tmp); | |
4044 | } | |
4045 | ||
4046 | seqno = INT_MAX; | |
4047 | ||
4048 | FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) | |
4049 | if (INSN_SEQNO (succ) > 0) | |
4050 | seqno = MIN (seqno, INSN_SEQNO (succ)); | |
4051 | ||
4052 | if (seqno == INT_MAX) | |
4053 | return -1; | |
4054 | ||
4055 | return seqno; | |
4056 | } | |
4057 | ||
8d1881f5 | 4058 | /* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute |
4059 | seqno in corner cases. */ | |
bdcc104c | 4060 | static int |
8d1881f5 | 4061 | get_seqno_for_a_jump (insn_t insn, int old_seqno) |
e1ab7874 | 4062 | { |
4063 | int seqno; | |
4064 | ||
4065 | gcc_assert (INSN_SIMPLEJUMP_P (insn)); | |
4066 | ||
4067 | if (!sel_bb_head_p (insn)) | |
4068 | seqno = INSN_SEQNO (PREV_INSN (insn)); | |
4069 | else | |
4070 | { | |
4071 | basic_block bb = BLOCK_FOR_INSN (insn); | |
4072 | ||
4073 | if (single_pred_p (bb) | |
4074 | && !in_current_region_p (single_pred (bb))) | |
4075 | { | |
4076 | /* We can have preds outside a region when splitting edges | |
48e1416a | 4077 | for pipelining of an outer loop. Use succ instead. |
e1ab7874 | 4078 | There should be only one of them. */ |
4079 | insn_t succ = NULL; | |
4080 | succ_iterator si; | |
4081 | bool first = true; | |
48e1416a | 4082 | |
e1ab7874 | 4083 | gcc_assert (flag_sel_sched_pipelining_outer_loops |
4084 | && current_loop_nest); | |
48e1416a | 4085 | FOR_EACH_SUCC_1 (succ, si, insn, |
e1ab7874 | 4086 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
4087 | { | |
4088 | gcc_assert (first); | |
4089 | first = false; | |
4090 | } | |
4091 | ||
4092 | gcc_assert (succ != NULL); | |
4093 | seqno = INSN_SEQNO (succ); | |
4094 | } | |
4095 | else | |
4096 | { | |
4097 | insn_t *preds; | |
4098 | int n; | |
4099 | ||
4100 | cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); | |
e1ab7874 | 4101 | |
bdcc104c | 4102 | gcc_assert (n > 0); |
4103 | /* For one predecessor, use simple method. */ | |
4104 | if (n == 1) | |
4105 | seqno = INSN_SEQNO (preds[0]); | |
4106 | else | |
4107 | seqno = get_seqno_by_preds (insn); | |
48e1416a | 4108 | |
e1ab7874 | 4109 | free (preds); |
4110 | } | |
4111 | } | |
4112 | ||
bdcc104c | 4113 | /* We were unable to find a good seqno among preds. */ |
4114 | if (seqno < 0) | |
4115 | seqno = get_seqno_by_succs (insn); | |
4116 | ||
8d1881f5 | 4117 | if (seqno < 0) |
4118 | { | |
4119 | /* The only case where this could be here legally is that the only | |
4120 | unscheduled insn was a conditional jump that got removed and turned | |
4121 | into this unconditional one. Initialize from the old seqno | |
4122 | of that jump passed down to here. */ | |
4123 | seqno = old_seqno; | |
4124 | } | |
bdcc104c | 4125 | |
8d1881f5 | 4126 | gcc_assert (seqno >= 0); |
e1ab7874 | 4127 | return seqno; |
4128 | } | |
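
/* To summarize the fallback chain above: a jump that is not a block head
   takes the seqno of the previous insn; a block-head jump uses its
   predecessors (or a successor when the only predecessor is outside the
   region), then falls back to the seqnos of its successors, and finally to
   OLD_SEQNO, which therefore must be valid whenever such a corner case can
   occur.  */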
4129 | ||
961d3eb8 | 4130 | /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors |
4131 | with positive seqno exist. */ | |
e1ab7874 | 4132 | int |
91a55c11 | 4133 | get_seqno_by_preds (rtx_insn *insn) |
e1ab7874 | 4134 | { |
4135 | basic_block bb = BLOCK_FOR_INSN (insn); | |
91a55c11 | 4136 | rtx_insn *tmp = insn, *head = BB_HEAD (bb); |
e1ab7874 | 4137 | insn_t *preds; |
4138 | int n, i, seqno; | |
4139 | ||
738eb905 | 4140 | /* Loop backwards from INSN to HEAD including both. */ |
4141 | while (1) | |
bdcc104c | 4142 | { |
bdcc104c | 4143 | if (INSN_P (tmp)) |
738eb905 | 4144 | return INSN_SEQNO (tmp); |
4145 | if (tmp == head) | |
4146 | break; | |
4147 | tmp = PREV_INSN (tmp); | |
bdcc104c | 4148 | } |
48e1416a | 4149 | |
e1ab7874 | 4150 | cfg_preds (bb, &preds, &n); |
4151 | for (i = 0, seqno = -1; i < n; i++) | |
4152 | seqno = MAX (seqno, INSN_SEQNO (preds[i])); | |
4153 | ||
e1ab7874 | 4154 | return seqno; |
4155 | } | |
4156 | ||
4157 | \f | |
4158 | ||
4159 | /* Extend pass-scope data structures for basic blocks. */ | |
4160 | void | |
4161 | sel_extend_global_bb_info (void) | |
4162 | { | |
fe672ac0 | 4163 | sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4164 | } |
4165 | ||
4166 | /* Extend region-scope data structures for basic blocks. */ | |
4167 | static void | |
4168 | extend_region_bb_info (void) | |
4169 | { | |
fe672ac0 | 4170 | sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4171 | } |
4172 | ||
4173 | /* Extend all data structures to fit for all basic blocks. */ | |
4174 | static void | |
4175 | extend_bb_info (void) | |
4176 | { | |
4177 | sel_extend_global_bb_info (); | |
4178 | extend_region_bb_info (); | |
4179 | } | |
4180 | ||
4181 | /* Finalize pass-scope data structures for basic blocks. */ | |
4182 | void | |
4183 | sel_finish_global_bb_info (void) | |
4184 | { | |
f1f41a6c | 4185 | sel_global_bb_info.release (); |
e1ab7874 | 4186 | } |
4187 | ||
4188 | /* Finalize region-scope data structures for basic blocks. */ | |
4189 | static void | |
4190 | finish_region_bb_info (void) | |
4191 | { | |
f1f41a6c | 4192 | sel_region_bb_info.release (); |
e1ab7874 | 4193 | } |
4194 | \f | |
4195 | ||
4196 | /* Data for each insn in current region. */ | |
16fb756f | 4197 | vec<sel_insn_data_def> s_i_d; |
e1ab7874 | 4198 | |
e1ab7874 | 4199 | /* Extend data structures for insns from current region. */ |
4200 | static void | |
4201 | extend_insn_data (void) | |
4202 | { | |
4203 | int reserve; | |
48e1416a | 4204 | |
e1ab7874 | 4205 | sched_extend_target (); |
4206 | sched_deps_init (false); | |
4207 | ||
4208 | /* Extend data structures for insns from current region. */ | |
f1f41a6c | 4209 | reserve = (sched_max_luid + 1 - s_i_d.length ()); |
4210 | if (reserve > 0 && ! s_i_d.space (reserve)) | |
d9ab2038 | 4211 | { |
4212 | int size; | |
4213 | ||
4214 | if (sched_max_luid / 2 > 1024) | |
4215 | size = sched_max_luid + 1024; | |
4216 | else | |
4217 | size = 3 * sched_max_luid / 2; | |
48e1416a | 4218 | |
d9ab2038 | 4219 | |
f1f41a6c | 4220 | s_i_d.safe_grow_cleared (size); |
d9ab2038 | 4221 | } |
e1ab7874 | 4222 | } |
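
/* Growth policy illustration: with sched_max_luid == 1000 the vector above
   is grown to 3 * 1000 / 2 == 1500 entries, while with sched_max_luid ==
   10000 it is grown to 10000 + 1024 == 11024; small regions grow
   geometrically, large ones by a fixed chunk to limit over-allocation.  */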
4223 | ||
4224 | /* Finalize data structures for insns from current region. */ | |
4225 | static void | |
4226 | finish_insns (void) | |
4227 | { | |
4228 | unsigned i; | |
4229 | ||
4230 | /* Clear here all dependence contexts that may be left over from insns that were | |
4231 | removed during scheduling. */ | |
f1f41a6c | 4232 | for (i = 0; i < s_i_d.length (); i++) |
e1ab7874 | 4233 | { |
f1f41a6c | 4234 | sel_insn_data_def *sid_entry = &s_i_d[i]; |
48e1416a | 4235 | |
e1ab7874 | 4236 | if (sid_entry->live) |
4237 | return_regset_to_pool (sid_entry->live); | |
4238 | if (sid_entry->analyzed_deps) | |
4239 | { | |
4240 | BITMAP_FREE (sid_entry->analyzed_deps); | |
4241 | BITMAP_FREE (sid_entry->found_deps); | |
4242 | htab_delete (sid_entry->transformed_insns); | |
4243 | free_deps (&sid_entry->deps_context); | |
4244 | } | |
4245 | if (EXPR_VINSN (&sid_entry->expr)) | |
4246 | { | |
4247 | clear_expr (&sid_entry->expr); | |
48e1416a | 4248 | |
e1ab7874 | 4249 | /* Also, clear CANT_MOVE bit here, because we really don't want it |
4250 | to be passed to the next region. */ | |
4251 | CANT_MOVE_BY_LUID (i) = 0; | |
4252 | } | |
4253 | } | |
48e1416a | 4254 | |
f1f41a6c | 4255 | s_i_d.release (); |
e1ab7874 | 4256 | } |
4257 | ||
4258 | /* A proxy to pass initialization data to init_insn (). */ | |
4259 | static sel_insn_data_def _insn_init_ssid; | |
4260 | static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; | |
4261 | ||
4262 | /* If true, create a new vinsn. Otherwise use the one from EXPR. */ | |
4263 | static bool insn_init_create_new_vinsn_p; | |
4264 | ||
4265 | /* Set all necessary data for initialization of the new insn[s]. */ | |
4266 | static expr_t | |
4267 | set_insn_init (expr_t expr, vinsn_t vi, int seqno) | |
4268 | { | |
4269 | expr_t x = &insn_init_ssid->expr; | |
4270 | ||
4271 | copy_expr_onside (x, expr); | |
4272 | if (vi != NULL) | |
4273 | { | |
4274 | insn_init_create_new_vinsn_p = false; | |
4275 | change_vinsn_in_expr (x, vi); | |
4276 | } | |
4277 | else | |
4278 | insn_init_create_new_vinsn_p = true; | |
4279 | ||
4280 | insn_init_ssid->seqno = seqno; | |
4281 | return x; | |
4282 | } | |
4283 | ||
4284 | /* Init data for INSN. */ | |
4285 | static void | |
4286 | init_insn_data (insn_t insn) | |
4287 | { | |
4288 | expr_t expr; | |
4289 | sel_insn_data_t ssid = insn_init_ssid; | |
4290 | ||
4291 | /* The fields mentioned below are special and hence are not being | |
4292 | propagated to the new insns. */ | |
4293 | gcc_assert (!ssid->asm_p && ssid->sched_next == NULL | |
4294 | && !ssid->after_stall_p && ssid->sched_cycle == 0); | |
4295 | gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); | |
4296 | ||
4297 | expr = INSN_EXPR (insn); | |
4298 | copy_expr (expr, &ssid->expr); | |
4299 | prepare_insn_expr (insn, ssid->seqno); | |
4300 | ||
4301 | if (insn_init_create_new_vinsn_p) | |
4302 | change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); | |
48e1416a | 4303 | |
e1ab7874 | 4304 | if (first_time_insn_init (insn)) |
4305 | init_first_time_insn_data (insn); | |
4306 | } | |
4307 | ||
4308 | /* This is used to initialize spurious jumps generated by | |
8d1881f5 | 4309 | sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos |
4310 | in corner cases within get_seqno_for_a_jump. */ | |
e1ab7874 | 4311 | static void |
8d1881f5 | 4312 | init_simplejump_data (insn_t insn, int old_seqno) |
e1ab7874 | 4313 | { |
4314 | init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, | |
f1f41a6c | 4315 | REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, |
1e094109 | 4316 | vNULL, true, false, false, |
e1ab7874 | 4317 | false, true); |
8d1881f5 | 4318 | INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno); |
e1ab7874 | 4319 | init_first_time_insn_data (insn); |
4320 | } | |
4321 | ||
48e1416a | 4322 | /* Perform deferred initialization of insns. This is used to process |
8d1881f5 | 4323 | a new jump that may be created by redirect_edge. OLD_SEQNO is used |
4324 | for initializing simplejumps in init_simplejump_data. */ | |
4325 | static void | |
4326 | sel_init_new_insn (insn_t insn, int flags, int old_seqno) | |
e1ab7874 | 4327 | { |
4328 | /* We create data structures for bb when the first insn is emitted in it. */ | |
4329 | if (INSN_P (insn) | |
4330 | && INSN_IN_STREAM_P (insn) | |
4331 | && insn_is_the_only_one_in_bb_p (insn)) | |
4332 | { | |
4333 | extend_bb_info (); | |
4334 | create_initial_data_sets (BLOCK_FOR_INSN (insn)); | |
4335 | } | |
48e1416a | 4336 | |
e1ab7874 | 4337 | if (flags & INSN_INIT_TODO_LUID) |
52d7e28c | 4338 | { |
4339 | sched_extend_luids (); | |
4340 | sched_init_insn_luid (insn); | |
4341 | } | |
e1ab7874 | 4342 | |
4343 | if (flags & INSN_INIT_TODO_SSID) | |
4344 | { | |
4345 | extend_insn_data (); | |
4346 | init_insn_data (insn); | |
4347 | clear_expr (&insn_init_ssid->expr); | |
4348 | } | |
4349 | ||
4350 | if (flags & INSN_INIT_TODO_SIMPLEJUMP) | |
4351 | { | |
4352 | extend_insn_data (); | |
8d1881f5 | 4353 | init_simplejump_data (insn, old_seqno); |
e1ab7874 | 4354 | } |
48e1416a | 4355 | |
e1ab7874 | 4356 | gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) |
4357 | == CONTAINING_RGN (BB_TO_BLOCK (0))); | |
4358 | } | |
4359 | \f | |
4360 | ||
4361 | /* Functions to init/finish work with lv sets. */ | |
4362 | ||
4363 | /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ | |
4364 | static void | |
4365 | init_lv_set (basic_block bb) | |
4366 | { | |
4367 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
4368 | ||
4369 | BB_LV_SET (bb) = get_regset_from_pool (); | |
48e1416a | 4370 | COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
e1ab7874 | 4371 | BB_LV_SET_VALID_P (bb) = true; |
4372 | } | |
4373 | ||
4374 | /* Copy liveness information to BB from FROM_BB. */ | |
4375 | static void | |
4376 | copy_lv_set_from (basic_block bb, basic_block from_bb) | |
4377 | { | |
4378 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
48e1416a | 4379 | |
e1ab7874 | 4380 | COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); |
4381 | BB_LV_SET_VALID_P (bb) = true; | |
48e1416a | 4382 | } |
e1ab7874 | 4383 | |
4384 | /* Initialize lv set of all bb headers. */ | |
4385 | void | |
4386 | init_lv_sets (void) | |
4387 | { | |
4388 | basic_block bb; | |
4389 | ||
4390 | /* Initialize LV sets. */ | |
fc00614f | 4391 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4392 | init_lv_set (bb); |
4393 | ||
4394 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4395 | init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4396 | } |
4397 | ||
4398 | /* Release the lv set of BB. */ | |
4399 | static void | |
4400 | free_lv_set (basic_block bb) | |
4401 | { | |
4402 | gcc_assert (BB_LV_SET (bb) != NULL); | |
4403 | ||
4404 | return_regset_to_pool (BB_LV_SET (bb)); | |
4405 | BB_LV_SET (bb) = NULL; | |
4406 | BB_LV_SET_VALID_P (bb) = false; | |
4407 | } | |
4408 | ||
4409 | /* Finalize lv sets of all bb headers. */ | |
4410 | void | |
4411 | free_lv_sets (void) | |
4412 | { | |
4413 | basic_block bb; | |
4414 | ||
4415 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4416 | free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4417 | |
4418 | /* Free LV sets. */ | |
fc00614f | 4419 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4420 | if (BB_LV_SET (bb)) |
4421 | free_lv_set (bb); | |
4422 | } | |
4423 | ||
c1c8a3d0 | 4424 | /* Mark AV_SET for BB as invalid, so this set will be updated the next time |
4425 | compute_av() processes BB. This function is called when creating new basic | |
4426 | blocks, as well as for blocks (either new or existing) where new jumps are | |
4427 | created when the control flow is being updated. */ | |
e1ab7874 | 4428 | static void |
4429 | invalidate_av_set (basic_block bb) | |
4430 | { | |
e1ab7874 | 4431 | BB_AV_LEVEL (bb) = -1; |
4432 | } | |
4433 | ||
4434 | /* Create initial data sets for BB (they will be invalid). */ | |
4435 | static void | |
4436 | create_initial_data_sets (basic_block bb) | |
4437 | { | |
4438 | if (BB_LV_SET (bb)) | |
4439 | BB_LV_SET_VALID_P (bb) = false; | |
4440 | else | |
4441 | BB_LV_SET (bb) = get_regset_from_pool (); | |
4442 | invalidate_av_set (bb); | |
4443 | } | |
4444 | ||
4445 | /* Free av set of BB. */ | |
4446 | static void | |
4447 | free_av_set (basic_block bb) | |
4448 | { | |
4449 | av_set_clear (&BB_AV_SET (bb)); | |
4450 | BB_AV_LEVEL (bb) = 0; | |
4451 | } | |
4452 | ||
4453 | /* Free data sets of BB. */ | |
4454 | void | |
4455 | free_data_sets (basic_block bb) | |
4456 | { | |
4457 | free_lv_set (bb); | |
4458 | free_av_set (bb); | |
4459 | } | |
4460 | ||
e1ab7874 | 4461 | /* Exchange data sets of TO and FROM. */ |
4462 | void | |
4463 | exchange_data_sets (basic_block to, basic_block from) | |
4464 | { | |
a4f59596 | 4465 | /* Exchange lv sets of TO and FROM. */ |
4466 | std::swap (BB_LV_SET (from), BB_LV_SET (to)); | |
4467 | std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to)); | |
4468 | ||
4469 | /* Exchange av sets of TO and FROM. */ | |
4470 | std::swap (BB_AV_SET (from), BB_AV_SET (to)); | |
4471 | std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to)); | |
e1ab7874 | 4472 | } |
4473 | ||
4474 | /* Copy data sets of FROM to TO. */ | |
4475 | void | |
4476 | copy_data_sets (basic_block to, basic_block from) | |
4477 | { | |
4478 | gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); | |
4479 | gcc_assert (BB_AV_SET (to) == NULL); | |
4480 | ||
4481 | BB_AV_LEVEL (to) = BB_AV_LEVEL (from); | |
4482 | BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); | |
4483 | ||
4484 | if (BB_AV_SET_VALID_P (from)) | |
4485 | { | |
4486 | BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); | |
4487 | } | |
4488 | if (BB_LV_SET_VALID_P (from)) | |
4489 | { | |
4490 | gcc_assert (BB_LV_SET (to) != NULL); | |
4491 | COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); | |
4492 | } | |
4493 | } | |
4494 | ||
4495 | /* Return an av set for INSN, if any. */ | |
4496 | av_set_t | |
4497 | get_av_set (insn_t insn) | |
4498 | { | |
4499 | av_set_t av_set; | |
4500 | ||
4501 | gcc_assert (AV_SET_VALID_P (insn)); | |
4502 | ||
4503 | if (sel_bb_head_p (insn)) | |
4504 | av_set = BB_AV_SET (BLOCK_FOR_INSN (insn)); | |
4505 | else | |
4506 | av_set = NULL; | |
4507 | ||
4508 | return av_set; | |
4509 | } | |
4510 | ||
4511 | /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */ | |
4512 | int | |
4513 | get_av_level (insn_t insn) | |
4514 | { | |
4515 | int av_level; | |
4516 | ||
4517 | gcc_assert (INSN_P (insn)); | |
4518 | ||
4519 | if (sel_bb_head_p (insn)) | |
4520 | av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn)); | |
4521 | else | |
4522 | av_level = INSN_WS_LEVEL (insn); | |
4523 | ||
4524 | return av_level; | |
4525 | } | |
4526 | ||
4527 | \f | |
4528 | ||
4529 | /* Variables to work with control-flow graph. */ | |
4530 | ||
4531 | /* The basic blocks that have already been processed by sched_data_update (), | |
4532 | but have not been passed to sel_add_bb () yet. */ | |
16fb756f | 4533 | static vec<basic_block> last_added_blocks; |
e1ab7874 | 4534 | |
4535 | /* A pool for allocating successor infos. */ | |
4536 | static struct | |
4537 | { | |
4538 | /* A stack for saving succs_info structures. */ | |
4539 | struct succs_info *stack; | |
4540 | ||
4541 | /* Its size. */ | |
4542 | int size; | |
4543 | ||
4544 | /* Top of the stack. */ | |
4545 | int top; | |
4546 | ||
4547 | /* Maximal value of the top. */ | |
4548 | int max_top; | |
4549 | } succs_info_pool; | |
4550 | ||
4551 | /* Functions to work with control-flow graph. */ | |
4552 | ||
4553 | /* Return the head insn of BB, or NULL if BB is empty. */ | |
179c282d | 4554 | rtx_insn * |
e1ab7874 | 4555 | sel_bb_head (basic_block bb) |
4556 | { | |
179c282d | 4557 | rtx_insn *head; |
e1ab7874 | 4558 | |
34154e27 | 4559 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
e1ab7874 | 4560 | { |
4561 | gcc_assert (exit_insn != NULL_RTX); | |
4562 | head = exit_insn; | |
4563 | } | |
4564 | else | |
4565 | { | |
9ed997be | 4566 | rtx_note *note = bb_note (bb); |
e1ab7874 | 4567 | head = next_nonnote_insn (note); |
4568 | ||
cabd2128 | 4569 | if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb)) |
179c282d | 4570 | head = NULL; |
e1ab7874 | 4571 | } |
4572 | ||
4573 | return head; | |
4574 | } | |
4575 | ||
4576 | /* Return true if INSN is a basic block header. */ | |
4577 | bool | |
4578 | sel_bb_head_p (insn_t insn) | |
4579 | { | |
4580 | return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn; | |
4581 | } | |
4582 | ||
4583 | /* Return last insn of BB. */ | |
179c282d | 4584 | rtx_insn * |
e1ab7874 | 4585 | sel_bb_end (basic_block bb) |
4586 | { | |
4587 | if (sel_bb_empty_p (bb)) | |
179c282d | 4588 | return NULL; |
e1ab7874 | 4589 | |
34154e27 | 4590 | gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4591 | |
4592 | return BB_END (bb); | |
4593 | } | |
4594 | ||
4595 | /* Return true if INSN is the last insn in its basic block. */ | |
4596 | bool | |
4597 | sel_bb_end_p (insn_t insn) | |
4598 | { | |
4599 | return insn == sel_bb_end (BLOCK_FOR_INSN (insn)); | |
4600 | } | |
4601 | ||
4602 | /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */ | |
4603 | bool | |
4604 | sel_bb_empty_p (basic_block bb) | |
4605 | { | |
4606 | return sel_bb_head (bb) == NULL; | |
4607 | } | |
4608 | ||
4609 | /* True when BB belongs to the current scheduling region. */ | |
4610 | bool | |
4611 | in_current_region_p (basic_block bb) | |
4612 | { | |
4613 | if (bb->index < NUM_FIXED_BLOCKS) | |
4614 | return false; | |
4615 | ||
4616 | return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0)); | |
4617 | } | |
4618 | ||
4619 | /* Return the block which is a fallthru bb of a conditional jump JUMP. */ | |
4620 | basic_block | |
93ee8dfb | 4621 | fallthru_bb_of_jump (const rtx_insn *jump) |
e1ab7874 | 4622 | { |
4623 | if (!JUMP_P (jump)) | |
4624 | return NULL; | |
4625 | ||
e1ab7874 | 4626 | if (!any_condjump_p (jump)) |
4627 | return NULL; | |
4628 | ||
bf19734b | 4629 | /* A basic block that ends with a conditional jump may still have one successor |
4630 | (and be followed by a barrier); we are not interested in that case. */ | |
4631 | if (single_succ_p (BLOCK_FOR_INSN (jump))) | |
4632 | return NULL; | |
4633 | ||
e1ab7874 | 4634 | return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; |
4635 | } | |
4636 | ||
4637 | /* Remove all notes from BB. */ | |
4638 | static void | |
4639 | init_bb (basic_block bb) | |
4640 | { | |
4641 | remove_notes (bb_note (bb), BB_END (bb)); | |
e97a173d | 4642 | BB_NOTE_LIST (bb) = note_list; |
e1ab7874 | 4643 | } |
4644 | ||
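/* Initialize per-bb data structures for all basic blocks in BBS; in
   particular, init_bb () above strips the notes from each block and stashes
   them in BB_NOTE_LIST.  */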
4645 | void | |
52d7e28c | 4646 | sel_init_bbs (bb_vec_t bbs) |
e1ab7874 | 4647 | { |
4648 | const struct sched_scan_info_def ssi = | |
4649 | { | |
4650 | extend_bb_info, /* extend_bb */ | |
4651 | init_bb, /* init_bb */ | |
4652 | NULL, /* extend_insn */ | |
4653 | NULL /* init_insn */ | |
4654 | }; | |
4655 | ||
52d7e28c | 4656 | sched_scan (&ssi, bbs); |
e1ab7874 | 4657 | } |
4658 | ||
3baa98a0 | 4659 | /* Restore notes for the whole region. */ |
e1ab7874 | 4660 | static void |
3baa98a0 | 4661 | sel_restore_notes (void) |
e1ab7874 | 4662 | { |
4663 | int bb; | |
3baa98a0 | 4664 | insn_t insn; |
e1ab7874 | 4665 | |
4666 | for (bb = 0; bb < current_nr_blocks; bb++) | |
4667 | { | |
4668 | basic_block first, last; | |
4669 | ||
4670 | first = EBB_FIRST_BB (bb); | |
4671 | last = EBB_LAST_BB (bb)->next_bb; | |
4672 | ||
4673 | do | |
4674 | { | |
4675 | note_list = BB_NOTE_LIST (first); | |
4676 | restore_other_notes (NULL, first); | |
e97a173d | 4677 | BB_NOTE_LIST (first) = NULL; |
e1ab7874 | 4678 | |
3baa98a0 | 4679 | FOR_BB_INSNS (first, insn) |
4680 | if (NONDEBUG_INSN_P (insn)) | |
4681 | reemit_notes (insn); | |
4682 | ||
e1ab7874 | 4683 | first = first->next_bb; |
4684 | } | |
4685 | while (first != last); | |
4686 | } | |
4687 | } | |
4688 | ||
4689 | /* Free per-bb data structures. */ | |
4690 | void | |
4691 | sel_finish_bbs (void) | |
4692 | { | |
3baa98a0 | 4693 | sel_restore_notes (); |
e1ab7874 | 4694 | |
4695 | /* Remove current loop preheader from this loop. */ | |
4696 | if (current_loop_nest) | |
4697 | sel_remove_loop_preheader (); | |
4698 | ||
4699 | finish_region_bb_info (); | |
4700 | } | |
4701 | ||
4702 | /* Return true if INSN has a single successor of type FLAGS. */ | |
4703 | bool | |
4704 | sel_insn_has_single_succ_p (insn_t insn, int flags) | |
4705 | { | |
4706 | insn_t succ; | |
4707 | succ_iterator si; | |
4708 | bool first_p = true; | |
4709 | ||
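| /* Bail out as soon as a second successor of the requested type is found. */ | |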
4710 | FOR_EACH_SUCC_1 (succ, si, insn, flags) | |
4711 | { | |
4712 | if (first_p) | |
4713 | first_p = false; | |
4714 | else | |
4715 | return false; | |
4716 | } | |
4717 | ||
4718 | return true; | |
4719 | } | |
4720 | ||
4721 | /* Allocate successor's info. */ | |
4722 | static struct succs_info * | |
4723 | alloc_succs_info (void) | |
4724 | { | |
4725 | if (succs_info_pool.top == succs_info_pool.max_top) | |
4726 | { | |
4727 | int i; | |
48e1416a | 4728 | |
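| /* Grow the pool lazily: create the vectors for a stack entry that has not been used yet. */ | |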
e1ab7874 | 4729 | if (++succs_info_pool.max_top >= succs_info_pool.size) |
4730 | gcc_unreachable (); | |
4731 | ||
4732 | i = ++succs_info_pool.top; | |
f1f41a6c | 4733 | succs_info_pool.stack[i].succs_ok.create (10); |
4734 | succs_info_pool.stack[i].succs_other.create (10); | |
4735 | succs_info_pool.stack[i].probs_ok.create (10); | |
e1ab7874 | 4736 | } |
4737 | else | |
4738 | succs_info_pool.top++; | |
4739 | ||
4740 | return &succs_info_pool.stack[succs_info_pool.top]; | |
4741 | } | |
4742 | ||
4743 | /* Free successor's info. */ | |
4744 | void | |
4745 | free_succs_info (struct succs_info * sinfo) | |
4746 | { | |
48e1416a | 4747 | gcc_assert (succs_info_pool.top >= 0 |
e1ab7874 | 4748 | && &succs_info_pool.stack[succs_info_pool.top] == sinfo); |
4749 | succs_info_pool.top--; | |
4750 | ||
4751 | /* Clear stale info. */ | |
f1f41a6c | 4752 | sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ()); |
4753 | sinfo->succs_other.block_remove (0, sinfo->succs_other.length ()); | |
4754 | sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ()); | |
e1ab7874 | 4755 | sinfo->all_prob = 0; |
4756 | sinfo->succs_ok_n = 0; | |
4757 | sinfo->all_succs_n = 0; | |
4758 | } | |
4759 | ||
48e1416a | 4760 | /* Compute successor info for INSN. FLAGS are the flags passed |
e1ab7874 | 4761 | to the FOR_EACH_SUCC_1 iterator. */ |
4762 | struct succs_info * | |
4763 | compute_succs_info (insn_t insn, short flags) | |
4764 | { | |
4765 | succ_iterator si; | |
4766 | insn_t succ; | |
4767 | struct succs_info *sinfo = alloc_succs_info (); | |
4768 | ||
4769 | /* Traverse *all* successors and decide what to do with each. */ | |
4770 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) | |
4771 | { | |
4772 | /* FIXME: this doesn't work for skipping to loop exits, as we don't | |
4773 | perform code motion through inner loops. */ | |
4774 | short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; | |
4775 | ||
4776 | if (current_flags & flags) | |
4777 | { | |
f1f41a6c | 4778 | sinfo->succs_ok.safe_push (succ); |
4779 | sinfo->probs_ok.safe_push ( | |
4780 | /* FIXME: Improve calculation when skipping | |
4781 | inner loop to exits. */ | |
7c6fa2d9 | 4782 | si.bb_end |
4783 | ? (si.e1->probability.initialized_p () | |
4784 | ? si.e1->probability.to_reg_br_prob_base () | |
4785 | : 0) | |
4786 | : REG_BR_PROB_BASE); | |
e1ab7874 | 4787 | sinfo->succs_ok_n++; |
4788 | } | |
4789 | else | |
f1f41a6c | 4790 | sinfo->succs_other.safe_push (succ); |
e1ab7874 | 4791 | |
4792 | /* Compute all_prob. */ | |
4793 | if (!si.bb_end) | |
4794 | sinfo->all_prob = REG_BR_PROB_BASE; | |
720cfc43 | 4795 | else if (si.e1->probability.initialized_p ()) |
4796 | sinfo->all_prob += si.e1->probability.to_reg_br_prob_base (); | |
e1ab7874 | 4797 | |
4798 | sinfo->all_succs_n++; | |
4799 | } | |
4800 | ||
4801 | return sinfo; | |
4802 | } | |
4803 | ||
48e1416a | 4804 | /* Return the predecessors of BB in PREDS and their number in N. |
e1ab7874 | 4805 | Empty blocks are skipped. SIZE is used to allocate PREDS. */ |
4806 | static void | |
4807 | cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) | |
4808 | { | |
4809 | edge e; | |
4810 | edge_iterator ei; | |
4811 | ||
4812 | gcc_assert (BLOCK_TO_BB (bb->index) != 0); | |
4813 | ||
4814 | FOR_EACH_EDGE (e, ei, bb->preds) | |
4815 | { | |
4816 | basic_block pred_bb = e->src; | |
4817 | insn_t bb_end = BB_END (pred_bb); | |
4818 | ||
f1ec9c64 | 4819 | if (!in_current_region_p (pred_bb)) |
4820 | { | |
4821 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
4822 | && current_loop_nest); | |
4823 | continue; | |
4824 | } | |
e1ab7874 | 4825 | |
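| /* Look through empty predecessor blocks by recursing into their own preds. */ | |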
4826 | if (sel_bb_empty_p (pred_bb)) | |
4827 | cfg_preds_1 (pred_bb, preds, n, size); | |
4828 | else | |
4829 | { | |
4830 | if (*n == *size) | |
48e1416a | 4831 | *preds = XRESIZEVEC (insn_t, *preds, |
e1ab7874 | 4832 | (*size = 2 * *size + 1)); |
4833 | (*preds)[(*n)++] = bb_end; | |
4834 | } | |
4835 | } | |
4836 | ||
f1ec9c64 | 4837 | gcc_assert (*n != 0 |
4838 | || (flag_sel_sched_pipelining_outer_loops | |
4839 | && current_loop_nest)); | |
e1ab7874 | 4840 | } |
4841 | ||
48e1416a | 4842 | /* Find all predecessors of BB and record them in PREDS and their number |
4843 | in N. Empty blocks are skipped, and only normal (forward in-region) | |
e1ab7874 | 4844 | edges are processed. */ |
4845 | static void | |
4846 | cfg_preds (basic_block bb, insn_t **preds, int *n) | |
4847 | { | |
4848 | int size = 0; | |
4849 | ||
4850 | *preds = NULL; | |
4851 | *n = 0; | |
4852 | cfg_preds_1 (bb, preds, n, &size); | |
4853 | } | |
4854 | ||
4855 | /* Returns true if we are moving INSN through a join point. */ | |
4856 | bool | |
4857 | sel_num_cfg_preds_gt_1 (insn_t insn) | |
4858 | { | |
4859 | basic_block bb; | |
4860 | ||
4861 | if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0) | |
4862 | return false; | |
4863 | ||
4864 | bb = BLOCK_FOR_INSN (insn); | |
4865 | ||
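| /* Walk up through empty predecessor blocks, looking for one with more than one incoming edge. */ | |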
4866 | while (1) | |
4867 | { | |
4868 | if (EDGE_COUNT (bb->preds) > 1) | |
4869 | return true; | |
4870 | ||
4871 | gcc_assert (EDGE_PRED (bb, 0)->dest == bb); | |
4872 | bb = EDGE_PRED (bb, 0)->src; | |
4873 | ||
4874 | if (!sel_bb_empty_p (bb)) | |
4875 | break; | |
4876 | } | |
4877 | ||
4878 | return false; | |
4879 | } | |
4880 | ||
48e1416a | 4881 | /* Returns true when BB should be the end of an ebb. Adapted from the |
e1ab7874 | 4882 | code in sched-ebb.c. */ |
4883 | bool | |
4884 | bb_ends_ebb_p (basic_block bb) | |
4885 | { | |
4886 | basic_block next_bb = bb_next_bb (bb); | |
4887 | edge e; | |
48e1416a | 4888 | |
34154e27 | 4889 | if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 4890 | || bitmap_bit_p (forced_ebb_heads, next_bb->index) |
4891 | || (LABEL_P (BB_HEAD (next_bb)) | |
4892 | /* NB: LABEL_NUSES () is not maintained outside of jump.c. | |
4893 | Work around that. */ | |
4894 | && !single_pred_p (next_bb))) | |
4895 | return true; | |
4896 | ||
4897 | if (!in_current_region_p (next_bb)) | |
4898 | return true; | |
4899 | ||
7f58c05e | 4900 | e = find_fallthru_edge (bb->succs); |
4901 | if (e) | |
4902 | { | |
4903 | gcc_assert (e->dest == next_bb); | |
4904 | ||
4905 | return false; | |
4906 | } | |
e1ab7874 | 4907 | |
4908 | return true; | |
4909 | } | |
4910 | ||
4911 | /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a | |
4912 | successor of INSN. */ | |
4913 | bool | |
4914 | in_same_ebb_p (insn_t insn, insn_t succ) | |
4915 | { | |
4916 | basic_block ptr = BLOCK_FOR_INSN (insn); | |
4917 | ||
9af5ce0c | 4918 | for (;;) |
e1ab7874 | 4919 | { |
4920 | if (ptr == BLOCK_FOR_INSN (succ)) | |
4921 | return true; | |
48e1416a | 4922 | |
e1ab7874 | 4923 | if (bb_ends_ebb_p (ptr)) |
4924 | return false; | |
4925 | ||
4926 | ptr = bb_next_bb (ptr); | |
4927 | } | |
4928 | ||
4929 | gcc_unreachable (); | |
4930 | return false; | |
4931 | } | |
4932 | ||
4933 | /* Recomputes the reverse topological order for the function and | |
4934 | saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also | |
4935 | modified appropriately. */ | |
4936 | static void | |
4937 | recompute_rev_top_order (void) | |
4938 | { | |
4939 | int *postorder; | |
4940 | int n_blocks, i; | |
4941 | ||
fe672ac0 | 4942 | if (!rev_top_order_index |
4943 | || rev_top_order_index_len < last_basic_block_for_fn (cfun)) | |
e1ab7874 | 4944 | { |
fe672ac0 | 4945 | rev_top_order_index_len = last_basic_block_for_fn (cfun); |
e1ab7874 | 4946 | rev_top_order_index = XRESIZEVEC (int, rev_top_order_index, |
4947 | rev_top_order_index_len); | |
4948 | } | |
4949 | ||
a28770e1 | 4950 | postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
e1ab7874 | 4951 | |
4952 | n_blocks = post_order_compute (postorder, true, false); | |
a28770e1 | 4953 | gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks); |
e1ab7874 | 4954 | |
4955 | /* Build the reverse function: for each basic block with BB->INDEX == K,
4956 | rev_top_order_index[K] is its reverse topological sort number. */ | |
4957 | for (i = 0; i < n_blocks; i++) | |
4958 | { | |
4959 | gcc_assert (postorder[i] < rev_top_order_index_len); | |
4960 | rev_top_order_index[postorder[i]] = i; | |
4961 | } | |
4962 | ||
4963 | free (postorder); | |
4964 | } | |
4965 | ||
4966 | /* Clear all flags from insns in BB that could spoil its rescheduling. */ | |
4967 | void | |
4968 | clear_outdated_rtx_info (basic_block bb) | |
4969 | { | |
91a55c11 | 4970 | rtx_insn *insn; |
e1ab7874 | 4971 | |
4972 | FOR_BB_INSNS (bb, insn) | |
4973 | if (INSN_P (insn)) | |
4974 | { | |
4975 | SCHED_GROUP_P (insn) = 0; | |
4976 | INSN_AFTER_STALL_P (insn) = 0; | |
4977 | INSN_SCHED_TIMES (insn) = 0; | |
4978 | EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0; | |
4979 | ||
4980 | /* We cannot use the changed caches, as previously we could ignore | |
48e1416a | 4981 | the LHS dependence due to enabled renaming and transform |
e1ab7874 | 4982 | the expression, and currently we'll be unable to do this. */ |
4983 | htab_empty (INSN_TRANSFORMED_INSNS (insn)); | |
4984 | } | |
4985 | } | |
4986 | ||
4987 | /* Add BB_NOTE to the pool of available basic block notes. */ | |
4988 | static void | |
4989 | return_bb_to_pool (basic_block bb) | |
4990 | { | |
9ed997be | 4991 | rtx_note *note = bb_note (bb); |
e1ab7874 | 4992 | |
4993 | gcc_assert (NOTE_BASIC_BLOCK (note) == bb | |
4994 | && bb->aux == NULL); | |
4995 | ||
4996 | /* It turns out that current cfg infrastructure does not support | |
4997 | reuse of basic blocks. Don't bother for now. */ | |
f1f41a6c | 4998 | /*bb_note_pool.safe_push (note);*/ |
e1ab7874 | 4999 | } |
5000 | ||
5001 | /* Get a bb_note from pool or return NULL_RTX if pool is empty. */ | |
cef3d8ad | 5002 | static rtx_note * |
e1ab7874 | 5003 | get_bb_note_from_pool (void) |
5004 | { | |
f1f41a6c | 5005 | if (bb_note_pool.is_empty ()) |
cef3d8ad | 5006 | return NULL; |
e1ab7874 | 5007 | else |
5008 | { | |
cef3d8ad | 5009 | rtx_note *note = bb_note_pool.pop (); |
e1ab7874 | 5010 | |
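| /* Detach the reused note from whatever insn chain it was in. */ | |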
4a57a2e8 | 5011 | SET_PREV_INSN (note) = NULL_RTX; |
5012 | SET_NEXT_INSN (note) = NULL_RTX; | |
e1ab7874 | 5013 | |
5014 | return note; | |
5015 | } | |
5016 | } | |
5017 | ||
5018 | /* Free bb_note_pool. */ | |
5019 | void | |
5020 | free_bb_note_pool (void) | |
5021 | { | |
f1f41a6c | 5022 | bb_note_pool.release (); |
e1ab7874 | 5023 | } |
5024 | ||
5025 | /* Setup scheduler pool and successor structure. */ | |
5026 | void | |
5027 | alloc_sched_pools (void) | |
5028 | { | |
5029 | int succs_size; | |
5030 | ||
5031 | succs_size = MAX_WS + 1; | |
48e1416a | 5032 | succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); |
e1ab7874 | 5033 | succs_info_pool.size = succs_size; |
5034 | succs_info_pool.top = -1; | |
5035 | succs_info_pool.max_top = -1; | |
e1ab7874 | 5036 | } |
5037 | ||
5038 | /* Free the pools. */ | |
5039 | void | |
5040 | free_sched_pools (void) | |
5041 | { | |
5042 | int i; | |
48e1416a | 5043 | |
e26b6f42 | 5044 | sched_lists_pool.release (); |
e1ab7874 | 5045 | gcc_assert (succs_info_pool.top == -1); |
862c1934 | 5046 | for (i = 0; i <= succs_info_pool.max_top; i++) |
e1ab7874 | 5047 | { |
f1f41a6c | 5048 | succs_info_pool.stack[i].succs_ok.release (); |
5049 | succs_info_pool.stack[i].succs_other.release (); | |
5050 | succs_info_pool.stack[i].probs_ok.release (); | |
e1ab7874 | 5051 | } |
5052 | free (succs_info_pool.stack); | |
5053 | } | |
5054 | \f | |
5055 | ||
48e1416a | 5056 | /* Returns a position in RGN where BB can be inserted retaining |
e1ab7874 | 5057 | topological order. */ |
5058 | static int | |
5059 | find_place_to_insert_bb (basic_block bb, int rgn) | |
5060 | { | |
5061 | bool has_preds_outside_rgn = false; | |
5062 | edge e; | |
5063 | edge_iterator ei; | |
48e1416a | 5064 | |
e1ab7874 | 5065 | /* Find whether we have preds outside the region. */ |
5066 | FOR_EACH_EDGE (e, ei, bb->preds) | |
5067 | if (!in_current_region_p (e->src)) | |
5068 | { | |
5069 | has_preds_outside_rgn = true; | |
5070 | break; | |
5071 | } | |
48e1416a | 5072 | |
e1ab7874 | 5073 | /* Recompute the top order -- needed when we have > 1 pred |
5074 | and in case we don't have preds outside. */ | |
5075 | if (flag_sel_sched_pipelining_outer_loops | |
5076 | && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1)) | |
5077 | { | |
5078 | int i, bbi = bb->index, cur_bbi; | |
5079 | ||
5080 | recompute_rev_top_order (); | |
5081 | for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--) | |
5082 | { | |
5083 | cur_bbi = BB_TO_BLOCK (i); | |
48e1416a | 5084 | if (rev_top_order_index[bbi] |
e1ab7874 | 5085 | < rev_top_order_index[cur_bbi]) |
5086 | break; | |
5087 | } | |
48e1416a | 5088 | |
9d75589a | 5089 | /* We skipped past the right block, so we would increase i; but the caller
e1ab7874 | 5090 | increases the returned index by one anyway, so decrease it back. */ | |
5091 | return (i + 1) - 1; | |
5092 | } | |
5093 | else if (has_preds_outside_rgn) | |
5094 | { | |
5095 | /* This is the case when we generate an extra empty block | |
5096 | to serve as region head during pipelining. */ | |
5097 | e = EDGE_SUCC (bb, 0); | |
5098 | gcc_assert (EDGE_COUNT (bb->succs) == 1 | |
5099 | && in_current_region_p (EDGE_SUCC (bb, 0)->dest) | |
5100 | && (BLOCK_TO_BB (e->dest->index) == 0)); | |
5101 | return -1; | |
5102 | } | |
5103 | ||
5104 | /* We don't have preds outside the region. We should have | |
5105 | a single pred, because the multiple preds case comes from
5106 | the pipelining of outer loops, and that is handled above. | |
5107 | Just take the bbi of this single pred. */ | |
5108 | if (EDGE_COUNT (bb->succs) > 0) | |
5109 | { | |
5110 | int pred_bbi; | |
48e1416a | 5111 | |
e1ab7874 | 5112 | gcc_assert (EDGE_COUNT (bb->preds) == 1); |
48e1416a | 5113 | |
e1ab7874 | 5114 | pred_bbi = EDGE_PRED (bb, 0)->src->index; |
5115 | return BLOCK_TO_BB (pred_bbi); | |
5116 | } | |
5117 | else | |
5118 | /* BB has no successors. It is safe to put it in the end. */ | |
5119 | return current_nr_blocks - 1; | |
5120 | } | |
5121 | ||
5122 | /* Deletes an empty basic block freeing its data. */ | |
5123 | static void | |
5124 | delete_and_free_basic_block (basic_block bb) | |
5125 | { | |
5126 | gcc_assert (sel_bb_empty_p (bb)); | |
5127 | ||
5128 | if (BB_LV_SET (bb)) | |
5129 | free_lv_set (bb); | |
5130 | ||
5131 | bitmap_clear_bit (blocks_to_reschedule, bb->index); | |
5132 | ||
48e1416a | 5133 | /* Can't assert av_set properties because we use sel_remove_bb
5134 | when removing a loop preheader from the region. At the point of
e1ab7874 | 5135 | removing the preheader we have already deallocated sel_region_bb_info. */ | |
5136 | gcc_assert (BB_LV_SET (bb) == NULL | |
5137 | && !BB_LV_SET_VALID_P (bb) | |
5138 | && BB_AV_LEVEL (bb) == 0 | |
5139 | && BB_AV_SET (bb) == NULL); | |
48e1416a | 5140 | |
e1ab7874 | 5141 | delete_basic_block (bb); |
5142 | } | |
5143 | ||
5144 | /* Add BB to the current region and update the region data. */ | |
5145 | static void | |
5146 | add_block_to_current_region (basic_block bb) | |
5147 | { | |
5148 | int i, pos, bbi = -2, rgn; | |
5149 | ||
5150 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5151 | bbi = find_place_to_insert_bb (bb, rgn); | |
5152 | bbi += 1; | |
5153 | pos = RGN_BLOCKS (rgn) + bbi; | |
5154 | ||
5155 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5156 | && ebb_head[bbi] == pos); | |
48e1416a | 5157 | |
e1ab7874 | 5158 | /* Make a place for the new block. */ |
5159 | extend_regions (); | |
5160 | ||
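| /* Shift the ordinal numbers of the blocks that will follow the new one. */ | |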
5161 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5162 | BLOCK_TO_BB (rgn_bb_table[i])++; | |
48e1416a | 5163 | |
e1ab7874 | 5164 | memmove (rgn_bb_table + pos + 1, |
5165 | rgn_bb_table + pos, | |
5166 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5167 | ||
5168 | /* Initialize data for BB. */ | |
5169 | rgn_bb_table[pos] = bb->index; | |
5170 | BLOCK_TO_BB (bb->index) = bbi; | |
5171 | CONTAINING_RGN (bb->index) = rgn; | |
5172 | ||
5173 | RGN_NR_BLOCKS (rgn)++; | |
48e1416a | 5174 | |
e1ab7874 | 5175 | for (i = rgn + 1; i <= nr_regions; i++) |
5176 | RGN_BLOCKS (i)++; | |
5177 | } | |
5178 | ||
5179 | /* Remove BB from the current region and update the region data. */ | |
5180 | static void | |
5181 | remove_bb_from_region (basic_block bb) | |
5182 | { | |
5183 | int i, pos, bbi = -2, rgn; | |
5184 | ||
5185 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5186 | bbi = BLOCK_TO_BB (bb->index); | |
5187 | pos = RGN_BLOCKS (rgn) + bbi; | |
5188 | ||
5189 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5190 | && ebb_head[bbi] == pos); | |
5191 | ||
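| /* Shift the ordinal numbers of the following blocks and close the gap in the region's block table. */ | |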
5192 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5193 | BLOCK_TO_BB (rgn_bb_table[i])--; | |
5194 | ||
5195 | memmove (rgn_bb_table + pos, | |
5196 | rgn_bb_table + pos + 1, | |
5197 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5198 | ||
5199 | RGN_NR_BLOCKS (rgn)--; | |
5200 | for (i = rgn + 1; i <= nr_regions; i++) | |
5201 | RGN_BLOCKS (i)--; | |
5202 | } | |
5203 | ||
48e1416a | 5204 | /* Add BB to the current region and update all data. If BB is NULL, add all |
e1ab7874 | 5205 | blocks from last_added_blocks vector. */ |
5206 | static void | |
5207 | sel_add_bb (basic_block bb) | |
5208 | { | |
5209 | /* Extend luids so that new notes will receive zero luids. */ | |
52d7e28c | 5210 | sched_extend_luids (); |
e1ab7874 | 5211 | sched_init_bbs (); |
52d7e28c | 5212 | sel_init_bbs (last_added_blocks); |
e1ab7874 | 5213 | |
48e1416a | 5214 | /* When bb is passed explicitly, the vector should contain
e1ab7874 | 5215 | a single element equal to bb; otherwise, the vector
5216 | must not be NULL. */ | |
f1f41a6c | 5217 | gcc_assert (last_added_blocks.exists ()); |
48e1416a | 5218 | |
e1ab7874 | 5219 | if (bb != NULL) |
5220 | { | |
f1f41a6c | 5221 | gcc_assert (last_added_blocks.length () == 1 |
5222 | && last_added_blocks[0] == bb); | |
e1ab7874 | 5223 | add_block_to_current_region (bb); |
5224 | ||
5225 | /* We associate creating/deleting data sets with the first insn | |
5226 | appearing / disappearing in the bb. */ | |
5227 | if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL) | |
5228 | create_initial_data_sets (bb); | |
48e1416a | 5229 | |
f1f41a6c | 5230 | last_added_blocks.release (); |
e1ab7874 | 5231 | } |
5232 | else | |
5233 | /* BB is NULL - process LAST_ADDED_BLOCKS instead. */ | |
5234 | { | |
5235 | int i; | |
5236 | basic_block temp_bb = NULL; | |
5237 | ||
48e1416a | 5238 | for (i = 0; |
f1f41a6c | 5239 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5240 | { |
5241 | add_block_to_current_region (bb); | |
5242 | temp_bb = bb; | |
5243 | } | |
5244 | ||
48e1416a | 5245 | /* We need to fetch at least one bb so we know the region |
e1ab7874 | 5246 | to update. */ |
5247 | gcc_assert (temp_bb != NULL); | |
5248 | bb = temp_bb; | |
5249 | ||
f1f41a6c | 5250 | last_added_blocks.release (); |
e1ab7874 | 5251 | } |
5252 | ||
5253 | rgn_setup_region (CONTAINING_RGN (bb->index)); | |
5254 | } | |
5255 | ||
48e1416a | 5256 | /* Remove BB from the current region and update all data. |
e1ab7874 | 5257 | If REMOVE_FROM_CFG_P is true, also remove the block from the cfg. */ | |
5258 | static void | |
5259 | sel_remove_bb (basic_block bb, bool remove_from_cfg_p) | |
5260 | { | |
0424f393 | 5261 | unsigned idx = bb->index; |
5262 | ||
e1ab7874 | 5263 | gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX); |
48e1416a | 5264 | |
e1ab7874 | 5265 | remove_bb_from_region (bb); |
5266 | return_bb_to_pool (bb); | |
0424f393 | 5267 | bitmap_clear_bit (blocks_to_reschedule, idx); |
48e1416a | 5268 | |
e1ab7874 | 5269 | if (remove_from_cfg_p) |
1a5dbaab | 5270 | { |
5271 | basic_block succ = single_succ (bb); | |
5272 | delete_and_free_basic_block (bb); | |
5273 | set_immediate_dominator (CDI_DOMINATORS, succ, | |
5274 | recompute_dominator (CDI_DOMINATORS, succ)); | |
5275 | } | |
e1ab7874 | 5276 | |
0424f393 | 5277 | rgn_setup_region (CONTAINING_RGN (idx)); |
e1ab7874 | 5278 | } |
5279 | ||
5280 | /* Concatenate info of EMPTY_BB to info of MERGE_BB. */ | |
5281 | static void | |
5282 | move_bb_info (basic_block merge_bb, basic_block empty_bb) | |
5283 | { | |
ef4cf572 | 5284 | if (in_current_region_p (merge_bb)) |
5285 | concat_note_lists (BB_NOTE_LIST (empty_bb), | |
e97a173d | 5286 | &BB_NOTE_LIST (merge_bb)); |
5287 | BB_NOTE_LIST (empty_bb) = NULL; | |
e1ab7874 | 5288 | |
5289 | } | |
5290 | ||
e1ab7874 | 5291 | /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from |
5292 | region, but keep it in CFG. */ | |
5293 | static void | |
5294 | remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p) | |
5295 | { | |
5296 | /* The block should contain just a note or a label. | |
5297 | We try to check whether it is unused below. */ | |
5298 | gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb) | |
5299 | || LABEL_P (BB_HEAD (empty_bb))); | |
5300 | ||
5301 | /* If basic block has predecessors or successors, redirect them. */ | |
5302 | if (remove_from_cfg_p | |
5303 | && (EDGE_COUNT (empty_bb->preds) > 0 | |
5304 | || EDGE_COUNT (empty_bb->succs) > 0)) | |
5305 | { | |
5306 | basic_block pred; | |
5307 | basic_block succ; | |
5308 | ||
5309 | /* We need to init PRED and SUCC before redirecting edges. */ | |
5310 | if (EDGE_COUNT (empty_bb->preds) > 0) | |
5311 | { | |
5312 | edge e; | |
5313 | ||
5314 | gcc_assert (EDGE_COUNT (empty_bb->preds) == 1); | |
5315 | ||
5316 | e = EDGE_PRED (empty_bb, 0); | |
5317 | gcc_assert (e->src == empty_bb->prev_bb | |
5318 | && (e->flags & EDGE_FALLTHRU)); | |
5319 | ||
5320 | pred = empty_bb->prev_bb; | |
5321 | } | |
5322 | else | |
5323 | pred = NULL; | |
5324 | ||
5325 | if (EDGE_COUNT (empty_bb->succs) > 0) | |
5326 | { | |
5327 | /* We do not check fallthruness here as above, because | |
5328 | after removing a jump the edge may not actually be fallthru. */ | |
5329 | gcc_assert (EDGE_COUNT (empty_bb->succs) == 1); | |
5330 | succ = EDGE_SUCC (empty_bb, 0)->dest; | |
5331 | } | |
5332 | else | |
5333 | succ = NULL; | |
5334 | ||
5335 | if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL) | |
5336 | { | |
5337 | edge e = EDGE_PRED (empty_bb, 0); | |
5338 | ||
5339 | if (e->flags & EDGE_FALLTHRU) | |
5340 | redirect_edge_succ_nodup (e, succ); | |
5341 | else | |
5342 | sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ); | |
5343 | } | |
5344 | ||
5345 | if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL) | |
5346 | { | |
5347 | edge e = EDGE_SUCC (empty_bb, 0); | |
5348 | ||
5349 | if (find_edge (pred, e->dest) == NULL) | |
5350 | redirect_edge_pred (e, pred); | |
5351 | } | |
5352 | } | |
5353 | ||
5354 | /* Finish removing. */ | |
5355 | sel_remove_bb (empty_bb, remove_from_cfg_p); | |
5356 | } | |
5357 | ||
48e1416a | 5358 | /* An implementation of create_basic_block hook, which additionally updates |
e1ab7874 | 5359 | per-bb data structures. */ |
5360 | static basic_block | |
5361 | sel_create_basic_block (void *headp, void *endp, basic_block after) | |
5362 | { | |
5363 | basic_block new_bb; | |
cef3d8ad | 5364 | rtx_note *new_bb_note; |
48e1416a | 5365 | |
5366 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
f1f41a6c | 5367 | || !last_added_blocks.exists ()); |
e1ab7874 | 5368 | |
5369 | new_bb_note = get_bb_note_from_pool (); | |
5370 | ||
5371 | if (new_bb_note == NULL_RTX) | |
5372 | new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after); | |
5373 | else | |
5374 | { | |
3c3f97b4 | 5375 | new_bb = create_basic_block_structure ((rtx_insn *) headp, |
5376 | (rtx_insn *) endp, | |
e1ab7874 | 5377 | new_bb_note, after); |
5378 | new_bb->aux = NULL; | |
5379 | } | |
5380 | ||
f1f41a6c | 5381 | last_added_blocks.safe_push (new_bb); |
e1ab7874 | 5382 | |
5383 | return new_bb; | |
5384 | } | |
5385 | ||
5386 | /* Implement sched_init_only_bb (). */ | |
5387 | static void | |
5388 | sel_init_only_bb (basic_block bb, basic_block after) | |
5389 | { | |
5390 | gcc_assert (after == NULL); | |
5391 | ||
5392 | extend_regions (); | |
5393 | rgn_make_new_region_out_of_new_block (bb); | |
5394 | } | |
5395 | ||
5396 | /* Update the latch when we've split or merged it from block FROM to block TO.
5397 | This should be checked for all outer loops, too. */ | |
5398 | static void | |
5399 | change_loops_latches (basic_block from, basic_block to) | |
5400 | { | |
5401 | gcc_assert (from != to); | |
5402 | ||
5403 | if (current_loop_nest) | |
5404 | { | |
5405 | struct loop *loop; | |
5406 | ||
5407 | for (loop = current_loop_nest; loop; loop = loop_outer (loop)) | |
5408 | if (considered_for_pipelining_p (loop) && loop->latch == from) | |
5409 | { | |
5410 | gcc_assert (loop == current_loop_nest); | |
5411 | loop->latch = to; | |
5412 | gcc_assert (loop_latch_edge (loop)); | |
5413 | } | |
5414 | } | |
5415 | } | |
5416 | ||
48e1416a | 5417 | /* Splits BB into two basic blocks, adding the new one to the region and extending
e1ab7874 | 5418 | per-bb data structures. Returns the newly created bb. */ |
5419 | static basic_block | |
5420 | sel_split_block (basic_block bb, rtx after) | |
5421 | { | |
5422 | basic_block new_bb; | |
5423 | insn_t insn; | |
5424 | ||
5425 | new_bb = sched_split_block_1 (bb, after); | |
5426 | sel_add_bb (new_bb); | |
5427 | ||
5428 | /* This should be called after sel_add_bb, because this uses | |
48e1416a | 5429 | CONTAINING_RGN for the new block, which is not yet initialized. |
e1ab7874 | 5430 | FIXME: this function may be a no-op now. */ |
5431 | change_loops_latches (bb, new_bb); | |
5432 | ||
5433 | /* Update ORIG_BB_INDEX for insns moved into the new block. */ | |
5434 | FOR_BB_INSNS (new_bb, insn) | |
5435 | if (INSN_P (insn)) | |
5436 | EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index; | |
5437 | ||
5438 | if (sel_bb_empty_p (bb)) | |
5439 | { | |
5440 | gcc_assert (!sel_bb_empty_p (new_bb)); | |
5441 | ||
5442 | /* NEW_BB has data sets that need to be updated and BB holds | |
5443 | data sets that should be removed. Exchange these data sets | |
5444 | so that we won't lose BB's valid data sets. */ | |
5445 | exchange_data_sets (new_bb, bb); | |
5446 | free_data_sets (bb); | |
5447 | } | |
5448 | ||
5449 | if (!sel_bb_empty_p (new_bb) | |
5450 | && bitmap_bit_p (blocks_to_reschedule, bb->index)) | |
5451 | bitmap_set_bit (blocks_to_reschedule, new_bb->index); | |
5452 | ||
5453 | return new_bb; | |
5454 | } | |
5455 | ||
5456 | /* If BB ends with a jump insn whose ID is bigger than PREV_MAX_UID, return it.
5457 | Otherwise return NULL. */ | |
04d073df | 5458 | static rtx_insn * |
e1ab7874 | 5459 | check_for_new_jump (basic_block bb, int prev_max_uid) |
5460 | { | |
04d073df | 5461 | rtx_insn *end; |
e1ab7874 | 5462 | |
5463 | end = sel_bb_end (bb); | |
5464 | if (end && INSN_UID (end) >= prev_max_uid) | |
5465 | return end; | |
5466 | return NULL; | |
5467 | } | |
5468 | ||
48e1416a | 5469 | /* Look for a new jump either in the FROM block or in the newly created JUMP_BB block.
e1ab7874 | 5470 | New means having UID at least equal to PREV_MAX_UID. */ |
04d073df | 5471 | static rtx_insn * |
e1ab7874 | 5472 | find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid) |
5473 | { | |
04d073df | 5474 | rtx_insn *jump; |
e1ab7874 | 5475 | |
5476 | /* Return immediately if no new insns were emitted. */ | |
5477 | if (get_max_uid () == prev_max_uid) | |
5478 | return NULL; | |
48e1416a | 5479 | |
e1ab7874 | 5480 | /* Now check both blocks for new jumps. There will only ever be one. */ | |
5481 | if ((jump = check_for_new_jump (from, prev_max_uid))) | |
5482 | return jump; | |
5483 | ||
5484 | if (jump_bb != NULL | |
5485 | && (jump = check_for_new_jump (jump_bb, prev_max_uid))) | |
5486 | return jump; | |
5487 | return NULL; | |
5488 | } | |
5489 | ||
5490 | /* Splits E and adds the newly created basic block to the current region. | |
5491 | Returns this basic block. */ | |
5492 | basic_block | |
5493 | sel_split_edge (edge e) | |
5494 | { | |
5495 | basic_block new_bb, src, other_bb = NULL; | |
5496 | int prev_max_uid; | |
04d073df | 5497 | rtx_insn *jump; |
e1ab7874 | 5498 | |
5499 | src = e->src; | |
5500 | prev_max_uid = get_max_uid (); | |
5501 | new_bb = split_edge (e); | |
5502 | ||
48e1416a | 5503 | if (flag_sel_sched_pipelining_outer_loops |
e1ab7874 | 5504 | && current_loop_nest) |
5505 | { | |
5506 | int i; | |
5507 | basic_block bb; | |
5508 | ||
48e1416a | 5509 | /* Some of the basic blocks might not have been added to the loop. |
e1ab7874 | 5510 | Add them here, until this is fixed in force_fallthru. */ |
48e1416a | 5511 | for (i = 0; |
f1f41a6c | 5512 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5513 | if (!bb->loop_father) |
5514 | { | |
5515 | add_bb_to_loop (bb, e->dest->loop_father); | |
5516 | ||
5517 | gcc_assert (!other_bb && (new_bb->index != bb->index)); | |
5518 | other_bb = bb; | |
5519 | } | |
5520 | } | |
5521 | ||
5522 | /* Add all last_added_blocks to the region. */ | |
5523 | sel_add_bb (NULL); | |
5524 | ||
5525 | jump = find_new_jump (src, new_bb, prev_max_uid); | |
5526 | if (jump) | |
5527 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5528 | ||
5529 | /* Put the correct lv set on this block. */ | |
5530 | if (other_bb && !sel_bb_empty_p (other_bb)) | |
5531 | compute_live (sel_bb_head (other_bb)); | |
5532 | ||
5533 | return new_bb; | |
5534 | } | |
5535 | ||
5536 | /* Implement sched_create_empty_bb (). */ | |
5537 | static basic_block | |
5538 | sel_create_empty_bb (basic_block after) | |
5539 | { | |
5540 | basic_block new_bb; | |
5541 | ||
5542 | new_bb = sched_create_empty_bb_1 (after); | |
5543 | ||
5544 | /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit | |
5545 | later. */ | |
f1f41a6c | 5546 | gcc_assert (last_added_blocks.length () == 1 |
5547 | && last_added_blocks[0] == new_bb); | |
e1ab7874 | 5548 | |
f1f41a6c | 5549 | last_added_blocks.release (); |
e1ab7874 | 5550 | return new_bb; |
5551 | } | |
5552 | ||
5553 | /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5554 | will be split to insert a check. */ | |
5555 | basic_block | |
5556 | sel_create_recovery_block (insn_t orig_insn) | |
5557 | { | |
5558 | basic_block first_bb, second_bb, recovery_block; | |
5559 | basic_block before_recovery = NULL; | |
04d073df | 5560 | rtx_insn *jump; |
e1ab7874 | 5561 | |
5562 | first_bb = BLOCK_FOR_INSN (orig_insn); | |
5563 | if (sel_bb_end_p (orig_insn)) | |
5564 | { | |
5565 | /* Avoid introducing an empty block while splitting. */ | |
5566 | gcc_assert (single_succ_p (first_bb)); | |
5567 | second_bb = single_succ (first_bb); | |
5568 | } | |
5569 | else | |
5570 | second_bb = sched_split_block (first_bb, orig_insn); | |
5571 | ||
5572 | recovery_block = sched_create_recovery_block (&before_recovery); | |
5573 | if (before_recovery) | |
34154e27 | 5574 | copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5575 | |
5576 | gcc_assert (sel_bb_empty_p (recovery_block)); | |
5577 | sched_create_recovery_edges (first_bb, recovery_block, second_bb); | |
5578 | if (current_loops != NULL) | |
5579 | add_bb_to_loop (recovery_block, first_bb->loop_father); | |
48e1416a | 5580 | |
e1ab7874 | 5581 | sel_add_bb (recovery_block); |
48e1416a | 5582 | |
e1ab7874 | 5583 | jump = BB_END (recovery_block); |
5584 | gcc_assert (sel_bb_head (recovery_block) == jump); | |
5585 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5586 | ||
5587 | return recovery_block; | |
5588 | } | |
5589 | ||
5590 | /* Merge basic block B into basic block A. */ | |
0424f393 | 5591 | static void |
e1ab7874 | 5592 | sel_merge_blocks (basic_block a, basic_block b) |
5593 | { | |
0424f393 | 5594 | gcc_assert (sel_bb_empty_p (b) |
5595 | && EDGE_COUNT (b->preds) == 1 | |
5596 | && EDGE_PRED (b, 0)->src == b->prev_bb); | |
e1ab7874 | 5597 | |
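| /* Move B's notes to A, drop B from the region, then merge the blocks in the CFG. */ | |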
0424f393 | 5598 | move_bb_info (b->prev_bb, b); |
5599 | remove_empty_bb (b, false); | |
5600 | merge_blocks (a, b); | |
e1ab7874 | 5601 | change_loops_latches (b, a); |
5602 | } | |
5603 | ||
5604 | /* A wrapper for redirect_edge_and_branch_force, which also initializes | |
8d1881f5 | 5605 | data structures for possibly created bb and insns. */ |
e1ab7874 | 5606 | void |
5607 | sel_redirect_edge_and_branch_force (edge e, basic_block to) | |
5608 | { | |
1a5dbaab | 5609 | basic_block jump_bb, src, orig_dest = e->dest; |
e1ab7874 | 5610 | int prev_max_uid; |
04d073df | 5611 | rtx_insn *jump; |
8d1881f5 | 5612 | int old_seqno = -1; |
48e1416a | 5613 | |
1a5dbaab | 5614 | /* This function is now used only for bookkeeping code creation, where |
5615 | we'll never get the single pred of orig_dest block and thus will not | |
5616 | hit unreachable blocks when updating dominator info. */ | |
5617 | gcc_assert (!sel_bb_empty_p (e->src) | |
5618 | && !single_pred_p (orig_dest)); | |
e1ab7874 | 5619 | src = e->src; |
5620 | prev_max_uid = get_max_uid (); | |
8d1881f5 | 5621 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5622 | when the conditional jump being redirected may become unconditional. */ | |
5623 | if (any_condjump_p (BB_END (src)) | |
5624 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5625 | old_seqno = INSN_SEQNO (BB_END (src)); | |
e1ab7874 | 5626 | |
8d1881f5 | 5627 | jump_bb = redirect_edge_and_branch_force (e, to); |
e1ab7874 | 5628 | if (jump_bb != NULL) |
5629 | sel_add_bb (jump_bb); | |
5630 | ||
5631 | /* As used now, this function cannot spoil the loop structure,
5632 | so we don't need to update anything. But check it to be sure. */ | |
5633 | if (current_loop_nest | |
5634 | && pipelining_p) | |
5635 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
48e1416a | 5636 | |
e1ab7874 | 5637 | jump = find_new_jump (src, jump_bb, prev_max_uid); |
5638 | if (jump) | |
8d1881f5 | 5639 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, |
5640 | old_seqno); | |
1a5dbaab | 5641 | set_immediate_dominator (CDI_DOMINATORS, to, |
5642 | recompute_dominator (CDI_DOMINATORS, to)); | |
5643 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5644 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
e1ab7874 | 5645 | } |
5646 | ||
93919afc | 5647 | /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by |
5648 | redirected edge are in reverse topological order. */ | |
5649 | bool | |
e1ab7874 | 5650 | sel_redirect_edge_and_branch (edge e, basic_block to) |
5651 | { | |
5652 | bool latch_edge_p; | |
1a5dbaab | 5653 | basic_block src, orig_dest = e->dest; |
e1ab7874 | 5654 | int prev_max_uid; |
04d073df | 5655 | rtx_insn *jump; |
df6266b9 | 5656 | edge redirected; |
93919afc | 5657 | bool recompute_toporder_p = false; |
1a5dbaab | 5658 | bool maybe_unreachable = single_pred_p (orig_dest); |
8d1881f5 | 5659 | int old_seqno = -1; |
e1ab7874 | 5660 | |
5661 | latch_edge_p = (pipelining_p | |
5662 | && current_loop_nest | |
5663 | && e == loop_latch_edge (current_loop_nest)); | |
5664 | ||
5665 | src = e->src; | |
5666 | prev_max_uid = get_max_uid (); | |
df6266b9 | 5667 | |
8d1881f5 | 5668 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5669 | when the conditional jump being redirected may become unconditional. */ | |
5670 | if (any_condjump_p (BB_END (src)) | |
5671 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5672 | old_seqno = INSN_SEQNO (BB_END (src)); | |
5673 | ||
df6266b9 | 5674 | redirected = redirect_edge_and_branch (e, to); |
5675 | ||
f1f41a6c | 5676 | gcc_assert (redirected && !last_added_blocks.exists ()); |
e1ab7874 | 5677 | |
5678 | /* When we've redirected a latch edge, update the header. */ | |
5679 | if (latch_edge_p) | |
5680 | { | |
5681 | current_loop_nest->header = to; | |
5682 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
5683 | } | |
5684 | ||
93919afc | 5685 | /* In rare situations, the topological relation between the blocks connected |
5686 | by the redirected edge can change (see PR42245 for an example). Update | |
5687 | block_to_bb/bb_to_block. */ | |
5688 | if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index) | |
5689 | && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index)) | |
5690 | recompute_toporder_p = true; | |
5691 | ||
e1ab7874 | 5692 | jump = find_new_jump (src, NULL, prev_max_uid); |
5693 | if (jump) | |
8d1881f5 | 5694 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno); |
93919afc | 5695 | |
1a5dbaab | 5696 | /* Only update dominator info when we don't have unreachable blocks. |
5697 | Otherwise we'll update in maybe_tidy_empty_bb. */ | |
5698 | if (!maybe_unreachable) | |
5699 | { | |
5700 | set_immediate_dominator (CDI_DOMINATORS, to, | |
5701 | recompute_dominator (CDI_DOMINATORS, to)); | |
5702 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5703 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
5704 | } | |
93919afc | 5705 | return recompute_toporder_p; |
e1ab7874 | 5706 | } |
5707 | ||
5708 | /* This variable holds the cfg hooks used by the selective scheduler. */ | |
5709 | static struct cfg_hooks sel_cfg_hooks; | |
5710 | ||
5711 | /* Register sel-sched cfg hooks. */ | |
5712 | void | |
5713 | sel_register_cfg_hooks (void) | |
5714 | { | |
5715 | sched_split_block = sel_split_block; | |
5716 | ||
5717 | orig_cfg_hooks = get_cfg_hooks (); | |
5718 | sel_cfg_hooks = orig_cfg_hooks; | |
5719 | ||
5720 | sel_cfg_hooks.create_basic_block = sel_create_basic_block; | |
5721 | ||
5722 | set_cfg_hooks (sel_cfg_hooks); | |
5723 | ||
5724 | sched_init_only_bb = sel_init_only_bb; | |
5725 | sched_split_block = sel_split_block; | |
5726 | sched_create_empty_bb = sel_create_empty_bb; | |
5727 | } | |
5728 | ||
5729 | /* Unregister sel-sched cfg hooks. */ | |
5730 | void | |
5731 | sel_unregister_cfg_hooks (void) | |
5732 | { | |
5733 | sched_create_empty_bb = NULL; | |
5734 | sched_split_block = NULL; | |
5735 | sched_init_only_bb = NULL; | |
5736 | ||
5737 | set_cfg_hooks (orig_cfg_hooks); | |
5738 | } | |
5739 | \f | |
5740 | ||
5741 | /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, | |
5742 | LABEL is where this jump should be directed. */ | |
3aaa3eec | 5743 | rtx_insn * |
e1ab7874 | 5744 | create_insn_rtx_from_pattern (rtx pattern, rtx label) |
5745 | { | |
3aaa3eec | 5746 | rtx_insn *insn_rtx; |
e1ab7874 | 5747 | |
5748 | gcc_assert (!INSN_P (pattern)); | |
5749 | ||
5750 | start_sequence (); | |
5751 | ||
5752 | if (label == NULL_RTX) | |
5753 | insn_rtx = emit_insn (pattern); | |
9845d120 | 5754 | else if (DEBUG_INSN_P (label)) |
5755 | insn_rtx = emit_debug_insn (pattern); | |
e1ab7874 | 5756 | else |
5757 | { | |
5758 | insn_rtx = emit_jump_insn (pattern); | |
5759 | JUMP_LABEL (insn_rtx) = label; | |
5760 | ++LABEL_NUSES (label); | |
5761 | } | |
5762 | ||
5763 | end_sequence (); | |
5764 | ||
52d7e28c | 5765 | sched_extend_luids (); |
e1ab7874 | 5766 | sched_extend_target (); |
5767 | sched_deps_init (false); | |
5768 | ||
5769 | /* Initialize INSN_CODE now. */ | |
5770 | recog_memoized (insn_rtx); | |
5771 | return insn_rtx; | |
5772 | } | |
5773 | ||
5774 | /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn | |
5775 | must not be clonable. */ | |
5776 | vinsn_t | |
2f3c9801 | 5777 | create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p) |
e1ab7874 | 5778 | { |
5779 | gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx)); | |
5780 | ||
5781 | /* If VINSN_TYPE is not USE, retain its uniqueness. */ | |
5782 | return vinsn_create (insn_rtx, force_unique_p); | |
5783 | } | |
5784 | ||
5785 | /* Create a copy of INSN_RTX. */ | |
3aaa3eec | 5786 | rtx_insn * |
e1ab7874 | 5787 | create_copy_of_insn_rtx (rtx insn_rtx) |
5788 | { | |
3aaa3eec | 5789 | rtx_insn *res; |
5790 | rtx link; | |
e1ab7874 | 5791 | |
9845d120 | 5792 | if (DEBUG_INSN_P (insn_rtx)) |
5793 | return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5794 | insn_rtx); | |
5795 | ||
e1ab7874 | 5796 | gcc_assert (NONJUMP_INSN_P (insn_rtx)); |
5797 | ||
5798 | res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5799 | NULL_RTX); | |
114c1eb1 | 5800 | |
bb99ba64 | 5801 | /* Locate the end of existing REG_NOTES in NEW_RTX. */ |
5802 | rtx *ptail = ®_NOTES (res); | |
5803 | while (*ptail != NULL_RTX) | |
5804 | ptail = &XEXP (*ptail, 1); | |
5805 | ||
114c1eb1 | 5806 | /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND |
5807 | since mark_jump_label will make them. REG_LABEL_TARGETs are created | |
5808 | there too, but are supposed to be sticky, so we copy them. */ | |
5809 | for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1)) | |
5810 | if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND | |
5811 | && REG_NOTE_KIND (link) != REG_EQUAL | |
5812 | && REG_NOTE_KIND (link) != REG_EQUIV) | |
5813 | { | |
bb99ba64 | 5814 | *ptail = duplicate_reg_note (link); |
5815 | ptail = &XEXP (*ptail, 1); | |
114c1eb1 | 5816 | } |
5817 | ||
e1ab7874 | 5818 | return res; |
5819 | } | |
5820 | ||
5821 | /* Change vinsn field of EXPR to hold NEW_VINSN. */ | |
5822 | void | |
5823 | change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) | |
5824 | { | |
5825 | vinsn_detach (EXPR_VINSN (expr)); | |
5826 | ||
5827 | EXPR_VINSN (expr) = new_vinsn; | |
5828 | vinsn_attach (new_vinsn); | |
5829 | } | |
5830 | ||
5831 | /* Helpers for global init. */ | |
5832 | /* This structure is used to be able to call the existing bundling mechanism
5833 | and calculate insn priorities. */ | |
48e1416a | 5834 | static struct haifa_sched_info sched_sel_haifa_sched_info = |
e1ab7874 | 5835 | { |
5836 | NULL, /* init_ready_list */ | |
5837 | NULL, /* can_schedule_ready_p */ | |
5838 | NULL, /* schedule_more_p */ | |
5839 | NULL, /* new_ready */ | |
5840 | NULL, /* rgn_rank */ | |
5841 | sel_print_insn, /* rgn_print_insn */ | |
5842 | contributes_to_priority, | |
4db82bc9 | 5843 | NULL, /* insn_finishes_block_p */ |
e1ab7874 | 5844 | |
5845 | NULL, NULL, | |
5846 | NULL, NULL, | |
5847 | 0, 0, | |
5848 | ||
5849 | NULL, /* add_remove_insn */ | |
5850 | NULL, /* begin_schedule_ready */ | |
d2412f57 | 5851 | NULL, /* begin_move_insn */ |
e1ab7874 | 5852 | NULL, /* advance_target_bb */ |
e2f4a6ff | 5853 | |
5854 | NULL, | |
5855 | NULL, | |
5856 | ||
e1ab7874 | 5857 | SEL_SCHED | NEW_BBS |
5858 | }; | |
5859 | ||
5860 | /* Setup special insns used in the scheduler. */ | |
48e1416a | 5861 | void |
e1ab7874 | 5862 | setup_nop_and_exit_insns (void) |
5863 | { | |
5864 | gcc_assert (nop_pattern == NULL_RTX | |
5865 | && exit_insn == NULL_RTX); | |
5866 | ||
bc9cb5ed | 5867 | nop_pattern = constm1_rtx; |
e1ab7874 | 5868 | |
5869 | start_sequence (); | |
5870 | emit_insn (nop_pattern); | |
5871 | exit_insn = get_insns (); | |
5872 | end_sequence (); | |
34154e27 | 5873 | set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5874 | } |
5875 | ||
5876 | /* Free special insns used in the scheduler. */ | |
5877 | void | |
5878 | free_nop_and_exit_insns (void) | |
5879 | { | |
179c282d | 5880 | exit_insn = NULL; |
e1ab7874 | 5881 | nop_pattern = NULL_RTX; |
5882 | } | |
5883 | ||
5884 | /* Setup a special vinsn used in new insns initialization. */ | |
5885 | void | |
5886 | setup_nop_vinsn (void) | |
5887 | { | |
5888 | nop_vinsn = vinsn_create (exit_insn, false); | |
5889 | vinsn_attach (nop_vinsn); | |
5890 | } | |
5891 | ||
5892 | /* Free a special vinsn used in new insns initialization. */ | |
5893 | void | |
5894 | free_nop_vinsn (void) | |
5895 | { | |
5896 | gcc_assert (VINSN_COUNT (nop_vinsn) == 1); | |
5897 | vinsn_detach (nop_vinsn); | |
5898 | nop_vinsn = NULL; | |
5899 | } | |
5900 | ||
5901 | /* Call a set_sched_flags hook. */ | |
5902 | void | |
5903 | sel_set_sched_flags (void) | |
5904 | { | |
48e1416a | 5905 | /* ??? This means that set_sched_flags was called, and we decided to
e1ab7874 | 5906 | support speculation. However, set_sched_flags also modifies flags |
48e1416a | 5907 | on current_sched_info, doing this only at global init. And we |
e1ab7874 | 5908 | sometimes change c_s_i later. So put the correct flags again. */ |
5909 | if (spec_info && targetm.sched.set_sched_flags) | |
5910 | targetm.sched.set_sched_flags (spec_info); | |
5911 | } | |
5912 | ||
5913 | /* Setup pointers to global sched info structures. */ | |
5914 | void | |
5915 | sel_setup_sched_infos (void) | |
5916 | { | |
5917 | rgn_setup_common_sched_info (); | |
5918 | ||
5919 | memcpy (&sel_common_sched_info, common_sched_info, | |
5920 | sizeof (sel_common_sched_info)); | |
5921 | ||
5922 | sel_common_sched_info.fix_recovery_cfg = NULL; | |
5923 | sel_common_sched_info.add_block = NULL; | |
5924 | sel_common_sched_info.estimate_number_of_insns | |
5925 | = sel_estimate_number_of_insns; | |
5926 | sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn; | |
5927 | sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS; | |
5928 | ||
5929 | common_sched_info = &sel_common_sched_info; | |
5930 | ||
5931 | current_sched_info = &sched_sel_haifa_sched_info; | |
48e1416a | 5932 | current_sched_info->sched_max_insns_priority = |
e1ab7874 | 5933 | get_rgn_sched_max_insns_priority (); |
48e1416a | 5934 | |
e1ab7874 | 5935 | sel_set_sched_flags (); |
5936 | } | |
5937 | \f | |
5938 | ||
5939 | /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX;
5940 | *BB_ORD_INDEX is then increased. */ | |
5941 | static void | |
5942 | sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn) | |
5943 | { | |
5944 | RGN_NR_BLOCKS (rgn) += 1; | |
5945 | RGN_DONT_CALC_DEPS (rgn) = 0; | |
5946 | RGN_HAS_REAL_EBB (rgn) = 0; | |
5947 | CONTAINING_RGN (bb->index) = rgn; | |
5948 | BLOCK_TO_BB (bb->index) = *bb_ord_index; | |
5949 | rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index; | |
5950 | (*bb_ord_index)++; | |
5951 | ||
5952 | /* FIXME: it is true only when not scheduling ebbs. */ | |
5953 | RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn); | |
5954 | } | |
5955 | ||
5956 | /* Functions to support pipelining of outer loops. */ | |
5957 | ||
5958 | /* Creates a new empty region and returns its number. */ | |
5959 | static int | |
5960 | sel_create_new_region (void) | |
5961 | { | |
5962 | int new_rgn_number = nr_regions; | |
5963 | ||
5964 | RGN_NR_BLOCKS (new_rgn_number) = 0; | |
5965 | ||
5966 | /* FIXME: This will work only when EBBs are not created. */ | |
5967 | if (new_rgn_number != 0) | |
48e1416a | 5968 | RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + |
e1ab7874 | 5969 | RGN_NR_BLOCKS (new_rgn_number - 1); |
5970 | else | |
5971 | RGN_BLOCKS (new_rgn_number) = 0; | |
5972 | ||
5973 | /* Set the blocks of the next region so the other functions may | |
5974 | calculate the number of blocks in the region. */ | |
48e1416a | 5975 | RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + |
e1ab7874 | 5976 | RGN_NR_BLOCKS (new_rgn_number); |
5977 | ||
5978 | nr_regions++; | |
5979 | ||
5980 | return new_rgn_number; | |
5981 | } | |
5982 | ||
5983 | /* If X has a smaller topological sort number than Y, returns -1; | |
5984 | if greater, returns 1. */ | |
5985 | static int | |
5986 | bb_top_order_comparator (const void *x, const void *y) | |
5987 | { | |
5988 | basic_block bb1 = *(const basic_block *) x; | |
5989 | basic_block bb2 = *(const basic_block *) y; | |
5990 | ||
48e1416a | 5991 | gcc_assert (bb1 == bb2 |
5992 | || rev_top_order_index[bb1->index] | |
e1ab7874 | 5993 | != rev_top_order_index[bb2->index]); |
5994 | ||
5995 | /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so | |
5996 | bbs with greater number should go earlier. */ | |
5997 | if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index]) | |
5998 | return -1; | |
5999 | else | |
6000 | return 1; | |
6001 | } | |
6002 | ||
48e1416a | 6003 | /* Create a region for LOOP and return its number. If we don't want |
e1ab7874 | 6004 | to pipeline LOOP, return -1. */ |
6005 | static int | |
6006 | make_region_from_loop (struct loop *loop) | |
6007 | { | |
6008 | unsigned int i; | |
6009 | int new_rgn_number = -1; | |
6010 | struct loop *inner; | |
6011 | ||
6012 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6013 | int bb_ord_index = 0; | |
6014 | basic_block *loop_blocks; | |
6015 | basic_block preheader_block; | |
6016 | ||
48e1416a | 6017 | if (loop->num_nodes |
e1ab7874 | 6018 | > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS)) |
6019 | return -1; | |
48e1416a | 6020 | |
e1ab7874 | 6021 | /* Don't pipeline loops whose latch belongs to some of its inner loops. */ |
6022 | for (inner = loop->inner; inner; inner = inner->inner) | |
6023 | if (flow_bb_inside_loop_p (inner, loop->latch)) | |
6024 | return -1; | |
6025 | ||
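| /* Refuse to pipeline loops with more insns than the pipeline region limit allows. */ | |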
6026 | loop->ninsns = num_loop_insns (loop); | |
6027 | if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS)) | |
6028 | return -1; | |
6029 | ||
6030 | loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator); | |
6031 | ||
6032 | for (i = 0; i < loop->num_nodes; i++) | |
6033 | if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP) | |
6034 | { | |
6035 | free (loop_blocks); | |
6036 | return -1; | |
6037 | } | |
6038 | ||
6039 | preheader_block = loop_preheader_edge (loop)->src; | |
6040 | gcc_assert (preheader_block); | |
6041 | gcc_assert (loop_blocks[0] == loop->header); | |
6042 | ||
6043 | new_rgn_number = sel_create_new_region (); | |
6044 | ||
6045 | sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number); | |
08b7917c | 6046 | bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index); |
e1ab7874 | 6047 | |
6048 | for (i = 0; i < loop->num_nodes; i++) | |
6049 | { | |
6050 | /* Add only those blocks that haven't been scheduled in the inner loop. | |
6051 | The exception is the basic blocks with bookkeeping code - they should | |
48e1416a | 6052 | be added to the region (and they actually don't belong to the loop |
e1ab7874 | 6053 | body, but to the region containing that loop body). */ |
6054 | ||
6055 | gcc_assert (new_rgn_number >= 0); | |
6056 | ||
08b7917c | 6057 | if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index)) |
e1ab7874 | 6058 | { |
48e1416a | 6059 | sel_add_block_to_region (loop_blocks[i], &bb_ord_index, |
e1ab7874 | 6060 | new_rgn_number); |
08b7917c | 6061 | bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index); |
e1ab7874 | 6062 | } |
6063 | } | |
6064 | ||
6065 | free (loop_blocks); | |
6066 | MARK_LOOP_FOR_PIPELINING (loop); | |
6067 | ||
6068 | return new_rgn_number; | |
6069 | } | |
6070 | ||
6071 | /* Create a new region from preheader blocks LOOP_BLOCKS. */ | |
6072 | void | |
f1f41a6c | 6073 | make_region_from_loop_preheader (vec<basic_block> *&loop_blocks) |
e1ab7874 | 6074 | { |
6075 | unsigned int i; | |
6076 | int new_rgn_number = -1; | |
6077 | basic_block bb; | |
6078 | ||
6079 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6080 | int bb_ord_index = 0; | |
6081 | ||
6082 | new_rgn_number = sel_create_new_region (); | |
6083 | ||
f1f41a6c | 6084 | FOR_EACH_VEC_ELT (*loop_blocks, i, bb) |
e1ab7874 | 6085 | { |
6086 | gcc_assert (new_rgn_number >= 0); | |
6087 | ||
6088 | sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number); | |
6089 | } | |
6090 | ||
f1f41a6c | 6091 | vec_free (loop_blocks); |
e1ab7874 | 6092 | } |
6093 | ||
6094 | ||
6095 | /* Create region(s) from loop nest LOOP, such that inner loops will be | |
48e1416a | 6096 | pipelined before outer loops. Returns true when a region for LOOP |
e1ab7874 | 6097 | is created. */ |
6098 | static bool | |
6099 | make_regions_from_loop_nest (struct loop *loop) | |
48e1416a | 6100 | { |
e1ab7874 | 6101 | struct loop *cur_loop; |
6102 | int rgn_number; | |
6103 | ||
6104 | /* Traverse all inner nodes of the loop. */ | |
6105 | for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next) | |
08b7917c | 6106 | if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index)) |
e1ab7874 | 6107 | return false; |
6108 | ||
6109 | /* At this moment all regular inner loops should have been pipelined. | |
6110 | Try to create a region from this loop. */ | |
6111 | rgn_number = make_region_from_loop (loop); | |
6112 | ||
6113 | if (rgn_number < 0) | |
6114 | return false; | |
6115 | ||
f1f41a6c | 6116 | loop_nests.safe_push (loop); |
e1ab7874 | 6117 | return true; |
6118 | } | |
6119 | ||
6120 | /* Initialize data structures needed for pipelining. */ | |
6121 | void | |
6122 | sel_init_pipelining (void) | |
6123 | { | |
6124 | /* Collect loop information to be used in outer loops pipelining. */ | |
6125 | loop_optimizer_init (LOOPS_HAVE_PREHEADERS | |
6126 | | LOOPS_HAVE_FALLTHRU_PREHEADERS | |
6127 | | LOOPS_HAVE_RECORDED_EXITS | |
6128 | | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS); | |
6129 | current_loop_nest = NULL; | |
6130 | ||
fe672ac0 | 6131 | bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 6132 | bitmap_clear (bbs_in_loop_rgns); |
e1ab7874 | 6133 | |
6134 | recompute_rev_top_order (); | |
6135 | } | |
6136 | ||
6137 | /* Returns a struct loop for region RGN. */ | |
6138 | loop_p | |
6139 | get_loop_nest_for_rgn (unsigned int rgn) | |
6140 | { | |
6141 | /* Regions created with extend_rgns don't have corresponding loop nests, | |
6142 | because they don't represent loops. */ | |
f1f41a6c | 6143 | if (rgn < loop_nests.length ()) |
6144 | return loop_nests[rgn]; | |
e1ab7874 | 6145 | else |
6146 | return NULL; | |
6147 | } | |
6148 | ||
6149 | /* True when LOOP was included into pipelining regions. */ | |
6150 | bool | |
6151 | considered_for_pipelining_p (struct loop *loop) | |
6152 | { | |
6153 | if (loop_depth (loop) == 0) | |
6154 | return false; | |
6155 | ||
48e1416a | 6156 | /* Now, the loop could be too large or irreducible. Check whether its |
6157 | region is in LOOP_NESTS. | |
6158 | We determine the region number of LOOP as the region number of its | |
6159 | latch. We can't use the header here, because this header could be
e1ab7874 | 6160 | a just-removed preheader, which would give us the wrong region number.
6161 | Latch can't be used because it could be in the inner loop too. */ | |
a2d56a0e | 6162 | if (LOOP_MARKED_FOR_PIPELINING_P (loop)) |
e1ab7874 | 6163 | { |
6164 | int rgn = CONTAINING_RGN (loop->latch->index); | |
6165 | ||
f1f41a6c | 6166 | gcc_assert ((unsigned) rgn < loop_nests.length ()); |
e1ab7874 | 6167 | return true; |
6168 | } | |
48e1416a | 6169 | |
e1ab7874 | 6170 | return false; |
6171 | } | |
6172 | ||
48e1416a | 6173 | /* Makes regions from the rest of the blocks, after loops are chosen |
e1ab7874 | 6174 | for pipelining. */ |
6175 | static void | |
6176 | make_regions_from_the_rest (void) | |
6177 | { | |
6178 | int cur_rgn_blocks; | |
6179 | int *loop_hdr; | |
6180 | int i; | |
6181 | ||
6182 | basic_block bb; | |
6183 | edge e; | |
6184 | edge_iterator ei; | |
6185 | int *degree; | |
e1ab7874 | 6186 | |
6187 | /* Index in rgn_bb_table where to start allocating new regions. */ | |
6188 | cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0; | |
e1ab7874 | 6189 | |
48e1416a | 6190 | /* Make regions from all the remaining basic blocks - those that don't belong to
e1ab7874 | 6191 | any loop or belong to irreducible loops. Prepare the data structures |
6192 | for extend_rgns. */ | |
6193 | ||
6194 | /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop, | |
6195 | LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same | |
6196 | loop. */ | |
fe672ac0 | 6197 | loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
6198 | degree = XCNEWVEC (int, last_basic_block_for_fn (cfun)); | |
e1ab7874 | 6199 | |
6200 | ||
6201 | /* For each basic block that belongs to some loop assign the number | |
6202 | of innermost loop it belongs to. */ | |
fe672ac0 | 6203 | for (i = 0; i < last_basic_block_for_fn (cfun); i++) |
e1ab7874 | 6204 | loop_hdr[i] = -1; |
6205 | ||
fc00614f | 6206 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6207 | { |
9c26ddef | 6208 | if (bb->loop_father && bb->loop_father->num != 0 |
e1ab7874 | 6209 | && !(bb->flags & BB_IRREDUCIBLE_LOOP)) |
6210 | loop_hdr[bb->index] = bb->loop_father->num; | |
6211 | } | |
6212 | ||
48e1416a | 6213 | /* For each basic block the degree is calculated as the number of incoming
e1ab7874 | 6214 | edges that come from bbs that are not yet scheduled.
6215 | The basic blocks that are scheduled have a degree value of zero. */ | |
fc00614f | 6216 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6217 | { |
6218 | degree[bb->index] = 0; | |
6219 | ||
08b7917c | 6220 | if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index)) |
e1ab7874 | 6221 | { |
6222 | FOR_EACH_EDGE (e, ei, bb->preds) | |
08b7917c | 6223 | if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index)) |
e1ab7874 | 6224 | degree[bb->index]++; |
6225 | } | |
6226 | else | |
6227 | degree[bb->index] = -1; | |
6228 | } | |
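
  /* A small worked example (hypothetical indices): suppose bb 5 is not in
     BBS_IN_LOOP_RGNS and has three incoming edges, two from blocks that are
     also outside BBS_IN_LOOP_RGNS and one from a block inside it.  Then
     DEGREE[5] == 2, because only edges from not-yet-scheduled blocks count,
     while blocks already placed into loop regions get DEGREE == -1 and are
     excluded from the single-block regions created below.  */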

  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
	rgn_bb_table[cur_rgn_blocks] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;
	CONTAINING_RGN (bb->index) = nr_regions++;
	BLOCK_TO_BB (bb->index) = 0;
      }

  free (degree);
  free (loop_hdr);
}
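
/* For instance (a sketch with made-up numbers): if extend_rgns left bb 7
   unassigned (so DEGREE[7] >= 0) and CUR_RGN_BLOCKS ended up at 12, the
   loop above records a one-block region for it:

       rgn_bb_table[12] = 7;
       RGN_BLOCKS (nr_regions) = 12, RGN_NR_BLOCKS (nr_regions) = 1;

   and bb 7 becomes block 0 of its own region.  */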

/* Free data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
{
  struct loop *loop;

  /* Release aux fields so we don't free them later by mistake.  */
  FOR_EACH_LOOP (loop, 0)
    loop->aux = NULL;

  loop_optimizer_finalize ();

  loop_nests.release ();

  free (rev_top_order_index);
  rev_top_order_index = NULL;
}

/* This function replaces find_rgns when
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
void
sel_find_rgns (void)
{
  sel_init_pipelining ();
  extend_regions ();

  if (current_loops)
    {
      loop_p loop;

      FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
			    ? LI_FROM_INNERMOST
			    : LI_ONLY_INNERMOST))
	make_regions_from_loop_nest (loop);
    }

  /* Make regions from all the remaining basic blocks and schedule them.
     These blocks include blocks that don't belong to any loop or belong
     to irreducible loops.  */
  make_regions_from_the_rest ();

  /* We don't need bbs_in_loop_rgns anymore.  */
  sbitmap_free (bbs_in_loop_rgns);
  bbs_in_loop_rgns = NULL;
}
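
/* To make the flag's effect concrete (a sketch, not tied to any real test):
   for a nest where loop 1 contains loop 2, LI_ONLY_INNERMOST visits only
   loop 2, so just the innermost loop may become a pipelining region; with
   -fsel-sched-pipelining-outer-loops, LI_FROM_INNERMOST visits loop 2 and
   then loop 1, so the outer loop is considered for pipelining as well.  */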

/* Add the preheader blocks from the previous loop to the current region,
   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
   them in *BBS (these are the blocks stashed by sel_remove_loop_preheader
   below).  This function is only used with
   -fsel-sched-pipelining-outer-loops.  */
void
sel_add_loop_preheaders (bb_vec_t *bbs)
{
  int i;
  basic_block bb;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);

  if (!preheader_blocks)
    return;

  for (i = 0; preheader_blocks->iterate (i, &bb); i++)
    {
      bbs->safe_push (bb);
      last_added_blocks.safe_push (bb);
      sel_add_bb (bb);
    }

  vec_free (preheader_blocks);
}

/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
   Note that the function should also work when pipelining_p is false,
   because it is used when deciding whether we should or should not
   reschedule pipelined code.  */
bool
sel_is_loop_preheader_p (basic_block bb)
{
  if (current_loop_nest)
    {
      struct loop *outer;

      if (preheader_removed)
	return false;

      /* The preheader is the first block in the region.  */
      if (BLOCK_TO_BB (bb->index) == 0)
	return true;

      /* We used to find a preheader with the topological information.
	 Check that the above code is equivalent to what we did before.  */

      if (in_current_region_p (current_loop_nest->header))
	gcc_assert (!(BLOCK_TO_BB (bb->index)
		      < BLOCK_TO_BB (current_loop_nest->header->index)));

      /* Guard against the situation when the latch block of an outer
	 pipelined loop would end up here - that must never happen.  */
      for (outer = loop_outer (current_loop_nest);
	   outer;
	   outer = loop_outer (outer))
	if (considered_for_pipelining_p (outer) && outer->latch == bb)
	  gcc_unreachable ();
    }

  return false;
}

/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
   can be removed, making the corresponding edge fallthrough (assuming that
   all basic blocks between JUMP_BB and DEST_BB are empty).  */
static bool
bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
{
  if (!onlyjump_p (BB_END (jump_bb))
      || tablejump_p (BB_END (jump_bb), NULL, NULL))
    return false;

  /* Reject several outgoing edges, an abnormal or crossing edge, or a
     jump whose destination is not DEST_BB.  */
  if (EDGE_COUNT (jump_bb->succs) != 1
      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
    return false;

  /* None of the above applies, so the jump is removable.  */
  return true;
}
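
/* A sketch of the CFG shape this predicate accepts (hypothetical labels):

       JUMP_BB:   ...
                  jump L1      <- a simple jump, not a tablejump
           |
           v  single successor edge, neither abnormal nor crossing
       DEST_BB:   L1: ...

   Removing the jump turns the edge into a fallthrough; this is only safe
   because the caller guarantees that any basic blocks between JUMP_BB and
   DEST_BB are empty.  */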

/* Removes the loop preheader from the current region and saves it in
   PREHEADER_BLOCKS of the father loop, so it can be added later to the
   region that represents the outer loop.  */
static void
sel_remove_loop_preheader (void)
{
  int i, old_len;
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  basic_block bb;
  bool all_empty_p = true;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));

  vec_check_alloc (preheader_blocks, 0);

  gcc_assert (current_loop_nest);
  old_len = preheader_blocks->length ();

  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));

      /* If the basic block belongs to the region, but doesn't belong to the
	 corresponding loop, then it should be a preheader.  */
      if (sel_is_loop_preheader_p (bb))
	{
	  preheader_blocks->safe_push (bb);
	  if (BB_END (bb) != bb_note (bb))
	    all_empty_p = false;
	}
    }

  /* Remove these blocks only after iterating over the whole region.  */
  for (i = preheader_blocks->length () - 1; i >= old_len; i--)
    {
      bb = (*preheader_blocks)[i];
      sel_remove_bb (bb, false);
    }

  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
    {
      if (!all_empty_p)
	/* Immediately create a new region from the preheader.  */
	make_region_from_loop_preheader (preheader_blocks);
      else
	{
	  /* If all preheader blocks are empty, don't create a new empty
	     region.  Instead, remove them completely.  */
	  FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
	    {
	      edge e;
	      edge_iterator ei;
	      basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;

	      /* Redirect all incoming edges to the next basic block.  */
	      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
		{
		  if (! (e->flags & EDGE_FALLTHRU))
		    redirect_edge_and_branch (e, bb->next_bb);
		  else
		    redirect_edge_succ (e, bb->next_bb);
		}
	      gcc_assert (BB_NOTE_LIST (bb) == NULL);
	      delete_and_free_basic_block (bb);

	      /* Check if after deleting the preheader there is an
		 unconditional jump in PREV_BB that leads to the next
		 basic block NEXT_BB.  If so, delete this jump and clear
		 the data sets of its basic block if it becomes empty.  */
	      if (next_bb->prev_bb == prev_bb
		  && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
		  && bb_has_removable_jump_to_p (prev_bb, next_bb))
		{
		  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
		  if (BB_END (prev_bb) == bb_note (prev_bb))
		    free_data_sets (prev_bb);
		}

	      set_immediate_dominator (CDI_DOMINATORS, next_bb,
				       recompute_dominator (CDI_DOMINATORS,
							    next_bb));
	    }
	}
      vec_free (preheader_blocks);
    }
  else
    /* Store the preheader within the father's loop structure.  */
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
			       preheader_blocks);
}

#endif