Commit | Line | Data |
---|---|---|
e1ab7874 | 1 | /* Instruction scheduling pass. Selective scheduler and pipeliner. |
fbd26352 | 2 | Copyright (C) 2006-2019 Free Software Foundation, Inc. |
e1ab7874 | 3 | |
4 | This file is part of GCC. | |
5 | ||
6 | GCC is free software; you can redistribute it and/or modify it under | |
7 | the terms of the GNU General Public License as published by the Free | |
8 | Software Foundation; either version 3, or (at your option) any later | |
9 | version. | |
10 | ||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
17 | along with GCC; see the file COPYING3. If not see | |
18 | <http://www.gnu.org/licenses/>. */ | |
19 | ||
20 | #include "config.h" | |
21 | #include "system.h" | |
22 | #include "coretypes.h" | |
9ef16211 | 23 | #include "backend.h" |
d040a5b0 | 24 | #include "cfghooks.h" |
9ef16211 | 25 | #include "tree.h" |
e1ab7874 | 26 | #include "rtl.h" |
9ef16211 | 27 | #include "df.h" |
ad7b10a2 | 28 | #include "memmodel.h" |
e1ab7874 | 29 | #include "tm_p.h" |
94ea8568 | 30 | #include "cfgrtl.h" |
31 | #include "cfganal.h" | |
32 | #include "cfgbuild.h" | |
e1ab7874 | 33 | #include "insn-config.h" |
34 | #include "insn-attr.h" | |
e1ab7874 | 35 | #include "recog.h" |
36 | #include "params.h" | |
37 | #include "target.h" | |
e1ab7874 | 38 | #include "sched-int.h" |
06f9d6ef | 39 | #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */ |
e1ab7874 | 40 | |
41 | #ifdef INSN_SCHEDULING | |
9ef16211 | 42 | #include "regset.h" |
43 | #include "cfgloop.h" | |
e1ab7874 | 44 | #include "sel-sched-ir.h" |
45 | /* We don't have to use it except for sel_print_insn. */ | |
46 | #include "sel-sched-dump.h" | |
47 | ||
48 | /* A vector holding bb info for whole scheduling pass. */ | |
16fb756f | 49 | vec<sel_global_bb_info_def> sel_global_bb_info; |
e1ab7874 | 50 | |
51 | /* A vector holding bb info. */ | |
16fb756f | 52 | vec<sel_region_bb_info_def> sel_region_bb_info; |
e1ab7874 | 53 | |
54 | /* A pool for allocating all lists. */ | |
1dc6c44d | 55 | object_allocator<_list_node> sched_lists_pool ("sel-sched-lists"); |
e1ab7874 | 56 | |
57 | /* This contains information about successors for compute_av_set. */ | |
58 | struct succs_info current_succs; | |
59 | ||
60 | /* Data structure to describe interaction with the generic scheduler utils. */ | |
61 | static struct common_sched_info_def sel_common_sched_info; | |
62 | ||
63 | /* The loop nest being pipelined. */ | |
2e966e2a | 64 | class loop *current_loop_nest; |
e1ab7874 | 65 | |
66 | /* LOOP_NESTS is a vector containing the corresponding loop nest for | |
67 | each region. */ | |
16fb756f | 68 | static vec<loop_p> loop_nests; |
e1ab7874 | 69 | |
70 | /* Saves blocks already in loop regions, indexed by bb->index. */ | |
71 | static sbitmap bbs_in_loop_rgns = NULL; | |
72 | ||
73 | /* CFG hooks that are saved before changing create_basic_block hook. */ | |
74 | static struct cfg_hooks orig_cfg_hooks; | |
75 | \f | |
76 | ||
77 | /* Array containing reverse topological index of function basic blocks, | |
78 | indexed by BB->INDEX. */ | |
79 | static int *rev_top_order_index = NULL; | |
80 | ||
81 | /* Length of the above array. */ | |
82 | static int rev_top_order_index_len = -1; | |
83 | ||
84 | /* A regset pool structure. */ | |
85 | static struct | |
86 | { | |
87 | /* The stack to which regsets are returned. */ | |
88 | regset *v; | |
89 | ||
90 | /* Its top: the number of regsets currently on the stack. */ | |
91 | int n; | |
92 | ||
93 | /* Its size. */ | |
94 | int s; | |
95 | ||
96 | /* In VV we save all generated regsets so that, when destructing the | |
97 | pool, we can compare it with V and check that every regset was returned | |
98 | back to pool. */ | |
99 | regset *vv; | |
100 | ||
101 | /* The top of the VV stack. */ | |
102 | int nn; | |
103 | ||
104 | /* Its size. */ | |
105 | int ss; | |
106 | ||
107 | /* The difference between allocated and returned regsets. */ | |
108 | int diff; | |
109 | } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 }; | |
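For orientation, the pool above is a conventional free-list-on-a-growable-stack: taking a regset pops V when it is non-empty and allocates a fresh one otherwise, returning pushes back onto V (growing it by the 2*s+1 rule), and DIFF counts objects handed out but not yet returned, which is what the leak check in free_regset_pool later verifies. A minimal standalone sketch of that discipline (toy names and plain malloc, not the GCC code):

```c
#include <stdio.h>
#include <stdlib.h>

/* A toy pool mirroring the V/N/S + DIFF fields above: V is a stack of
   returned objects, N its top, S its capacity, DIFF the number of objects
   handed out but not yet returned.  */
struct toy_pool
{
  int **v;   /* stack of free objects */
  int n;     /* number of entries currently on the stack */
  int s;     /* allocated capacity of the stack */
  int diff;  /* allocated-but-not-returned counter */
};

static struct toy_pool pool = { NULL, 0, 0, 0 };

/* Get an object: pop the stack if possible, otherwise allocate a new one.  */
static int *
pool_get (void)
{
  int *obj = pool.n ? pool.v[--pool.n] : (int *) malloc (sizeof (int));
  pool.diff++;
  return obj;
}

/* Return an object: push it back, growing the stack as 2*s+1 when full.  */
static void
pool_put (int *obj)
{
  if (pool.n == pool.s)
    pool.v = (int **) realloc (pool.v,
			       (pool.s = 2 * pool.s + 1) * sizeof (int *));
  pool.v[pool.n++] = obj;
  pool.diff--;
}

int
main (void)
{
  int *a = pool_get (), *b = pool_get ();
  pool_put (a);
  pool_put (b);
  printf ("outstanding objects: %d\n", pool.diff);  /* prints 0 */
  while (pool.n)
    free (pool.v[--pool.n]);
  free (pool.v);
  return 0;
}
```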
110 | ||
111 | /* This represents the nop pool. */ | |
112 | static struct | |
113 | { | |
114 | /* The vector which holds previously emitted nops. */ | |
115 | insn_t *v; | |
116 | ||
117 | /* Its top: the number of nops currently in the pool. */ | |
118 | int n; | |
119 | ||
120 | /* Its size. */ | |
48e1416a | 121 | int s; |
e1ab7874 | 122 | } nop_pool = { NULL, 0, 0 }; |
123 | ||
124 | /* The pool for basic block notes. */ | |
cef3d8ad | 125 | static vec<rtx_note *> bb_note_pool; |
e1ab7874 | 126 | |
127 | /* A NOP pattern used to emit placeholder insns. */ | |
128 | rtx nop_pattern = NULL_RTX; | |
129 | /* A special instruction that resides in EXIT_BLOCK. | |
130 | EXIT_INSN is successor of the insns that lead to EXIT_BLOCK. */ | |
179c282d | 131 | rtx_insn *exit_insn = NULL; |
e1ab7874 | 132 | |
48e1416a | 133 | /* TRUE if, while scheduling the current region (which is a loop), its |
e1ab7874 | 134 | preheader was removed. */ |
135 | bool preheader_removed = false; | |
136 | \f | |
137 | ||
138 | /* Forward static declarations. */ | |
139 | static void fence_clear (fence_t); | |
140 | ||
141 | static void deps_init_id (idata_t, insn_t, bool); | |
142 | static void init_id_from_df (idata_t, insn_t, bool); | |
143 | static expr_t set_insn_init (expr_t, vinsn_t, int); | |
144 | ||
145 | static void cfg_preds (basic_block, insn_t **, int *); | |
146 | static void prepare_insn_expr (insn_t, int); | |
f1f41a6c | 147 | static void free_history_vect (vec<expr_history_def> &); |
e1ab7874 | 148 | |
149 | static void move_bb_info (basic_block, basic_block); | |
150 | static void remove_empty_bb (basic_block, bool); | |
0424f393 | 151 | static void sel_merge_blocks (basic_block, basic_block); |
e1ab7874 | 152 | static void sel_remove_loop_preheader (void); |
49087fba | 153 | static bool bb_has_removable_jump_to_p (basic_block, basic_block); |
e1ab7874 | 154 | |
155 | static bool insn_is_the_only_one_in_bb_p (insn_t); | |
156 | static void create_initial_data_sets (basic_block); | |
157 | ||
9845d120 | 158 | static void free_av_set (basic_block); |
e1ab7874 | 159 | static void invalidate_av_set (basic_block); |
160 | static void extend_insn_data (void); | |
8d1881f5 | 161 | static void sel_init_new_insn (insn_t, int, int = -1); |
e1ab7874 | 162 | static void finish_insns (void); |
163 | \f | |
164 | /* Various list functions. */ | |
165 | ||
166 | /* Copy an instruction list L. */ | |
167 | ilist_t | |
168 | ilist_copy (ilist_t l) | |
169 | { | |
170 | ilist_t head = NULL, *tailp = &head; | |
171 | ||
172 | while (l) | |
173 | { | |
174 | ilist_add (tailp, ILIST_INSN (l)); | |
175 | tailp = &ILIST_NEXT (*tailp); | |
176 | l = ILIST_NEXT (l); | |
177 | } | |
178 | ||
179 | return head; | |
180 | } | |
181 | ||
182 | /* Invert an instruction list L. */ | |
183 | ilist_t | |
184 | ilist_invert (ilist_t l) | |
185 | { | |
186 | ilist_t res = NULL; | |
187 | ||
188 | while (l) | |
189 | { | |
190 | ilist_add (&res, ILIST_INSN (l)); | |
191 | l = ILIST_NEXT (l); | |
192 | } | |
193 | ||
194 | return res; | |
195 | } | |
196 | ||
197 | /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */ | |
198 | void | |
199 | blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc) | |
200 | { | |
201 | bnd_t bnd; | |
202 | ||
203 | _list_add (lp); | |
204 | bnd = BLIST_BND (*lp); | |
205 | ||
2f3c9801 | 206 | BND_TO (bnd) = to; |
e1ab7874 | 207 | BND_PTR (bnd) = ptr; |
208 | BND_AV (bnd) = NULL; | |
209 | BND_AV1 (bnd) = NULL; | |
210 | BND_DC (bnd) = dc; | |
211 | } | |
212 | ||
213 | /* Remove the list node pointed to by LP. */ | |
214 | void | |
215 | blist_remove (blist_t *lp) | |
216 | { | |
217 | bnd_t b = BLIST_BND (*lp); | |
218 | ||
219 | av_set_clear (&BND_AV (b)); | |
220 | av_set_clear (&BND_AV1 (b)); | |
221 | ilist_clear (&BND_PTR (b)); | |
222 | ||
223 | _list_remove (lp); | |
224 | } | |
225 | ||
226 | /* Init a fence tail L. */ | |
227 | void | |
228 | flist_tail_init (flist_tail_t l) | |
229 | { | |
230 | FLIST_TAIL_HEAD (l) = NULL; | |
231 | FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l); | |
232 | } | |
233 | ||
234 | /* Try to find fence corresponding to INSN in L. */ | |
235 | fence_t | |
236 | flist_lookup (flist_t l, insn_t insn) | |
237 | { | |
238 | while (l) | |
239 | { | |
240 | if (FENCE_INSN (FLIST_FENCE (l)) == insn) | |
241 | return FLIST_FENCE (l); | |
242 | ||
243 | l = FLIST_NEXT (l); | |
244 | } | |
245 | ||
246 | return NULL; | |
247 | } | |
248 | ||
249 | /* Init the fields of F before running fill_insns. */ | |
250 | static void | |
251 | init_fence_for_scheduling (fence_t f) | |
252 | { | |
253 | FENCE_BNDS (f) = NULL; | |
254 | FENCE_PROCESSED_P (f) = false; | |
255 | FENCE_SCHEDULED_P (f) = false; | |
256 | } | |
257 | ||
258 | /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */ | |
259 | static void | |
48e1416a | 260 | flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc, |
2f3c9801 | 261 | insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns, |
48e1416a | 262 | int *ready_ticks, int ready_ticks_size, insn_t sched_next, |
abb9c563 | 263 | int cycle, int cycle_issued_insns, int issue_more, |
e1ab7874 | 264 | bool starts_cycle_p, bool after_stall_p) |
265 | { | |
266 | fence_t f; | |
267 | ||
268 | _list_add (lp); | |
269 | f = FLIST_FENCE (*lp); | |
270 | ||
271 | FENCE_INSN (f) = insn; | |
272 | ||
273 | gcc_assert (state != NULL); | |
274 | FENCE_STATE (f) = state; | |
275 | ||
276 | FENCE_CYCLE (f) = cycle; | |
277 | FENCE_ISSUED_INSNS (f) = cycle_issued_insns; | |
278 | FENCE_STARTS_CYCLE_P (f) = starts_cycle_p; | |
279 | FENCE_AFTER_STALL_P (f) = after_stall_p; | |
280 | ||
281 | gcc_assert (dc != NULL); | |
282 | FENCE_DC (f) = dc; | |
283 | ||
284 | gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL); | |
285 | FENCE_TC (f) = tc; | |
286 | ||
287 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; | |
abb9c563 | 288 | FENCE_ISSUE_MORE (f) = issue_more; |
e1ab7874 | 289 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
290 | FENCE_READY_TICKS (f) = ready_ticks; | |
291 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; | |
292 | FENCE_SCHED_NEXT (f) = sched_next; | |
293 | ||
294 | init_fence_for_scheduling (f); | |
295 | } | |
296 | ||
297 | /* Remove the head node of the list pointed to by LP. */ | |
298 | static void | |
299 | flist_remove (flist_t *lp) | |
300 | { | |
301 | if (FENCE_INSN (FLIST_FENCE (*lp))) | |
302 | fence_clear (FLIST_FENCE (*lp)); | |
303 | _list_remove (lp); | |
304 | } | |
305 | ||
306 | /* Clear the fence list pointed to by LP. */ | |
307 | void | |
308 | flist_clear (flist_t *lp) | |
309 | { | |
310 | while (*lp) | |
311 | flist_remove (lp); | |
312 | } | |
313 | ||
314 | /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */ | |
315 | void | |
316 | def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call) | |
317 | { | |
318 | def_t d; | |
48e1416a | 319 | |
e1ab7874 | 320 | _list_add (dl); |
321 | d = DEF_LIST_DEF (*dl); | |
322 | ||
323 | d->orig_insn = original_insn; | |
324 | d->crosses_call = crosses_call; | |
325 | } | |
326 | \f | |
327 | ||
328 | /* Functions to work with target contexts. */ | |
329 | ||
48e1416a | 330 | /* Bulk target context. It is convenient for debugging purposes to ensure |
e1ab7874 | 331 | that there are no uninitialized (null) target contexts. */ |
332 | static tc_t bulk_tc = (tc_t) 1; | |
333 | ||
48e1416a | 334 | /* Target hooks wrappers. In the future we can provide some default |
e1ab7874 | 335 | implementations for them. */ |
336 | ||
337 | /* Allocate a store for the target context. */ | |
338 | static tc_t | |
339 | alloc_target_context (void) | |
340 | { | |
341 | return (targetm.sched.alloc_sched_context | |
342 | ? targetm.sched.alloc_sched_context () : bulk_tc); | |
343 | } | |
344 | ||
345 | /* Init target context TC. | |
346 | If CLEAN_P is true, then make TC as if it were at the beginning of | |
347 | scheduling. Otherwise, copy the current backend context to TC. */ | |
348 | static void | |
349 | init_target_context (tc_t tc, bool clean_p) | |
350 | { | |
351 | if (targetm.sched.init_sched_context) | |
352 | targetm.sched.init_sched_context (tc, clean_p); | |
353 | } | |
354 | ||
355 | /* Allocate and initialize a target context. Meaning of CLEAN_P is the same | |
356 | as in init_target_context (). */ | |
357 | tc_t | |
358 | create_target_context (bool clean_p) | |
359 | { | |
360 | tc_t tc = alloc_target_context (); | |
361 | ||
362 | init_target_context (tc, clean_p); | |
363 | return tc; | |
364 | } | |
365 | ||
366 | /* Copy TC to the current backend context. */ | |
367 | void | |
368 | set_target_context (tc_t tc) | |
369 | { | |
370 | if (targetm.sched.set_sched_context) | |
371 | targetm.sched.set_sched_context (tc); | |
372 | } | |
373 | ||
374 | /* TC is about to be destroyed. Free any internal data. */ | |
375 | static void | |
376 | clear_target_context (tc_t tc) | |
377 | { | |
378 | if (targetm.sched.clear_sched_context) | |
379 | targetm.sched.clear_sched_context (tc); | |
380 | } | |
381 | ||
382 | /* Clear and free it. */ | |
383 | static void | |
384 | delete_target_context (tc_t tc) | |
385 | { | |
386 | clear_target_context (tc); | |
387 | ||
388 | if (targetm.sched.free_sched_context) | |
389 | targetm.sched.free_sched_context (tc); | |
390 | } | |
391 | ||
392 | /* Make a copy of FROM in TO. | |
393 | NB: Maybe this should be a hook. */ | |
394 | static void | |
395 | copy_target_context (tc_t to, tc_t from) | |
396 | { | |
397 | tc_t tmp = create_target_context (false); | |
398 | ||
399 | set_target_context (from); | |
400 | init_target_context (to, false); | |
401 | ||
402 | set_target_context (tmp); | |
403 | delete_target_context (tmp); | |
404 | } | |
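The round-about copy above exists because the backend only exposes "init a context from the current one" and "make a context current"; to copy FROM into TO one must temporarily install FROM, snapshot the now-current state into TO, and then restore whatever was live before via a temporary copy. A self-contained sketch of the same save/restore trick, with invented hook names standing in for the target hooks:

```c
#include <stdlib.h>

/* Invented stand-ins for the target hooks: the "backend" has one live state;
   a context can only be filled from it or installed into it.  */
static int current_state;                   /* the backend's live state */
typedef struct { int saved; } ctx_t;

static ctx_t *ctx_alloc (void) { return (ctx_t *) malloc (sizeof (ctx_t)); }
static void ctx_init (ctx_t *c) { c->saved = current_state; }      /* C := current */
static void ctx_set (const ctx_t *c) { current_state = c->saved; } /* current := C */

/* Copy FROM into TO using only init-from-current and set-current:
   save the live state, install FROM, snapshot it into TO, restore.  */
static void
ctx_copy (ctx_t *to, const ctx_t *from)
{
  ctx_t *tmp = ctx_alloc ();
  ctx_init (tmp);   /* tmp := current (save)     */
  ctx_set (from);   /* current := from           */
  ctx_init (to);    /* to := current (== from)   */
  ctx_set (tmp);    /* current := tmp (restore)  */
  free (tmp);
}

int
main (void)
{
  ctx_t a = { 42 }, b = { 0 };
  current_state = 7;
  ctx_copy (&b, &a);
  /* b now holds 42 and the live state is back to 7.  */
  return !(b.saved == 42 && current_state == 7);
}
```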
405 | ||
406 | /* Create a copy of TC. */ | |
407 | static tc_t | |
408 | create_copy_of_target_context (tc_t tc) | |
409 | { | |
410 | tc_t copy = alloc_target_context (); | |
411 | ||
412 | copy_target_context (copy, tc); | |
413 | ||
414 | return copy; | |
415 | } | |
416 | ||
417 | /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P | |
418 | is the same as in init_target_context (). */ | |
419 | void | |
420 | reset_target_context (tc_t tc, bool clean_p) | |
421 | { | |
422 | clear_target_context (tc); | |
423 | init_target_context (tc, clean_p); | |
424 | } | |
425 | \f | |
48e1416a | 426 | /* Functions to work with dependence contexts. |
2e966e2a | 427 | DC (aka deps context, aka deps_t, aka class deps_desc *) is short for |
e1ab7874 | 428 | dependence context. It accumulates information about processed insns to |
429 | decide whether the current insn depends on the processed ones. */ | |
430 | ||
431 | /* Make a copy of FROM in TO. */ | |
432 | static void | |
433 | copy_deps_context (deps_t to, deps_t from) | |
434 | { | |
d9ab2038 | 435 | init_deps (to, false); |
e1ab7874 | 436 | deps_join (to, from); |
437 | } | |
438 | ||
439 | /* Allocate store for dep context. */ | |
440 | static deps_t | |
441 | alloc_deps_context (void) | |
442 | { | |
2e966e2a | 443 | return XNEW (class deps_desc); |
e1ab7874 | 444 | } |
445 | ||
446 | /* Allocate and initialize dep context. */ | |
447 | static deps_t | |
448 | create_deps_context (void) | |
449 | { | |
450 | deps_t dc = alloc_deps_context (); | |
451 | ||
d9ab2038 | 452 | init_deps (dc, false); |
e1ab7874 | 453 | return dc; |
454 | } | |
455 | ||
456 | /* Create a copy of FROM. */ | |
457 | static deps_t | |
458 | create_copy_of_deps_context (deps_t from) | |
459 | { | |
460 | deps_t to = alloc_deps_context (); | |
461 | ||
462 | copy_deps_context (to, from); | |
463 | return to; | |
464 | } | |
465 | ||
466 | /* Clean up internal data of DC. */ | |
467 | static void | |
468 | clear_deps_context (deps_t dc) | |
469 | { | |
470 | free_deps (dc); | |
471 | } | |
472 | ||
473 | /* Clear and free DC. */ | |
474 | static void | |
475 | delete_deps_context (deps_t dc) | |
476 | { | |
477 | clear_deps_context (dc); | |
478 | free (dc); | |
479 | } | |
480 | ||
481 | /* Clear and init DC. */ | |
482 | static void | |
483 | reset_deps_context (deps_t dc) | |
484 | { | |
485 | clear_deps_context (dc); | |
d9ab2038 | 486 | init_deps (dc, false); |
e1ab7874 | 487 | } |
488 | ||
48e1416a | 489 | /* This structure describes the dependence analysis hooks for advancing |
e1ab7874 | 490 | dependence context. */ |
491 | static struct sched_deps_info_def advance_deps_context_sched_deps_info = | |
492 | { | |
493 | NULL, | |
494 | ||
495 | NULL, /* start_insn */ | |
496 | NULL, /* finish_insn */ | |
497 | NULL, /* start_lhs */ | |
498 | NULL, /* finish_lhs */ | |
499 | NULL, /* start_rhs */ | |
500 | NULL, /* finish_rhs */ | |
501 | haifa_note_reg_set, | |
502 | haifa_note_reg_clobber, | |
503 | haifa_note_reg_use, | |
504 | NULL, /* note_mem_dep */ | |
505 | NULL, /* note_dep */ | |
506 | ||
507 | 0, 0, 0 | |
508 | }; | |
509 | ||
510 | /* Process INSN and add its impact on DC. */ | |
511 | void | |
512 | advance_deps_context (deps_t dc, insn_t insn) | |
513 | { | |
514 | sched_deps_info = &advance_deps_context_sched_deps_info; | |
2f3c9801 | 515 | deps_analyze_insn (dc, insn); |
e1ab7874 | 516 | } |
517 | \f | |
518 | ||
519 | /* Functions to work with DFA states. */ | |
520 | ||
521 | /* Allocate store for a DFA state. */ | |
522 | static state_t | |
523 | state_alloc (void) | |
524 | { | |
525 | return xmalloc (dfa_state_size); | |
526 | } | |
527 | ||
528 | /* Allocate and initialize DFA state. */ | |
529 | static state_t | |
530 | state_create (void) | |
531 | { | |
532 | state_t state = state_alloc (); | |
533 | ||
534 | state_reset (state); | |
535 | advance_state (state); | |
536 | return state; | |
537 | } | |
538 | ||
539 | /* Free DFA state. */ | |
540 | static void | |
541 | state_free (state_t state) | |
542 | { | |
543 | free (state); | |
544 | } | |
545 | ||
546 | /* Make a copy of FROM in TO. */ | |
547 | static void | |
548 | state_copy (state_t to, state_t from) | |
549 | { | |
550 | memcpy (to, from, dfa_state_size); | |
551 | } | |
552 | ||
553 | /* Create a copy of FROM. */ | |
554 | static state_t | |
555 | state_create_copy (state_t from) | |
556 | { | |
557 | state_t to = state_alloc (); | |
558 | ||
559 | state_copy (to, from); | |
560 | return to; | |
561 | } | |
562 | \f | |
563 | ||
564 | /* Functions to work with fences. */ | |
565 | ||
566 | /* Clear the fence. */ | |
567 | static void | |
568 | fence_clear (fence_t f) | |
569 | { | |
570 | state_t s = FENCE_STATE (f); | |
571 | deps_t dc = FENCE_DC (f); | |
572 | void *tc = FENCE_TC (f); | |
573 | ||
574 | ilist_clear (&FENCE_BNDS (f)); | |
575 | ||
576 | gcc_assert ((s != NULL && dc != NULL && tc != NULL) | |
577 | || (s == NULL && dc == NULL && tc == NULL)); | |
578 | ||
dd045aee | 579 | free (s); |
e1ab7874 | 580 | |
581 | if (dc != NULL) | |
582 | delete_deps_context (dc); | |
583 | ||
584 | if (tc != NULL) | |
585 | delete_target_context (tc); | |
f1f41a6c | 586 | vec_free (FENCE_EXECUTING_INSNS (f)); |
e1ab7874 | 587 | free (FENCE_READY_TICKS (f)); |
588 | FENCE_READY_TICKS (f) = NULL; | |
589 | } | |
590 | ||
591 | /* Init a list of fences with successors of OLD_FENCE. */ | |
592 | void | |
593 | init_fences (insn_t old_fence) | |
594 | { | |
595 | insn_t succ; | |
596 | succ_iterator si; | |
597 | bool first = true; | |
598 | int ready_ticks_size = get_max_uid () + 1; | |
48e1416a | 599 | |
600 | FOR_EACH_SUCC_1 (succ, si, old_fence, | |
e1ab7874 | 601 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
602 | { | |
48e1416a | 603 | |
e1ab7874 | 604 | if (first) |
605 | first = false; | |
606 | else | |
607 | gcc_assert (flag_sel_sched_pipelining_outer_loops); | |
608 | ||
609 | flist_add (&fences, succ, | |
610 | state_create (), | |
611 | create_deps_context () /* dc */, | |
612 | create_target_context (true) /* tc */, | |
2f3c9801 | 613 | NULL /* last_scheduled_insn */, |
e1ab7874 | 614 | NULL, /* executing_insns */ |
615 | XCNEWVEC (int, ready_ticks_size), /* ready_ticks */ | |
616 | ready_ticks_size, | |
2f3c9801 | 617 | NULL /* sched_next */, |
48e1416a | 618 | 1 /* cycle */, 0 /* cycle_issued_insns */, |
abb9c563 | 619 | issue_rate, /* issue_more */ |
48e1416a | 620 | 1 /* starts_cycle_p */, 0 /* after_stall_p */); |
e1ab7874 | 621 | } |
622 | } | |
623 | ||
624 | /* Merge two fences (filling fields of fence F with resulting values) by the | |
625 | following rules: 1) state, target context and last scheduled insn are | |
48e1416a | 626 | propagated from the fallthrough edge if it is available; |
e1ab7874 | 627 | 2) deps context and cycle are propagated from the more probable edge; |
48e1416a | 628 | 3) all other fields are set to the corresponding constant values. |
e1ab7874 | 629 | |
48e1416a | 630 | INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS, |
abb9c563 | 631 | READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE |
632 | and AFTER_STALL_P are the corresponding fields of the second fence. */ | |
e1ab7874 | 633 | static void |
634 | merge_fences (fence_t f, insn_t insn, | |
48e1416a | 635 | state_t state, deps_t dc, void *tc, |
2f3c9801 | 636 | rtx_insn *last_scheduled_insn, |
637 | vec<rtx_insn *, va_gc> *executing_insns, | |
e1ab7874 | 638 | int *ready_ticks, int ready_ticks_size, |
abb9c563 | 639 | rtx sched_next, int cycle, int issue_more, bool after_stall_p) |
e1ab7874 | 640 | { |
641 | insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f); | |
642 | ||
643 | gcc_assert (sel_bb_head_p (FENCE_INSN (f)) | |
644 | && !sched_next && !FENCE_SCHED_NEXT (f)); | |
645 | ||
48e1416a | 646 | /* Check if we can decide along which path the fences came. |
e1ab7874 | 647 | If we can't (or don't want to), reset everything. */ |
648 | if (last_scheduled_insn == NULL | |
649 | || last_scheduled_insn_old == NULL | |
48e1416a | 650 | /* This is the case when INSN is reachable on several paths from |
651 | one insn (this can happen when pipelining of outer loops is on and | |
652 | there are two edges: one going around the inner loop and the other | |
e1ab7874 | 653 | going right through it; in such a case just reset everything). */ |
654 | || last_scheduled_insn == last_scheduled_insn_old) | |
655 | { | |
656 | state_reset (FENCE_STATE (f)); | |
657 | state_free (state); | |
48e1416a | 658 | |
e1ab7874 | 659 | reset_deps_context (FENCE_DC (f)); |
660 | delete_deps_context (dc); | |
48e1416a | 661 | |
e1ab7874 | 662 | reset_target_context (FENCE_TC (f), true); |
663 | delete_target_context (tc); | |
664 | ||
665 | if (cycle > FENCE_CYCLE (f)) | |
666 | FENCE_CYCLE (f) = cycle; | |
667 | ||
668 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; | |
abb9c563 | 669 | FENCE_ISSUE_MORE (f) = issue_rate; |
f1f41a6c | 670 | vec_free (executing_insns); |
e1ab7874 | 671 | free (ready_ticks); |
672 | if (FENCE_EXECUTING_INSNS (f)) | |
f1f41a6c | 673 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
674 | FENCE_EXECUTING_INSNS (f)->length ()); | |
e1ab7874 | 675 | if (FENCE_READY_TICKS (f)) |
676 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); | |
677 | } | |
678 | else | |
679 | { | |
680 | edge edge_old = NULL, edge_new = NULL; | |
681 | edge candidate; | |
682 | succ_iterator si; | |
683 | insn_t succ; | |
48e1416a | 684 | |
e1ab7874 | 685 | /* Find fallthrough edge. */ |
686 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb); | |
7f58c05e | 687 | candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb); |
e1ab7874 | 688 | |
689 | if (!candidate | |
690 | || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn) | |
691 | && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old))) | |
692 | { | |
693 | /* No fallthrough edge leading to basic block of INSN. */ | |
694 | state_reset (FENCE_STATE (f)); | |
695 | state_free (state); | |
48e1416a | 696 | |
e1ab7874 | 697 | reset_target_context (FENCE_TC (f), true); |
698 | delete_target_context (tc); | |
48e1416a | 699 | |
e1ab7874 | 700 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
abb9c563 | 701 | FENCE_ISSUE_MORE (f) = issue_rate; |
e1ab7874 | 702 | } |
703 | else | |
704 | if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn)) | |
705 | { | |
e1ab7874 | 706 | state_free (FENCE_STATE (f)); |
707 | FENCE_STATE (f) = state; | |
708 | ||
709 | delete_target_context (FENCE_TC (f)); | |
710 | FENCE_TC (f) = tc; | |
711 | ||
712 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; | |
abb9c563 | 713 | FENCE_ISSUE_MORE (f) = issue_more; |
e1ab7874 | 714 | } |
715 | else | |
716 | { | |
717 | /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */ | |
718 | state_free (state); | |
719 | delete_target_context (tc); | |
720 | ||
721 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb | |
722 | != BLOCK_FOR_INSN (last_scheduled_insn)); | |
723 | } | |
724 | ||
725 | /* Find edge of first predecessor (last_scheduled_insn_old->insn). */ | |
726 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old, | |
727 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
728 | { | |
729 | if (succ == insn) | |
730 | { | |
731 | /* No same successor allowed from several edges. */ | |
732 | gcc_assert (!edge_old); | |
733 | edge_old = si.e1; | |
734 | } | |
735 | } | |
736 | /* Find edge of second predecessor (last_scheduled_insn->insn). */ | |
737 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn, | |
738 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
739 | { | |
740 | if (succ == insn) | |
741 | { | |
742 | /* No same successor allowed from several edges. */ | |
743 | gcc_assert (!edge_new); | |
744 | edge_new = si.e1; | |
745 | } | |
746 | } | |
747 | ||
748 | /* Check if we can choose most probable predecessor. */ | |
749 | if (edge_old == NULL || edge_new == NULL) | |
750 | { | |
751 | reset_deps_context (FENCE_DC (f)); | |
752 | delete_deps_context (dc); | |
f1f41a6c | 753 | vec_free (executing_insns); |
e1ab7874 | 754 | free (ready_ticks); |
48e1416a | 755 | |
e1ab7874 | 756 | FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle); |
757 | if (FENCE_EXECUTING_INSNS (f)) | |
f1f41a6c | 758 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
759 | FENCE_EXECUTING_INSNS (f)->length ()); | |
e1ab7874 | 760 | if (FENCE_READY_TICKS (f)) |
761 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); | |
762 | } | |
763 | else | |
764 | if (edge_new->probability > edge_old->probability) | |
765 | { | |
766 | delete_deps_context (FENCE_DC (f)); | |
767 | FENCE_DC (f) = dc; | |
f1f41a6c | 768 | vec_free (FENCE_EXECUTING_INSNS (f)); |
e1ab7874 | 769 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
770 | free (FENCE_READY_TICKS (f)); | |
771 | FENCE_READY_TICKS (f) = ready_ticks; | |
772 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; | |
773 | FENCE_CYCLE (f) = cycle; | |
774 | } | |
775 | else | |
776 | { | |
777 | /* Leave DC and CYCLE untouched. */ | |
778 | delete_deps_context (dc); | |
f1f41a6c | 779 | vec_free (executing_insns); |
e1ab7874 | 780 | free (ready_ticks); |
781 | } | |
782 | } | |
783 | ||
784 | /* Fill remaining invariant fields. */ | |
785 | if (after_stall_p) | |
786 | FENCE_AFTER_STALL_P (f) = 1; | |
787 | ||
788 | FENCE_ISSUED_INSNS (f) = 0; | |
789 | FENCE_STARTS_CYCLE_P (f) = 1; | |
790 | FENCE_SCHED_NEXT (f) = NULL; | |
791 | } | |
792 | ||
48e1416a | 793 | /* Add a new fence to NEW_FENCES list, initializing it from all |
e1ab7874 | 794 | other parameters. */ |
795 | static void | |
796 | add_to_fences (flist_tail_t new_fences, insn_t insn, | |
2f3c9801 | 797 | state_t state, deps_t dc, void *tc, |
798 | rtx_insn *last_scheduled_insn, | |
799 | vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks, | |
800 | int ready_ticks_size, rtx_insn *sched_next, int cycle, | |
abb9c563 | 801 | int cycle_issued_insns, int issue_rate, |
802 | bool starts_cycle_p, bool after_stall_p) | |
e1ab7874 | 803 | { |
804 | fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn); | |
805 | ||
806 | if (! f) | |
807 | { | |
808 | flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc, | |
48e1416a | 809 | last_scheduled_insn, executing_insns, ready_ticks, |
e1ab7874 | 810 | ready_ticks_size, sched_next, cycle, cycle_issued_insns, |
abb9c563 | 811 | issue_rate, starts_cycle_p, after_stall_p); |
e1ab7874 | 812 | |
813 | FLIST_TAIL_TAILP (new_fences) | |
814 | = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences)); | |
815 | } | |
816 | else | |
817 | { | |
48e1416a | 818 | merge_fences (f, insn, state, dc, tc, last_scheduled_insn, |
819 | executing_insns, ready_ticks, ready_ticks_size, | |
abb9c563 | 820 | sched_next, cycle, issue_rate, after_stall_p); |
e1ab7874 | 821 | } |
822 | } | |
823 | ||
824 | /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */ | |
825 | void | |
826 | move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences) | |
827 | { | |
828 | fence_t f, old; | |
829 | flist_t *tailp = FLIST_TAIL_TAILP (new_fences); | |
830 | ||
831 | old = FLIST_FENCE (old_fences); | |
48e1416a | 832 | f = flist_lookup (FLIST_TAIL_HEAD (new_fences), |
e1ab7874 | 833 | FENCE_INSN (FLIST_FENCE (old_fences))); |
834 | if (f) | |
835 | { | |
836 | merge_fences (f, old->insn, old->state, old->dc, old->tc, | |
837 | old->last_scheduled_insn, old->executing_insns, | |
838 | old->ready_ticks, old->ready_ticks_size, | |
abb9c563 | 839 | old->sched_next, old->cycle, old->issue_more, |
e1ab7874 | 840 | old->after_stall_p); |
841 | } | |
842 | else | |
843 | { | |
844 | _list_add (tailp); | |
845 | FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp); | |
846 | *FLIST_FENCE (*tailp) = *old; | |
847 | init_fence_for_scheduling (FLIST_FENCE (*tailp)); | |
848 | } | |
849 | FENCE_INSN (old) = NULL; | |
850 | } | |
851 | ||
48e1416a | 852 | /* Add a new fence to NEW_FENCES list and initialize most of its data |
e1ab7874 | 853 | as a clean one. */ |
854 | void | |
855 | add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) | |
856 | { | |
857 | int ready_ticks_size = get_max_uid () + 1; | |
48e1416a | 858 | |
e1ab7874 | 859 | add_to_fences (new_fences, |
860 | succ, state_create (), create_deps_context (), | |
861 | create_target_context (true), | |
2f3c9801 | 862 | NULL, NULL, |
e1ab7874 | 863 | XCNEWVEC (int, ready_ticks_size), ready_ticks_size, |
2f3c9801 | 864 | NULL, FENCE_CYCLE (fence) + 1, |
abb9c563 | 865 | 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence)); |
e1ab7874 | 866 | } |
867 | ||
48e1416a | 868 | /* Add a new fence to NEW_FENCES list and initialize all of its data |
e1ab7874 | 869 | from FENCE and SUCC. */ |
870 | void | |
871 | add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) | |
872 | { | |
48e1416a | 873 | int * new_ready_ticks |
e1ab7874 | 874 | = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence)); |
48e1416a | 875 | |
e1ab7874 | 876 | memcpy (new_ready_ticks, FENCE_READY_TICKS (fence), |
877 | FENCE_READY_TICKS_SIZE (fence) * sizeof (int)); | |
878 | add_to_fences (new_fences, | |
879 | succ, state_create_copy (FENCE_STATE (fence)), | |
880 | create_copy_of_deps_context (FENCE_DC (fence)), | |
881 | create_copy_of_target_context (FENCE_TC (fence)), | |
48e1416a | 882 | FENCE_LAST_SCHEDULED_INSN (fence), |
f1f41a6c | 883 | vec_safe_copy (FENCE_EXECUTING_INSNS (fence)), |
e1ab7874 | 884 | new_ready_ticks, |
885 | FENCE_READY_TICKS_SIZE (fence), | |
886 | FENCE_SCHED_NEXT (fence), | |
887 | FENCE_CYCLE (fence), | |
888 | FENCE_ISSUED_INSNS (fence), | |
abb9c563 | 889 | FENCE_ISSUE_MORE (fence), |
e1ab7874 | 890 | FENCE_STARTS_CYCLE_P (fence), |
891 | FENCE_AFTER_STALL_P (fence)); | |
892 | } | |
893 | \f | |
894 | ||
895 | /* Functions to work with regset and nop pools. */ | |
896 | ||
897 | /* Return a new regset from the pool. It might have some of the bits set | |
898 | from previous usage. */ | |
899 | regset | |
900 | get_regset_from_pool (void) | |
901 | { | |
902 | regset rs; | |
903 | ||
904 | if (regset_pool.n != 0) | |
905 | rs = regset_pool.v[--regset_pool.n]; | |
906 | else | |
907 | /* We need to create the regset. */ | |
908 | { | |
909 | rs = ALLOC_REG_SET (®_obstack); | |
910 | ||
911 | if (regset_pool.nn == regset_pool.ss) | |
912 | regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv, | |
913 | (regset_pool.ss = 2 * regset_pool.ss + 1)); | |
914 | regset_pool.vv[regset_pool.nn++] = rs; | |
915 | } | |
916 | ||
917 | regset_pool.diff++; | |
918 | ||
919 | return rs; | |
920 | } | |
921 | ||
922 | /* Same as above, but returns an empty regset. */ | |
923 | regset | |
924 | get_clear_regset_from_pool (void) | |
925 | { | |
926 | regset rs = get_regset_from_pool (); | |
927 | ||
928 | CLEAR_REG_SET (rs); | |
929 | return rs; | |
930 | } | |
931 | ||
932 | /* Return regset RS to the pool for future use. */ | |
933 | void | |
934 | return_regset_to_pool (regset rs) | |
935 | { | |
bc9cb5ed | 936 | gcc_assert (rs); |
e1ab7874 | 937 | regset_pool.diff--; |
938 | ||
939 | if (regset_pool.n == regset_pool.s) | |
940 | regset_pool.v = XRESIZEVEC (regset, regset_pool.v, | |
941 | (regset_pool.s = 2 * regset_pool.s + 1)); | |
942 | regset_pool.v[regset_pool.n++] = rs; | |
943 | } | |
944 | ||
945 | /* This is used as a qsort callback for sorting regset pool stacks. | |
946 | X and XX are addresses of two regsets. They are never equal. */ | |
947 | static int | |
948 | cmp_v_in_regset_pool (const void *x, const void *xx) | |
949 | { | |
c72f63ac | 950 | uintptr_t r1 = (uintptr_t) *((const regset *) x); |
951 | uintptr_t r2 = (uintptr_t) *((const regset *) xx); | |
952 | if (r1 > r2) | |
953 | return 1; | |
954 | else if (r1 < r2) | |
955 | return -1; | |
956 | gcc_unreachable (); | |
e1ab7874 | 957 | } |
958 | ||
382ecba7 | 959 | /* Free the regset pool possibly checking for memory leaks. */ |
e1ab7874 | 960 | void |
961 | free_regset_pool (void) | |
962 | { | |
382ecba7 | 963 | if (flag_checking) |
964 | { | |
965 | regset *v = regset_pool.v; | |
966 | int i = 0; | |
967 | int n = regset_pool.n; | |
48e1416a | 968 | |
382ecba7 | 969 | regset *vv = regset_pool.vv; |
970 | int ii = 0; | |
971 | int nn = regset_pool.nn; | |
48e1416a | 972 | |
382ecba7 | 973 | int diff = 0; |
48e1416a | 974 | |
382ecba7 | 975 | gcc_assert (n <= nn); |
48e1416a | 976 | |
382ecba7 | 977 | /* Sort both vectors so it will be possible to compare them. */ |
978 | qsort (v, n, sizeof (*v), cmp_v_in_regset_pool); | |
979 | qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool); | |
48e1416a | 980 | |
382ecba7 | 981 | while (ii < nn) |
982 | { | |
983 | if (v[i] == vv[ii]) | |
984 | i++; | |
985 | else | |
986 | /* VV[II] was lost. */ | |
987 | diff++; | |
48e1416a | 988 | |
382ecba7 | 989 | ii++; |
990 | } | |
48e1416a | 991 | |
382ecba7 | 992 | gcc_assert (diff == regset_pool.diff); |
993 | } | |
48e1416a | 994 | |
e1ab7874 | 995 | /* If not true - we have a memory leak. */ |
996 | gcc_assert (regset_pool.diff == 0); | |
48e1416a | 997 | |
e1ab7874 | 998 | while (regset_pool.n) |
999 | { | |
1000 | --regset_pool.n; | |
1001 | FREE_REG_SET (regset_pool.v[regset_pool.n]); | |
1002 | } | |
1003 | ||
1004 | free (regset_pool.v); | |
1005 | regset_pool.v = NULL; | |
1006 | regset_pool.s = 0; | |
48e1416a | 1007 | |
e1ab7874 | 1008 | free (regset_pool.vv); |
1009 | regset_pool.vv = NULL; | |
1010 | regset_pool.nn = 0; | |
1011 | regset_pool.ss = 0; | |
1012 | ||
1013 | regset_pool.diff = 0; | |
1014 | } | |
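The checking walk in free_regset_pool is a generic leak check: one array records every object ever created, another records the objects currently returned; sorting both by address lets a single merge-style pass count the entries that never came back, and that count must match the running DIFF counter. A standalone sketch of just that pass (hypothetical names, plain pointers instead of regsets):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* qsort callback: order pointers by address.  */
static int
cmp_ptr (const void *x, const void *y)
{
  uintptr_t a = (uintptr_t) *(void *const *) x;
  uintptr_t b = (uintptr_t) *(void *const *) y;
  return (a > b) - (a < b);
}

/* Count how many of the NN objects in EVER (everything ever created) are
   missing from the N objects in BACK (everything returned, N <= NN).
   Sorting both arrays lets a single linear walk do the comparison.  */
static int
count_leaks (void **back, int n, void **ever, int nn)
{
  int i = 0, ii, lost = 0;

  qsort (back, n, sizeof (void *), cmp_ptr);
  qsort (ever, nn, sizeof (void *), cmp_ptr);

  for (ii = 0; ii < nn; ii++)
    if (i < n && back[i] == ever[ii])
      i++;       /* returned: fine */
    else
      lost++;    /* never came back: leaked */

  return lost;
}

int
main (void)
{
  int a, b, c;
  void *ever[] = { &a, &b, &c };   /* three objects were created */
  void *back[] = { &c, &a };       /* only two were returned */
  printf ("%d\n", count_leaks (back, 2, ever, 3));  /* prints 1 */
  return 0;
}
```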
1015 | \f | |
1016 | ||
48e1416a | 1017 | /* Functions to work with nop pools. NOP insns are used as temporary |
1018 | placeholders for the insns being scheduled, to allow correct updating of | |
e1ab7874 | 1019 | the data sets. When the update is finished, the NOPs are deleted. */ |
1020 | ||
1021 | /* A vinsn that is used to represent a nop. This vinsn is shared among all | |
1022 | nops sel-sched generates. */ | |
1023 | static vinsn_t nop_vinsn = NULL; | |
1024 | ||
1025 | /* Emit a nop before INSN, taking it from pool. */ | |
1026 | insn_t | |
1027 | get_nop_from_pool (insn_t insn) | |
1028 | { | |
2f3c9801 | 1029 | rtx nop_pat; |
e1ab7874 | 1030 | insn_t nop; |
1031 | bool old_p = nop_pool.n != 0; | |
1032 | int flags; | |
1033 | ||
1034 | if (old_p) | |
2f3c9801 | 1035 | nop_pat = nop_pool.v[--nop_pool.n]; |
e1ab7874 | 1036 | else |
2f3c9801 | 1037 | nop_pat = nop_pattern; |
e1ab7874 | 1038 | |
2f3c9801 | 1039 | nop = emit_insn_before (nop_pat, insn); |
e1ab7874 | 1040 | |
1041 | if (old_p) | |
1042 | flags = INSN_INIT_TODO_SSID; | |
1043 | else | |
1044 | flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID; | |
1045 | ||
1046 | set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn)); | |
1047 | sel_init_new_insn (nop, flags); | |
1048 | ||
1049 | return nop; | |
1050 | } | |
1051 | ||
1052 | /* Remove NOP from the instruction stream and return it to the pool. */ | |
1053 | void | |
9845d120 | 1054 | return_nop_to_pool (insn_t nop, bool full_tidying) |
e1ab7874 | 1055 | { |
1056 | gcc_assert (INSN_IN_STREAM_P (nop)); | |
9845d120 | 1057 | sel_remove_insn (nop, false, full_tidying); |
e1ab7874 | 1058 | |
93ff53d3 | 1059 | /* We'll recycle this nop. */ |
dd1286fb | 1060 | nop->set_undeleted (); |
93ff53d3 | 1061 | |
e1ab7874 | 1062 | if (nop_pool.n == nop_pool.s) |
2f3c9801 | 1063 | nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v, |
e1ab7874 | 1064 | (nop_pool.s = 2 * nop_pool.s + 1)); |
1065 | nop_pool.v[nop_pool.n++] = nop; | |
1066 | } | |
1067 | ||
1068 | /* Free the nop pool. */ | |
1069 | void | |
1070 | free_nop_pool (void) | |
1071 | { | |
1072 | nop_pool.n = 0; | |
1073 | nop_pool.s = 0; | |
1074 | free (nop_pool.v); | |
1075 | nop_pool.v = NULL; | |
1076 | } | |
1077 | \f | |
1078 | ||
48e1416a | 1079 | /* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb. |
e1ab7874 | 1080 | The callback is given two rtxes XX and YY and writes the new rtxes |
1081 | to NX and NY in case some needs to be skipped. */ | |
1082 | static int | |
1083 | skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny) | |
1084 | { | |
1085 | const_rtx x = *xx; | |
1086 | const_rtx y = *yy; | |
48e1416a | 1087 | |
e1ab7874 | 1088 | if (GET_CODE (x) == UNSPEC |
1089 | && (targetm.sched.skip_rtx_p == NULL | |
1090 | || targetm.sched.skip_rtx_p (x))) | |
1091 | { | |
1092 | *nx = XVECEXP (x, 0, 0); | |
1093 | *ny = CONST_CAST_RTX (y); | |
1094 | return 1; | |
1095 | } | |
48e1416a | 1096 | |
e1ab7874 | 1097 | if (GET_CODE (y) == UNSPEC |
1098 | && (targetm.sched.skip_rtx_p == NULL | |
1099 | || targetm.sched.skip_rtx_p (y))) | |
1100 | { | |
1101 | *nx = CONST_CAST_RTX (x); | |
1102 | *ny = XVECEXP (y, 0, 0); | |
1103 | return 1; | |
1104 | } | |
48e1416a | 1105 | |
e1ab7874 | 1106 | return 0; |
1107 | } | |
1108 | ||
48e1416a | 1109 | /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X correctly |
e1ab7874 | 1110 | to support ia64 speculation. When changes are needed, the new rtx and mode |
1111 | are written to NX and NMODE, and the callback returns 1. */ | |
1112 | static int | |
3754d046 | 1113 | hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED, |
1114 | rtx *nx, machine_mode* nmode) | |
e1ab7874 | 1115 | { |
48e1416a | 1116 | if (GET_CODE (x) == UNSPEC |
e1ab7874 | 1117 | && targetm.sched.skip_rtx_p |
1118 | && targetm.sched.skip_rtx_p (x)) | |
1119 | { | |
1120 | *nx = XVECEXP (x, 0 ,0); | |
8458f4ca | 1121 | *nmode = VOIDmode; |
e1ab7874 | 1122 | return 1; |
1123 | } | |
48e1416a | 1124 | |
e1ab7874 | 1125 | return 0; |
1126 | } | |
1127 | ||
1128 | /* Return true if LHS and RHS are OK to be scheduled separately. */ | |
1129 | static bool | |
1130 | lhs_and_rhs_separable_p (rtx lhs, rtx rhs) | |
1131 | { | |
1132 | if (lhs == NULL || rhs == NULL) | |
1133 | return false; | |
1134 | ||
e913b5cd | 1135 | /* Do not schedule constants as rhs: there is no point in using a reg if |
1136 | a const can be used. Moreover, scheduling a const as rhs may lead to a | |
1137 | mode mismatch, because consts don't have modes, but they could be merged | |
1138 | from branches where the same const is used in different modes. */ | |
e1ab7874 | 1139 | if (CONSTANT_P (rhs)) |
1140 | return false; | |
1141 | ||
1142 | /* ??? Do not rename predicate registers to avoid ICEs in bundling. */ | |
1143 | if (COMPARISON_P (rhs)) | |
1144 | return false; | |
1145 | ||
1146 | /* Do not allow single REG to be an rhs. */ | |
1147 | if (REG_P (rhs)) | |
1148 | return false; | |
1149 | ||
48e1416a | 1150 | /* See comment at find_used_regs_1 (*1) for explanation of this |
e1ab7874 | 1151 | restriction. */ |
1152 | /* FIXME: remove this later. */ | |
1153 | if (MEM_P (lhs)) | |
1154 | return false; | |
1155 | ||
1156 | /* This will filter all tricky things like ZERO_EXTRACT etc. | |
1157 | For now we don't handle it. */ | |
1158 | if (!REG_P (lhs) && !MEM_P (lhs)) | |
1159 | return false; | |
1160 | ||
1161 | return true; | |
1162 | } | |
1163 | ||
48e1416a | 1164 | /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When |
1165 | FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is | |
e1ab7874 | 1166 | used e.g. for insns from recovery blocks. */ |
1167 | static void | |
1168 | vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p) | |
1169 | { | |
1170 | hash_rtx_callback_function hrcf; | |
1171 | int insn_class; | |
1172 | ||
69c5a18c | 1173 | VINSN_INSN_RTX (vi) = insn; |
e1ab7874 | 1174 | VINSN_COUNT (vi) = 0; |
1175 | vi->cost = -1; | |
48e1416a | 1176 | |
bc9cb5ed | 1177 | if (INSN_NOP_P (insn)) |
1178 | return; | |
1179 | ||
e1ab7874 | 1180 | if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL) |
1181 | init_id_from_df (VINSN_ID (vi), insn, force_unique_p); | |
1182 | else | |
1183 | deps_init_id (VINSN_ID (vi), insn, force_unique_p); | |
48e1416a | 1184 | |
e1ab7874 | 1185 | /* Hash vinsn depending on whether it is separable or not. */ |
1186 | hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL; | |
1187 | if (VINSN_SEPARABLE_P (vi)) | |
1188 | { | |
1189 | rtx rhs = VINSN_RHS (vi); | |
1190 | ||
1191 | VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs), | |
1192 | NULL, NULL, false, hrcf); | |
1193 | VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi), | |
1194 | VOIDmode, NULL, NULL, | |
1195 | false, hrcf); | |
1196 | } | |
1197 | else | |
1198 | { | |
1199 | VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode, | |
1200 | NULL, NULL, false, hrcf); | |
1201 | VINSN_HASH_RTX (vi) = VINSN_HASH (vi); | |
1202 | } | |
48e1416a | 1203 | |
e1ab7874 | 1204 | insn_class = haifa_classify_insn (insn); |
1205 | if (insn_class >= 2 | |
1206 | && (!targetm.sched.get_insn_spec_ds | |
1207 | || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL) | |
1208 | == 0))) | |
1209 | VINSN_MAY_TRAP_P (vi) = true; | |
1210 | else | |
1211 | VINSN_MAY_TRAP_P (vi) = false; | |
1212 | } | |
1213 | ||
1214 | /* Indicate that VI has become the part of an rtx object. */ | |
1215 | void | |
1216 | vinsn_attach (vinsn_t vi) | |
1217 | { | |
1218 | /* Assert that VI is not pending for deletion. */ | |
1219 | gcc_assert (VINSN_INSN_RTX (vi)); | |
1220 | ||
1221 | VINSN_COUNT (vi)++; | |
1222 | } | |
1223 | ||
48e1416a | 1224 | /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the |
e1ab7874 | 1225 | correct VINSN_TYPE (VI). */ |
1226 | static vinsn_t | |
1227 | vinsn_create (insn_t insn, bool force_unique_p) | |
1228 | { | |
1229 | vinsn_t vi = XCNEW (struct vinsn_def); | |
1230 | ||
1231 | vinsn_init (vi, insn, force_unique_p); | |
1232 | return vi; | |
1233 | } | |
1234 | ||
1235 | /* Return a copy of VI. When REATTACH_P is true, detach VI and attach | |
1236 | the copy. */ | |
48e1416a | 1237 | vinsn_t |
e1ab7874 | 1238 | vinsn_copy (vinsn_t vi, bool reattach_p) |
1239 | { | |
04d073df | 1240 | rtx_insn *copy; |
e1ab7874 | 1241 | bool unique = VINSN_UNIQUE_P (vi); |
1242 | vinsn_t new_vi; | |
48e1416a | 1243 | |
e1ab7874 | 1244 | copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi)); |
1245 | new_vi = create_vinsn_from_insn_rtx (copy, unique); | |
1246 | if (reattach_p) | |
1247 | { | |
1248 | vinsn_detach (vi); | |
1249 | vinsn_attach (new_vi); | |
1250 | } | |
1251 | ||
1252 | return new_vi; | |
1253 | } | |
1254 | ||
1255 | /* Delete the VI vinsn and free its data. */ | |
1256 | static void | |
1257 | vinsn_delete (vinsn_t vi) | |
1258 | { | |
1259 | gcc_assert (VINSN_COUNT (vi) == 0); | |
1260 | ||
bc9cb5ed | 1261 | if (!INSN_NOP_P (VINSN_INSN_RTX (vi))) |
1262 | { | |
1263 | return_regset_to_pool (VINSN_REG_SETS (vi)); | |
1264 | return_regset_to_pool (VINSN_REG_USES (vi)); | |
1265 | return_regset_to_pool (VINSN_REG_CLOBBERS (vi)); | |
1266 | } | |
e1ab7874 | 1267 | |
1268 | free (vi); | |
1269 | } | |
1270 | ||
48e1416a | 1271 | /* Indicate that VI is no longer a part of some rtx object. |
e1ab7874 | 1272 | Remove VI if it is no longer needed. */ |
1273 | void | |
1274 | vinsn_detach (vinsn_t vi) | |
1275 | { | |
1276 | gcc_assert (VINSN_COUNT (vi) > 0); | |
1277 | ||
1278 | if (--VINSN_COUNT (vi) == 0) | |
1279 | vinsn_delete (vi); | |
1280 | } | |
1281 | ||
1282 | /* Returns TRUE if VI is a branch. */ | |
1283 | bool | |
1284 | vinsn_cond_branch_p (vinsn_t vi) | |
1285 | { | |
1286 | insn_t insn; | |
1287 | ||
1288 | if (!VINSN_UNIQUE_P (vi)) | |
1289 | return false; | |
1290 | ||
1291 | insn = VINSN_INSN_RTX (vi); | |
1292 | if (BB_END (BLOCK_FOR_INSN (insn)) != insn) | |
1293 | return false; | |
1294 | ||
1295 | return control_flow_insn_p (insn); | |
1296 | } | |
1297 | ||
1298 | /* Return latency of INSN. */ | |
1299 | static int | |
ed3e6e5d | 1300 | sel_insn_rtx_cost (rtx_insn *insn) |
e1ab7874 | 1301 | { |
1302 | int cost; | |
1303 | ||
1304 | /* A USE insn, or something else we don't need to | |
1305 | understand. We can't pass these directly to | |
1306 | result_ready_cost or insn_default_latency because it will | |
1307 | trigger a fatal error for unrecognizable insns. */ | |
1308 | if (recog_memoized (insn) < 0) | |
1309 | cost = 0; | |
1310 | else | |
1311 | { | |
1312 | cost = insn_default_latency (insn); | |
1313 | ||
1314 | if (cost < 0) | |
1315 | cost = 0; | |
1316 | } | |
1317 | ||
1318 | return cost; | |
1319 | } | |
1320 | ||
1321 | /* Return the cost of the VI. | |
5e53acc3 | 1322 | !!! FIXME: Unify with haifa-sched.c: insn_sched_cost (). */ |
e1ab7874 | 1323 | int |
1324 | sel_vinsn_cost (vinsn_t vi) | |
1325 | { | |
1326 | int cost = vi->cost; | |
1327 | ||
1328 | if (cost < 0) | |
1329 | { | |
1330 | cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi)); | |
1331 | vi->cost = cost; | |
1332 | } | |
1333 | ||
1334 | return cost; | |
1335 | } | |
1336 | \f | |
1337 | ||
1338 | /* Functions for insn emitting. */ | |
1339 | ||
1340 | /* Emit new insn after AFTER based on PATTERN and initialize its data from | |
1341 | EXPR and SEQNO. */ | |
1342 | insn_t | |
1343 | sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after) | |
1344 | { | |
1345 | insn_t new_insn; | |
1346 | ||
1347 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true); | |
1348 | ||
1349 | new_insn = emit_insn_after (pattern, after); | |
1350 | set_insn_init (expr, NULL, seqno); | |
1351 | sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID); | |
1352 | ||
1353 | return new_insn; | |
1354 | } | |
1355 | ||
1356 | /* Force newly generated vinsns to be unique. */ | |
1357 | static bool init_insn_force_unique_p = false; | |
1358 | ||
1359 | /* Emit new speculation recovery insn after AFTER based on PATTERN and | |
1360 | initialize its data from EXPR and SEQNO. */ | |
1361 | insn_t | |
1362 | sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, | |
1363 | insn_t after) | |
1364 | { | |
1365 | insn_t insn; | |
1366 | ||
1367 | gcc_assert (!init_insn_force_unique_p); | |
1368 | ||
1369 | init_insn_force_unique_p = true; | |
1370 | insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after); | |
1371 | CANT_MOVE (insn) = 1; | |
1372 | init_insn_force_unique_p = false; | |
1373 | ||
1374 | return insn; | |
1375 | } | |
1376 | ||
1377 | /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL, | |
48e1416a | 1378 | take it as a new vinsn instead of EXPR's vinsn. |
1379 | We simplify insns later, after scheduling region in | |
e1ab7874 | 1380 | simplify_changed_insns. */ |
1381 | insn_t | |
48e1416a | 1382 | sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, |
e1ab7874 | 1383 | insn_t after) |
1384 | { | |
1385 | expr_t emit_expr; | |
1386 | insn_t insn; | |
1387 | int flags; | |
48e1416a | 1388 | |
1389 | emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr), | |
e1ab7874 | 1390 | seqno); |
1391 | insn = EXPR_INSN_RTX (emit_expr); | |
2b7454f2 | 1392 | |
1393 | /* The insn may come from the transformation cache, which may hold already | |
1394 | deleted insns, so mark it as not deleted. */ | |
dd1286fb | 1395 | insn->set_undeleted (); |
2b7454f2 | 1396 | |
48e1416a | 1397 | add_insn_after (insn, after, BLOCK_FOR_INSN (insn)); |
e1ab7874 | 1398 | |
1399 | flags = INSN_INIT_TODO_SSID; | |
1400 | if (INSN_LUID (insn) == 0) | |
1401 | flags |= INSN_INIT_TODO_LUID; | |
1402 | sel_init_new_insn (insn, flags); | |
1403 | ||
1404 | return insn; | |
1405 | } | |
1406 | ||
1407 | /* Move insn from EXPR after AFTER. */ | |
1408 | insn_t | |
1409 | sel_move_insn (expr_t expr, int seqno, insn_t after) | |
1410 | { | |
1411 | insn_t insn = EXPR_INSN_RTX (expr); | |
1412 | basic_block bb = BLOCK_FOR_INSN (after); | |
1413 | insn_t next = NEXT_INSN (after); | |
1414 | ||
1415 | /* Assert that in move_op we disconnected this insn properly. */ | |
1416 | gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL); | |
4a57a2e8 | 1417 | SET_PREV_INSN (insn) = after; |
1418 | SET_NEXT_INSN (insn) = next; | |
e1ab7874 | 1419 | |
4a57a2e8 | 1420 | SET_NEXT_INSN (after) = insn; |
1421 | SET_PREV_INSN (next) = insn; | |
e1ab7874 | 1422 | |
1423 | /* Update links from insn to bb and vice versa. */ | |
1424 | df_insn_change_bb (insn, bb); | |
1425 | if (BB_END (bb) == after) | |
26bb3cb2 | 1426 | BB_END (bb) = insn; |
48e1416a | 1427 | |
e1ab7874 | 1428 | prepare_insn_expr (insn, seqno); |
1429 | return insn; | |
1430 | } | |
1431 | ||
1432 | \f | |
1433 | /* Functions to work with right-hand sides. */ | |
1434 | ||
48e1416a | 1435 | /* Search for an entry identified by UID/NEW_VINSN in the sorted vector |
e1ab7874 | 1436 | VECT and return true when found. Use NEW_VINSN for comparison only when |
48e1416a | 1437 | COMPARE_VINSNS is true. Write to INDP the index at which |
1438 | the search stopped, such that inserting the new element at INDP will | |
e1ab7874 | 1439 | retain VECT's sort order. */ |
1440 | static bool | |
f1f41a6c | 1441 | find_in_history_vect_1 (vec<expr_history_def> vect, |
48e1416a | 1442 | unsigned uid, vinsn_t new_vinsn, |
e1ab7874 | 1443 | bool compare_vinsns, int *indp) |
1444 | { | |
1445 | expr_history_def *arr; | |
f1f41a6c | 1446 | int i, j, len = vect.length (); |
e1ab7874 | 1447 | |
1448 | if (len == 0) | |
1449 | { | |
1450 | *indp = 0; | |
1451 | return false; | |
1452 | } | |
1453 | ||
f1f41a6c | 1454 | arr = vect.address (); |
e1ab7874 | 1455 | i = 0, j = len - 1; |
1456 | ||
1457 | while (i <= j) | |
1458 | { | |
1459 | unsigned auid = arr[i].uid; | |
48e1416a | 1460 | vinsn_t avinsn = arr[i].new_expr_vinsn; |
e1ab7874 | 1461 | |
1462 | if (auid == uid | |
48e1416a | 1463 | /* When undoing transformation on a bookkeeping copy, the new vinsn |
1464 | may not be exactly equal to the one that is saved in the vector. | |
e1ab7874 | 1465 | This is because the insn whose copy we're checking was possibly |
1466 | substituted itself. */ | |
48e1416a | 1467 | && (! compare_vinsns |
e1ab7874 | 1468 | || vinsn_equal_p (avinsn, new_vinsn))) |
1469 | { | |
1470 | *indp = i; | |
1471 | return true; | |
1472 | } | |
1473 | else if (auid > uid) | |
1474 | break; | |
1475 | i++; | |
1476 | } | |
1477 | ||
1478 | *indp = i; | |
1479 | return false; | |
1480 | } | |
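The search above is essentially a scan of a vector kept sorted by uid that also reports where a missing element would have to be inserted to preserve the order (the extra vinsn comparison is omitted here). A simplified, self-contained sketch of that idea with made-up types:

```c
#include <stdbool.h>
#include <stdio.h>

/* One record of a history vector kept sorted by UID.  */
struct hist { unsigned uid; int data; };

/* Look for UID in ARR[0..LEN).  On success return true with *INDP set to the
   match; on failure return false with *INDP set to the position where a new
   record with this UID should be inserted to keep ARR sorted.  */
static bool
find_sorted (struct hist *arr, int len, unsigned uid, int *indp)
{
  int i;

  for (i = 0; i < len; i++)
    {
      if (arr[i].uid == uid)
	{
	  *indp = i;
	  return true;
	}
      if (arr[i].uid > uid)
	break;		/* passed the place where UID would live */
    }

  *indp = i;
  return false;
}

int
main (void)
{
  struct hist v[] = { { 2, 0 }, { 5, 0 }, { 9, 0 } };
  int ind;
  bool found = find_sorted (v, 3, 7, &ind);
  printf ("found=%d insert-at=%d\n", found, ind);  /* found=0 insert-at=2 */
  return 0;
}
```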
1481 | ||
48e1416a | 1482 | /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return |
1483 | the position found or -1 if no such value is in the vector. | |
e1ab7874 | 1484 | Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */ |
1485 | int | |
f1f41a6c | 1486 | find_in_history_vect (vec<expr_history_def> vect, rtx insn, |
e1ab7874 | 1487 | vinsn_t new_vinsn, bool originators_p) |
1488 | { | |
1489 | int ind; | |
1490 | ||
48e1416a | 1491 | if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, |
e1ab7874 | 1492 | false, &ind)) |
1493 | return ind; | |
1494 | ||
1495 | if (INSN_ORIGINATORS (insn) && originators_p) | |
1496 | { | |
1497 | unsigned uid; | |
1498 | bitmap_iterator bi; | |
1499 | ||
1500 | EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi) | |
1501 | if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind)) | |
1502 | return ind; | |
1503 | } | |
48e1416a | 1504 | |
e1ab7874 | 1505 | return -1; |
1506 | } | |
1507 | ||
48e1416a | 1508 | /* Insert new element in a sorted history vector pointed to by PVECT, |
1509 | if it is not there already. The element is searched using | |
e1ab7874 | 1510 | UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save |
1511 | the history of a transformation. */ | |
1512 | void | |
f1f41a6c | 1513 | insert_in_history_vect (vec<expr_history_def> *pvect, |
e1ab7874 | 1514 | unsigned uid, enum local_trans_type type, |
48e1416a | 1515 | vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, |
e1ab7874 | 1516 | ds_t spec_ds) |
1517 | { | |
f1f41a6c | 1518 | vec<expr_history_def> vect = *pvect; |
e1ab7874 | 1519 | expr_history_def temp; |
1520 | bool res; | |
1521 | int ind; | |
1522 | ||
1523 | res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind); | |
1524 | ||
1525 | if (res) | |
1526 | { | |
f1f41a6c | 1527 | expr_history_def *phist = &vect[ind]; |
e1ab7874 | 1528 | |
48e1416a | 1529 | /* It is possible that speculation types of expressions that were |
e1ab7874 | 1530 | propagated through different paths will be different here. In this |
1531 | case, merge the status to get the correct check later. */ | |
1532 | if (phist->spec_ds != spec_ds) | |
1533 | phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds); | |
1534 | return; | |
1535 | } | |
48e1416a | 1536 | |
e1ab7874 | 1537 | temp.uid = uid; |
1538 | temp.old_expr_vinsn = old_expr_vinsn; | |
48e1416a | 1539 | temp.new_expr_vinsn = new_expr_vinsn; |
e1ab7874 | 1540 | temp.spec_ds = spec_ds; |
1541 | temp.type = type; | |
1542 | ||
1543 | vinsn_attach (old_expr_vinsn); | |
1544 | vinsn_attach (new_expr_vinsn); | |
f1f41a6c | 1545 | vect.safe_insert (ind, temp); |
e1ab7874 | 1546 | *pvect = vect; |
1547 | } | |
1548 | ||
1549 | /* Free history vector PVECT. */ | |
1550 | static void | |
f1f41a6c | 1551 | free_history_vect (vec<expr_history_def> &pvect) |
e1ab7874 | 1552 | { |
1553 | unsigned i; | |
1554 | expr_history_def *phist; | |
1555 | ||
f1f41a6c | 1556 | if (! pvect.exists ()) |
e1ab7874 | 1557 | return; |
48e1416a | 1558 | |
f1f41a6c | 1559 | for (i = 0; pvect.iterate (i, &phist); i++) |
e1ab7874 | 1560 | { |
1561 | vinsn_detach (phist->old_expr_vinsn); | |
1562 | vinsn_detach (phist->new_expr_vinsn); | |
1563 | } | |
48e1416a | 1564 | |
f1f41a6c | 1565 | pvect.release (); |
e1ab7874 | 1566 | } |
1567 | ||
c53624fb | 1568 | /* Merge vector FROM to PVECT. */ |
1569 | static void | |
f1f41a6c | 1570 | merge_history_vect (vec<expr_history_def> *pvect, |
1571 | vec<expr_history_def> from) | |
c53624fb | 1572 | { |
1573 | expr_history_def *phist; | |
1574 | int i; | |
1575 | ||
1576 | /* We keep this vector sorted. */ | |
f1f41a6c | 1577 | for (i = 0; from.iterate (i, &phist); i++) |
c53624fb | 1578 | insert_in_history_vect (pvect, phist->uid, phist->type, |
1579 | phist->old_expr_vinsn, phist->new_expr_vinsn, | |
1580 | phist->spec_ds); | |
1581 | } | |
e1ab7874 | 1582 | |
1583 | /* Compare two vinsns as rhses if possible and as vinsns otherwise. */ | |
1584 | bool | |
1585 | vinsn_equal_p (vinsn_t x, vinsn_t y) | |
1586 | { | |
1587 | rtx_equal_p_callback_function repcf; | |
1588 | ||
1589 | if (x == y) | |
1590 | return true; | |
1591 | ||
1592 | if (VINSN_TYPE (x) != VINSN_TYPE (y)) | |
1593 | return false; | |
1594 | ||
1595 | if (VINSN_HASH (x) != VINSN_HASH (y)) | |
1596 | return false; | |
1597 | ||
1598 | repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL; | |
48e1416a | 1599 | if (VINSN_SEPARABLE_P (x)) |
e1ab7874 | 1600 | { |
1601 | /* Compare RHSes of VINSNs. */ | |
1602 | gcc_assert (VINSN_RHS (x)); | |
1603 | gcc_assert (VINSN_RHS (y)); | |
1604 | ||
1605 | return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf); | |
1606 | } | |
1607 | ||
1608 | return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf); | |
1609 | } | |
1610 | \f | |
1611 | ||
1612 | /* Functions for working with expressions. */ | |
1613 | ||
1614 | /* Initialize EXPR. */ | |
1615 | static void | |
1616 | init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority, | |
1617 | int sched_times, int orig_bb_index, ds_t spec_done_ds, | |
1618 | ds_t spec_to_check_ds, int orig_sched_cycle, | |
f1f41a6c | 1619 | vec<expr_history_def> history, |
1620 | signed char target_available, | |
e1ab7874 | 1621 | bool was_substituted, bool was_renamed, bool needs_spec_check_p, |
1622 | bool cant_move) | |
1623 | { | |
1624 | vinsn_attach (vi); | |
1625 | ||
1626 | EXPR_VINSN (expr) = vi; | |
1627 | EXPR_SPEC (expr) = spec; | |
1628 | EXPR_USEFULNESS (expr) = use; | |
1629 | EXPR_PRIORITY (expr) = priority; | |
1630 | EXPR_PRIORITY_ADJ (expr) = 0; | |
1631 | EXPR_SCHED_TIMES (expr) = sched_times; | |
1632 | EXPR_ORIG_BB_INDEX (expr) = orig_bb_index; | |
1633 | EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle; | |
1634 | EXPR_SPEC_DONE_DS (expr) = spec_done_ds; | |
1635 | EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds; | |
1636 | ||
f1f41a6c | 1637 | if (history.exists ()) |
e1ab7874 | 1638 | EXPR_HISTORY_OF_CHANGES (expr) = history; |
1639 | else | |
f1f41a6c | 1640 | EXPR_HISTORY_OF_CHANGES (expr).create (0); |
e1ab7874 | 1641 | |
1642 | EXPR_TARGET_AVAILABLE (expr) = target_available; | |
1643 | EXPR_WAS_SUBSTITUTED (expr) = was_substituted; | |
1644 | EXPR_WAS_RENAMED (expr) = was_renamed; | |
1645 | EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p; | |
1646 | EXPR_CANT_MOVE (expr) = cant_move; | |
1647 | } | |
1648 | ||
1649 | /* Make a copy of the expr FROM into the expr TO. */ | |
1650 | void | |
1651 | copy_expr (expr_t to, expr_t from) | |
1652 | { | |
1e094109 | 1653 | vec<expr_history_def> temp = vNULL; |
e1ab7874 | 1654 | |
f1f41a6c | 1655 | if (EXPR_HISTORY_OF_CHANGES (from).exists ()) |
e1ab7874 | 1656 | { |
1657 | unsigned i; | |
1658 | expr_history_def *phist; | |
1659 | ||
f1f41a6c | 1660 | temp = EXPR_HISTORY_OF_CHANGES (from).copy (); |
48e1416a | 1661 | for (i = 0; |
f1f41a6c | 1662 | temp.iterate (i, &phist); |
e1ab7874 | 1663 | i++) |
1664 | { | |
1665 | vinsn_attach (phist->old_expr_vinsn); | |
1666 | vinsn_attach (phist->new_expr_vinsn); | |
1667 | } | |
1668 | } | |
1669 | ||
48e1416a | 1670 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), |
e1ab7874 | 1671 | EXPR_USEFULNESS (from), EXPR_PRIORITY (from), |
1672 | EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from), | |
48e1416a | 1673 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), |
e1ab7874 | 1674 | EXPR_ORIG_SCHED_CYCLE (from), temp, |
48e1416a | 1675 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
e1ab7874 | 1676 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
1677 | EXPR_CANT_MOVE (from)); | |
1678 | } | |
1679 | ||
48e1416a | 1680 | /* Same, but the final expr will not ever be in av sets, so don't copy |
e1ab7874 | 1681 | "uninteresting" data such as bitmap cache. */ |
1682 | void | |
1683 | copy_expr_onside (expr_t to, expr_t from) | |
1684 | { | |
1685 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from), | |
1686 | EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0, | |
f1f41a6c | 1687 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, |
1e094109 | 1688 | vNULL, |
e1ab7874 | 1689 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
1690 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), | |
1691 | EXPR_CANT_MOVE (from)); | |
1692 | } | |
1693 | ||
1694 | /* Prepare the expr of INSN for scheduling. Used when moving insn and when | |
1695 | initializing new insns. */ | |
1696 | static void | |
1697 | prepare_insn_expr (insn_t insn, int seqno) | |
1698 | { | |
1699 | expr_t expr = INSN_EXPR (insn); | |
1700 | ds_t ds; | |
48e1416a | 1701 | |
e1ab7874 | 1702 | INSN_SEQNO (insn) = seqno; |
1703 | EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn); | |
1704 | EXPR_SPEC (expr) = 0; | |
1705 | EXPR_ORIG_SCHED_CYCLE (expr) = 0; | |
1706 | EXPR_WAS_SUBSTITUTED (expr) = 0; | |
1707 | EXPR_WAS_RENAMED (expr) = 0; | |
1708 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
1709 | INSN_LIVE_VALID_P (insn) = false; | |
1710 | ||
1711 | /* ??? If this expression is speculative, make its dependence | |
1712 | as weak as possible. We can filter this expression later | |
1713 | in process_spec_exprs, because we do not distinguish | |
1714 | between the status we got during compute_av_set and the | |
1715 | existing status. To be fixed. */ | |
1716 | ds = EXPR_SPEC_DONE_DS (expr); | |
1717 | if (ds) | |
1718 | EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds); | |
1719 | ||
f1f41a6c | 1720 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1721 | } |
1722 | ||
1723 | /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1724 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1725 | a split point. */ |
1726 | static void | |
1727 | update_target_availability (expr_t to, expr_t from, insn_t split_point) | |
1728 | { | |
48e1416a | 1729 | if (EXPR_TARGET_AVAILABLE (to) < 0 |
e1ab7874 | 1730 | || EXPR_TARGET_AVAILABLE (from) < 0) |
1731 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1732 | else | |
1733 | { | |
1734 | /* We try to detect the case when one of the expressions | |
1735 | can only be reached through another one. In this case, | |
1736 | we can do better. */ | |
1737 | if (split_point == NULL) | |
1738 | { | |
1739 | int toind, fromind; | |
1740 | ||
1741 | toind = EXPR_ORIG_BB_INDEX (to); | |
1742 | fromind = EXPR_ORIG_BB_INDEX (from); | |
48e1416a | 1743 | |
e1ab7874 | 1744 | if (toind && toind == fromind) |
48e1416a | 1745 | /* Do nothing -- everything is done in |
e1ab7874 | 1746 | merge_with_other_exprs. */ |
1747 | ; | |
1748 | else | |
1749 | EXPR_TARGET_AVAILABLE (to) = -1; | |
1750 | } | |
d6726470 | 1751 | else if (EXPR_TARGET_AVAILABLE (from) == 0 |
1752 | && EXPR_LHS (from) | |
1753 | && REG_P (EXPR_LHS (from)) | |
1754 | && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from))) | |
1755 | EXPR_TARGET_AVAILABLE (to) = -1; | |
e1ab7874 | 1756 | else |
1757 | EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from); | |
1758 | } | |
1759 | } | |
1760 | ||
1761 | /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT | |
48e1416a | 1762 | is non-null when expressions are merged from different successors at |
e1ab7874 | 1763 | a split point. */ |
1764 | static void | |
1765 | update_speculative_bits (expr_t to, expr_t from, insn_t split_point) | |
1766 | { | |
1767 | ds_t old_to_ds, old_from_ds; | |
1768 | ||
1769 | old_to_ds = EXPR_SPEC_DONE_DS (to); | |
1770 | old_from_ds = EXPR_SPEC_DONE_DS (from); | |
48e1416a | 1771 | |
e1ab7874 | 1772 | EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds); |
1773 | EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from); | |
1774 | EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from); | |
1775 | ||
1776 | /* When merging e.g. control & data speculative exprs, or a control | |
48e1416a | 1777 | speculative with a control&data speculative one, we really have |
e1ab7874 | 1778 | to change vinsn too. Also, when speculative status is changed, |
1779 | we also need to record this as a transformation in expr's history. */ | |
1780 | if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE)) | |
1781 | { | |
1782 | old_to_ds = ds_get_speculation_types (old_to_ds); | |
1783 | old_from_ds = ds_get_speculation_types (old_from_ds); | |
48e1416a | 1784 | |
e1ab7874 | 1785 | if (old_to_ds != old_from_ds) |
1786 | { | |
1787 | ds_t record_ds; | |
48e1416a | 1788 | |
1789 | /* When both expressions are speculative, we need to change | |
e1ab7874 | 1790 | the vinsn first. */ |
1791 | if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE)) | |
1792 | { | |
1793 | int res; | |
48e1416a | 1794 | |
e1ab7874 | 1795 | res = speculate_expr (to, EXPR_SPEC_DONE_DS (to)); |
1796 | gcc_assert (res >= 0); | |
1797 | } | |
1798 | ||
1799 | if (split_point != NULL) | |
1800 | { | |
1801 | /* Record the change with proper status. */ | |
1802 | record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE; | |
1803 | record_ds &= ~(old_to_ds & SPECULATIVE); | |
1804 | record_ds &= ~(old_from_ds & SPECULATIVE); | |
48e1416a | 1805 | |
1806 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), | |
1807 | INSN_UID (split_point), TRANS_SPECULATION, | |
e1ab7874 | 1808 | EXPR_VINSN (from), EXPR_VINSN (to), |
1809 | record_ds); | |
1810 | } | |
1811 | } | |
1812 | } | |
1813 | } | |
1814 | ||
1815 | ||
1816 | /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL, | |
1817 | this is done along different paths. */ | |
1818 | void | |
1819 | merge_expr_data (expr_t to, expr_t from, insn_t split_point) | |
1820 | { | |
32bbc704 | 1821 | /* Choose the maximum of the specs of merged exprs. This is required |
1822 | for correctness of bookkeeping. */ | |
1823 | if (EXPR_SPEC (to) < EXPR_SPEC (from)) | |
e1ab7874 | 1824 | EXPR_SPEC (to) = EXPR_SPEC (from); |
1825 | ||
1826 | if (split_point) | |
1827 | EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from); | |
1828 | else | |
48e1416a | 1829 | EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), |
e1ab7874 | 1830 | EXPR_USEFULNESS (from)); |
1831 | ||
1832 | if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from)) | |
1833 | EXPR_PRIORITY (to) = EXPR_PRIORITY (from); | |
1834 | ||
d7a270ab | 1835 | /* We merge sched-times half-way to the larger value to avoid the endless |
1836 | pipelining of unneeded insns. The average seems to be a good compromise | |
1837 | between pipelining opportunities and avoiding extra work. */ | |
1838 | if (EXPR_SCHED_TIMES (to) != EXPR_SCHED_TIMES (from)) | |
1839 | EXPR_SCHED_TIMES (to) = ((EXPR_SCHED_TIMES (from) + EXPR_SCHED_TIMES (to) | |
1840 | + 1) / 2); | |
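/* Worked example (added for exposition): with EXPR_SCHED_TIMES values
   of 3 and 6, the merged value is (6 + 3 + 1) / 2 = 5, i.e. half-way
   towards the larger count, rounded up.  */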
e1ab7874 | 1841 | |
1842 | if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from)) | |
1843 | EXPR_ORIG_BB_INDEX (to) = 0; | |
1844 | ||
48e1416a | 1845 | EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), |
e1ab7874 | 1846 | EXPR_ORIG_SCHED_CYCLE (from)); |
1847 | ||
e1ab7874 | 1848 | EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from); |
1849 | EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from); | |
1850 | EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from); | |
1851 | ||
c53624fb | 1852 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
1853 | EXPR_HISTORY_OF_CHANGES (from)); | |
e1ab7874 | 1854 | update_target_availability (to, from, split_point); |
1855 | update_speculative_bits (to, from, split_point); | |
1856 | } | |
1857 | ||
1858 | /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal | |
48e1416a | 1859 | in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions |
e1ab7874 | 1860 | are merged from different successors at a split point. */ |
1861 | void | |
1862 | merge_expr (expr_t to, expr_t from, insn_t split_point) | |
1863 | { | |
1864 | vinsn_t to_vi = EXPR_VINSN (to); | |
1865 | vinsn_t from_vi = EXPR_VINSN (from); | |
1866 | ||
1867 | gcc_assert (vinsn_equal_p (to_vi, from_vi)); | |
1868 | ||
1869 | /* Make sure that the speculative pattern is propagated into exprs that | |
1870 | have a non-speculative one. This will provide us with consistent | |
1871 | speculative bits and speculative patterns inside expr. */ | |
8d811ff9 | 1872 | if (EXPR_SPEC_DONE_DS (to) == 0 |
1873 | && (EXPR_SPEC_DONE_DS (from) != 0 | |
1874 | /* Do likewise for volatile insns, so that we always retain | |
1875 | the may_trap_p bit on the resulting expression. However, | |
1876 | avoid propagating the trapping bit into the instructions | |
1877 | already speculated. This would result in replacing the | |
1878 | speculative pattern with the non-speculative one and breaking | |
1879 | the speculation support. */ | |
1880 | || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to)) | |
1881 | && VINSN_MAY_TRAP_P (EXPR_VINSN (from))))) | |
e1ab7874 | 1882 | change_vinsn_in_expr (to, EXPR_VINSN (from)); |
1883 | ||
1884 | merge_expr_data (to, from, split_point); | |
1885 | gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE); | |
1886 | } | |
1887 | ||
1888 | /* Clear the information of this EXPR. */ | |
1889 | void | |
1890 | clear_expr (expr_t expr) | |
1891 | { | |
48e1416a | 1892 | |
e1ab7874 | 1893 | vinsn_detach (EXPR_VINSN (expr)); |
1894 | EXPR_VINSN (expr) = NULL; | |
1895 | ||
f1f41a6c | 1896 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
e1ab7874 | 1897 | } |
1898 | ||
1899 | /* For a given LV_SET, mark EXPR as having an unavailable target register. */ | |
1900 | static void | |
1901 | set_unavailable_target_for_expr (expr_t expr, regset lv_set) | |
1902 | { | |
1903 | if (EXPR_SEPARABLE_P (expr)) | |
1904 | { | |
1905 | if (REG_P (EXPR_LHS (expr)) | |
1f53e226 | 1906 | && register_unavailable_p (lv_set, EXPR_LHS (expr))) |
e1ab7874 | 1907 | { |
48e1416a | 1908 | /* If it's an insn like r1 = use (r1, ...), and it exists in |
1909 | different forms in each of the av_sets being merged, we can't say | |
1910 | whether the original destination register is available or not. | |
1911 | However, this still works if the destination register is not used | |
e1ab7874 | 1912 | in the original expression: if the branch whose LV_SET we are |
1913 | looking at here is not actually the 'other branch' in the sense that the same | |
48e1416a | 1914 | expression is available through it (but it can't be determined |
e1ab7874 | 1915 | at computation stage because of transformations on one of the |
48e1416a | 1916 | branches), it still won't affect the availability. |
1917 | Liveness of a register somewhere on a code motion path means | |
1918 | it's either read somewhere on a code motion path, live on | |
e1ab7874 | 1919 | 'other' branch, live at the point immediately following |
1920 | the original operation, or is read by the original operation. | |
1921 | The latter case is filtered out in the condition below. | |
1922 | It still doesn't cover the case when register is defined and used | |
1923 | somewhere within the code motion path, and in this case we could | |
1924 | miss a unifying code motion along both branches using a renamed | |
1925 | register, but it won't affect code correctness since upon | |
1926 | an actual code motion bookkeeping code would be generated. */ | |
1f53e226 | 1927 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1928 | EXPR_LHS (expr))) | |
e1ab7874 | 1929 | EXPR_TARGET_AVAILABLE (expr) = -1; |
1930 | else | |
1931 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1932 | } | |
1933 | } | |
1934 | else | |
1935 | { | |
1936 | unsigned regno; | |
1937 | reg_set_iterator rsi; | |
48e1416a | 1938 | |
1939 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), | |
e1ab7874 | 1940 | 0, regno, rsi) |
1941 | if (bitmap_bit_p (lv_set, regno)) | |
1942 | { | |
1943 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1944 | break; | |
1945 | } | |
1946 | ||
1947 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)), | |
1948 | 0, regno, rsi) | |
1949 | if (bitmap_bit_p (lv_set, regno)) | |
1950 | { | |
1951 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1952 | break; | |
1953 | } | |
1954 | } | |
1955 | } | |
1956 | ||
48e1416a | 1957 | /* Try to make EXPR speculative. Return 1 when EXPR's pattern |
e1ab7874 | 1958 | or dependence status has changed, 2 when also the target register |
1959 | became unavailable, 0 if nothing had to be changed, and -1 on failure. */ | |
1960 | int | |
1961 | speculate_expr (expr_t expr, ds_t ds) | |
1962 | { | |
1963 | int res; | |
04d073df | 1964 | rtx_insn *orig_insn_rtx; |
e1ab7874 | 1965 | rtx spec_pat; |
1966 | ds_t target_ds, current_ds; | |
1967 | ||
1968 | /* Obtain the status we need to put on EXPR. */ | |
1969 | target_ds = (ds & SPECULATIVE); | |
1970 | current_ds = EXPR_SPEC_DONE_DS (expr); | |
1971 | ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX); | |
1972 | ||
1973 | orig_insn_rtx = EXPR_INSN_RTX (expr); | |
1974 | ||
1975 | res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat); | |
1976 | ||
1977 | switch (res) | |
1978 | { | |
1979 | case 0: | |
1980 | EXPR_SPEC_DONE_DS (expr) = ds; | |
1981 | return current_ds != ds ? 1 : 0; | |
48e1416a | 1982 | |
e1ab7874 | 1983 | case 1: |
1984 | { | |
04d073df | 1985 | rtx_insn *spec_insn_rtx = |
1986 | create_insn_rtx_from_pattern (spec_pat, NULL_RTX); | |
e1ab7874 | 1987 | vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false); |
1988 | ||
1989 | change_vinsn_in_expr (expr, spec_vinsn); | |
1990 | EXPR_SPEC_DONE_DS (expr) = ds; | |
1991 | EXPR_NEEDS_SPEC_CHECK_P (expr) = true; | |
1992 | ||
48e1416a | 1993 | /* Do not allow clobbering the address register of speculative |
e1ab7874 | 1994 | insns. */ |
1f53e226 | 1995 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1996 | expr_dest_reg (expr))) | |
e1ab7874 | 1997 | { |
1998 | EXPR_TARGET_AVAILABLE (expr) = false; | |
1999 | return 2; | |
2000 | } | |
2001 | ||
2002 | return 1; | |
2003 | } | |
2004 | ||
2005 | case -1: | |
2006 | return -1; | |
2007 | ||
2008 | default: | |
2009 | gcc_unreachable (); | |
2010 | return -1; | |
2011 | } | |
2012 | } | |
2013 | ||
2014 | /* Return a destination register, if any, of EXPR. */ | |
2015 | rtx | |
2016 | expr_dest_reg (expr_t expr) | |
2017 | { | |
2018 | rtx dest = VINSN_LHS (EXPR_VINSN (expr)); | |
2019 | ||
2020 | if (dest != NULL_RTX && REG_P (dest)) | |
2021 | return dest; | |
2022 | ||
2023 | return NULL_RTX; | |
2024 | } | |
2025 | ||
2026 | /* Return the REGNO of EXPR's destination. */ | |
2027 | unsigned | |
2028 | expr_dest_regno (expr_t expr) | |
2029 | { | |
2030 | rtx dest = expr_dest_reg (expr); | |
2031 | ||
2032 | gcc_assert (dest != NULL_RTX); | |
2033 | return REGNO (dest); | |
2034 | } | |
2035 | ||
48e1416a | 2036 | /* For a given LV_SET, mark all expressions in JOIN_SET that are not present |
e1ab7874 | 2037 | in AV_SET as having an unavailable target register. */ |
2038 | void | |
2039 | mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) | |
2040 | { | |
2041 | expr_t expr; | |
2042 | av_set_iterator avi; | |
2043 | ||
2044 | FOR_EACH_EXPR (expr, avi, join_set) | |
2045 | if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) | |
2046 | set_unavailable_target_for_expr (expr, lv_set); | |
2047 | } | |
2048 | \f | |
2049 | ||
1f53e226 | 2050 | /* Returns true if REG (at least partially) is present in REGS. */ |
2051 | bool | |
2052 | register_unavailable_p (regset regs, rtx reg) | |
2053 | { | |
2054 | unsigned regno, end_regno; | |
2055 | ||
2056 | regno = REGNO (reg); | |
2057 | if (bitmap_bit_p (regs, regno)) | |
2058 | return true; | |
2059 | ||
2060 | end_regno = END_REGNO (reg); | |
2061 | ||
2062 | while (++regno < end_regno) | |
2063 | if (bitmap_bit_p (regs, regno)) | |
2064 | return true; | |
2065 | ||
2066 | return false; | |
2067 | } | |
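/* Illustrative example (added for exposition): for a multi-word hard
   register, say a DImode value occupying hard regs 4 and 5, the function
   returns true if either regno 4 or regno 5 is set in REGS, hence the
   "at least partially" wording above.  */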
2068 | ||
e1ab7874 | 2069 | /* Av set functions. */ |
2070 | ||
2071 | /* Add a new element to av set SETP. | |
2072 | Return the element added. */ | |
2073 | static av_set_t | |
2074 | av_set_add_element (av_set_t *setp) | |
2075 | { | |
2076 | /* Insert at the beginning of the list. */ | |
2077 | _list_add (setp); | |
2078 | return *setp; | |
2079 | } | |
2080 | ||
2081 | /* Add EXPR to SETP. */ | |
2082 | void | |
2083 | av_set_add (av_set_t *setp, expr_t expr) | |
2084 | { | |
2085 | av_set_t elem; | |
48e1416a | 2086 | |
e1ab7874 | 2087 | gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); |
2088 | elem = av_set_add_element (setp); | |
2089 | copy_expr (_AV_SET_EXPR (elem), expr); | |
2090 | } | |
2091 | ||
2092 | /* Same, but do not copy EXPR. */ | |
2093 | static void | |
2094 | av_set_add_nocopy (av_set_t *setp, expr_t expr) | |
2095 | { | |
2096 | av_set_t elem; | |
2097 | ||
2098 | elem = av_set_add_element (setp); | |
2099 | *_AV_SET_EXPR (elem) = *expr; | |
2100 | } | |
2101 | ||
2102 | /* Remove expr pointed to by IP from the av_set. */ | |
2103 | void | |
2104 | av_set_iter_remove (av_set_iterator *ip) | |
2105 | { | |
2106 | clear_expr (_AV_SET_EXPR (*ip->lp)); | |
2107 | _list_iter_remove (ip); | |
2108 | } | |
2109 | ||
2110 | /* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the | |
2111 | sense of the vinsn_equal_p function. Return NULL if no such expr | |
2112 | was found in SET. */ | |
2113 | expr_t | |
2114 | av_set_lookup (av_set_t set, vinsn_t sought_vinsn) | |
2115 | { | |
2116 | expr_t expr; | |
2117 | av_set_iterator i; | |
2118 | ||
2119 | FOR_EACH_EXPR (expr, i, set) | |
2120 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2121 | return expr; | |
2122 | return NULL; | |
2123 | } | |
2124 | ||
2125 | /* Same, but also remove the EXPR found. */ | |
2126 | static expr_t | |
2127 | av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) | |
2128 | { | |
2129 | expr_t expr; | |
2130 | av_set_iterator i; | |
2131 | ||
2132 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2133 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) | |
2134 | { | |
2135 | _list_iter_remove_nofree (&i); | |
2136 | return expr; | |
2137 | } | |
2138 | return NULL; | |
2139 | } | |
2140 | ||
2141 | /* Search for an expr in SET, such that it's equivalent to EXPR in the | |
2142 | sense of vinsn_equal_p on their vinsns, but not EXPR itself. | |
2143 | Return NULL if no such expr was found in SET. */ | |
2144 | static expr_t | |
2145 | av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) | |
2146 | { | |
2147 | expr_t cur_expr; | |
2148 | av_set_iterator i; | |
2149 | ||
2150 | FOR_EACH_EXPR (cur_expr, i, set) | |
2151 | { | |
2152 | if (cur_expr == expr) | |
2153 | continue; | |
2154 | if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) | |
2155 | return cur_expr; | |
2156 | } | |
2157 | ||
2158 | return NULL; | |
2159 | } | |
2160 | ||
2161 | /* If another equivalent expression is already in AVP, merge them and remove one. */ | |
2162 | expr_t | |
2163 | merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) | |
2164 | { | |
2165 | expr_t expr2; | |
2166 | ||
2167 | expr2 = av_set_lookup_other_equiv_expr (*avp, expr); | |
2168 | if (expr2 != NULL) | |
2169 | { | |
2170 | /* Reset target availability on merge, since taking it only from one | |
2171 | of the exprs would be controversial for different code. */ | |
2172 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2173 | EXPR_USEFULNESS (expr2) = 0; | |
2174 | ||
2175 | merge_expr (expr2, expr, NULL); | |
48e1416a | 2176 | |
e1ab7874 | 2177 | /* Fix usefulness as it should be now REG_BR_PROB_BASE. */ |
2178 | EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; | |
48e1416a | 2179 | |
e1ab7874 | 2180 | av_set_iter_remove (ip); |
2181 | return expr2; | |
2182 | } | |
2183 | ||
2184 | return expr; | |
2185 | } | |
2186 | ||
2187 | /* Return true if there is an expr that correlates to VI in SET. */ | |
2188 | bool | |
2189 | av_set_is_in_p (av_set_t set, vinsn_t vi) | |
2190 | { | |
2191 | return av_set_lookup (set, vi) != NULL; | |
2192 | } | |
2193 | ||
2194 | /* Return a copy of SET. */ | |
2195 | av_set_t | |
2196 | av_set_copy (av_set_t set) | |
2197 | { | |
2198 | expr_t expr; | |
2199 | av_set_iterator i; | |
2200 | av_set_t res = NULL; | |
2201 | ||
2202 | FOR_EACH_EXPR (expr, i, set) | |
2203 | av_set_add (&res, expr); | |
2204 | ||
2205 | return res; | |
2206 | } | |
2207 | ||
2208 | /* Join two av sets that do not have common elements by attaching second set | |
2209 | (pointed to by FROMP) to the end of first set (TO_TAILP must point to | |
2210 | _AV_SET_NEXT of first set's last element). */ | |
2211 | static void | |
2212 | join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) | |
2213 | { | |
2214 | gcc_assert (*to_tailp == NULL); | |
2215 | *to_tailp = *fromp; | |
2216 | *fromp = NULL; | |
2217 | } | |
2218 | ||
2219 | /* Make the set pointed to by TOP be the union of TOP and FROMP. Clear the av_set | |
2220 | pointed to by FROMP afterwards. */ | |
2221 | void | |
2222 | av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) | |
2223 | { | |
2224 | expr_t expr1; | |
2225 | av_set_iterator i; | |
2226 | ||
2227 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2228 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2229 | { | |
2230 | expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); | |
2231 | ||
2232 | if (expr2) | |
2233 | { | |
2234 | merge_expr (expr2, expr1, insn); | |
2235 | av_set_iter_remove (&i); | |
2236 | } | |
2237 | } | |
2238 | ||
2239 | join_distinct_sets (i.lp, fromp); | |
2240 | } | |
2241 | ||
48e1416a | 2242 | /* Same as above, but also update availability of target register in |
e1ab7874 | 2243 | TOP judging by TO_LV_SET and FROM_LV_SET. */ |
2244 | void | |
2245 | av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, | |
2246 | regset from_lv_set, insn_t insn) | |
2247 | { | |
2248 | expr_t expr1; | |
2249 | av_set_iterator i; | |
2250 | av_set_t *to_tailp, in_both_set = NULL; | |
2251 | ||
2252 | /* Delete from TOP all exprs that are present in FROMP. */ | |
2253 | FOR_EACH_EXPR_1 (expr1, i, top) | |
2254 | { | |
2255 | expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); | |
2256 | ||
2257 | if (expr2) | |
2258 | { | |
48e1416a | 2259 | /* It may be that the expressions have different destination |
e1ab7874 | 2260 | registers, in which case we need to check liveness here. */ |
2261 | if (EXPR_SEPARABLE_P (expr1)) | |
2262 | { | |
48e1416a | 2263 | int regno1 = (REG_P (EXPR_LHS (expr1)) |
e1ab7874 | 2264 | ? (int) expr_dest_regno (expr1) : -1); |
48e1416a | 2265 | int regno2 = (REG_P (EXPR_LHS (expr2)) |
e1ab7874 | 2266 | ? (int) expr_dest_regno (expr2) : -1); |
48e1416a | 2267 | |
2268 | /* ??? We don't have a way to check restrictions for | |
e1ab7874 | 2269 | *other* register on the current path; we did it only |
2270 | for the current target register. Give up. */ | |
2271 | if (regno1 != regno2) | |
2272 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2273 | } | |
2274 | else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) | |
2275 | EXPR_TARGET_AVAILABLE (expr2) = -1; | |
2276 | ||
2277 | merge_expr (expr2, expr1, insn); | |
2278 | av_set_add_nocopy (&in_both_set, expr2); | |
2279 | av_set_iter_remove (&i); | |
2280 | } | |
2281 | else | |
48e1416a | 2282 | /* EXPR1 is present in TOP, but not in FROMP. Check it on |
e1ab7874 | 2283 | FROM_LV_SET. */ |
2284 | set_unavailable_target_for_expr (expr1, from_lv_set); | |
2285 | } | |
2286 | to_tailp = i.lp; | |
2287 | ||
2288 | /* These expressions are not present in TOP. Check liveness | |
2289 | restrictions on TO_LV_SET. */ | |
2290 | FOR_EACH_EXPR (expr1, i, *fromp) | |
2291 | set_unavailable_target_for_expr (expr1, to_lv_set); | |
2292 | ||
2293 | join_distinct_sets (i.lp, &in_both_set); | |
2294 | join_distinct_sets (to_tailp, fromp); | |
2295 | } | |
2296 | ||
2297 | /* Clear av_set pointed to by SETP. */ | |
2298 | void | |
2299 | av_set_clear (av_set_t *setp) | |
2300 | { | |
2301 | expr_t expr; | |
2302 | av_set_iterator i; | |
2303 | ||
2304 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2305 | av_set_iter_remove (&i); | |
2306 | ||
2307 | gcc_assert (*setp == NULL); | |
2308 | } | |
2309 | ||
2310 | /* Leave only one non-speculative element in the set pointed to by SETP. */ | |
2311 | void | |
2312 | av_set_leave_one_nonspec (av_set_t *setp) | |
2313 | { | |
2314 | expr_t expr; | |
2315 | av_set_iterator i; | |
2316 | bool has_one_nonspec = false; | |
2317 | ||
48e1416a | 2318 | /* Keep all speculative exprs, and leave one non-speculative |
e1ab7874 | 2319 | (the first one). */ |
2320 | FOR_EACH_EXPR_1 (expr, i, setp) | |
2321 | { | |
2322 | if (!EXPR_SPEC_DONE_DS (expr)) | |
2323 | { | |
2324 | if (has_one_nonspec) | |
2325 | av_set_iter_remove (&i); | |
2326 | else | |
2327 | has_one_nonspec = true; | |
2328 | } | |
2329 | } | |
2330 | } | |
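/* Illustrative example (added for exposition): for an av set that iterates
   as { spec_a, nonspec_b, spec_c, nonspec_d }, all speculative exprs are
   kept and only the first non-speculative one survives, leaving
   { spec_a, nonspec_b, spec_c }.  */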
2331 | ||
2332 | /* Return the N'th element of the SET. */ | |
2333 | expr_t | |
2334 | av_set_element (av_set_t set, int n) | |
2335 | { | |
2336 | expr_t expr; | |
2337 | av_set_iterator i; | |
2338 | ||
2339 | FOR_EACH_EXPR (expr, i, set) | |
2340 | if (n-- == 0) | |
2341 | return expr; | |
2342 | ||
2343 | gcc_unreachable (); | |
2344 | return NULL; | |
2345 | } | |
2346 | ||
2347 | /* Deletes all expressions from AVP that are conditional branches (IFs). */ | |
2348 | void | |
2349 | av_set_substract_cond_branches (av_set_t *avp) | |
2350 | { | |
2351 | av_set_iterator i; | |
2352 | expr_t expr; | |
2353 | ||
2354 | FOR_EACH_EXPR_1 (expr, i, avp) | |
2355 | if (vinsn_cond_branch_p (EXPR_VINSN (expr))) | |
2356 | av_set_iter_remove (&i); | |
2357 | } | |
2358 | ||
48e1416a | 2359 | /* Multiply the usefulness attribute of each member of av-set AV by |
e1ab7874 | 2360 | the value PROB / ALL_PROB. */ |
2361 | void | |
2362 | av_set_split_usefulness (av_set_t av, int prob, int all_prob) | |
2363 | { | |
2364 | av_set_iterator i; | |
2365 | expr_t expr; | |
2366 | ||
2367 | FOR_EACH_EXPR (expr, i, av) | |
48e1416a | 2368 | EXPR_USEFULNESS (expr) = (all_prob |
e1ab7874 | 2369 | ? (EXPR_USEFULNESS (expr) * prob) / all_prob |
2370 | : 0); | |
2371 | } | |
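/* Worked example (added for exposition): with EXPR_USEFULNESS equal to
   REG_BR_PROB_BASE (10000), PROB = 2500 and ALL_PROB = 10000, the scaled
   usefulness becomes (10000 * 2500) / 10000 = 2500; when ALL_PROB is 0
   the usefulness is simply reset to 0.  */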
2372 | ||
2373 | /* Leave in AVP only those expressions that are also present in AV, | |
c53624fb | 2374 | merging the history of their transformations. */ |
e1ab7874 | 2375 | void |
c53624fb | 2376 | av_set_code_motion_filter (av_set_t *avp, av_set_t av) |
e1ab7874 | 2377 | { |
2378 | av_set_iterator i; | |
c53624fb | 2379 | expr_t expr, expr2; |
e1ab7874 | 2380 | |
2381 | FOR_EACH_EXPR_1 (expr, i, avp) | |
c53624fb | 2382 | if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) |
e1ab7874 | 2383 | av_set_iter_remove (&i); |
c53624fb | 2384 | else |
2385 | /* When updating av sets in bookkeeping blocks, we can add more insns | |
2386 | there which will be transformed but the upper av sets will not | |
2387 | reflect those transformations. We then fail to undo those | |
2388 | when searching for such insns. So merge the history saved | |
2389 | in the av set of the block we are processing. */ | |
2390 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), | |
2391 | EXPR_HISTORY_OF_CHANGES (expr2)); | |
e1ab7874 | 2392 | } |
2393 | ||
2394 | \f | |
2395 | ||
2396 | /* Dependence hooks to initialize insn data. */ | |
2397 | ||
2398 | /* This is used in hooks callable from dependence analysis when initializing | |
2399 | instruction's data. */ | |
2400 | static struct | |
2401 | { | |
2402 | /* Where the dependence was found (lhs/rhs). */ | |
2403 | deps_where_t where; | |
2404 | ||
2405 | /* The actual data object to initialize. */ | |
2406 | idata_t id; | |
2407 | ||
2408 | /* True when the insn should not be made clonable. */ | |
2409 | bool force_unique_p; | |
2410 | ||
2411 | /* True when insn should be treated as of type USE, i.e. never renamed. */ | |
2412 | bool force_use_p; | |
2413 | } deps_init_id_data; | |
2414 | ||
2415 | ||
48e1416a | 2416 | /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be |
e1ab7874 | 2417 | clonable. */ |
2418 | static void | |
2419 | setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) | |
2420 | { | |
2421 | int type; | |
48e1416a | 2422 | |
e1ab7874 | 2423 | /* Determine whether INSN could be cloned and return the appropriate vinsn type. |
2424 | Clonable insns which can be separated into lhs and rhs have type SET. | |
2425 | Other clonable insns have type USE. */ | |
2426 | type = GET_CODE (insn); | |
2427 | ||
2428 | /* Only regular insns could be cloned. */ | |
2429 | if (type == INSN && !force_unique_p) | |
2430 | type = SET; | |
2431 | else if (type == JUMP_INSN && simplejump_p (insn)) | |
2432 | type = PC; | |
9845d120 | 2433 | else if (type == DEBUG_INSN) |
2434 | type = !force_unique_p ? USE : INSN; | |
48e1416a | 2435 | |
e1ab7874 | 2436 | IDATA_TYPE (id) = type; |
2437 | IDATA_REG_SETS (id) = get_clear_regset_from_pool (); | |
2438 | IDATA_REG_USES (id) = get_clear_regset_from_pool (); | |
2439 | IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); | |
2440 | } | |
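/* Illustrative note (added for exposition): an ordinary arithmetic insn
   that is not forced unique starts out with type SET here; it may still
   be demoted to USE later by maybe_downgrade_id_to_use () if its lhs and
   rhs turn out not to be separable.  */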
2441 | ||
2442 | /* Start initializing insn data. */ | |
2443 | static void | |
2444 | deps_init_id_start_insn (insn_t insn) | |
2445 | { | |
2446 | gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); | |
2447 | ||
2448 | setup_id_for_insn (deps_init_id_data.id, insn, | |
2449 | deps_init_id_data.force_unique_p); | |
2450 | deps_init_id_data.where = DEPS_IN_INSN; | |
2451 | } | |
2452 | ||
2453 | /* Start initializing lhs data. */ | |
2454 | static void | |
2455 | deps_init_id_start_lhs (rtx lhs) | |
2456 | { | |
2457 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2458 | gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); | |
2459 | ||
2460 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2461 | { | |
2462 | IDATA_LHS (deps_init_id_data.id) = lhs; | |
2463 | deps_init_id_data.where = DEPS_IN_LHS; | |
2464 | } | |
2465 | } | |
2466 | ||
2467 | /* Finish initializing lhs data. */ | |
2468 | static void | |
2469 | deps_init_id_finish_lhs (void) | |
2470 | { | |
2471 | deps_init_id_data.where = DEPS_IN_INSN; | |
2472 | } | |
2473 | ||
2474 | /* Note a set of REGNO. */ | |
2475 | static void | |
2476 | deps_init_id_note_reg_set (int regno) | |
2477 | { | |
2478 | haifa_note_reg_set (regno); | |
2479 | ||
2480 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2481 | deps_init_id_data.force_use_p = true; | |
2482 | ||
2483 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2484 | SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); | |
2485 | ||
2486 | #ifdef STACK_REGS | |
48e1416a | 2487 | /* Make instructions that set stack registers ineligible for |
e1ab7874 | 2488 | renaming to avoid issues with find_used_regs. */ |
2489 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2490 | deps_init_id_data.force_use_p = true; | |
2491 | #endif | |
2492 | } | |
2493 | ||
2494 | /* Note a clobber of REGNO. */ | |
2495 | static void | |
2496 | deps_init_id_note_reg_clobber (int regno) | |
2497 | { | |
2498 | haifa_note_reg_clobber (regno); | |
2499 | ||
2500 | if (deps_init_id_data.where == DEPS_IN_RHS) | |
2501 | deps_init_id_data.force_use_p = true; | |
2502 | ||
2503 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2504 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); | |
2505 | } | |
2506 | ||
2507 | /* Note a use of REGNO. */ | |
2508 | static void | |
2509 | deps_init_id_note_reg_use (int regno) | |
2510 | { | |
2511 | haifa_note_reg_use (regno); | |
2512 | ||
2513 | if (IDATA_TYPE (deps_init_id_data.id) != PC) | |
2514 | SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); | |
2515 | } | |
2516 | ||
2517 | /* Start initializing rhs data. */ | |
2518 | static void | |
2519 | deps_init_id_start_rhs (rtx rhs) | |
2520 | { | |
2521 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2522 | ||
2523 | /* And there was no sel_deps_reset_to_insn (). */ | |
2524 | if (IDATA_LHS (deps_init_id_data.id) != NULL) | |
2525 | { | |
2526 | IDATA_RHS (deps_init_id_data.id) = rhs; | |
2527 | deps_init_id_data.where = DEPS_IN_RHS; | |
2528 | } | |
2529 | } | |
2530 | ||
2531 | /* Finish initializing rhs data. */ | |
2532 | static void | |
2533 | deps_init_id_finish_rhs (void) | |
2534 | { | |
2535 | gcc_assert (deps_init_id_data.where == DEPS_IN_RHS | |
2536 | || deps_init_id_data.where == DEPS_IN_INSN); | |
2537 | deps_init_id_data.where = DEPS_IN_INSN; | |
2538 | } | |
2539 | ||
2540 | /* Finish initializing insn data. */ | |
2541 | static void | |
2542 | deps_init_id_finish_insn (void) | |
2543 | { | |
2544 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); | |
2545 | ||
2546 | if (IDATA_TYPE (deps_init_id_data.id) == SET) | |
2547 | { | |
2548 | rtx lhs = IDATA_LHS (deps_init_id_data.id); | |
2549 | rtx rhs = IDATA_RHS (deps_init_id_data.id); | |
2550 | ||
2551 | if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) | |
2552 | || deps_init_id_data.force_use_p) | |
2553 | { | |
48e1416a | 2554 | /* This should be a USE, as we don't want to schedule its RHS |
e1ab7874 | 2555 | separately. However, we still want to have them recorded |
48e1416a | 2556 | for the purposes of substitution. That's why we don't |
e1ab7874 | 2557 | simply call downgrade_to_use () here. */ |
2558 | gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); | |
2559 | gcc_assert (!lhs == !rhs); | |
2560 | ||
2561 | IDATA_TYPE (deps_init_id_data.id) = USE; | |
2562 | } | |
2563 | } | |
2564 | ||
2565 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2566 | } | |
2567 | ||
2568 | /* This is dependence info used for initializing insn's data. */ | |
2569 | static struct sched_deps_info_def deps_init_id_sched_deps_info; | |
2570 | ||
2571 | /* This initializes most of the static part of the above structure. */ | |
2572 | static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = | |
2573 | { | |
2574 | NULL, | |
2575 | ||
2576 | deps_init_id_start_insn, | |
2577 | deps_init_id_finish_insn, | |
2578 | deps_init_id_start_lhs, | |
2579 | deps_init_id_finish_lhs, | |
2580 | deps_init_id_start_rhs, | |
2581 | deps_init_id_finish_rhs, | |
2582 | deps_init_id_note_reg_set, | |
2583 | deps_init_id_note_reg_clobber, | |
2584 | deps_init_id_note_reg_use, | |
2585 | NULL, /* note_mem_dep */ | |
2586 | NULL, /* note_dep */ | |
2587 | ||
2588 | 0, /* use_cselib */ | |
2589 | 0, /* use_deps_list */ | |
2590 | 0 /* generate_spec_deps */ | |
2591 | }; | |
2592 | ||
2593 | /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, | |
2594 | we don't actually need information about lhs and rhs. */ | |
2595 | static void | |
2596 | setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) | |
2597 | { | |
2598 | rtx pat = PATTERN (insn); | |
48e1416a | 2599 | |
971ba038 | 2600 | if (NONJUMP_INSN_P (insn) |
48e1416a | 2601 | && GET_CODE (pat) == SET |
e1ab7874 | 2602 | && !force_unique_p) |
2603 | { | |
2604 | IDATA_RHS (id) = SET_SRC (pat); | |
2605 | IDATA_LHS (id) = SET_DEST (pat); | |
2606 | } | |
2607 | else | |
2608 | IDATA_LHS (id) = IDATA_RHS (id) = NULL; | |
2609 | } | |
2610 | ||
2611 | /* Possibly downgrade INSN to USE. */ | |
2612 | static void | |
2613 | maybe_downgrade_id_to_use (idata_t id, insn_t insn) | |
2614 | { | |
2615 | bool must_be_use = false; | |
be10bb5a | 2616 | df_ref def; |
e1ab7874 | 2617 | rtx lhs = IDATA_LHS (id); |
2618 | rtx rhs = IDATA_RHS (id); | |
48e1416a | 2619 | |
e1ab7874 | 2620 | /* We downgrade only SETs. */ |
2621 | if (IDATA_TYPE (id) != SET) | |
2622 | return; | |
2623 | ||
2624 | if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) | |
2625 | { | |
2626 | IDATA_TYPE (id) = USE; | |
2627 | return; | |
2628 | } | |
48e1416a | 2629 | |
be10bb5a | 2630 | FOR_EACH_INSN_DEF (def, insn) |
e1ab7874 | 2631 | { |
e1ab7874 | 2632 | if (DF_REF_INSN (def) |
2633 | && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) | |
2634 | && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) | |
2635 | { | |
2636 | must_be_use = true; | |
2637 | break; | |
2638 | } | |
2639 | ||
2640 | #ifdef STACK_REGS | |
48e1416a | 2641 | /* Make instructions that set stack registers ineligible for |
e1ab7874 | 2642 | renaming to avoid issues with find_used_regs. */ |
2643 | if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) | |
2644 | { | |
2645 | must_be_use = true; | |
2646 | break; | |
2647 | } | |
2648 | #endif | |
48e1416a | 2649 | } |
2650 | ||
e1ab7874 | 2651 | if (must_be_use) |
2652 | IDATA_TYPE (id) = USE; | |
2653 | } | |
2654 | ||
acf58115 | 2655 | /* Setup implicit register clobbers calculated by sched-deps for INSN |
2656 | before reload and save them in ID. */ | |
2657 | static void | |
2658 | setup_id_implicit_regs (idata_t id, insn_t insn) | |
2659 | { | |
2660 | if (reload_completed) | |
2661 | return; | |
2662 | ||
2663 | HARD_REG_SET temp; | |
2664 | unsigned regno; | |
2665 | hard_reg_set_iterator hrsi; | |
2666 | ||
2667 | get_implicit_reg_pending_clobbers (&temp, insn); | |
2668 | EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) | |
2669 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); | |
2670 | } | |
2671 | ||
e1ab7874 | 2672 | /* Setup register sets describing INSN in ID. */ |
2673 | static void | |
2674 | setup_id_reg_sets (idata_t id, insn_t insn) | |
2675 | { | |
be10bb5a | 2676 | struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
2677 | df_ref def, use; | |
e1ab7874 | 2678 | regset tmp = get_clear_regset_from_pool (); |
48e1416a | 2679 | |
be10bb5a | 2680 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
e1ab7874 | 2681 | { |
e1ab7874 | 2682 | unsigned int regno = DF_REF_REGNO (def); |
48e1416a | 2683 | |
e1ab7874 | 2684 | /* Post modifies are treated like clobbers by sched-deps.c. */ |
2685 | if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER | |
2686 | | DF_REF_PRE_POST_MODIFY))) | |
2687 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); | |
2688 | else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) | |
2689 | { | |
2690 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); | |
2691 | ||
2692 | #ifdef STACK_REGS | |
48e1416a | 2693 | /* For stack registers, treat writes to them as writes |
e1ab7874 | 2694 | to the first one to be consistent with sched-deps.c. */ |
2695 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2696 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); | |
2697 | #endif | |
2698 | } | |
2699 | /* Mark special refs that generate read/write def pair. */ | |
2700 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) | |
2701 | || regno == STACK_POINTER_REGNUM) | |
2702 | bitmap_set_bit (tmp, regno); | |
2703 | } | |
48e1416a | 2704 | |
be10bb5a | 2705 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
e1ab7874 | 2706 | { |
e1ab7874 | 2707 | unsigned int regno = DF_REF_REGNO (use); |
2708 | ||
2709 | /* When these refs are met for the first time, skip them, as | |
2710 | these uses are just counterparts of some defs. */ | |
2711 | if (bitmap_bit_p (tmp, regno)) | |
2712 | bitmap_clear_bit (tmp, regno); | |
2713 | else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) | |
2714 | { | |
2715 | SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); | |
2716 | ||
2717 | #ifdef STACK_REGS | |
48e1416a | 2718 | /* For stack registers, treat reads from them as reads from |
e1ab7874 | 2719 | the first one to be consistent with sched-deps.c. */ |
2720 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) | |
2721 | SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); | |
2722 | #endif | |
2723 | } | |
2724 | } | |
2725 | ||
acf58115 | 2726 | /* Also get implicit reg clobbers from sched-deps. */ |
2727 | setup_id_implicit_regs (id, insn); | |
2728 | ||
e1ab7874 | 2729 | return_regset_to_pool (tmp); |
2730 | } | |
2731 | ||
2732 | /* Initialize instruction data for INSN in ID using DF's data. */ | |
2733 | static void | |
2734 | init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) | |
2735 | { | |
2736 | gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); | |
2737 | ||
2738 | setup_id_for_insn (id, insn, force_unique_p); | |
2739 | setup_id_lhs_rhs (id, insn, force_unique_p); | |
2740 | ||
2741 | if (INSN_NOP_P (insn)) | |
2742 | return; | |
2743 | ||
2744 | maybe_downgrade_id_to_use (id, insn); | |
2745 | setup_id_reg_sets (id, insn); | |
2746 | } | |
2747 | ||
2748 | /* Initialize instruction data for INSN in ID. */ | |
2749 | static void | |
2750 | deps_init_id (idata_t id, insn_t insn, bool force_unique_p) | |
2751 | { | |
2e966e2a | 2752 | class deps_desc _dc, *dc = &_dc; |
e1ab7874 | 2753 | |
2754 | deps_init_id_data.where = DEPS_IN_NOWHERE; | |
2755 | deps_init_id_data.id = id; | |
2756 | deps_init_id_data.force_unique_p = force_unique_p; | |
2757 | deps_init_id_data.force_use_p = false; | |
2758 | ||
d9ab2038 | 2759 | init_deps (dc, false); |
e1ab7874 | 2760 | memcpy (&deps_init_id_sched_deps_info, |
2761 | &const_deps_init_id_sched_deps_info, | |
2762 | sizeof (deps_init_id_sched_deps_info)); | |
e1ab7874 | 2763 | if (spec_info != NULL) |
2764 | deps_init_id_sched_deps_info.generate_spec_deps = 1; | |
e1ab7874 | 2765 | sched_deps_info = &deps_init_id_sched_deps_info; |
2766 | ||
2f3c9801 | 2767 | deps_analyze_insn (dc, insn); |
acf58115 | 2768 | /* Implicit reg clobbers are received from sched-deps separately. */ |
2769 | setup_id_implicit_regs (id, insn); | |
e1ab7874 | 2770 | |
2771 | free_deps (dc); | |
e1ab7874 | 2772 | deps_init_id_data.id = NULL; |
2773 | } | |
2774 | ||
2775 | \f | |
52d7e28c | 2776 | struct sched_scan_info_def |
2777 | { | |
2778 | /* This hook notifies the scheduler frontend to extend its internal per basic | |
2779 | block data structures. This hook should be called once before a series of | |
2780 | calls to bb_init (). */ | |
2781 | void (*extend_bb) (void); | |
2782 | ||
2783 | /* This hook makes the scheduler frontend initialize its internal data | |
2784 | structures for the passed basic block. */ | |
2785 | void (*init_bb) (basic_block); | |
2786 | ||
2787 | /* This hook notifies the scheduler frontend to extend its internal per insn data | |
2788 | structures. This hook should be called once before a series of calls to | |
2789 | insn_init (). */ | |
2790 | void (*extend_insn) (void); | |
2791 | ||
2792 | /* This hook makes the scheduler frontend initialize its internal data | |
2793 | structures for the passed insn. */ | |
2f3c9801 | 2794 | void (*init_insn) (insn_t); |
52d7e28c | 2795 | }; |
2796 | ||
2797 | /* A driver function to add a set of basic blocks (BBS) to the | |
2798 | scheduling region. */ | |
2799 | static void | |
2800 | sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) | |
2801 | { | |
2802 | unsigned i; | |
2803 | basic_block bb; | |
2804 | ||
2805 | if (ssi->extend_bb) | |
2806 | ssi->extend_bb (); | |
2807 | ||
2808 | if (ssi->init_bb) | |
f1f41a6c | 2809 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2810 | ssi->init_bb (bb); |
2811 | ||
2812 | if (ssi->extend_insn) | |
2813 | ssi->extend_insn (); | |
2814 | ||
2815 | if (ssi->init_insn) | |
f1f41a6c | 2816 | FOR_EACH_VEC_ELT (bbs, i, bb) |
52d7e28c | 2817 | { |
2f3c9801 | 2818 | rtx_insn *insn; |
52d7e28c | 2819 | |
2820 | FOR_BB_INSNS (bb, insn) | |
2821 | ssi->init_insn (insn); | |
2822 | } | |
2823 | } | |
e1ab7874 | 2824 | |
2825 | /* Implement hooks for collecting fundamental insn properties such as whether | |
2826 | the insn is an ASM or is within a SCHED_GROUP. */ | |
2827 | ||
2828 | /* True when the "one-time init" data for INSN has already been initialized. */ | |
2829 | static bool | |
2830 | first_time_insn_init (insn_t insn) | |
2831 | { | |
2832 | return INSN_LIVE (insn) == NULL; | |
2833 | } | |
2834 | ||
2835 | /* Hash an entry in a transformed_insns hashtable. */ | |
2836 | static hashval_t | |
2837 | hash_transformed_insns (const void *p) | |
2838 | { | |
2839 | return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2840 | } | |
2841 | ||
2842 | /* Compare the entries in a transformed_insns hashtable. */ | |
2843 | static int | |
2844 | eq_transformed_insns (const void *p, const void *q) | |
2845 | { | |
04d073df | 2846 | rtx_insn *i1 = |
2847 | VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); | |
2848 | rtx_insn *i2 = | |
2849 | VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); | |
e1ab7874 | 2850 | |
2851 | if (INSN_UID (i1) == INSN_UID (i2)) | |
2852 | return 1; | |
2853 | return rtx_equal_p (PATTERN (i1), PATTERN (i2)); | |
2854 | } | |
2855 | ||
2856 | /* Free an entry in a transformed_insns hashtable. */ | |
2857 | static void | |
2858 | free_transformed_insns (void *p) | |
2859 | { | |
2860 | struct transformed_insns *pti = (struct transformed_insns *) p; | |
2861 | ||
2862 | vinsn_detach (pti->vinsn_old); | |
2863 | vinsn_detach (pti->vinsn_new); | |
2864 | free (pti); | |
2865 | } | |
2866 | ||
48e1416a | 2867 | /* Init the s_i_d data for INSN which should be inited just once, when |
e1ab7874 | 2868 | we first see the insn. */ |
2869 | static void | |
2870 | init_first_time_insn_data (insn_t insn) | |
2871 | { | |
2872 | /* This should not be set if this is the first time we init data for | |
2873 | insn. */ | |
2874 | gcc_assert (first_time_insn_init (insn)); | |
48e1416a | 2875 | |
e1ab7874 | 2876 | /* These are needed for nops too. */ |
2877 | INSN_LIVE (insn) = get_regset_from_pool (); | |
2878 | INSN_LIVE_VALID_P (insn) = false; | |
d9ab2038 | 2879 | |
e1ab7874 | 2880 | if (!INSN_NOP_P (insn)) |
2881 | { | |
2882 | INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); | |
2883 | INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); | |
48e1416a | 2884 | INSN_TRANSFORMED_INSNS (insn) |
e1ab7874 | 2885 | = htab_create (16, hash_transformed_insns, |
2886 | eq_transformed_insns, free_transformed_insns); | |
d9ab2038 | 2887 | init_deps (&INSN_DEPS_CONTEXT (insn), true); |
e1ab7874 | 2888 | } |
2889 | } | |
2890 | ||
48e1416a | 2891 | /* Free almost all above data for INSN that is scheduled already. |
d9ab2038 | 2892 | Used for extra-large basic blocks. */ |
2893 | void | |
2894 | free_data_for_scheduled_insn (insn_t insn) | |
e1ab7874 | 2895 | { |
2896 | gcc_assert (! first_time_insn_init (insn)); | |
48e1416a | 2897 | |
d9ab2038 | 2898 | if (! INSN_ANALYZED_DEPS (insn)) |
2899 | return; | |
48e1416a | 2900 | |
e1ab7874 | 2901 | BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); |
2902 | BITMAP_FREE (INSN_FOUND_DEPS (insn)); | |
2903 | htab_delete (INSN_TRANSFORMED_INSNS (insn)); | |
48e1416a | 2904 | |
e1ab7874 | 2905 | /* This is allocated only for bookkeeping insns. */ |
2906 | if (INSN_ORIGINATORS (insn)) | |
2907 | BITMAP_FREE (INSN_ORIGINATORS (insn)); | |
2908 | free_deps (&INSN_DEPS_CONTEXT (insn)); | |
d9ab2038 | 2909 | |
2910 | INSN_ANALYZED_DEPS (insn) = NULL; | |
2911 | ||
48e1416a | 2912 | /* Clear the readonly flag so we would ICE when trying to recalculate |
d9ab2038 | 2913 | the deps context (as we believe that it should not happen). */ |
2914 | (&INSN_DEPS_CONTEXT (insn))->readonly = 0; | |
2915 | } | |
2916 | ||
2917 | /* Free the same data as above for INSN. */ | |
2918 | static void | |
2919 | free_first_time_insn_data (insn_t insn) | |
2920 | { | |
2921 | gcc_assert (! first_time_insn_init (insn)); | |
2922 | ||
2923 | free_data_for_scheduled_insn (insn); | |
2924 | return_regset_to_pool (INSN_LIVE (insn)); | |
2925 | INSN_LIVE (insn) = NULL; | |
2926 | INSN_LIVE_VALID_P (insn) = false; | |
e1ab7874 | 2927 | } |
2928 | ||
2929 | /* Initialize region-scope data structures for basic blocks. */ | |
2930 | static void | |
2931 | init_global_and_expr_for_bb (basic_block bb) | |
2932 | { | |
2933 | if (sel_bb_empty_p (bb)) | |
2934 | return; | |
2935 | ||
2936 | invalidate_av_set (bb); | |
2937 | } | |
2938 | ||
2939 | /* Data for global dependency analysis (to initialize CANT_MOVE and | |
2940 | SCHED_GROUP_P). */ | |
2941 | static struct | |
2942 | { | |
2943 | /* Previous insn. */ | |
2944 | insn_t prev_insn; | |
2945 | } init_global_data; | |
2946 | ||
2947 | /* Determine if INSN is in the sched_group, is an asm or should not be | |
2948 | cloned. After that initialize its expr. */ | |
2949 | static void | |
2950 | init_global_and_expr_for_insn (insn_t insn) | |
2951 | { | |
2952 | if (LABEL_P (insn)) | |
2953 | return; | |
2954 | ||
2955 | if (NOTE_INSN_BASIC_BLOCK_P (insn)) | |
2956 | { | |
2f3c9801 | 2957 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2958 | return; |
2959 | } | |
2960 | ||
2961 | gcc_assert (INSN_P (insn)); | |
2962 | ||
2963 | if (SCHED_GROUP_P (insn)) | |
2964 | /* Setup a sched_group. */ | |
2965 | { | |
2966 | insn_t prev_insn = init_global_data.prev_insn; | |
2967 | ||
2968 | if (prev_insn) | |
2969 | INSN_SCHED_NEXT (prev_insn) = insn; | |
2970 | ||
2971 | init_global_data.prev_insn = insn; | |
2972 | } | |
2973 | else | |
2f3c9801 | 2974 | init_global_data.prev_insn = NULL; |
e1ab7874 | 2975 | |
2976 | if (GET_CODE (PATTERN (insn)) == ASM_INPUT | |
2977 | || asm_noperands (PATTERN (insn)) >= 0) | |
2978 | /* Mark INSN as an asm. */ | |
2979 | INSN_ASM_P (insn) = true; | |
2980 | ||
2981 | { | |
2982 | bool force_unique_p; | |
2983 | ds_t spec_done_ds; | |
2984 | ||
982b0787 | 2985 | /* Certain instructions cannot be cloned, and frame related insns and |
2986 | the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of | |
2987 | their block. */ | |
2988 | if (prologue_epilogue_contains (insn)) | |
2989 | { | |
2990 | if (RTX_FRAME_RELATED_P (insn)) | |
2991 | CANT_MOVE (insn) = 1; | |
2992 | else | |
2993 | { | |
2994 | rtx note; | |
2995 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
2996 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE | |
2997 | && ((enum insn_note) INTVAL (XEXP (note, 0)) | |
2998 | == NOTE_INSN_EPILOGUE_BEG)) | |
2999 | { | |
3000 | CANT_MOVE (insn) = 1; | |
3001 | break; | |
3002 | } | |
3003 | } | |
3004 | force_unique_p = true; | |
3005 | } | |
e1ab7874 | 3006 | else |
982b0787 | 3007 | if (CANT_MOVE (insn) |
3008 | || INSN_ASM_P (insn) | |
3009 | || SCHED_GROUP_P (insn) | |
a8d6ade3 | 3010 | || CALL_P (insn) |
982b0787 | 3011 | /* Exception handling insns are always unique. */ |
3012 | || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) | |
3013 | /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */ | |
13434dcb | 3014 | || control_flow_insn_p (insn) |
3015 | || volatile_insn_p (PATTERN (insn)) | |
3016 | || (targetm.cannot_copy_insn_p | |
3017 | && targetm.cannot_copy_insn_p (insn))) | |
982b0787 | 3018 | force_unique_p = true; |
3019 | else | |
3020 | force_unique_p = false; | |
e1ab7874 | 3021 | |
3022 | if (targetm.sched.get_insn_spec_ds) | |
3023 | { | |
3024 | spec_done_ds = targetm.sched.get_insn_spec_ds (insn); | |
3025 | spec_done_ds = ds_get_max_dep_weak (spec_done_ds); | |
3026 | } | |
3027 | else | |
3028 | spec_done_ds = 0; | |
3029 | ||
3030 | /* Initialize INSN's expr. */ | |
3031 | init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, | |
3032 | REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), | |
1e094109 | 3033 | spec_done_ds, 0, 0, vNULL, true, |
f1f41a6c | 3034 | false, false, false, CANT_MOVE (insn)); |
e1ab7874 | 3035 | } |
3036 | ||
3037 | init_first_time_insn_data (insn); | |
3038 | } | |
3039 | ||
3040 | /* Scan the region and initialize instruction data for basic blocks BBS. */ | |
3041 | void | |
3042 | sel_init_global_and_expr (bb_vec_t bbs) | |
3043 | { | |
3044 | /* ??? It would be nice to implement push / pop scheme for sched_infos. */ | |
3045 | const struct sched_scan_info_def ssi = | |
3046 | { | |
3047 | NULL, /* extend_bb */ | |
3048 | init_global_and_expr_for_bb, /* init_bb */ | |
3049 | extend_insn_data, /* extend_insn */ | |
3050 | init_global_and_expr_for_insn /* init_insn */ | |
3051 | }; | |
48e1416a | 3052 | |
52d7e28c | 3053 | sched_scan (&ssi, bbs); |
e1ab7874 | 3054 | } |
3055 | ||
3056 | /* Finalize region-scope data structures for basic blocks. */ | |
3057 | static void | |
3058 | finish_global_and_expr_for_bb (basic_block bb) | |
3059 | { | |
3060 | av_set_clear (&BB_AV_SET (bb)); | |
3061 | BB_AV_LEVEL (bb) = 0; | |
3062 | } | |
3063 | ||
3064 | /* Finalize INSN's data. */ | |
3065 | static void | |
3066 | finish_global_and_expr_insn (insn_t insn) | |
3067 | { | |
3068 | if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) | |
3069 | return; | |
3070 | ||
3071 | gcc_assert (INSN_P (insn)); | |
3072 | ||
3073 | if (INSN_LUID (insn) > 0) | |
3074 | { | |
3075 | free_first_time_insn_data (insn); | |
3076 | INSN_WS_LEVEL (insn) = 0; | |
3077 | CANT_MOVE (insn) = 0; | |
48e1416a | 3078 | |
3079 | /* We can no longer assert this, as vinsns of this insn could be | |
3080 | easily live in other insns' caches. This should be changed to | |
e1ab7874 | 3081 | a counter-like approach among all vinsns. */ |
3082 | gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); | |
3083 | clear_expr (INSN_EXPR (insn)); | |
3084 | } | |
3085 | } | |
3086 | ||
3087 | /* Finalize per instruction data for the whole region. */ | |
3088 | void | |
3089 | sel_finish_global_and_expr (void) | |
3090 | { | |
3091 | { | |
3092 | bb_vec_t bbs; | |
3093 | int i; | |
3094 | ||
f1f41a6c | 3095 | bbs.create (current_nr_blocks); |
e1ab7874 | 3096 | |
3097 | for (i = 0; i < current_nr_blocks; i++) | |
f5a6b05f | 3098 | bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
e1ab7874 | 3099 | |
3100 | /* Clear AV_SETs and INSN_EXPRs. */ | |
3101 | { | |
3102 | const struct sched_scan_info_def ssi = | |
3103 | { | |
3104 | NULL, /* extend_bb */ | |
3105 | finish_global_and_expr_for_bb, /* init_bb */ | |
3106 | NULL, /* extend_insn */ | |
3107 | finish_global_and_expr_insn /* init_insn */ | |
3108 | }; | |
3109 | ||
52d7e28c | 3110 | sched_scan (&ssi, bbs); |
e1ab7874 | 3111 | } |
3112 | ||
f1f41a6c | 3113 | bbs.release (); |
e1ab7874 | 3114 | } |
3115 | ||
3116 | finish_insns (); | |
3117 | } | |
3118 | \f | |
3119 | ||
48e1416a | 3120 | /* In the below hooks, we merely calculate whether or not a dependence |
3121 | exists, and in what part of the insn. However, we will need more data | |
e1ab7874 | 3122 | when we start caching dependence requests. */ |
3123 | ||
3124 | /* Container to hold information for dependency analysis. */ | |
3125 | static struct | |
3126 | { | |
3127 | deps_t dc; | |
3128 | ||
3129 | /* A variable to track which part of rtx we are scanning in | |
3130 | sched-deps.c: sched_analyze_insn (). */ | |
3131 | deps_where_t where; | |
3132 | ||
3133 | /* Current producer. */ | |
3134 | insn_t pro; | |
3135 | ||
3136 | /* Current consumer. */ | |
3137 | vinsn_t con; | |
3138 | ||
3139 | /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence. | |
3140 | X is from { INSN, LHS, RHS }. */ | |
3141 | ds_t has_dep_p[DEPS_IN_NOWHERE]; | |
3142 | } has_dependence_data; | |
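/* Illustrative sketch (the local names below are hypothetical): after
   deps_analyze_insn has run with the hooks below, the answer is read per
   part of the consumer insn, e.g.

     ds_t in_lhs = has_dependence_data.has_dep_p[DEPS_IN_LHS];
     ds_t in_rhs = has_dependence_data.has_dep_p[DEPS_IN_RHS];

     if (in_rhs & DEP_TRUE)
       ;  /* The consumer's RHS reads a value the producer writes.  */

   A zero entry means no dependence was recorded for that part.  */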
3143 | ||
3144 | /* Start analyzing dependencies of INSN. */ | |
3145 | static void | |
3146 | has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) | |
3147 | { | |
3148 | gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); | |
3149 | ||
3150 | has_dependence_data.where = DEPS_IN_INSN; | |
3151 | } | |
3152 | ||
3153 | /* Finish analyzing dependencies of an insn. */ | |
3154 | static void | |
3155 | has_dependence_finish_insn (void) | |
3156 | { | |
3157 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3158 | ||
3159 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3160 | } | |
3161 | ||
3162 | /* Start analyzing dependencies of LHS. */ | |
3163 | static void | |
3164 | has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) | |
3165 | { | |
3166 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3167 | ||
3168 | if (VINSN_LHS (has_dependence_data.con) != NULL) | |
3169 | has_dependence_data.where = DEPS_IN_LHS; | |
3170 | } | |
3171 | ||
3172 | /* Finish analyzing dependencies of an lhs. */ | |
3173 | static void | |
3174 | has_dependence_finish_lhs (void) | |
3175 | { | |
3176 | has_dependence_data.where = DEPS_IN_INSN; | |
3177 | } | |
3178 | ||
3179 | /* Start analyzing dependencies of RHS. */ | |
3180 | static void | |
3181 | has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) | |
3182 | { | |
3183 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3184 | ||
3185 | if (VINSN_RHS (has_dependence_data.con) != NULL) | |
3186 | has_dependence_data.where = DEPS_IN_RHS; | |
3187 | } | |
3188 | ||
3189 | /* Finish analyzing dependencies of an rhs. */ | |
3190 | static void | |
3191 | has_dependence_finish_rhs (void) | |
3192 | { | |
3193 | gcc_assert (has_dependence_data.where == DEPS_IN_RHS | |
3194 | || has_dependence_data.where == DEPS_IN_INSN); | |
3195 | ||
3196 | has_dependence_data.where = DEPS_IN_INSN; | |
3197 | } | |
3198 | ||
3199 | /* Note a set of REGNO. */ | |
3200 | static void | |
3201 | has_dependence_note_reg_set (int regno) | |
3202 | { | |
3203 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3204 | ||
3205 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3206 | VINSN_INSN_RTX | |
3207 | (has_dependence_data.con))) | |
3208 | { | |
3209 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3210 | ||
3211 | if (reg_last->sets != NULL | |
3212 | || reg_last->clobbers != NULL) | |
3213 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
3214 | ||
a9bfd373 | 3215 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3216 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3217 | } | |
3218 | } | |
3219 | ||
3220 | /* Note a clobber of REGNO. */ | |
3221 | static void | |
3222 | has_dependence_note_reg_clobber (int regno) | |
3223 | { | |
3224 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3225 | ||
3226 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3227 | VINSN_INSN_RTX | |
3228 | (has_dependence_data.con))) | |
3229 | { | |
3230 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3231 | ||
3232 | if (reg_last->sets) | |
3233 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; | |
48e1416a | 3234 | |
a9bfd373 | 3235 | if (reg_last->uses || reg_last->implicit_sets) |
e1ab7874 | 3236 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3237 | } | |
3238 | } | |
3239 | ||
3240 | /* Note a use of REGNO. */ | |
3241 | static void | |
3242 | has_dependence_note_reg_use (int regno) | |
3243 | { | |
3244 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; | |
3245 | ||
3246 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3247 | VINSN_INSN_RTX | |
3248 | (has_dependence_data.con))) | |
3249 | { | |
3250 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3251 | ||
3252 | if (reg_last->sets) | |
3253 | *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; | |
3254 | ||
a9bfd373 | 3255 | if (reg_last->clobbers || reg_last->implicit_sets) |
e1ab7874 | 3256 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3257 | ||
b0691607 | 3258 | /* Merge BE_IN_SPEC bits into *DSP when the dependency producer |
3259 | is actually a check insn. We need to do this for any register | |
3260 | read-read dependency with the check unless we track properly | |
3261 | all registers written by BE_IN_SPEC-speculated insns, as | |
3262 | we don't have explicit dependence lists. See PR 53975. */ | |
e1ab7874 | 3263 | if (reg_last->uses) |
3264 | { | |
3265 | ds_t pro_spec_checked_ds; | |
3266 | ||
3267 | pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); | |
3268 | pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); | |
3269 | ||
b0691607 | 3270 | if (pro_spec_checked_ds != 0) |
e1ab7874 | 3271 | *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, |
3272 | NULL_RTX, NULL_RTX); | |
3273 | } | |
3274 | } | |
3275 | } | |
3276 | ||
3277 | /* Note a memory dependence. */ | |
3278 | static void | |
3279 | has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, | |
3280 | rtx pending_mem ATTRIBUTE_UNUSED, | |
3281 | insn_t pending_insn ATTRIBUTE_UNUSED, | |
3282 | ds_t ds ATTRIBUTE_UNUSED) | |
3283 | { | |
3284 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, | |
3285 | VINSN_INSN_RTX (has_dependence_data.con))) | |
3286 | { | |
3287 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3288 | ||
3289 | *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); | |
3290 | } | |
3291 | } | |
3292 | ||
3293 | /* Note a dependence. */ | |
3294 | static void | |
8ffee455 | 3295 | has_dependence_note_dep (insn_t pro, ds_t ds ATTRIBUTE_UNUSED) |
3296 | { | |
3297 | insn_t real_pro = has_dependence_data.pro; | |
3298 | insn_t real_con = VINSN_INSN_RTX (has_dependence_data.con); | |
3299 | ||
3300 | /* We do not allow debug insns to move through others unless they | |
3301 | are at the start of bb. This movement may create bookkeeping copies | |
3302 | that later would not be able to move up, violating the invariant | |
3303 | that a bookkeeping copy should be movable as the original insn. | |
3304 | Detect that here and allow that movement if we allowed it before | |
3305 | in the first place. */ | |
0aa56820 | 3306 | if (DEBUG_INSN_P (real_con) && !DEBUG_INSN_P (real_pro) |
8ffee455 | 3307 | && INSN_UID (NEXT_INSN (pro)) == INSN_UID (real_con)) |
3308 | return; | |
3309 | ||
3310 | if (!sched_insns_conditions_mutex_p (real_pro, real_con)) | |
e1ab7874 | 3311 | { |
3312 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; | |
3313 | ||
3314 | *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); | |
3315 | } | |
3316 | } | |
3317 | ||
3318 | /* Mark the insn as having a hard dependence that prevents speculation. */ | |
3319 | void | |
3320 | sel_mark_hard_insn (rtx insn) | |
3321 | { | |
3322 | int i; | |
3323 | ||
3324 | /* Only work when we're in has_dependence_p mode. | |
3325 | ??? This is a hack, this should actually be a hook. */ | |
3326 | if (!has_dependence_data.dc || !has_dependence_data.pro) | |
3327 | return; | |
3328 | ||
3329 | gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); | |
3330 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); | |
3331 | ||
3332 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3333 | has_dependence_data.has_dep_p[i] &= ~SPECULATIVE; | |
3334 | } | |
3335 | ||
3336 | /* This structure holds the hooks for the dependency analysis used when | |
3337 | actually processing dependencies in the scheduler. */ | |
3338 | static struct sched_deps_info_def has_dependence_sched_deps_info; | |
3339 | ||
3340 | /* This initializes most of the fields of the above structure. */ | |
3341 | static const struct sched_deps_info_def const_has_dependence_sched_deps_info = | |
3342 | { | |
3343 | NULL, | |
3344 | ||
3345 | has_dependence_start_insn, | |
3346 | has_dependence_finish_insn, | |
3347 | has_dependence_start_lhs, | |
3348 | has_dependence_finish_lhs, | |
3349 | has_dependence_start_rhs, | |
3350 | has_dependence_finish_rhs, | |
3351 | has_dependence_note_reg_set, | |
3352 | has_dependence_note_reg_clobber, | |
3353 | has_dependence_note_reg_use, | |
3354 | has_dependence_note_mem_dep, | |
3355 | has_dependence_note_dep, | |
3356 | ||
3357 | 0, /* use_cselib */ | |
3358 | 0, /* use_deps_list */ | |
3359 | 0 /* generate_spec_deps */ | |
3360 | }; | |
3361 | ||
3362 | /* Initialize has_dependence_sched_deps_info with extra spec field. */ | |
3363 | static void | |
3364 | setup_has_dependence_sched_deps_info (void) | |
3365 | { | |
3366 | memcpy (&has_dependence_sched_deps_info, | |
3367 | &const_has_dependence_sched_deps_info, | |
3368 | sizeof (has_dependence_sched_deps_info)); | |
3369 | ||
3370 | if (spec_info != NULL) | |
3371 | has_dependence_sched_deps_info.generate_spec_deps = 1; | |
3372 | ||
3373 | sched_deps_info = &has_dependence_sched_deps_info; | |
3374 | } | |
3375 | ||
3376 | /* Remove all dependences found and recorded in has_dependence_data array. */ | |
3377 | void | |
3378 | sel_clear_has_dependence (void) | |
3379 | { | |
3380 | int i; | |
3381 | ||
3382 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3383 | has_dependence_data.has_dep_p[i] = 0; | |
3384 | } | |
3385 | ||
3386 | /* Return nonzero if EXPR is dependent upon PRED. Return the pointer | |
3387 | to the dependence information array in HAS_DEP_PP. */ | |
3388 | ds_t | |
3389 | has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp) | |
3390 | { | |
3391 | int i; | |
3392 | ds_t ds; | |
2e966e2a | 3393 | class deps_desc *dc; |
e1ab7874 | 3394 | |
3395 | if (INSN_SIMPLEJUMP_P (pred)) | |
3396 | /* Unconditional jump is just a transfer of control flow. | |
3397 | Ignore it. */ | |
3398 | return false; | |
3399 | ||
3400 | dc = &INSN_DEPS_CONTEXT (pred); | |
d9ab2038 | 3401 | |
3402 | /* We init this field lazily. */ | |
3403 | if (dc->reg_last == NULL) | |
3404 | init_deps_reg_last (dc); | |
48e1416a | 3405 | |
e1ab7874 | 3406 | if (!dc->readonly) |
3407 | { | |
3408 | has_dependence_data.pro = NULL; | |
3409 | /* Initialize empty dep context with information about PRED. */ | |
3410 | advance_deps_context (dc, pred); | |
3411 | dc->readonly = 1; | |
3412 | } | |
3413 | ||
3414 | has_dependence_data.where = DEPS_IN_NOWHERE; | |
3415 | has_dependence_data.pro = pred; | |
3416 | has_dependence_data.con = EXPR_VINSN (expr); | |
3417 | has_dependence_data.dc = dc; | |
3418 | ||
3419 | sel_clear_has_dependence (); | |
3420 | ||
3421 | /* Now catch all dependencies that would be generated between PRED and | |
3422 | INSN. */ | |
3423 | setup_has_dependence_sched_deps_info (); | |
3424 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3425 | has_dependence_data.dc = NULL; | |
3426 | ||
3427 | /* When a barrier was found, set DEPS_IN_INSN bits. */ | |
3428 | if (dc->last_reg_pending_barrier == TRUE_BARRIER) | |
3429 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE; | |
3430 | else if (dc->last_reg_pending_barrier == MOVE_BARRIER) | |
3431 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
3432 | ||
3433 | /* Do not allow stores to memory to move through checks. Currently | |
3434 | we don't move this to sched-deps.c as the check doesn't have | |
48e1416a | 3435 | obvious places to which this dependence can be attached. |
e1ab7874 | 3436 | FIXME: this should go to a hook. */ |
3437 | if (EXPR_LHS (expr) | |
3438 | && MEM_P (EXPR_LHS (expr)) | |
3439 | && sel_insn_is_speculation_check (pred)) | |
3440 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; | |
48e1416a | 3441 | |
e1ab7874 | 3442 | *has_dep_pp = has_dependence_data.has_dep_p; |
3443 | ds = 0; | |
3444 | for (i = 0; i < DEPS_IN_NOWHERE; i++) | |
3445 | ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i], | |
3446 | NULL_RTX, NULL_RTX); | |
3447 | ||
3448 | return ds; | |
3449 | } | |
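/* Illustrative sketch of a caller (hypothetical locals; the real callers
   live in sel-sched.c):

     ds_t *has_dep_p;
     ds_t ds = has_dependence_p (expr, pred, &has_dep_p);

     if (ds == 0)
       ;  /* No dependence at all -- EXPR can be moved up through PRED.  */
     else
       ;  /* Inspect HAS_DEP_P[DEPS_IN_INSN/LHS/RHS] to decide whether the
             move is impossible or merely needs a transformation such as
             renaming or speculation.  */
*/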
3450 | \f | |
3451 | ||
48e1416a | 3452 | /* Dependence hooks implementation that checks dependence latency constraints |
3453 | on the insns being scheduled. The entry point for these routines is | |
3454 | tick_check_p predicate. */ | |
e1ab7874 | 3455 | |
3456 | static struct | |
3457 | { | |
3458 | /* An expr we are currently checking. */ | |
3459 | expr_t expr; | |
3460 | ||
3461 | /* A minimal cycle for its scheduling. */ | |
3462 | int cycle; | |
3463 | ||
3464 | /* Whether we have seen a true dependence while checking. */ | |
3465 | bool seen_true_dep_p; | |
3466 | } tick_check_data; | |
3467 | ||
3468 | /* Update minimal scheduling cycle for tick_check_insn given that it depends | |
3469 | on PRO with status DS and weight DW. */ | |
3470 | static void | |
3471 | tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw) | |
3472 | { | |
3473 | expr_t con_expr = tick_check_data.expr; | |
3474 | insn_t con_insn = EXPR_INSN_RTX (con_expr); | |
3475 | ||
3476 | if (con_insn != pro_insn) | |
3477 | { | |
3478 | enum reg_note dt; | |
3479 | int tick; | |
3480 | ||
3481 | if (/* PROducer was removed from above due to pipelining. */ | |
3482 | !INSN_IN_STREAM_P (pro_insn) | |
3483 | /* Or PROducer was originally on the next iteration regarding the | |
3484 | CONsumer. */ | |
3485 | || (INSN_SCHED_TIMES (pro_insn) | |
3486 | - EXPR_SCHED_TIMES (con_expr)) > 1) | |
3487 | /* Don't count this dependence. */ | |
3488 | return; | |
3489 | ||
3490 | dt = ds_to_dt (ds); | |
3491 | if (dt == REG_DEP_TRUE) | |
3492 | tick_check_data.seen_true_dep_p = true; | |
3493 | ||
3494 | gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0); | |
3495 | ||
3496 | { | |
3497 | dep_def _dep, *dep = &_dep; | |
3498 | ||
3499 | init_dep (dep, pro_insn, con_insn, dt); | |
3500 | ||
3501 | tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw); | |
3502 | } | |
3503 | ||
3504 | /* When there are several kinds of dependencies between pro and con, | |
3505 | only REG_DEP_TRUE should be taken into account. */ | |
3506 | if (tick > tick_check_data.cycle | |
3507 | && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p)) | |
3508 | tick_check_data.cycle = tick; | |
3509 | } | |
3510 | } | |
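/* Worked example for the function above (made-up numbers): if PRO_INSN was
   scheduled on cycle 2 and the dependence latency computed by dep_cost_1 is
   3, then TICK = 2 + 3 = 5, i.e. the consumer cannot issue before cycle 5.
   With FENCE_CYCLE == 4, tick_check_p below reports 5 - 4 = 1 stall cycle.  */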
3511 | ||
3512 | /* An implementation of note_dep hook. */ | |
3513 | static void | |
3514 | tick_check_note_dep (insn_t pro, ds_t ds) | |
3515 | { | |
3516 | tick_check_dep_with_dw (pro, ds, 0); | |
3517 | } | |
3518 | ||
3519 | /* An implementation of note_mem_dep hook. */ | |
3520 | static void | |
3521 | tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) | |
3522 | { | |
3523 | dw_t dw; | |
3524 | ||
3525 | dw = (ds_to_dt (ds) == REG_DEP_TRUE | |
3526 | ? estimate_dep_weak (mem1, mem2) | |
3527 | : 0); | |
3528 | ||
3529 | tick_check_dep_with_dw (pro, ds, dw); | |
3530 | } | |
3531 | ||
3532 | /* This structure contains hooks for dependence analysis used when determining | |
3533 | whether an insn is ready for scheduling. */ | |
3534 | static struct sched_deps_info_def tick_check_sched_deps_info = | |
3535 | { | |
3536 | NULL, | |
3537 | ||
3538 | NULL, | |
3539 | NULL, | |
3540 | NULL, | |
3541 | NULL, | |
3542 | NULL, | |
3543 | NULL, | |
3544 | haifa_note_reg_set, | |
3545 | haifa_note_reg_clobber, | |
3546 | haifa_note_reg_use, | |
3547 | tick_check_note_mem_dep, | |
3548 | tick_check_note_dep, | |
3549 | ||
3550 | 0, 0, 0 | |
3551 | }; | |
3552 | ||
3553 | /* Estimate number of cycles from the current cycle of FENCE until EXPR can be | |
3554 | scheduled. Return 0 if all data from producers in DC is ready. */ | |
3555 | int | |
3556 | tick_check_p (expr_t expr, deps_t dc, fence_t fence) | |
3557 | { | |
3558 | int cycles_left; | |
3559 | /* Initialize variables. */ | |
3560 | tick_check_data.expr = expr; | |
3561 | tick_check_data.cycle = 0; | |
3562 | tick_check_data.seen_true_dep_p = false; | |
3563 | sched_deps_info = &tick_check_sched_deps_info; | |
48e1416a | 3564 | |
e1ab7874 | 3565 | gcc_assert (!dc->readonly); |
3566 | dc->readonly = 1; | |
3567 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); | |
3568 | dc->readonly = 0; | |
3569 | ||
3570 | cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); | |
3571 | ||
3572 | return cycles_left >= 0 ? cycles_left : 0; | |
3573 | } | |
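/* Illustrative sketch of a caller (hypothetical locals):

     deps_t dc = ...;   /* Dependence context accumulated at the fence.  */
     int stall = tick_check_p (expr, dc, fence);

     if (stall == 0)
       ;  /* All producers are ready -- EXPR can issue on the current cycle.  */
     else
       ;  /* EXPR would have to wait STALL more cycles.  */
*/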
3574 | \f | |
3575 | ||
3576 | /* Functions to work with insns. */ | |
3577 | ||
3578 | /* Returns true if LHS of INSN is the same as DEST of an insn | |
3579 | being moved. */ | |
3580 | bool | |
3581 | lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) | |
3582 | { | |
3583 | rtx lhs = INSN_LHS (insn); | |
3584 | ||
3585 | if (lhs == NULL || dest == NULL) | |
3586 | return false; | |
48e1416a | 3587 | |
e1ab7874 | 3588 | return rtx_equal_p (lhs, dest); |
3589 | } | |
3590 | ||
3591 | /* Return s_i_d entry of INSN. Callable from debugger. */ | |
3592 | sel_insn_data_def | |
3593 | insn_sid (insn_t insn) | |
3594 | { | |
3595 | return *SID (insn); | |
3596 | } | |
3597 | ||
3598 | /* True when INSN is a speculative check. We can tell this by looking | |
3599 | at the data structures of the selective scheduler, not by examining | |
3600 | the pattern. */ | |
3601 | bool | |
3602 | sel_insn_is_speculation_check (rtx insn) | |
3603 | { | |
f1f41a6c | 3604 | return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn); |
e1ab7874 | 3605 | } |
3606 | ||
48e1416a | 3607 | /* Extracts machine mode MODE and destination location DST_LOC |
e1ab7874 | 3608 | for given INSN. */ |
3609 | void | |
3754d046 | 3610 | get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode) |
e1ab7874 | 3611 | { |
3612 | rtx pat = PATTERN (insn); | |
3613 | ||
3614 | gcc_assert (dst_loc); | |
3615 | gcc_assert (GET_CODE (pat) == SET); | |
3616 | ||
3617 | *dst_loc = SET_DEST (pat); | |
3618 | ||
3619 | gcc_assert (*dst_loc); | |
3620 | gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); | |
3621 | ||
3622 | if (mode) | |
3623 | *mode = GET_MODE (*dst_loc); | |
3624 | } | |
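/* Illustrative sketch of a caller (hypothetical locals):

     rtx dst;
     machine_mode mode;
     get_dest_and_mode (insn, &dst, &mode);

   INSN is required to be a single SET whose destination is a REG or MEM.  */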
3625 | ||
48e1416a | 3626 | /* Returns true when moving through JUMP will result in bookkeeping |
e1ab7874 | 3627 | creation. */ |
3628 | bool | |
3629 | bookkeeping_can_be_created_if_moved_through_p (insn_t jump) | |
3630 | { | |
3631 | insn_t succ; | |
3632 | succ_iterator si; | |
3633 | ||
3634 | FOR_EACH_SUCC (succ, si, jump) | |
3635 | if (sel_num_cfg_preds_gt_1 (succ)) | |
3636 | return true; | |
3637 | ||
3638 | return false; | |
3639 | } | |
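/* Illustrative example: in the CFG fragment below, moving an insn that is
   available in BB3 up through the JUMP ending BB1 would require a
   bookkeeping copy, because BB3 has another predecessor (BB0) whose path
   does not pass through the new location:

        BB0      BB1 (ends with JUMP)
          \      /  \
           v    v    v
            BB3      BB2

   Here sel_num_cfg_preds_gt_1 holds for the successor BB3, so the function
   above returns true.  */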
3640 | ||
3641 | /* Return 'true' if INSN is the only one in its basic block. */ | |
3642 | static bool | |
3643 | insn_is_the_only_one_in_bb_p (insn_t insn) | |
3644 | { | |
3645 | return sel_bb_head_p (insn) && sel_bb_end_p (insn); | |
3646 | } | |
3647 | ||
48e1416a | 3648 | /* Check that the region we're scheduling still has at most one |
e1ab7874 | 3649 | backedge. */ |
3650 | static void | |
3651 | verify_backedges (void) | |
3652 | { | |
3653 | if (pipelining_p) | |
3654 | { | |
3655 | int i, n = 0; | |
3656 | edge e; | |
3657 | edge_iterator ei; | |
48e1416a | 3658 | |
e1ab7874 | 3659 | for (i = 0; i < current_nr_blocks; i++) |
f5a6b05f | 3660 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs) |
e1ab7874 | 3661 | if (in_current_region_p (e->dest) |
3662 | && BLOCK_TO_BB (e->dest->index) < i) | |
3663 | n++; | |
48e1416a | 3664 | |
e1ab7874 | 3665 | gcc_assert (n <= 1); |
3666 | } | |
3667 | } | |
e1ab7874 | 3668 | \f |
3669 | ||
3670 | /* Functions to work with control flow. */ | |
3671 | ||
93919afc | 3672 | /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks |
3673 | are sorted in topological order (it might have been invalidated by | |
3674 | redirecting an edge). */ | |
3675 | static void | |
3676 | sel_recompute_toporder (void) | |
3677 | { | |
3678 | int i, n, rgn; | |
3679 | int *postorder, n_blocks; | |
3680 | ||
a28770e1 | 3681 | postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun)); |
93919afc | 3682 | n_blocks = post_order_compute (postorder, false, false); |
3683 | ||
3684 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
3685 | for (n = 0, i = n_blocks - 1; i >= 0; i--) | |
3686 | if (CONTAINING_RGN (postorder[i]) == rgn) | |
3687 | { | |
3688 | BLOCK_TO_BB (postorder[i]) = n; | |
3689 | BB_TO_BLOCK (n) = postorder[i]; | |
3690 | n++; | |
3691 | } | |
3692 | ||
3693 | /* Assert that we updated info for all blocks. We may miss some blocks if | |
3694 | this function is called when redirecting an edge made a block | |
3695 | unreachable, but that block is not deleted yet. */ | |
3696 | gcc_assert (n == RGN_NR_BLOCKS (rgn)); | |
3697 | } | |
3698 | ||
e1ab7874 | 3699 | /* Tidy the possibly empty block BB. */ |
81d1ad0f | 3700 | static bool |
6f0e7980 | 3701 | maybe_tidy_empty_bb (basic_block bb) |
e1ab7874 | 3702 | { |
ef4cf572 | 3703 | basic_block succ_bb, pred_bb, note_bb; |
f1f41a6c | 3704 | vec<basic_block> dom_bbs; |
df6266b9 | 3705 | edge e; |
3706 | edge_iterator ei; | |
e1ab7874 | 3707 | bool rescan_p; |
3708 | ||
3709 | /* Keep empty bb only if this block immediately precedes EXIT and | |
61e213e2 | 3710 | has an incoming non-fallthrough edge, or it has no predecessors or | |
3711 | successors. Otherwise remove it. */ | |
9845d120 | 3712 | if (!sel_bb_empty_p (bb) |
48e1416a | 3713 | || (single_succ_p (bb) |
34154e27 | 3714 | && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun) |
48e1416a | 3715 | && (!single_pred_p (bb) |
61e213e2 | 3716 | || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))) |
3717 | || EDGE_COUNT (bb->preds) == 0 | |
3718 | || EDGE_COUNT (bb->succs) == 0) | |
e1ab7874 | 3719 | return false; |
3720 | ||
df6266b9 | 3721 | /* Do not attempt to redirect complex edges. */ |
3722 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3723 | if (e->flags & EDGE_COMPLEX) | |
3724 | return false; | |
a62f9dca | 3725 | else if (e->flags & EDGE_FALLTHRU) |
3726 | { | |
3727 | rtx note; | |
3728 | /* If prev bb ends with asm goto, see if any of the | |
3729 | ASM_OPERANDS_LABELs don't point to the fallthru | |
3730 | label. Do not attempt to redirect it in that case. */ | |
3731 | if (JUMP_P (BB_END (e->src)) | |
3732 | && (note = extract_asm_operands (PATTERN (BB_END (e->src))))) | |
3733 | { | |
3734 | int i, n = ASM_OPERANDS_LABEL_LENGTH (note); | |
3735 | ||
3736 | for (i = 0; i < n; ++i) | |
3737 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb)) | |
3738 | return false; | |
3739 | } | |
3740 | } | |
df6266b9 | 3741 | |
e1ab7874 | 3742 | free_data_sets (bb); |
3743 | ||
3744 | /* Do not delete BB if it has more than one successor. | |
3745 | That can occur when we are moving a jump. */ | |
3746 | if (!single_succ_p (bb)) | |
3747 | { | |
3748 | gcc_assert (can_merge_blocks_p (bb->prev_bb, bb)); | |
3749 | sel_merge_blocks (bb->prev_bb, bb); | |
3750 | return true; | |
3751 | } | |
3752 | ||
3753 | succ_bb = single_succ (bb); | |
3754 | rescan_p = true; | |
3755 | pred_bb = NULL; | |
f1f41a6c | 3756 | dom_bbs.create (0); |
e1ab7874 | 3757 | |
ef4cf572 | 3758 | /* Save a pred/succ from the current region to attach the notes to. */ |
3759 | note_bb = NULL; | |
3760 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3761 | if (in_current_region_p (e->src)) | |
3762 | { | |
3763 | note_bb = e->src; | |
3764 | break; | |
3765 | } | |
3766 | if (note_bb == NULL) | |
3767 | note_bb = succ_bb; | |
3768 | ||
e1ab7874 | 3769 | /* Redirect all non-fallthru edges to the next bb. */ |
3770 | while (rescan_p) | |
3771 | { | |
e1ab7874 | 3772 | rescan_p = false; |
3773 | ||
3774 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3775 | { | |
3776 | pred_bb = e->src; | |
3777 | ||
3778 | if (!(e->flags & EDGE_FALLTHRU)) | |
3779 | { | |
f4d3c071 | 3780 | /* We cannot invalidate computed topological order by moving |
1a5dbaab | 3781 | the edge destination block (E->DEST) along a fallthru edge. | |
3782 | ||
3783 | We will update dominators here only when we'll get | |
3784 | an unreachable block when redirecting, otherwise | |
3785 | sel_redirect_edge_and_branch will take care of it. */ | |
3786 | if (e->dest != bb | |
3787 | && single_pred_p (e->dest)) | |
f1f41a6c | 3788 | dom_bbs.safe_push (e->dest); |
6f0e7980 | 3789 | sel_redirect_edge_and_branch (e, succ_bb); |
e1ab7874 | 3790 | rescan_p = true; |
3791 | break; | |
3792 | } | |
6f0e7980 | 3793 | /* If the edge is fallthru, but PRED_BB ends in a conditional jump |
3794 | to BB (so there is no non-fallthru edge from PRED_BB to BB), we | |
3795 | still have to adjust it. */ | |
3796 | else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) | |
3797 | { | |
3798 | /* If possible, try to remove the unneeded conditional jump. */ | |
3799 | if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 | |
3800 | && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) | |
3801 | { | |
3802 | if (!sel_remove_insn (BB_END (pred_bb), false, false)) | |
3803 | tidy_fallthru_edge (e); | |
3804 | } | |
3805 | else | |
3806 | sel_redirect_edge_and_branch (e, succ_bb); | |
3807 | rescan_p = true; | |
3808 | break; | |
3809 | } | |
e1ab7874 | 3810 | } |
3811 | } | |
3812 | ||
e1ab7874 | 3813 | if (can_merge_blocks_p (bb->prev_bb, bb)) |
3814 | sel_merge_blocks (bb->prev_bb, bb); | |
3815 | else | |
e1ab7874 | 3816 | { |
0424f393 | 3817 | /* This is a block without fallthru predecessor. Just delete it. */ |
ef4cf572 | 3818 | gcc_assert (note_bb); |
3819 | move_bb_info (note_bb, bb); | |
e1ab7874 | 3820 | remove_empty_bb (bb, true); |
3821 | } | |
3822 | ||
f1f41a6c | 3823 | if (!dom_bbs.is_empty ()) |
1a5dbaab | 3824 | { |
f1f41a6c | 3825 | dom_bbs.safe_push (succ_bb); |
1a5dbaab | 3826 | iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); |
f1f41a6c | 3827 | dom_bbs.release (); |
1a5dbaab | 3828 | } |
3829 | ||
e1ab7874 | 3830 | return true; |
3831 | } | |
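/* Illustrative before/after for the function above, with an empty block E
   whose single successor is S:

     before:  pred1 --(jump)--> E --(fallthru)--> S
     after:   pred1 --(jump)---------------------> S

   Jump edges into E are redirected straight to S; E is then merged into its
   fallthru predecessor when possible, or deleted outright otherwise.  */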
3832 | ||
48e1416a | 3833 | /* Tidy the control flow after we have removed original insn from |
e1ab7874 | 3834 | XBB. Return true if we have removed some blocks. When FULL_TIDYING |
3835 | is true, also try to optimize control flow on non-empty blocks. */ | |
3836 | bool | |
3837 | tidy_control_flow (basic_block xbb, bool full_tidying) | |
3838 | { | |
3839 | bool changed = true; | |
9845d120 | 3840 | insn_t first, last; |
48e1416a | 3841 | |
e1ab7874 | 3842 | /* First check whether XBB is empty. */ |
6f0e7980 | 3843 | changed = maybe_tidy_empty_bb (xbb); |
e1ab7874 | 3844 | if (changed || !full_tidying) |
3845 | return changed; | |
48e1416a | 3846 | |
e1ab7874 | 3847 | /* Check if an unnecessary jump is left after removing the insn. */ |
49087fba | 3848 | if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) |
e1ab7874 | 3849 | && INSN_SCHED_TIMES (BB_END (xbb)) == 0 |
3850 | && !IN_CURRENT_FENCE_P (BB_END (xbb))) | |
3851 | { | |
ccf06fde | 3852 | /* We used to call sel_remove_insn here, which can trigger tidy_control_flow | |
3853 | before we fix up the fallthru edge. Correct that ordering by | |
3854 | explicitly doing the latter before the former. */ | |
3855 | clear_expr (INSN_EXPR (BB_END (xbb))); | |
e1ab7874 | 3856 | tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); |
ccf06fde | 3857 | if (tidy_control_flow (xbb, false)) |
3858 | return true; | |
e1ab7874 | 3859 | } |
3860 | ||
9845d120 | 3861 | first = sel_bb_head (xbb); |
3862 | last = sel_bb_end (xbb); | |
3863 | if (MAY_HAVE_DEBUG_INSNS) | |
3864 | { | |
3865 | if (first != last && DEBUG_INSN_P (first)) | |
3866 | do | |
3867 | first = NEXT_INSN (first); | |
3868 | while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); | |
3869 | ||
3870 | if (first != last && DEBUG_INSN_P (last)) | |
3871 | do | |
3872 | last = PREV_INSN (last); | |
3873 | while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); | |
3874 | } | |
e1ab7874 | 3875 | /* Check if there is an unnecessary jump in the previous basic block, leading | |
48e1416a | 3876 | to the next basic block that is left after removing INSN from the stream. | |
3877 | If so, remove that jump and redirect the edge to the current | |
3878 | basic block (where INSN was before deletion). This way, | |
3879 | when the NOP is deleted several instructions later together with its | |
3880 | basic block, we will not get a jump to the next instruction, which | |
e1ab7874 | 3881 | can be harmful. */ |
9845d120 | 3882 | if (first == last |
e1ab7874 | 3883 | && !sel_bb_empty_p (xbb) |
9845d120 | 3884 | && INSN_NOP_P (last) |
e1ab7874 | 3885 | /* Flow goes fallthru from current block to the next. */ |
3886 | && EDGE_COUNT (xbb->succs) == 1 | |
3887 | && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) | |
3888 | /* When successor is an EXIT block, it may not be the next block. */ | |
34154e27 | 3889 | && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 3890 | /* And unconditional jump in previous basic block leads to |
3891 | next basic block of XBB and this jump can be safely removed. */ | |
3892 | && in_current_region_p (xbb->prev_bb) | |
49087fba | 3893 | && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) |
e1ab7874 | 3894 | && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 |
3895 | /* Also this jump is not at the scheduling boundary. */ | |
3896 | && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) | |
3897 | { | |
93919afc | 3898 | bool recompute_toporder_p; |
e1ab7874 | 3899 | /* Clear data structures of jump - jump itself will be removed |
3900 | by sel_redirect_edge_and_branch. */ | |
3901 | clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); | |
93919afc | 3902 | recompute_toporder_p |
3903 | = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); | |
3904 | ||
e1ab7874 | 3905 | gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); |
3906 | ||
8ffee455 | 3907 | /* We could have skipped some debug insns which did not get removed with the block, |
3908 | and the seqnos could become incorrect. Fix them up here. */ | |
3909 | if (MAY_HAVE_DEBUG_INSNS && (sel_bb_head (xbb) != first || sel_bb_end (xbb) != last)) | |
3910 | { | |
3911 | if (!sel_bb_empty_p (xbb->prev_bb)) | |
3912 | { | |
3913 | int prev_seqno = INSN_SEQNO (sel_bb_end (xbb->prev_bb)); | |
3914 | if (prev_seqno > INSN_SEQNO (sel_bb_head (xbb))) | |
3915 | for (insn_t insn = sel_bb_head (xbb); insn != first; insn = NEXT_INSN (insn)) | |
3916 | INSN_SEQNO (insn) = prev_seqno + 1; | |
3917 | } | |
3918 | } | |
3919 | ||
e1ab7874 | 3920 | /* It can turn out that after removing unused jump, basic block |
3921 | that contained that jump, becomes empty too. In such case | |
3922 | remove it too. */ | |
3923 | if (sel_bb_empty_p (xbb->prev_bb)) | |
6f0e7980 | 3924 | changed = maybe_tidy_empty_bb (xbb->prev_bb); |
3925 | if (recompute_toporder_p) | |
93919afc | 3926 | sel_recompute_toporder (); |
e1ab7874 | 3927 | } |
7af466ad | 3928 | |
382ecba7 | 3929 | /* TODO: use separate flag for CFG checking. */ |
3930 | if (flag_checking) | |
3931 | { | |
3932 | verify_backedges (); | |
3933 | verify_dominators (CDI_DOMINATORS); | |
3934 | } | |
7af466ad | 3935 | |
e1ab7874 | 3936 | return changed; |
3937 | } | |
3938 | ||
93919afc | 3939 | /* Purge meaningless empty blocks in the middle of a region. */ |
3940 | void | |
3941 | purge_empty_blocks (void) | |
3942 | { | |
a6e634c6 | 3943 | int i; |
93919afc | 3944 | |
a6e634c6 | 3945 | /* Do not attempt to delete the first basic block in the region. */ |
3946 | for (i = 1; i < current_nr_blocks; ) | |
93919afc | 3947 | { |
f5a6b05f | 3948 | basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
93919afc | 3949 | |
6f0e7980 | 3950 | if (maybe_tidy_empty_bb (b)) |
93919afc | 3951 | continue; |
3952 | ||
3953 | i++; | |
3954 | } | |
3955 | } | |
3956 | ||
48e1416a | 3957 | /* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true, |
3958 | do not delete insn's data, because it will be later re-emitted. | |
e1ab7874 | 3959 | Return true if we have removed some blocks afterwards. */ |
3960 | bool | |
3961 | sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) | |
3962 | { | |
3963 | basic_block bb = BLOCK_FOR_INSN (insn); | |
3964 | ||
3965 | gcc_assert (INSN_IN_STREAM_P (insn)); | |
3966 | ||
9845d120 | 3967 | if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) |
3968 | { | |
3969 | expr_t expr; | |
3970 | av_set_iterator i; | |
3971 | ||
3972 | /* When we remove a debug insn that is head of a BB, it remains | |
3973 | in the AV_SET of the block, but it shouldn't. */ | |
3974 | FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) | |
3975 | if (EXPR_INSN_RTX (expr) == insn) | |
3976 | { | |
3977 | av_set_iter_remove (&i); | |
3978 | break; | |
3979 | } | |
3980 | } | |
3981 | ||
e1ab7874 | 3982 | if (only_disconnect) |
93ff53d3 | 3983 | remove_insn (insn); |
e1ab7874 | 3984 | else |
3985 | { | |
93ff53d3 | 3986 | delete_insn (insn); |
e1ab7874 | 3987 | clear_expr (INSN_EXPR (insn)); |
3988 | } | |
3989 | ||
93ff53d3 | 3990 | /* It is necessary to NULL these fields in case we are going to re-insert |
3991 | INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT | |
3992 | case, but also for NOPs that we will return to the nop pool. */ | |
4a57a2e8 | 3993 | SET_PREV_INSN (insn) = NULL_RTX; |
3994 | SET_NEXT_INSN (insn) = NULL_RTX; | |
93ff53d3 | 3995 | set_block_for_insn (insn, NULL); |
e1ab7874 | 3996 | |
3997 | return tidy_control_flow (bb, full_tidying); | |
3998 | } | |
3999 | ||
4000 | /* Estimate number of the insns in BB. */ | |
4001 | static int | |
4002 | sel_estimate_number_of_insns (basic_block bb) | |
4003 | { | |
4004 | int res = 0; | |
4005 | insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); | |
4006 | ||
4007 | for (; insn != next_tail; insn = NEXT_INSN (insn)) | |
9845d120 | 4008 | if (NONDEBUG_INSN_P (insn)) |
e1ab7874 | 4009 | res++; |
4010 | ||
4011 | return res; | |
4012 | } | |
4013 | ||
4014 | /* We don't need separate luids for notes or labels. */ | |
4015 | static int | |
4016 | sel_luid_for_non_insn (rtx x) | |
4017 | { | |
4018 | gcc_assert (NOTE_P (x) || LABEL_P (x)); | |
4019 | ||
4020 | return -1; | |
4021 | } | |
4022 | ||
bdcc104c | 4023 | /* Find the proper seqno for inserting at INSN by successors. |
4024 | Return -1 if no successors with positive seqno exist. */ | |
e1ab7874 | 4025 | static int |
2f3c9801 | 4026 | get_seqno_by_succs (rtx_insn *insn) |
bdcc104c | 4027 | { |
4028 | basic_block bb = BLOCK_FOR_INSN (insn); | |
2f3c9801 | 4029 | rtx_insn *tmp = insn, *end = BB_END (bb); |
bdcc104c | 4030 | int seqno; |
4031 | insn_t succ = NULL; | |
4032 | succ_iterator si; | |
4033 | ||
4034 | while (tmp != end) | |
4035 | { | |
4036 | tmp = NEXT_INSN (tmp); | |
4037 | if (INSN_P (tmp)) | |
4038 | return INSN_SEQNO (tmp); | |
4039 | } | |
4040 | ||
4041 | seqno = INT_MAX; | |
4042 | ||
4043 | FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) | |
4044 | if (INSN_SEQNO (succ) > 0) | |
4045 | seqno = MIN (seqno, INSN_SEQNO (succ)); | |
4046 | ||
4047 | if (seqno == INT_MAX) | |
4048 | return -1; | |
4049 | ||
4050 | return seqno; | |
4051 | } | |
4052 | ||
8d1881f5 | 4053 | /* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute |
4054 | seqno in corner cases. */ | |
bdcc104c | 4055 | static int |
8d1881f5 | 4056 | get_seqno_for_a_jump (insn_t insn, int old_seqno) |
e1ab7874 | 4057 | { |
4058 | int seqno; | |
4059 | ||
4060 | gcc_assert (INSN_SIMPLEJUMP_P (insn)); | |
4061 | ||
4062 | if (!sel_bb_head_p (insn)) | |
4063 | seqno = INSN_SEQNO (PREV_INSN (insn)); | |
4064 | else | |
4065 | { | |
4066 | basic_block bb = BLOCK_FOR_INSN (insn); | |
4067 | ||
4068 | if (single_pred_p (bb) | |
4069 | && !in_current_region_p (single_pred (bb))) | |
4070 | { | |
4071 | /* We can have preds outside a region when splitting edges | |
48e1416a | 4072 | for pipelining of an outer loop. Use succ instead. |
e1ab7874 | 4073 | There should be only one of them. */ |
4074 | insn_t succ = NULL; | |
4075 | succ_iterator si; | |
4076 | bool first = true; | |
48e1416a | 4077 | |
e1ab7874 | 4078 | gcc_assert (flag_sel_sched_pipelining_outer_loops |
4079 | && current_loop_nest); | |
48e1416a | 4080 | FOR_EACH_SUCC_1 (succ, si, insn, |
e1ab7874 | 4081 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
4082 | { | |
4083 | gcc_assert (first); | |
4084 | first = false; | |
4085 | } | |
4086 | ||
4087 | gcc_assert (succ != NULL); | |
4088 | seqno = INSN_SEQNO (succ); | |
4089 | } | |
4090 | else | |
4091 | { | |
4092 | insn_t *preds; | |
4093 | int n; | |
4094 | ||
4095 | cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); | |
e1ab7874 | 4096 | |
bdcc104c | 4097 | gcc_assert (n > 0); |
4098 | /* For one predecessor, use simple method. */ | |
4099 | if (n == 1) | |
4100 | seqno = INSN_SEQNO (preds[0]); | |
4101 | else | |
4102 | seqno = get_seqno_by_preds (insn); | |
48e1416a | 4103 | |
e1ab7874 | 4104 | free (preds); |
4105 | } | |
4106 | } | |
4107 | ||
bdcc104c | 4108 | /* We were unable to find a good seqno among preds. */ |
4109 | if (seqno < 0) | |
4110 | seqno = get_seqno_by_succs (insn); | |
4111 | ||
8d1881f5 | 4112 | if (seqno < 0) |
4113 | { | |
4114 | /* The only case where this could be here legally is that the only | |
4115 | unscheduled insn was a conditional jump that got removed and turned | |
4116 | into this unconditional one. Initialize from the old seqno | |
4117 | of that jump passed down to here. */ | |
4118 | seqno = old_seqno; | |
4119 | } | |
bdcc104c | 4120 | |
8d1881f5 | 4121 | gcc_assert (seqno >= 0); |
e1ab7874 | 4122 | return seqno; |
4123 | } | |
4124 | ||
961d3eb8 | 4125 | /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors |
4126 | with positive seqno exist. */ | |
e1ab7874 | 4127 | int |
91a55c11 | 4128 | get_seqno_by_preds (rtx_insn *insn) |
e1ab7874 | 4129 | { |
4130 | basic_block bb = BLOCK_FOR_INSN (insn); | |
91a55c11 | 4131 | rtx_insn *tmp = insn, *head = BB_HEAD (bb); |
e1ab7874 | 4132 | insn_t *preds; |
4133 | int n, i, seqno; | |
4134 | ||
738eb905 | 4135 | /* Loop backwards from INSN to HEAD including both. */ |
4136 | while (1) | |
bdcc104c | 4137 | { |
bdcc104c | 4138 | if (INSN_P (tmp)) |
738eb905 | 4139 | return INSN_SEQNO (tmp); |
4140 | if (tmp == head) | |
4141 | break; | |
4142 | tmp = PREV_INSN (tmp); | |
bdcc104c | 4143 | } |
48e1416a | 4144 | |
e1ab7874 | 4145 | cfg_preds (bb, &preds, &n); |
4146 | for (i = 0, seqno = -1; i < n; i++) | |
4147 | seqno = MAX (seqno, INSN_SEQNO (preds[i])); | |
4148 | ||
e1ab7874 | 4149 | return seqno; |
4150 | } | |
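/* Illustrative example for the function above (made-up seqnos): if no real
   insn is found when scanning backwards from INSN to BB_HEAD, and the two
   CFG predecessors of the block end with seqnos 7 and 12, the function
   returns MAX (7, 12) == 12.  */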
4151 | ||
4152 | \f | |
4153 | ||
4154 | /* Extend pass-scope data structures for basic blocks. */ | |
4155 | void | |
4156 | sel_extend_global_bb_info (void) | |
4157 | { | |
fe672ac0 | 4158 | sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4159 | } |
4160 | ||
4161 | /* Extend region-scope data structures for basic blocks. */ | |
4162 | static void | |
4163 | extend_region_bb_info (void) | |
4164 | { | |
fe672ac0 | 4165 | sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
e1ab7874 | 4166 | } |
4167 | ||
4168 | /* Extend all data structures to fit for all basic blocks. */ | |
4169 | static void | |
4170 | extend_bb_info (void) | |
4171 | { | |
4172 | sel_extend_global_bb_info (); | |
4173 | extend_region_bb_info (); | |
4174 | } | |
4175 | ||
4176 | /* Finalize pass-scope data structures for basic blocks. */ | |
4177 | void | |
4178 | sel_finish_global_bb_info (void) | |
4179 | { | |
f1f41a6c | 4180 | sel_global_bb_info.release (); |
e1ab7874 | 4181 | } |
4182 | ||
4183 | /* Finalize region-scope data structures for basic blocks. */ | |
4184 | static void | |
4185 | finish_region_bb_info (void) | |
4186 | { | |
f1f41a6c | 4187 | sel_region_bb_info.release (); |
e1ab7874 | 4188 | } |
4189 | \f | |
4190 | ||
4191 | /* Data for each insn in current region. */ | |
16fb756f | 4192 | vec<sel_insn_data_def> s_i_d; |
e1ab7874 | 4193 | |
e1ab7874 | 4194 | /* Extend data structures for insns from current region. */ |
4195 | static void | |
4196 | extend_insn_data (void) | |
4197 | { | |
4198 | int reserve; | |
48e1416a | 4199 | |
e1ab7874 | 4200 | sched_extend_target (); |
4201 | sched_deps_init (false); | |
4202 | ||
4203 | /* Extend data structures for insns from current region. */ | |
f1f41a6c | 4204 | reserve = (sched_max_luid + 1 - s_i_d.length ()); |
4205 | if (reserve > 0 && ! s_i_d.space (reserve)) | |
d9ab2038 | 4206 | { |
4207 | int size; | |
4208 | ||
4209 | if (sched_max_luid / 2 > 1024) | |
4210 | size = sched_max_luid + 1024; | |
4211 | else | |
4212 | size = 3 * sched_max_luid / 2; | |
48e1416a | 4213 | |
d9ab2038 | 4214 | |
f1f41a6c | 4215 | s_i_d.safe_grow_cleared (size); |
d9ab2038 | 4216 | } |
e1ab7874 | 4217 | } |
4218 | ||
4219 | /* Finalize data structures for insns from current region. */ | |
4220 | static void | |
4221 | finish_insns (void) | |
4222 | { | |
4223 | unsigned i; | |
4224 | ||
4225 | /* Clear here all dependence contexts that may have left from insns that were | |
4226 | removed during the scheduling. */ | |
f1f41a6c | 4227 | for (i = 0; i < s_i_d.length (); i++) |
e1ab7874 | 4228 | { |
f1f41a6c | 4229 | sel_insn_data_def *sid_entry = &s_i_d[i]; |
48e1416a | 4230 | |
e1ab7874 | 4231 | if (sid_entry->live) |
4232 | return_regset_to_pool (sid_entry->live); | |
4233 | if (sid_entry->analyzed_deps) | |
4234 | { | |
4235 | BITMAP_FREE (sid_entry->analyzed_deps); | |
4236 | BITMAP_FREE (sid_entry->found_deps); | |
4237 | htab_delete (sid_entry->transformed_insns); | |
4238 | free_deps (&sid_entry->deps_context); | |
4239 | } | |
4240 | if (EXPR_VINSN (&sid_entry->expr)) | |
4241 | { | |
4242 | clear_expr (&sid_entry->expr); | |
48e1416a | 4243 | |
e1ab7874 | 4244 | /* Also, clear CANT_MOVE bit here, because we really don't want it |
4245 | to be passed to the next region. */ | |
4246 | CANT_MOVE_BY_LUID (i) = 0; | |
4247 | } | |
4248 | } | |
48e1416a | 4249 | |
f1f41a6c | 4250 | s_i_d.release (); |
e1ab7874 | 4251 | } |
4252 | ||
4253 | /* A proxy to pass initialization data to init_insn (). */ | |
4254 | static sel_insn_data_def _insn_init_ssid; | |
4255 | static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; | |
4256 | ||
4257 | /* If true create a new vinsn. Otherwise use the one from EXPR. */ | |
4258 | static bool insn_init_create_new_vinsn_p; | |
4259 | ||
4260 | /* Set all necessary data for initialization of the new insn[s]. */ | |
4261 | static expr_t | |
4262 | set_insn_init (expr_t expr, vinsn_t vi, int seqno) | |
4263 | { | |
4264 | expr_t x = &insn_init_ssid->expr; | |
4265 | ||
4266 | copy_expr_onside (x, expr); | |
4267 | if (vi != NULL) | |
4268 | { | |
4269 | insn_init_create_new_vinsn_p = false; | |
4270 | change_vinsn_in_expr (x, vi); | |
4271 | } | |
4272 | else | |
4273 | insn_init_create_new_vinsn_p = true; | |
4274 | ||
4275 | insn_init_ssid->seqno = seqno; | |
4276 | return x; | |
4277 | } | |
4278 | ||
4279 | /* Init data for INSN. */ | |
4280 | static void | |
4281 | init_insn_data (insn_t insn) | |
4282 | { | |
4283 | expr_t expr; | |
4284 | sel_insn_data_t ssid = insn_init_ssid; | |
4285 | ||
4286 | /* The fields mentioned below are special and hence are not being | |
4287 | propagated to the new insns. */ | |
4288 | gcc_assert (!ssid->asm_p && ssid->sched_next == NULL | |
4289 | && !ssid->after_stall_p && ssid->sched_cycle == 0); | |
4290 | gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); | |
4291 | ||
4292 | expr = INSN_EXPR (insn); | |
4293 | copy_expr (expr, &ssid->expr); | |
4294 | prepare_insn_expr (insn, ssid->seqno); | |
4295 | ||
4296 | if (insn_init_create_new_vinsn_p) | |
4297 | change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); | |
48e1416a | 4298 | |
e1ab7874 | 4299 | if (first_time_insn_init (insn)) |
4300 | init_first_time_insn_data (insn); | |
4301 | } | |
4302 | ||
4303 | /* This is used to initialize spurious jumps generated by | |
8d1881f5 | 4304 | sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos |
4305 | in corner cases within get_seqno_for_a_jump. */ | |
e1ab7874 | 4306 | static void |
8d1881f5 | 4307 | init_simplejump_data (insn_t insn, int old_seqno) |
e1ab7874 | 4308 | { |
4309 | init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, | |
f1f41a6c | 4310 | REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, |
1e094109 | 4311 | vNULL, true, false, false, |
e1ab7874 | 4312 | false, true); |
8d1881f5 | 4313 | INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno); |
e1ab7874 | 4314 | init_first_time_insn_data (insn); |
4315 | } | |
4316 | ||
48e1416a | 4317 | /* Perform deferred initialization of insns. This is used to process |
8d1881f5 | 4318 | a new jump that may be created by redirect_edge. OLD_SEQNO is used |
4319 | for initializing simplejumps in init_simplejump_data. */ | |
4320 | static void | |
4321 | sel_init_new_insn (insn_t insn, int flags, int old_seqno) | |
e1ab7874 | 4322 | { |
4323 | /* We create data structures for bb when the first insn is emitted in it. */ | |
4324 | if (INSN_P (insn) | |
4325 | && INSN_IN_STREAM_P (insn) | |
4326 | && insn_is_the_only_one_in_bb_p (insn)) | |
4327 | { | |
4328 | extend_bb_info (); | |
4329 | create_initial_data_sets (BLOCK_FOR_INSN (insn)); | |
4330 | } | |
48e1416a | 4331 | |
e1ab7874 | 4332 | if (flags & INSN_INIT_TODO_LUID) |
52d7e28c | 4333 | { |
4334 | sched_extend_luids (); | |
4335 | sched_init_insn_luid (insn); | |
4336 | } | |
e1ab7874 | 4337 | |
4338 | if (flags & INSN_INIT_TODO_SSID) | |
4339 | { | |
4340 | extend_insn_data (); | |
4341 | init_insn_data (insn); | |
4342 | clear_expr (&insn_init_ssid->expr); | |
4343 | } | |
4344 | ||
4345 | if (flags & INSN_INIT_TODO_SIMPLEJUMP) | |
4346 | { | |
4347 | extend_insn_data (); | |
8d1881f5 | 4348 | init_simplejump_data (insn, old_seqno); |
e1ab7874 | 4349 | } |
48e1416a | 4350 | |
e1ab7874 | 4351 | gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) |
4352 | == CONTAINING_RGN (BB_TO_BLOCK (0))); | |
4353 | } | |
4354 | \f | |
4355 | ||
4356 | /* Functions to init/finish work with lv sets. */ | |
4357 | ||
4358 | /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ | |
4359 | static void | |
4360 | init_lv_set (basic_block bb) | |
4361 | { | |
4362 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
4363 | ||
4364 | BB_LV_SET (bb) = get_regset_from_pool (); | |
48e1416a | 4365 | COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
e1ab7874 | 4366 | BB_LV_SET_VALID_P (bb) = true; |
4367 | } | |
4368 | ||
4369 | /* Copy liveness information to BB from FROM_BB. */ | |
4370 | static void | |
4371 | copy_lv_set_from (basic_block bb, basic_block from_bb) | |
4372 | { | |
4373 | gcc_assert (!BB_LV_SET_VALID_P (bb)); | |
48e1416a | 4374 | |
e1ab7874 | 4375 | COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); |
4376 | BB_LV_SET_VALID_P (bb) = true; | |
48e1416a | 4377 | } |
e1ab7874 | 4378 | |
4379 | /* Initialize lv set of all bb headers. */ | |
4380 | void | |
4381 | init_lv_sets (void) | |
4382 | { | |
4383 | basic_block bb; | |
4384 | ||
4385 | /* Initialize LV sets. */ | |
fc00614f | 4386 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4387 | init_lv_set (bb); |
4388 | ||
4389 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4390 | init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4391 | } |
4392 | ||
4393 | /* Release lv set of HEAD. */ | |
4394 | static void | |
4395 | free_lv_set (basic_block bb) | |
4396 | { | |
4397 | gcc_assert (BB_LV_SET (bb) != NULL); | |
4398 | ||
4399 | return_regset_to_pool (BB_LV_SET (bb)); | |
4400 | BB_LV_SET (bb) = NULL; | |
4401 | BB_LV_SET_VALID_P (bb) = false; | |
4402 | } | |
4403 | ||
4404 | /* Finalize lv sets of all bb headers. */ | |
4405 | void | |
4406 | free_lv_sets (void) | |
4407 | { | |
4408 | basic_block bb; | |
4409 | ||
4410 | /* Don't forget EXIT_BLOCK. */ | |
34154e27 | 4411 | free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4412 | |
4413 | /* Free LV sets. */ | |
fc00614f | 4414 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 4415 | if (BB_LV_SET (bb)) |
4416 | free_lv_set (bb); | |
4417 | } | |
4418 | ||
c1c8a3d0 | 4419 | /* Mark AV_SET for BB as invalid, so this set will be updated the next time |
4420 | compute_av() processes BB. This function is called when creating new basic | |
4421 | blocks, as well as for blocks (either new or existing) where new jumps are | |
4422 | created when the control flow is being updated. */ | |
e1ab7874 | 4423 | static void |
4424 | invalidate_av_set (basic_block bb) | |
4425 | { | |
e1ab7874 | 4426 | BB_AV_LEVEL (bb) = -1; |
4427 | } | |
4428 | ||
4429 | /* Create initial data sets for BB (they will be invalid). */ | |
4430 | static void | |
4431 | create_initial_data_sets (basic_block bb) | |
4432 | { | |
4433 | if (BB_LV_SET (bb)) | |
4434 | BB_LV_SET_VALID_P (bb) = false; | |
4435 | else | |
4436 | BB_LV_SET (bb) = get_regset_from_pool (); | |
4437 | invalidate_av_set (bb); | |
4438 | } | |
4439 | ||
4440 | /* Free av set of BB. */ | |
4441 | static void | |
4442 | free_av_set (basic_block bb) | |
4443 | { | |
4444 | av_set_clear (&BB_AV_SET (bb)); | |
4445 | BB_AV_LEVEL (bb) = 0; | |
4446 | } | |
4447 | ||
4448 | /* Free data sets of BB. */ | |
4449 | void | |
4450 | free_data_sets (basic_block bb) | |
4451 | { | |
4452 | free_lv_set (bb); | |
4453 | free_av_set (bb); | |
4454 | } | |
4455 | ||
e1ab7874 | 4456 | /* Exchange data sets of TO and FROM. */ |
4457 | void | |
4458 | exchange_data_sets (basic_block to, basic_block from) | |
4459 | { | |
a4f59596 | 4460 | /* Exchange lv sets of TO and FROM. */ |
4461 | std::swap (BB_LV_SET (from), BB_LV_SET (to)); | |
4462 | std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to)); | |
4463 | ||
4464 | /* Exchange av sets of TO and FROM. */ | |
4465 | std::swap (BB_AV_SET (from), BB_AV_SET (to)); | |
4466 | std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to)); | |
e1ab7874 | 4467 | } |
4468 | ||
4469 | /* Copy data sets of FROM to TO. */ | |
4470 | void | |
4471 | copy_data_sets (basic_block to, basic_block from) | |
4472 | { | |
4473 | gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); | |
4474 | gcc_assert (BB_AV_SET (to) == NULL); | |
4475 | ||
4476 | BB_AV_LEVEL (to) = BB_AV_LEVEL (from); | |
4477 | BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); | |
4478 | ||
4479 | if (BB_AV_SET_VALID_P (from)) | |
4480 | { | |
4481 | BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); | |
4482 | } | |
4483 | if (BB_LV_SET_VALID_P (from)) | |
4484 | { | |
4485 | gcc_assert (BB_LV_SET (to) != NULL); | |
4486 | COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); | |
4487 | } | |
4488 | } | |
4489 | ||
4490 | /* Return an av set for INSN, if any. */ | |
4491 | av_set_t | |
4492 | get_av_set (insn_t insn) | |
4493 | { | |
4494 | av_set_t av_set; | |
4495 | ||
4496 | gcc_assert (AV_SET_VALID_P (insn)); | |
4497 | ||
4498 | if (sel_bb_head_p (insn)) | |
4499 | av_set = BB_AV_SET (BLOCK_FOR_INSN (insn)); | |
4500 | else | |
4501 | av_set = NULL; | |
4502 | ||
4503 | return av_set; | |
4504 | } | |
4505 | ||
4506 | /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */ | |
4507 | int | |
4508 | get_av_level (insn_t insn) | |
4509 | { | |
4510 | int av_level; | |
4511 | ||
4512 | gcc_assert (INSN_P (insn)); | |
4513 | ||
4514 | if (sel_bb_head_p (insn)) | |
4515 | av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn)); | |
4516 | else | |
4517 | av_level = INSN_WS_LEVEL (insn); | |
4518 | ||
4519 | return av_level; | |
4520 | } | |
4521 | ||
4522 | \f | |
4523 | ||
4524 | /* Variables to work with control-flow graph. */ | |
4525 | ||
4526 | /* The basic blocks that have already been processed by sched_data_update (), | |
4527 | but haven't been passed to sel_add_bb () yet. */ | |
16fb756f | 4528 | static vec<basic_block> last_added_blocks; |
e1ab7874 | 4529 | |
4530 | /* A pool for allocating successor infos. */ | |
4531 | static struct | |
4532 | { | |
4533 | /* A stack for saving succs_info structures. */ | |
4534 | struct succs_info *stack; | |
4535 | ||
4536 | /* Its size. */ | |
4537 | int size; | |
4538 | ||
4539 | /* Top of the stack. */ | |
4540 | int top; | |
4541 | ||
4542 | /* Maximal value of the top. */ | |
4543 | int max_top; | |
4544 | } succs_info_pool; | |
4545 | ||
4546 | /* Functions to work with control-flow graph. */ | |
4547 | ||
4548 | /* Return the first insn of BB after its basic block note, or NULL if BB is empty. */ | |
179c282d | 4549 | rtx_insn * |
e1ab7874 | 4550 | sel_bb_head (basic_block bb) |
4551 | { | |
179c282d | 4552 | rtx_insn *head; |
e1ab7874 | 4553 | |
34154e27 | 4554 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
e1ab7874 | 4555 | { |
4556 | gcc_assert (exit_insn != NULL_RTX); | |
4557 | head = exit_insn; | |
4558 | } | |
4559 | else | |
4560 | { | |
9ed997be | 4561 | rtx_note *note = bb_note (bb); |
e1ab7874 | 4562 | head = next_nonnote_insn (note); |
4563 | ||
cabd2128 | 4564 | if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb)) |
179c282d | 4565 | head = NULL; |
e1ab7874 | 4566 | } |
4567 | ||
4568 | return head; | |
4569 | } | |
4570 | ||
4571 | /* Return true if INSN is a basic block header. */ | |
4572 | bool | |
4573 | sel_bb_head_p (insn_t insn) | |
4574 | { | |
4575 | return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn; | |
4576 | } | |
4577 | ||
4578 | /* Return last insn of BB. */ | |
179c282d | 4579 | rtx_insn * |
e1ab7874 | 4580 | sel_bb_end (basic_block bb) |
4581 | { | |
4582 | if (sel_bb_empty_p (bb)) | |
179c282d | 4583 | return NULL; |
e1ab7874 | 4584 | |
34154e27 | 4585 | gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 4586 | |
4587 | return BB_END (bb); | |
4588 | } | |
4589 | ||
4590 | /* Return true if INSN is the last insn in its basic block. */ | |
4591 | bool | |
4592 | sel_bb_end_p (insn_t insn) | |
4593 | { | |
4594 | return insn == sel_bb_end (BLOCK_FOR_INSN (insn)); | |
4595 | } | |
4596 | ||
4597 | /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */ | |
4598 | bool | |
4599 | sel_bb_empty_p (basic_block bb) | |
4600 | { | |
4601 | return sel_bb_head (bb) == NULL; | |
4602 | } | |
4603 | ||
4604 | /* True when BB belongs to the current scheduling region. */ | |
4605 | bool | |
4606 | in_current_region_p (basic_block bb) | |
4607 | { | |
4608 | if (bb->index < NUM_FIXED_BLOCKS) | |
4609 | return false; | |
4610 | ||
4611 | return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0)); | |
4612 | } | |
4613 | ||
4614 | /* Return the block which is a fallthru bb of a conditional jump JUMP. */ | |
4615 | basic_block | |
93ee8dfb | 4616 | fallthru_bb_of_jump (const rtx_insn *jump) |
e1ab7874 | 4617 | { |
4618 | if (!JUMP_P (jump)) | |
4619 | return NULL; | |
4620 | ||
e1ab7874 | 4621 | if (!any_condjump_p (jump)) |
4622 | return NULL; | |
4623 | ||
bf19734b | 4624 | /* A basic block that ends with a conditional jump may still have one successor |
4625 | (and be followed by a barrier); we are not interested in that case. */ | |
4626 | if (single_succ_p (BLOCK_FOR_INSN (jump))) | |
4627 | return NULL; | |
4628 | ||
e1ab7874 | 4629 | return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; |
4630 | } | |
4631 | ||
4632 | /* Remove all notes from BB. */ | |
4633 | static void | |
4634 | init_bb (basic_block bb) | |
4635 | { | |
4636 | remove_notes (bb_note (bb), BB_END (bb)); | |
e97a173d | 4637 | BB_NOTE_LIST (bb) = note_list; |
e1ab7874 | 4638 | } |
4639 | ||
4640 | void | |
52d7e28c | 4641 | sel_init_bbs (bb_vec_t bbs) |
e1ab7874 | 4642 | { |
4643 | const struct sched_scan_info_def ssi = | |
4644 | { | |
4645 | extend_bb_info, /* extend_bb */ | |
4646 | init_bb, /* init_bb */ | |
4647 | NULL, /* extend_insn */ | |
4648 | NULL /* init_insn */ | |
4649 | }; | |
4650 | ||
52d7e28c | 4651 | sched_scan (&ssi, bbs); |
e1ab7874 | 4652 | } |
4653 | ||
3baa98a0 | 4654 | /* Restore notes for the whole region. */ |
e1ab7874 | 4655 | static void |
3baa98a0 | 4656 | sel_restore_notes (void) |
e1ab7874 | 4657 | { |
4658 | int bb; | |
3baa98a0 | 4659 | insn_t insn; |
e1ab7874 | 4660 | |
4661 | for (bb = 0; bb < current_nr_blocks; bb++) | |
4662 | { | |
4663 | basic_block first, last; | |
4664 | ||
4665 | first = EBB_FIRST_BB (bb); | |
4666 | last = EBB_LAST_BB (bb)->next_bb; | |
4667 | ||
4668 | do | |
4669 | { | |
4670 | note_list = BB_NOTE_LIST (first); | |
4671 | restore_other_notes (NULL, first); | |
e97a173d | 4672 | BB_NOTE_LIST (first) = NULL; |
e1ab7874 | 4673 | |
3baa98a0 | 4674 | FOR_BB_INSNS (first, insn) |
4675 | if (NONDEBUG_INSN_P (insn)) | |
4676 | reemit_notes (insn); | |
4677 | ||
e1ab7874 | 4678 | first = first->next_bb; |
4679 | } | |
4680 | while (first != last); | |
4681 | } | |
4682 | } | |
4683 | ||
4684 | /* Free per-bb data structures. */ | |
4685 | void | |
4686 | sel_finish_bbs (void) | |
4687 | { | |
3baa98a0 | 4688 | sel_restore_notes (); |
e1ab7874 | 4689 | |
4690 | /* Remove current loop preheader from this loop. */ | |
4691 | if (current_loop_nest) | |
4692 | sel_remove_loop_preheader (); | |
4693 | ||
4694 | finish_region_bb_info (); | |
4695 | } | |
4696 | ||
4697 | /* Return true if INSN has a single successor of type FLAGS. */ | |
4698 | bool | |
4699 | sel_insn_has_single_succ_p (insn_t insn, int flags) | |
4700 | { | |
4701 | insn_t succ; | |
4702 | succ_iterator si; | |
4703 | bool first_p = true; | |
4704 | ||
4705 | FOR_EACH_SUCC_1 (succ, si, insn, flags) | |
4706 | { | |
4707 | if (first_p) | |
4708 | first_p = false; | |
4709 | else | |
4710 | return false; | |
4711 | } | |
4712 | ||
4713 | return true; | |
4714 | } | |
4715 | ||
4716 | /* Allocate successor's info. */ | |
4717 | static struct succs_info * | |
4718 | alloc_succs_info (void) | |
4719 | { | |
4720 | if (succs_info_pool.top == succs_info_pool.max_top) | |
4721 | { | |
4722 | int i; | |
48e1416a | 4723 | |
e1ab7874 | 4724 | if (++succs_info_pool.max_top >= succs_info_pool.size) |
4725 | gcc_unreachable (); | |
4726 | ||
4727 | i = ++succs_info_pool.top; | |
f1f41a6c | 4728 | succs_info_pool.stack[i].succs_ok.create (10); |
4729 | succs_info_pool.stack[i].succs_other.create (10); | |
4730 | succs_info_pool.stack[i].probs_ok.create (10); | |
e1ab7874 | 4731 | } |
4732 | else | |
4733 | succs_info_pool.top++; | |
4734 | ||
4735 | return &succs_info_pool.stack[succs_info_pool.top]; | |
4736 | } | |
4737 | ||
4738 | /* Free successor's info. */ | |
4739 | void | |
4740 | free_succs_info (struct succs_info * sinfo) | |
4741 | { | |
48e1416a | 4742 | gcc_assert (succs_info_pool.top >= 0 |
e1ab7874 | 4743 | && &succs_info_pool.stack[succs_info_pool.top] == sinfo); |
4744 | succs_info_pool.top--; | |
4745 | ||
4746 | /* Clear stale info. */ | |
f1f41a6c | 4747 | sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ()); |
4748 | sinfo->succs_other.block_remove (0, sinfo->succs_other.length ()); | |
4749 | sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ()); | |
e1ab7874 | 4750 | sinfo->all_prob = 0; |
4751 | sinfo->succs_ok_n = 0; | |
4752 | sinfo->all_succs_n = 0; | |
4753 | } | |
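
/* The succs_info_pool above is used as a stack: its depth is bounded by
   succs_info_pool.size (MAX_WS + 1, see alloc_sched_pools below), so
   compute_succs_info/free_succs_info pairs can nest only up to the
   maximal lookahead window size.  */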
4754 | ||
48e1416a | 4755 | /* Compute successor info for INSN. FLAGS are the flags passed |
e1ab7874 | 4756 | to the FOR_EACH_SUCC_1 iterator. */ |
4757 | struct succs_info * | |
4758 | compute_succs_info (insn_t insn, short flags) | |
4759 | { | |
4760 | succ_iterator si; | |
4761 | insn_t succ; | |
4762 | struct succs_info *sinfo = alloc_succs_info (); | |
4763 | ||
4764 | /* Traverse *all* successors and decide what to do with each. */ | |
4765 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) | |
4766 | { | |
4767 | /* FIXME: this doesn't work for skipping to loop exits, as we don't | |
4768 | perform code motion through inner loops. */ | |
4769 | short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; | |
4770 | ||
4771 | if (current_flags & flags) | |
4772 | { | |
f1f41a6c | 4773 | sinfo->succs_ok.safe_push (succ); |
4774 | sinfo->probs_ok.safe_push ( | |
4775 | /* FIXME: Improve calculation when skipping | |
4776 | inner loop to exits. */ | |
7c6fa2d9 | 4777 | si.bb_end |
4778 | ? (si.e1->probability.initialized_p () | |
4779 | ? si.e1->probability.to_reg_br_prob_base () | |
4780 | : 0) | |
4781 | : REG_BR_PROB_BASE); | |
e1ab7874 | 4782 | sinfo->succs_ok_n++; |
4783 | } | |
4784 | else | |
f1f41a6c | 4785 | sinfo->succs_other.safe_push (succ); |
e1ab7874 | 4786 | |
4787 | /* Compute all_prob. */ | |
4788 | if (!si.bb_end) | |
4789 | sinfo->all_prob = REG_BR_PROB_BASE; | |
720cfc43 | 4790 | else if (si.e1->probability.initialized_p ()) |
4791 | sinfo->all_prob += si.e1->probability.to_reg_br_prob_base (); | |
e1ab7874 | 4792 | |
4793 | sinfo->all_succs_n++; | |
4794 | } | |
4795 | ||
4796 | return sinfo; | |
4797 | } | |
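
/* A typical use of the two routines above (a sketch only; INSN stands
   for some insn of the current region):

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     ... inspect sinfo->succs_ok, sinfo->probs_ok and sinfo->all_prob ...
     free_succs_info (sinfo);

   Every compute_succs_info call must be paired with free_succs_info,
   since the pool entries are reclaimed in stack order.  */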
4798 | ||
48e1416a | 4799 | /* Return the predecessors of BB in PREDS and their number in N. |
e1ab7874 | 4800 | Empty blocks are skipped. SIZE is used to allocate PREDS. */ |
4801 | static void | |
4802 | cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) | |
4803 | { | |
4804 | edge e; | |
4805 | edge_iterator ei; | |
4806 | ||
4807 | gcc_assert (BLOCK_TO_BB (bb->index) != 0); | |
4808 | ||
4809 | FOR_EACH_EDGE (e, ei, bb->preds) | |
4810 | { | |
4811 | basic_block pred_bb = e->src; | |
4812 | insn_t bb_end = BB_END (pred_bb); | |
4813 | ||
f1ec9c64 | 4814 | if (!in_current_region_p (pred_bb)) |
4815 | { | |
4816 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
4817 | && current_loop_nest); | |
4818 | continue; | |
4819 | } | |
e1ab7874 | 4820 | |
4821 | if (sel_bb_empty_p (pred_bb)) | |
4822 | cfg_preds_1 (pred_bb, preds, n, size); | |
4823 | else | |
4824 | { | |
4825 | if (*n == *size) | |
48e1416a | 4826 | *preds = XRESIZEVEC (insn_t, *preds, |
e1ab7874 | 4827 | (*size = 2 * *size + 1)); |
4828 | (*preds)[(*n)++] = bb_end; | |
4829 | } | |
4830 | } | |
4831 | ||
f1ec9c64 | 4832 | gcc_assert (*n != 0 |
4833 | || (flag_sel_sched_pipelining_outer_loops | |
4834 | && current_loop_nest)); | |
e1ab7874 | 4835 | } |
4836 | ||
48e1416a | 4837 | /* Find all predecessors of BB and record them in PREDS and their number |
4838 | in N. Empty blocks are skipped, and only normal (forward in-region) | |
e1ab7874 | 4839 | edges are processed. */ |
4840 | static void | |
4841 | cfg_preds (basic_block bb, insn_t **preds, int *n) | |
4842 | { | |
4843 | int size = 0; | |
4844 | ||
4845 | *preds = NULL; | |
4846 | *n = 0; | |
4847 | cfg_preds_1 (bb, preds, n, &size); | |
4848 | } | |
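
/* The PREDS array filled by cfg_preds is grown with XRESIZEVEC, i.e. it is
   heap-allocated; the caller is presumably expected to free it once the
   predecessor insns are no longer needed.  */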
4849 | ||
4850 | /* Returns true if we are moving INSN through a join point. */ | |
4851 | bool | |
4852 | sel_num_cfg_preds_gt_1 (insn_t insn) | |
4853 | { | |
4854 | basic_block bb; | |
4855 | ||
4856 | if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0) | |
4857 | return false; | |
4858 | ||
4859 | bb = BLOCK_FOR_INSN (insn); | |
4860 | ||
4861 | while (1) | |
4862 | { | |
4863 | if (EDGE_COUNT (bb->preds) > 1) | |
4864 | return true; | |
4865 | ||
4866 | gcc_assert (EDGE_PRED (bb, 0)->dest == bb); | |
4867 | bb = EDGE_PRED (bb, 0)->src; | |
4868 | ||
4869 | if (!sel_bb_empty_p (bb)) | |
4870 | break; | |
4871 | } | |
4872 | ||
4873 | return false; | |
4874 | } | |
4875 | ||
48e1416a | 4876 | /* Returns true when BB should be the end of an ebb. Adapted from the |
e1ab7874 | 4877 | code in sched-ebb.c. */ |
4878 | bool | |
4879 | bb_ends_ebb_p (basic_block bb) | |
4880 | { | |
4881 | basic_block next_bb = bb_next_bb (bb); | |
4882 | edge e; | |
48e1416a | 4883 | |
34154e27 | 4884 | if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
e1ab7874 | 4885 | || bitmap_bit_p (forced_ebb_heads, next_bb->index) |
4886 | || (LABEL_P (BB_HEAD (next_bb)) | |
4887 | /* NB: LABEL_NUSES () is not maintained outside of jump.c. | |
4888 | Work around that. */ | |
4889 | && !single_pred_p (next_bb))) | |
4890 | return true; | |
4891 | ||
4892 | if (!in_current_region_p (next_bb)) | |
4893 | return true; | |
4894 | ||
7f58c05e | 4895 | e = find_fallthru_edge (bb->succs); |
4896 | if (e) | |
4897 | { | |
4898 | gcc_assert (e->dest == next_bb); | |
4899 | ||
4900 | return false; | |
4901 | } | |
e1ab7874 | 4902 | |
4903 | return true; | |
4904 | } | |
4905 | ||
4906 | /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a | |
4907 | successor of INSN. */ | |
4908 | bool | |
4909 | in_same_ebb_p (insn_t insn, insn_t succ) | |
4910 | { | |
4911 | basic_block ptr = BLOCK_FOR_INSN (insn); | |
4912 | ||
9af5ce0c | 4913 | for (;;) |
e1ab7874 | 4914 | { |
4915 | if (ptr == BLOCK_FOR_INSN (succ)) | |
4916 | return true; | |
48e1416a | 4917 | |
e1ab7874 | 4918 | if (bb_ends_ebb_p (ptr)) |
4919 | return false; | |
4920 | ||
4921 | ptr = bb_next_bb (ptr); | |
4922 | } | |
4923 | ||
4924 | gcc_unreachable (); | |
4925 | return false; | |
4926 | } | |
4927 | ||
4928 | /* Recomputes the reverse topological order for the function and | |
4929 | saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also | |
4930 | modified appropriately. */ | |
4931 | static void | |
4932 | recompute_rev_top_order (void) | |
4933 | { | |
4934 | int *postorder; | |
4935 | int n_blocks, i; | |
4936 | ||
fe672ac0 | 4937 | if (!rev_top_order_index |
4938 | || rev_top_order_index_len < last_basic_block_for_fn (cfun)) | |
e1ab7874 | 4939 | { |
fe672ac0 | 4940 | rev_top_order_index_len = last_basic_block_for_fn (cfun); |
e1ab7874 | 4941 | rev_top_order_index = XRESIZEVEC (int, rev_top_order_index, |
4942 | rev_top_order_index_len); | |
4943 | } | |
4944 | ||
a28770e1 | 4945 | postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
e1ab7874 | 4946 | |
4947 | n_blocks = post_order_compute (postorder, true, false); | |
a28770e1 | 4948 | gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks); |
e1ab7874 | 4949 | |
4950 | /* Build reverse function: for each basic block with BB->INDEX == K | |
4951 | rev_top_order_index[K] is its reverse topological sort number. */ | |
4952 | for (i = 0; i < n_blocks; i++) | |
4953 | { | |
4954 | gcc_assert (postorder[i] < rev_top_order_index_len); | |
4955 | rev_top_order_index[postorder[i]] = i; | |
4956 | } | |
4957 | ||
4958 | free (postorder); | |
4959 | } | |
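
/* For example (illustrative indices only): if post_order_compute returns
   {4, 2, 3, 0, 1}, then rev_top_order_index[4] = 0, rev_top_order_index[2] = 1,
   and so on.  Since a postorder is a reverse topological order, a larger
   value means the block comes earlier in the topological order, which is
   what bb_top_order_comparator below relies on.  */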
4960 | ||
4961 | /* Clear all flags from insns in BB that could spoil its rescheduling. */ | |
4962 | void | |
4963 | clear_outdated_rtx_info (basic_block bb) | |
4964 | { | |
91a55c11 | 4965 | rtx_insn *insn; |
e1ab7874 | 4966 | |
4967 | FOR_BB_INSNS (bb, insn) | |
4968 | if (INSN_P (insn)) | |
4969 | { | |
4970 | SCHED_GROUP_P (insn) = 0; | |
4971 | INSN_AFTER_STALL_P (insn) = 0; | |
4972 | INSN_SCHED_TIMES (insn) = 0; | |
4973 | EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0; | |
4974 | ||
4975 | /* We cannot use the cached transformations, as previously we could ignore | |
48e1416a | 4976 | the LHS dependence due to enabled renaming and transform |
e1ab7874 | 4977 | the expression, while now we will be unable to do this. */ |
4978 | htab_empty (INSN_TRANSFORMED_INSNS (insn)); | |
4979 | } | |
4980 | } | |
4981 | ||
4982 | /* Add BB_NOTE to the pool of available basic block notes. */ | |
4983 | static void | |
4984 | return_bb_to_pool (basic_block bb) | |
4985 | { | |
9ed997be | 4986 | rtx_note *note = bb_note (bb); |
e1ab7874 | 4987 | |
4988 | gcc_assert (NOTE_BASIC_BLOCK (note) == bb | |
4989 | && bb->aux == NULL); | |
4990 | ||
4991 | /* It turns out that current cfg infrastructure does not support | |
4992 | reuse of basic blocks. Don't bother for now. */ | |
f1f41a6c | 4993 | /*bb_note_pool.safe_push (note);*/ |
e1ab7874 | 4994 | } |
4995 | ||
4996 | /* Get a bb_note from pool or return NULL_RTX if pool is empty. */ | |
cef3d8ad | 4997 | static rtx_note * |
e1ab7874 | 4998 | get_bb_note_from_pool (void) |
4999 | { | |
f1f41a6c | 5000 | if (bb_note_pool.is_empty ()) |
cef3d8ad | 5001 | return NULL; |
e1ab7874 | 5002 | else |
5003 | { | |
cef3d8ad | 5004 | rtx_note *note = bb_note_pool.pop (); |
e1ab7874 | 5005 | |
4a57a2e8 | 5006 | SET_PREV_INSN (note) = NULL_RTX; |
5007 | SET_NEXT_INSN (note) = NULL_RTX; | |
e1ab7874 | 5008 | |
5009 | return note; | |
5010 | } | |
5011 | } | |
5012 | ||
5013 | /* Free bb_note_pool. */ | |
5014 | void | |
5015 | free_bb_note_pool (void) | |
5016 | { | |
f1f41a6c | 5017 | bb_note_pool.release (); |
e1ab7874 | 5018 | } |
5019 | ||
5020 | /* Setup scheduler pool and successor structure. */ | |
5021 | void | |
5022 | alloc_sched_pools (void) | |
5023 | { | |
5024 | int succs_size; | |
5025 | ||
5026 | succs_size = MAX_WS + 1; | |
48e1416a | 5027 | succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); |
e1ab7874 | 5028 | succs_info_pool.size = succs_size; |
5029 | succs_info_pool.top = -1; | |
5030 | succs_info_pool.max_top = -1; | |
e1ab7874 | 5031 | } |
5032 | ||
5033 | /* Free the pools. */ | |
5034 | void | |
5035 | free_sched_pools (void) | |
5036 | { | |
5037 | int i; | |
48e1416a | 5038 | |
e26b6f42 | 5039 | sched_lists_pool.release (); |
e1ab7874 | 5040 | gcc_assert (succs_info_pool.top == -1); |
862c1934 | 5041 | for (i = 0; i <= succs_info_pool.max_top; i++) |
e1ab7874 | 5042 | { |
f1f41a6c | 5043 | succs_info_pool.stack[i].succs_ok.release (); |
5044 | succs_info_pool.stack[i].succs_other.release (); | |
5045 | succs_info_pool.stack[i].probs_ok.release (); | |
e1ab7874 | 5046 | } |
5047 | free (succs_info_pool.stack); | |
5048 | } | |
5049 | \f | |
5050 | ||
48e1416a | 5051 | /* Returns a position in RGN where BB can be inserted retaining |
e1ab7874 | 5052 | topological order. */ |
5053 | static int | |
5054 | find_place_to_insert_bb (basic_block bb, int rgn) | |
5055 | { | |
5056 | bool has_preds_outside_rgn = false; | |
5057 | edge e; | |
5058 | edge_iterator ei; | |
48e1416a | 5059 | |
e1ab7874 | 5060 | /* Find whether we have preds outside the region. */ |
5061 | FOR_EACH_EDGE (e, ei, bb->preds) | |
5062 | if (!in_current_region_p (e->src)) | |
5063 | { | |
5064 | has_preds_outside_rgn = true; | |
5065 | break; | |
5066 | } | |
48e1416a | 5067 | |
e1ab7874 | 5068 | /* Recompute the top order -- needed when we have > 1 pred |
5069 | and in case we don't have preds outside. */ | |
5070 | if (flag_sel_sched_pipelining_outer_loops | |
5071 | && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1)) | |
5072 | { | |
5073 | int i, bbi = bb->index, cur_bbi; | |
5074 | ||
5075 | recompute_rev_top_order (); | |
5076 | for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--) | |
5077 | { | |
5078 | cur_bbi = BB_TO_BLOCK (i); | |
48e1416a | 5079 | if (rev_top_order_index[bbi] |
e1ab7874 | 5080 | < rev_top_order_index[cur_bbi]) |
5081 | break; | |
5082 | } | |
48e1416a | 5083 | |
9d75589a | 5084 | /* We skipped the right block, so we increase i here; the caller |
e1ab7874 | 5085 | will increment it once more, so we decrease i to compensate. */ |
5086 | return (i + 1) - 1; | |
5087 | } | |
5088 | else if (has_preds_outside_rgn) | |
5089 | { | |
5090 | /* This is the case when we generate an extra empty block | |
5091 | to serve as region head during pipelining. */ | |
5092 | e = EDGE_SUCC (bb, 0); | |
5093 | gcc_assert (EDGE_COUNT (bb->succs) == 1 | |
5094 | && in_current_region_p (EDGE_SUCC (bb, 0)->dest) | |
5095 | && (BLOCK_TO_BB (e->dest->index) == 0)); | |
5096 | return -1; | |
5097 | } | |
5098 | ||
5099 | /* We don't have preds outside the region. We should have | |
5100 | a single pred, because the multiple preds case comes from | |
5101 | the pipelining of outer loops, and that is handled above. | |
5102 | Just take the bbi of this single pred. */ | |
5103 | if (EDGE_COUNT (bb->succs) > 0) | |
5104 | { | |
5105 | int pred_bbi; | |
48e1416a | 5106 | |
e1ab7874 | 5107 | gcc_assert (EDGE_COUNT (bb->preds) == 1); |
48e1416a | 5108 | |
e1ab7874 | 5109 | pred_bbi = EDGE_PRED (bb, 0)->src->index; |
5110 | return BLOCK_TO_BB (pred_bbi); | |
5111 | } | |
5112 | else | |
5113 | /* BB has no successors. It is safe to put it in the end. */ | |
5114 | return current_nr_blocks - 1; | |
5115 | } | |
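
/* Note that the caller (add_block_to_current_region below) increments the
   returned value by one before using it, so returning -1 here means that
   the block should be inserted at the very beginning of the region.  */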
5116 | ||
5117 | /* Deletes an empty basic block freeing its data. */ | |
5118 | static void | |
5119 | delete_and_free_basic_block (basic_block bb) | |
5120 | { | |
5121 | gcc_assert (sel_bb_empty_p (bb)); | |
5122 | ||
5123 | if (BB_LV_SET (bb)) | |
5124 | free_lv_set (bb); | |
5125 | ||
5126 | bitmap_clear_bit (blocks_to_reschedule, bb->index); | |
5127 | ||
48e1416a | 5128 | /* Can't assert av_set properties because we use sel_aremove_bb |
5129 | when removing loop preheader from the region. At the point of | |
e1ab7874 | 5130 | removing the preheader we already have deallocated sel_region_bb_info. */ |
5131 | gcc_assert (BB_LV_SET (bb) == NULL | |
5132 | && !BB_LV_SET_VALID_P (bb) | |
5133 | && BB_AV_LEVEL (bb) == 0 | |
5134 | && BB_AV_SET (bb) == NULL); | |
48e1416a | 5135 | |
e1ab7874 | 5136 | delete_basic_block (bb); |
5137 | } | |
5138 | ||
5139 | /* Add BB to the current region and update the region data. */ | |
5140 | static void | |
5141 | add_block_to_current_region (basic_block bb) | |
5142 | { | |
5143 | int i, pos, bbi = -2, rgn; | |
5144 | ||
5145 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5146 | bbi = find_place_to_insert_bb (bb, rgn); | |
5147 | bbi += 1; | |
5148 | pos = RGN_BLOCKS (rgn) + bbi; | |
5149 | ||
5150 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5151 | && ebb_head[bbi] == pos); | |
48e1416a | 5152 | |
e1ab7874 | 5153 | /* Make a place for the new block. */ |
5154 | extend_regions (); | |
5155 | ||
5156 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5157 | BLOCK_TO_BB (rgn_bb_table[i])++; | |
48e1416a | 5158 | |
e1ab7874 | 5159 | memmove (rgn_bb_table + pos + 1, |
5160 | rgn_bb_table + pos, | |
5161 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5162 | ||
5163 | /* Initialize data for BB. */ | |
5164 | rgn_bb_table[pos] = bb->index; | |
5165 | BLOCK_TO_BB (bb->index) = bbi; | |
5166 | CONTAINING_RGN (bb->index) = rgn; | |
5167 | ||
5168 | RGN_NR_BLOCKS (rgn)++; | |
48e1416a | 5169 | |
e1ab7874 | 5170 | for (i = rgn + 1; i <= nr_regions; i++) |
5171 | RGN_BLOCKS (i)++; | |
5172 | } | |
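
/* A small example of the bookkeeping above (illustrative indices only): if
   the region's part of rgn_bb_table is {7, 3, 9} and find_place_to_insert_bb
   returned 0, the new block's index is stored right after block 7, entries 3
   and 9 (and everything following them in rgn_bb_table) are shifted one slot
   to the right, their BLOCK_TO_BB values are incremented, and RGN_BLOCKS of
   every later region grows by one.  */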
5173 | ||
5174 | /* Remove BB from the current region and update the region data. */ | |
5175 | static void | |
5176 | remove_bb_from_region (basic_block bb) | |
5177 | { | |
5178 | int i, pos, bbi = -2, rgn; | |
5179 | ||
5180 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
5181 | bbi = BLOCK_TO_BB (bb->index); | |
5182 | pos = RGN_BLOCKS (rgn) + bbi; | |
5183 | ||
5184 | gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 | |
5185 | && ebb_head[bbi] == pos); | |
5186 | ||
5187 | for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) | |
5188 | BLOCK_TO_BB (rgn_bb_table[i])--; | |
5189 | ||
5190 | memmove (rgn_bb_table + pos, | |
5191 | rgn_bb_table + pos + 1, | |
5192 | (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); | |
5193 | ||
5194 | RGN_NR_BLOCKS (rgn)--; | |
5195 | for (i = rgn + 1; i <= nr_regions; i++) | |
5196 | RGN_BLOCKS (i)--; | |
5197 | } | |
5198 | ||
48e1416a | 5199 | /* Add BB to the current region and update all data. If BB is NULL, add all |
e1ab7874 | 5200 | blocks from last_added_blocks vector. */ |
5201 | static void | |
5202 | sel_add_bb (basic_block bb) | |
5203 | { | |
5204 | /* Extend luids so that new notes will receive zero luids. */ | |
52d7e28c | 5205 | sched_extend_luids (); |
e1ab7874 | 5206 | sched_init_bbs (); |
52d7e28c | 5207 | sel_init_bbs (last_added_blocks); |
e1ab7874 | 5208 | |
48e1416a | 5209 | /* When bb is passed explicitly, the vector should contain |
e1ab7874 | 5210 | a single element that equals bb; otherwise, the vector |
5211 | should not be NULL. */ | |
f1f41a6c | 5212 | gcc_assert (last_added_blocks.exists ()); |
48e1416a | 5213 | |
e1ab7874 | 5214 | if (bb != NULL) |
5215 | { | |
f1f41a6c | 5216 | gcc_assert (last_added_blocks.length () == 1 |
5217 | && last_added_blocks[0] == bb); | |
e1ab7874 | 5218 | add_block_to_current_region (bb); |
5219 | ||
5220 | /* We associate creating/deleting data sets with the first insn | |
5221 | appearing / disappearing in the bb. */ | |
5222 | if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL) | |
5223 | create_initial_data_sets (bb); | |
48e1416a | 5224 | |
f1f41a6c | 5225 | last_added_blocks.release (); |
e1ab7874 | 5226 | } |
5227 | else | |
5228 | /* BB is NULL - process LAST_ADDED_BLOCKS instead. */ | |
5229 | { | |
5230 | int i; | |
5231 | basic_block temp_bb = NULL; | |
5232 | ||
48e1416a | 5233 | for (i = 0; |
f1f41a6c | 5234 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5235 | { |
5236 | add_block_to_current_region (bb); | |
5237 | temp_bb = bb; | |
5238 | } | |
5239 | ||
48e1416a | 5240 | /* We need to fetch at least one bb so we know the region |
e1ab7874 | 5241 | to update. */ |
5242 | gcc_assert (temp_bb != NULL); | |
5243 | bb = temp_bb; | |
5244 | ||
f1f41a6c | 5245 | last_added_blocks.release (); |
e1ab7874 | 5246 | } |
5247 | ||
5248 | rgn_setup_region (CONTAINING_RGN (bb->index)); | |
5249 | } | |
5250 | ||
48e1416a | 5251 | /* Remove BB from the current region and update all data. |
e1ab7874 | 5252 | If REMOVE_FROM_CFG_P is true, also remove the block from the cfg. */ |
5253 | static void | |
5254 | sel_remove_bb (basic_block bb, bool remove_from_cfg_p) | |
5255 | { | |
0424f393 | 5256 | unsigned idx = bb->index; |
5257 | ||
e1ab7874 | 5258 | gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX); |
48e1416a | 5259 | |
e1ab7874 | 5260 | remove_bb_from_region (bb); |
5261 | return_bb_to_pool (bb); | |
0424f393 | 5262 | bitmap_clear_bit (blocks_to_reschedule, idx); |
48e1416a | 5263 | |
e1ab7874 | 5264 | if (remove_from_cfg_p) |
1a5dbaab | 5265 | { |
5266 | basic_block succ = single_succ (bb); | |
5267 | delete_and_free_basic_block (bb); | |
5268 | set_immediate_dominator (CDI_DOMINATORS, succ, | |
5269 | recompute_dominator (CDI_DOMINATORS, succ)); | |
5270 | } | |
e1ab7874 | 5271 | |
0424f393 | 5272 | rgn_setup_region (CONTAINING_RGN (idx)); |
e1ab7874 | 5273 | } |
5274 | ||
5275 | /* Concatenate info of EMPTY_BB to info of MERGE_BB. */ | |
5276 | static void | |
5277 | move_bb_info (basic_block merge_bb, basic_block empty_bb) | |
5278 | { | |
ef4cf572 | 5279 | if (in_current_region_p (merge_bb)) |
5280 | concat_note_lists (BB_NOTE_LIST (empty_bb), | |
e97a173d | 5281 | &BB_NOTE_LIST (merge_bb)); |
5282 | BB_NOTE_LIST (empty_bb) = NULL; | |
e1ab7874 | 5283 | |
5284 | } | |
5285 | ||
e1ab7874 | 5286 | /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from |
5287 | region, but keep it in CFG. */ | |
5288 | static void | |
5289 | remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p) | |
5290 | { | |
5291 | /* The block should contain just a note or a label. | |
5292 | We try to check whether it is unused below. */ | |
5293 | gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb) | |
5294 | || LABEL_P (BB_HEAD (empty_bb))); | |
5295 | ||
5296 | /* If basic block has predecessors or successors, redirect them. */ | |
5297 | if (remove_from_cfg_p | |
5298 | && (EDGE_COUNT (empty_bb->preds) > 0 | |
5299 | || EDGE_COUNT (empty_bb->succs) > 0)) | |
5300 | { | |
5301 | basic_block pred; | |
5302 | basic_block succ; | |
5303 | ||
5304 | /* We need to init PRED and SUCC before redirecting edges. */ | |
5305 | if (EDGE_COUNT (empty_bb->preds) > 0) | |
5306 | { | |
5307 | edge e; | |
5308 | ||
5309 | gcc_assert (EDGE_COUNT (empty_bb->preds) == 1); | |
5310 | ||
5311 | e = EDGE_PRED (empty_bb, 0); | |
5312 | gcc_assert (e->src == empty_bb->prev_bb | |
5313 | && (e->flags & EDGE_FALLTHRU)); | |
5314 | ||
5315 | pred = empty_bb->prev_bb; | |
5316 | } | |
5317 | else | |
5318 | pred = NULL; | |
5319 | ||
5320 | if (EDGE_COUNT (empty_bb->succs) > 0) | |
5321 | { | |
5322 | /* We do not check fallthruness here as above, because | |
5323 | after removing a jump the edge may actually not be fallthru. */ | |
5324 | gcc_assert (EDGE_COUNT (empty_bb->succs) == 1); | |
5325 | succ = EDGE_SUCC (empty_bb, 0)->dest; | |
5326 | } | |
5327 | else | |
5328 | succ = NULL; | |
5329 | ||
5330 | if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL) | |
5331 | { | |
5332 | edge e = EDGE_PRED (empty_bb, 0); | |
5333 | ||
5334 | if (e->flags & EDGE_FALLTHRU) | |
5335 | redirect_edge_succ_nodup (e, succ); | |
5336 | else | |
5337 | sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ); | |
5338 | } | |
5339 | ||
5340 | if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL) | |
5341 | { | |
5342 | edge e = EDGE_SUCC (empty_bb, 0); | |
5343 | ||
5344 | if (find_edge (pred, e->dest) == NULL) | |
5345 | redirect_edge_pred (e, pred); | |
5346 | } | |
5347 | } | |
5348 | ||
5349 | /* Finish removing. */ | |
5350 | sel_remove_bb (empty_bb, remove_from_cfg_p); | |
5351 | } | |
5352 | ||
48e1416a | 5353 | /* An implementation of create_basic_block hook, which additionally updates |
e1ab7874 | 5354 | per-bb data structures. */ |
5355 | static basic_block | |
5356 | sel_create_basic_block (void *headp, void *endp, basic_block after) | |
5357 | { | |
5358 | basic_block new_bb; | |
cef3d8ad | 5359 | rtx_note *new_bb_note; |
48e1416a | 5360 | |
5361 | gcc_assert (flag_sel_sched_pipelining_outer_loops | |
f1f41a6c | 5362 | || !last_added_blocks.exists ()); |
e1ab7874 | 5363 | |
5364 | new_bb_note = get_bb_note_from_pool (); | |
5365 | ||
5366 | if (new_bb_note == NULL_RTX) | |
5367 | new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after); | |
5368 | else | |
5369 | { | |
3c3f97b4 | 5370 | new_bb = create_basic_block_structure ((rtx_insn *) headp, |
5371 | (rtx_insn *) endp, | |
e1ab7874 | 5372 | new_bb_note, after); |
5373 | new_bb->aux = NULL; | |
5374 | } | |
5375 | ||
f1f41a6c | 5376 | last_added_blocks.safe_push (new_bb); |
e1ab7874 | 5377 | |
5378 | return new_bb; | |
5379 | } | |
5380 | ||
5381 | /* Implement sched_init_only_bb (). */ | |
5382 | static void | |
5383 | sel_init_only_bb (basic_block bb, basic_block after) | |
5384 | { | |
5385 | gcc_assert (after == NULL); | |
5386 | ||
5387 | extend_regions (); | |
5388 | rgn_make_new_region_out_of_new_block (bb); | |
5389 | } | |
5390 | ||
5391 | /* Update the latch when we've split or merged it from FROM block to TO. | |
5392 | This should be checked for all outer loops, too. */ | |
5393 | static void | |
5394 | change_loops_latches (basic_block from, basic_block to) | |
5395 | { | |
5396 | gcc_assert (from != to); | |
5397 | ||
5398 | if (current_loop_nest) | |
5399 | { | |
2e966e2a | 5400 | class loop *loop; |
e1ab7874 | 5401 | |
5402 | for (loop = current_loop_nest; loop; loop = loop_outer (loop)) | |
5403 | if (considered_for_pipelining_p (loop) && loop->latch == from) | |
5404 | { | |
5405 | gcc_assert (loop == current_loop_nest); | |
5406 | loop->latch = to; | |
5407 | gcc_assert (loop_latch_edge (loop)); | |
5408 | } | |
5409 | } | |
5410 | } | |
5411 | ||
48e1416a | 5412 | /* Splits BB into two basic blocks, adding it to the region and extending |
e1ab7874 | 5413 | per-bb data structures. Returns the newly created bb. */ |
5414 | static basic_block | |
5415 | sel_split_block (basic_block bb, rtx after) | |
5416 | { | |
5417 | basic_block new_bb; | |
5418 | insn_t insn; | |
5419 | ||
5420 | new_bb = sched_split_block_1 (bb, after); | |
5421 | sel_add_bb (new_bb); | |
5422 | ||
5423 | /* This should be called after sel_add_bb, because this uses | |
48e1416a | 5424 | CONTAINING_RGN for the new block, which is not yet initialized. |
e1ab7874 | 5425 | FIXME: this function may be a no-op now. */ |
5426 | change_loops_latches (bb, new_bb); | |
5427 | ||
5428 | /* Update ORIG_BB_INDEX for insns moved into the new block. */ | |
5429 | FOR_BB_INSNS (new_bb, insn) | |
5430 | if (INSN_P (insn)) | |
5431 | EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index; | |
5432 | ||
5433 | if (sel_bb_empty_p (bb)) | |
5434 | { | |
5435 | gcc_assert (!sel_bb_empty_p (new_bb)); | |
5436 | ||
5437 | /* NEW_BB has data sets that need to be updated and BB holds | |
5438 | data sets that should be removed. Exchange these data sets | |
5439 | so that we won't lose BB's valid data sets. */ | |
5440 | exchange_data_sets (new_bb, bb); | |
5441 | free_data_sets (bb); | |
5442 | } | |
5443 | ||
5444 | if (!sel_bb_empty_p (new_bb) | |
5445 | && bitmap_bit_p (blocks_to_reschedule, bb->index)) | |
5446 | bitmap_set_bit (blocks_to_reschedule, new_bb->index); | |
5447 | ||
5448 | return new_bb; | |
5449 | } | |
5450 | ||
5451 | /* If BB ends with a jump insn whose UID is greater than PREV_MAX_UID, return it. | |
5452 | Otherwise returns NULL. */ | |
04d073df | 5453 | static rtx_insn * |
e1ab7874 | 5454 | check_for_new_jump (basic_block bb, int prev_max_uid) |
5455 | { | |
04d073df | 5456 | rtx_insn *end; |
e1ab7874 | 5457 | |
5458 | end = sel_bb_end (bb); | |
5459 | if (end && INSN_UID (end) >= prev_max_uid) | |
5460 | return end; | |
5461 | return NULL; | |
5462 | } | |
5463 | ||
48e1416a | 5464 | /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block. |
e1ab7874 | 5465 | New means having UID at least equal to PREV_MAX_UID. */ |
04d073df | 5466 | static rtx_insn * |
e1ab7874 | 5467 | find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid) |
5468 | { | |
04d073df | 5469 | rtx_insn *jump; |
e1ab7874 | 5470 | |
5471 | /* Return immediately if no new insns were emitted. */ | |
5472 | if (get_max_uid () == prev_max_uid) | |
5473 | return NULL; | |
48e1416a | 5474 | |
e1ab7874 | 5475 | /* Now check both blocks for new jumps. There will only ever be one. */ |
5476 | if ((jump = check_for_new_jump (from, prev_max_uid))) | |
5477 | return jump; | |
5478 | ||
5479 | if (jump_bb != NULL | |
5480 | && (jump = check_for_new_jump (jump_bb, prev_max_uid))) | |
5481 | return jump; | |
5482 | return NULL; | |
5483 | } | |
5484 | ||
5485 | /* Splits E and adds the newly created basic block to the current region. | |
5486 | Returns this basic block. */ | |
5487 | basic_block | |
5488 | sel_split_edge (edge e) | |
5489 | { | |
5490 | basic_block new_bb, src, other_bb = NULL; | |
5491 | int prev_max_uid; | |
04d073df | 5492 | rtx_insn *jump; |
e1ab7874 | 5493 | |
5494 | src = e->src; | |
5495 | prev_max_uid = get_max_uid (); | |
5496 | new_bb = split_edge (e); | |
5497 | ||
48e1416a | 5498 | if (flag_sel_sched_pipelining_outer_loops |
e1ab7874 | 5499 | && current_loop_nest) |
5500 | { | |
5501 | int i; | |
5502 | basic_block bb; | |
5503 | ||
48e1416a | 5504 | /* Some of the basic blocks might not have been added to the loop. |
e1ab7874 | 5505 | Add them here, until this is fixed in force_fallthru. */ |
48e1416a | 5506 | for (i = 0; |
f1f41a6c | 5507 | last_added_blocks.iterate (i, &bb); i++) |
e1ab7874 | 5508 | if (!bb->loop_father) |
5509 | { | |
5510 | add_bb_to_loop (bb, e->dest->loop_father); | |
5511 | ||
5512 | gcc_assert (!other_bb && (new_bb->index != bb->index)); | |
5513 | other_bb = bb; | |
5514 | } | |
5515 | } | |
5516 | ||
5517 | /* Add all last_added_blocks to the region. */ | |
5518 | sel_add_bb (NULL); | |
5519 | ||
5520 | jump = find_new_jump (src, new_bb, prev_max_uid); | |
5521 | if (jump) | |
5522 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5523 | ||
5524 | /* Put the correct lv set on this block. */ | |
5525 | if (other_bb && !sel_bb_empty_p (other_bb)) | |
5526 | compute_live (sel_bb_head (other_bb)); | |
5527 | ||
5528 | return new_bb; | |
5529 | } | |
5530 | ||
5531 | /* Implement sched_create_empty_bb (). */ | |
5532 | static basic_block | |
5533 | sel_create_empty_bb (basic_block after) | |
5534 | { | |
5535 | basic_block new_bb; | |
5536 | ||
5537 | new_bb = sched_create_empty_bb_1 (after); | |
5538 | ||
5539 | /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit | |
5540 | later. */ | |
f1f41a6c | 5541 | gcc_assert (last_added_blocks.length () == 1 |
5542 | && last_added_blocks[0] == new_bb); | |
e1ab7874 | 5543 | |
f1f41a6c | 5544 | last_added_blocks.release (); |
e1ab7874 | 5545 | return new_bb; |
5546 | } | |
5547 | ||
5548 | /* Implement sched_create_recovery_block. ORIG_INSN is where the block | |
5549 | will be split to insert a check. */ | |
5550 | basic_block | |
5551 | sel_create_recovery_block (insn_t orig_insn) | |
5552 | { | |
5553 | basic_block first_bb, second_bb, recovery_block; | |
5554 | basic_block before_recovery = NULL; | |
04d073df | 5555 | rtx_insn *jump; |
e1ab7874 | 5556 | |
5557 | first_bb = BLOCK_FOR_INSN (orig_insn); | |
5558 | if (sel_bb_end_p (orig_insn)) | |
5559 | { | |
5560 | /* Avoid introducing an empty block while splitting. */ | |
5561 | gcc_assert (single_succ_p (first_bb)); | |
5562 | second_bb = single_succ (first_bb); | |
5563 | } | |
5564 | else | |
5565 | second_bb = sched_split_block (first_bb, orig_insn); | |
5566 | ||
5567 | recovery_block = sched_create_recovery_block (&before_recovery); | |
5568 | if (before_recovery) | |
34154e27 | 5569 | copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5570 | |
5571 | gcc_assert (sel_bb_empty_p (recovery_block)); | |
5572 | sched_create_recovery_edges (first_bb, recovery_block, second_bb); | |
5573 | if (current_loops != NULL) | |
5574 | add_bb_to_loop (recovery_block, first_bb->loop_father); | |
48e1416a | 5575 | |
e1ab7874 | 5576 | sel_add_bb (recovery_block); |
48e1416a | 5577 | |
e1ab7874 | 5578 | jump = BB_END (recovery_block); |
5579 | gcc_assert (sel_bb_head (recovery_block) == jump); | |
5580 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); | |
5581 | ||
5582 | return recovery_block; | |
5583 | } | |
5584 | ||
5585 | /* Merge basic block B into basic block A. */ | |
0424f393 | 5586 | static void |
e1ab7874 | 5587 | sel_merge_blocks (basic_block a, basic_block b) |
5588 | { | |
0424f393 | 5589 | gcc_assert (sel_bb_empty_p (b) |
5590 | && EDGE_COUNT (b->preds) == 1 | |
5591 | && EDGE_PRED (b, 0)->src == b->prev_bb); | |
e1ab7874 | 5592 | |
0424f393 | 5593 | move_bb_info (b->prev_bb, b); |
5594 | remove_empty_bb (b, false); | |
5595 | merge_blocks (a, b); | |
e1ab7874 | 5596 | change_loops_latches (b, a); |
5597 | } | |
5598 | ||
5599 | /* A wrapper for redirect_edge_and_branch_force, which also initializes | |
8d1881f5 | 5600 | data structures for possibly created bb and insns. */ |
e1ab7874 | 5601 | void |
5602 | sel_redirect_edge_and_branch_force (edge e, basic_block to) | |
5603 | { | |
1a5dbaab | 5604 | basic_block jump_bb, src, orig_dest = e->dest; |
e1ab7874 | 5605 | int prev_max_uid; |
04d073df | 5606 | rtx_insn *jump; |
8d1881f5 | 5607 | int old_seqno = -1; |
48e1416a | 5608 | |
1a5dbaab | 5609 | /* This function is now used only for bookkeeping code creation, where |
5610 | we'll never get the single pred of orig_dest block and thus will not | |
5611 | hit unreachable blocks when updating dominator info. */ | |
5612 | gcc_assert (!sel_bb_empty_p (e->src) | |
5613 | && !single_pred_p (orig_dest)); | |
e1ab7874 | 5614 | src = e->src; |
5615 | prev_max_uid = get_max_uid (); | |
8d1881f5 | 5616 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5617 | when the conditional jump being redirected may become unconditional. */ | |
5618 | if (any_condjump_p (BB_END (src)) | |
5619 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5620 | old_seqno = INSN_SEQNO (BB_END (src)); | |
e1ab7874 | 5621 | |
8d1881f5 | 5622 | jump_bb = redirect_edge_and_branch_force (e, to); |
e1ab7874 | 5623 | if (jump_bb != NULL) |
5624 | sel_add_bb (jump_bb); | |
5625 | ||
5626 | /* By now this function cannot be used to spoil the loop structure, | |
5627 | thus we don't need to update anything. But check it to be sure. */ | |
5628 | if (current_loop_nest | |
5629 | && pipelining_p) | |
5630 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
48e1416a | 5631 | |
e1ab7874 | 5632 | jump = find_new_jump (src, jump_bb, prev_max_uid); |
5633 | if (jump) | |
8d1881f5 | 5634 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, |
5635 | old_seqno); | |
1a5dbaab | 5636 | set_immediate_dominator (CDI_DOMINATORS, to, |
5637 | recompute_dominator (CDI_DOMINATORS, to)); | |
5638 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5639 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
36aec94f | 5640 | if (jump && sel_bb_head_p (jump)) |
5641 | compute_live (jump); | |
e1ab7874 | 5642 | } |
5643 | ||
93919afc | 5644 | /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by |
5645 | redirected edge are in reverse topological order. */ | |
5646 | bool | |
e1ab7874 | 5647 | sel_redirect_edge_and_branch (edge e, basic_block to) |
5648 | { | |
5649 | bool latch_edge_p; | |
1a5dbaab | 5650 | basic_block src, orig_dest = e->dest; |
e1ab7874 | 5651 | int prev_max_uid; |
04d073df | 5652 | rtx_insn *jump; |
df6266b9 | 5653 | edge redirected; |
93919afc | 5654 | bool recompute_toporder_p = false; |
1a5dbaab | 5655 | bool maybe_unreachable = single_pred_p (orig_dest); |
8d1881f5 | 5656 | int old_seqno = -1; |
e1ab7874 | 5657 | |
5658 | latch_edge_p = (pipelining_p | |
5659 | && current_loop_nest | |
5660 | && e == loop_latch_edge (current_loop_nest)); | |
5661 | ||
5662 | src = e->src; | |
5663 | prev_max_uid = get_max_uid (); | |
df6266b9 | 5664 | |
8d1881f5 | 5665 | /* Compute and pass old_seqno down to sel_init_new_insn only for the case |
5666 | when the conditional jump being redirected may become unconditional. */ | |
5667 | if (any_condjump_p (BB_END (src)) | |
5668 | && INSN_SEQNO (BB_END (src)) >= 0) | |
5669 | old_seqno = INSN_SEQNO (BB_END (src)); | |
5670 | ||
df6266b9 | 5671 | redirected = redirect_edge_and_branch (e, to); |
5672 | ||
f1f41a6c | 5673 | gcc_assert (redirected && !last_added_blocks.exists ()); |
e1ab7874 | 5674 | |
5675 | /* When we've redirected a latch edge, update the header. */ | |
5676 | if (latch_edge_p) | |
5677 | { | |
5678 | current_loop_nest->header = to; | |
5679 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
5680 | } | |
5681 | ||
93919afc | 5682 | /* In rare situations, the topological relation between the blocks connected |
5683 | by the redirected edge can change (see PR42245 for an example). Update | |
5684 | block_to_bb/bb_to_block. */ | |
5685 | if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index) | |
5686 | && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index)) | |
5687 | recompute_toporder_p = true; | |
5688 | ||
e1ab7874 | 5689 | jump = find_new_jump (src, NULL, prev_max_uid); |
5690 | if (jump) | |
8d1881f5 | 5691 | sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno); |
93919afc | 5692 | |
1a5dbaab | 5693 | /* Only update dominator info when we don't have unreachable blocks. |
5694 | Otherwise we'll update in maybe_tidy_empty_bb. */ | |
5695 | if (!maybe_unreachable) | |
5696 | { | |
5697 | set_immediate_dominator (CDI_DOMINATORS, to, | |
5698 | recompute_dominator (CDI_DOMINATORS, to)); | |
5699 | set_immediate_dominator (CDI_DOMINATORS, orig_dest, | |
5700 | recompute_dominator (CDI_DOMINATORS, orig_dest)); | |
5701 | } | |
36aec94f | 5702 | if (jump && sel_bb_head_p (jump)) |
5703 | compute_live (jump); | |
93919afc | 5704 | return recompute_toporder_p; |
e1ab7874 | 5705 | } |
5706 | ||
5707 | /* This variable holds the cfg hooks used by the selective scheduler. */ | |
5708 | static struct cfg_hooks sel_cfg_hooks; | |
5709 | ||
5710 | /* Register sel-sched cfg hooks. */ | |
5711 | void | |
5712 | sel_register_cfg_hooks (void) | |
5713 | { | |
5714 | sched_split_block = sel_split_block; | |
5715 | ||
5716 | orig_cfg_hooks = get_cfg_hooks (); | |
5717 | sel_cfg_hooks = orig_cfg_hooks; | |
5718 | ||
5719 | sel_cfg_hooks.create_basic_block = sel_create_basic_block; | |
5720 | ||
5721 | set_cfg_hooks (sel_cfg_hooks); | |
5722 | ||
5723 | sched_init_only_bb = sel_init_only_bb; | |
5724 | sched_split_block = sel_split_block; | |
5725 | sched_create_empty_bb = sel_create_empty_bb; | |
5726 | } | |
5727 | ||
5728 | /* Unregister sel-sched cfg hooks. */ | |
5729 | void | |
5730 | sel_unregister_cfg_hooks (void) | |
5731 | { | |
5732 | sched_create_empty_bb = NULL; | |
5733 | sched_split_block = NULL; | |
5734 | sched_init_only_bb = NULL; | |
5735 | ||
5736 | set_cfg_hooks (orig_cfg_hooks); | |
5737 | } | |
5738 | \f | |
5739 | ||
5740 | /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, | |
5741 | LABEL is where this jump should be directed. */ | |
3aaa3eec | 5742 | rtx_insn * |
e1ab7874 | 5743 | create_insn_rtx_from_pattern (rtx pattern, rtx label) |
5744 | { | |
3aaa3eec | 5745 | rtx_insn *insn_rtx; |
e1ab7874 | 5746 | |
5747 | gcc_assert (!INSN_P (pattern)); | |
5748 | ||
5749 | start_sequence (); | |
5750 | ||
5751 | if (label == NULL_RTX) | |
5752 | insn_rtx = emit_insn (pattern); | |
9845d120 | 5753 | else if (DEBUG_INSN_P (label)) |
5754 | insn_rtx = emit_debug_insn (pattern); | |
e1ab7874 | 5755 | else |
5756 | { | |
5757 | insn_rtx = emit_jump_insn (pattern); | |
5758 | JUMP_LABEL (insn_rtx) = label; | |
5759 | ++LABEL_NUSES (label); | |
5760 | } | |
5761 | ||
5762 | end_sequence (); | |
5763 | ||
52d7e28c | 5764 | sched_extend_luids (); |
e1ab7874 | 5765 | sched_extend_target (); |
5766 | sched_deps_init (false); | |
5767 | ||
5768 | /* Initialize INSN_CODE now. */ | |
5769 | recog_memoized (insn_rtx); | |
5770 | return insn_rtx; | |
5771 | } | |
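
/* A minimal usage sketch (PATTERN stands for any valid insn pattern built
   by the caller):

     rtx_insn *i = create_insn_rtx_from_pattern (pattern, NULL_RTX);
     vinsn_t vi = create_vinsn_from_insn_rtx (i, false);

   sched_extend_luids and sched_extend_target are called above so that the
   scheduler's per-insn arrays already cover the newly emitted insn.  */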
5772 | ||
5773 | /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn | |
5774 | must not be clonable. */ | |
5775 | vinsn_t | |
2f3c9801 | 5776 | create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p) |
e1ab7874 | 5777 | { |
5778 | gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx)); | |
5779 | ||
5780 | /* If VINSN_TYPE is not USE, retain its uniqueness. */ | |
5781 | return vinsn_create (insn_rtx, force_unique_p); | |
5782 | } | |
5783 | ||
5784 | /* Create a copy of INSN_RTX. */ | |
3aaa3eec | 5785 | rtx_insn * |
e1ab7874 | 5786 | create_copy_of_insn_rtx (rtx insn_rtx) |
5787 | { | |
3aaa3eec | 5788 | rtx_insn *res; |
5789 | rtx link; | |
e1ab7874 | 5790 | |
9845d120 | 5791 | if (DEBUG_INSN_P (insn_rtx)) |
5792 | return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5793 | insn_rtx); | |
5794 | ||
e1ab7874 | 5795 | gcc_assert (NONJUMP_INSN_P (insn_rtx)); |
5796 | ||
5797 | res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), | |
5798 | NULL_RTX); | |
114c1eb1 | 5799 | |
bb99ba64 | 5800 | /* Locate the end of existing REG_NOTES in NEW_RTX. */ |
5801 | rtx *ptail = ®_NOTES (res); | |
5802 | while (*ptail != NULL_RTX) | |
5803 | ptail = &XEXP (*ptail, 1); | |
5804 | ||
114c1eb1 | 5805 | /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND |
5806 | since mark_jump_label will make them. REG_LABEL_TARGETs are created | |
5807 | there too, but are supposed to be sticky, so we copy them. */ | |
5808 | for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1)) | |
5809 | if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND | |
5810 | && REG_NOTE_KIND (link) != REG_EQUAL | |
5811 | && REG_NOTE_KIND (link) != REG_EQUIV) | |
5812 | { | |
bb99ba64 | 5813 | *ptail = duplicate_reg_note (link); |
5814 | ptail = &XEXP (*ptail, 1); | |
114c1eb1 | 5815 | } |
5816 | ||
e1ab7874 | 5817 | return res; |
5818 | } | |
5819 | ||
5820 | /* Change vinsn field of EXPR to hold NEW_VINSN. */ | |
5821 | void | |
5822 | change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) | |
5823 | { | |
5824 | vinsn_detach (EXPR_VINSN (expr)); | |
5825 | ||
5826 | EXPR_VINSN (expr) = new_vinsn; | |
5827 | vinsn_attach (new_vinsn); | |
5828 | } | |
5829 | ||
5830 | /* Helpers for global init. */ | |
5831 | /* This structure is used to be able to call existing bundling mechanism | |
5832 | and calculate insn priorities. */ | |
48e1416a | 5833 | static struct haifa_sched_info sched_sel_haifa_sched_info = |
e1ab7874 | 5834 | { |
5835 | NULL, /* init_ready_list */ | |
5836 | NULL, /* can_schedule_ready_p */ | |
5837 | NULL, /* schedule_more_p */ | |
5838 | NULL, /* new_ready */ | |
5839 | NULL, /* rgn_rank */ | |
5840 | sel_print_insn, /* rgn_print_insn */ | |
5841 | contributes_to_priority, | |
4db82bc9 | 5842 | NULL, /* insn_finishes_block_p */ |
e1ab7874 | 5843 | |
5844 | NULL, NULL, | |
5845 | NULL, NULL, | |
5846 | 0, 0, | |
5847 | ||
5848 | NULL, /* add_remove_insn */ | |
5849 | NULL, /* begin_schedule_ready */ | |
d2412f57 | 5850 | NULL, /* begin_move_insn */ |
e1ab7874 | 5851 | NULL, /* advance_target_bb */ |
e2f4a6ff | 5852 | |
5853 | NULL, | |
5854 | NULL, | |
5855 | ||
e1ab7874 | 5856 | SEL_SCHED | NEW_BBS |
5857 | }; | |
5858 | ||
5859 | /* Setup special insns used in the scheduler. */ | |
48e1416a | 5860 | void |
e1ab7874 | 5861 | setup_nop_and_exit_insns (void) |
5862 | { | |
5863 | gcc_assert (nop_pattern == NULL_RTX | |
5864 | && exit_insn == NULL_RTX); | |
5865 | ||
bc9cb5ed | 5866 | nop_pattern = constm1_rtx; |
e1ab7874 | 5867 | |
5868 | start_sequence (); | |
5869 | emit_insn (nop_pattern); | |
5870 | exit_insn = get_insns (); | |
5871 | end_sequence (); | |
34154e27 | 5872 | set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun)); |
e1ab7874 | 5873 | } |
5874 | ||
5875 | /* Free special insns used in the scheduler. */ | |
5876 | void | |
5877 | free_nop_and_exit_insns (void) | |
5878 | { | |
179c282d | 5879 | exit_insn = NULL; |
e1ab7874 | 5880 | nop_pattern = NULL_RTX; |
5881 | } | |
5882 | ||
5883 | /* Setup a special vinsn used in new insns initialization. */ | |
5884 | void | |
5885 | setup_nop_vinsn (void) | |
5886 | { | |
5887 | nop_vinsn = vinsn_create (exit_insn, false); | |
5888 | vinsn_attach (nop_vinsn); | |
5889 | } | |
5890 | ||
5891 | /* Free a special vinsn used in new insns initialization. */ | |
5892 | void | |
5893 | free_nop_vinsn (void) | |
5894 | { | |
5895 | gcc_assert (VINSN_COUNT (nop_vinsn) == 1); | |
5896 | vinsn_detach (nop_vinsn); | |
5897 | nop_vinsn = NULL; | |
5898 | } | |
5899 | ||
5900 | /* Call a set_sched_flags hook. */ | |
5901 | void | |
5902 | sel_set_sched_flags (void) | |
5903 | { | |
48e1416a | 5904 | /* ??? This means that set_sched_flags was called, and we decided to |
e1ab7874 | 5905 | support speculation. However, set_sched_flags also modifies flags |
48e1416a | 5906 | on current_sched_info, doing this only at global init. And we |
e1ab7874 | 5907 | sometimes change c_s_i later. So put the correct flags again. */ |
5908 | if (spec_info && targetm.sched.set_sched_flags) | |
5909 | targetm.sched.set_sched_flags (spec_info); | |
5910 | } | |
5911 | ||
5912 | /* Setup pointers to global sched info structures. */ | |
5913 | void | |
5914 | sel_setup_sched_infos (void) | |
5915 | { | |
5916 | rgn_setup_common_sched_info (); | |
5917 | ||
5918 | memcpy (&sel_common_sched_info, common_sched_info, | |
5919 | sizeof (sel_common_sched_info)); | |
5920 | ||
5921 | sel_common_sched_info.fix_recovery_cfg = NULL; | |
5922 | sel_common_sched_info.add_block = NULL; | |
5923 | sel_common_sched_info.estimate_number_of_insns | |
5924 | = sel_estimate_number_of_insns; | |
5925 | sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn; | |
5926 | sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS; | |
5927 | ||
5928 | common_sched_info = &sel_common_sched_info; | |
5929 | ||
5930 | current_sched_info = &sched_sel_haifa_sched_info; | |
48e1416a | 5931 | current_sched_info->sched_max_insns_priority = |
e1ab7874 | 5932 | get_rgn_sched_max_insns_priority (); |
48e1416a | 5933 | |
e1ab7874 | 5934 | sel_set_sched_flags (); |
5935 | } | |
5936 | \f | |
5937 | ||
5938 | /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX, | |
5939 | and increments *BB_ORD_INDEX afterwards. */ | |
5940 | static void | |
5941 | sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn) | |
5942 | { | |
5943 | RGN_NR_BLOCKS (rgn) += 1; | |
5944 | RGN_DONT_CALC_DEPS (rgn) = 0; | |
5945 | RGN_HAS_REAL_EBB (rgn) = 0; | |
5946 | CONTAINING_RGN (bb->index) = rgn; | |
5947 | BLOCK_TO_BB (bb->index) = *bb_ord_index; | |
5948 | rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index; | |
5949 | (*bb_ord_index)++; | |
5950 | ||
5951 | /* FIXME: it is true only when not scheduling ebbs. */ | |
5952 | RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn); | |
5953 | } | |
5954 | ||
5955 | /* Functions to support pipelining of outer loops. */ | |
5956 | ||
5957 | /* Creates a new empty region and returns its number. */ | |
5958 | static int | |
5959 | sel_create_new_region (void) | |
5960 | { | |
5961 | int new_rgn_number = nr_regions; | |
5962 | ||
5963 | RGN_NR_BLOCKS (new_rgn_number) = 0; | |
5964 | ||
5965 | /* FIXME: This will work only when EBBs are not created. */ | |
5966 | if (new_rgn_number != 0) | |
48e1416a | 5967 | RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + |
e1ab7874 | 5968 | RGN_NR_BLOCKS (new_rgn_number - 1); |
5969 | else | |
5970 | RGN_BLOCKS (new_rgn_number) = 0; | |
5971 | ||
5972 | /* Set the blocks of the next region so the other functions may | |
5973 | calculate the number of blocks in the region. */ | |
48e1416a | 5974 | RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + |
e1ab7874 | 5975 | RGN_NR_BLOCKS (new_rgn_number); |
5976 | ||
5977 | nr_regions++; | |
5978 | ||
5979 | return new_rgn_number; | |
5980 | } | |
5981 | ||
5982 | /* If X has a smaller topological sort number than Y, returns -1; | |
5983 | if greater, returns 1. */ | |
5984 | static int | |
5985 | bb_top_order_comparator (const void *x, const void *y) | |
5986 | { | |
5987 | basic_block bb1 = *(const basic_block *) x; | |
5988 | basic_block bb2 = *(const basic_block *) y; | |
5989 | ||
48e1416a | 5990 | gcc_assert (bb1 == bb2 |
5991 | || rev_top_order_index[bb1->index] | |
e1ab7874 | 5992 | != rev_top_order_index[bb2->index]); |
5993 | ||
5994 | /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so | |
5995 | bbs with greater number should go earlier. */ | |
5996 | if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index]) | |
5997 | return -1; | |
5998 | else | |
5999 | return 1; | |
6000 | } | |
6001 | ||
48e1416a | 6002 | /* Create a region for LOOP and return its number. If we don't want |
e1ab7874 | 6003 | to pipeline LOOP, return -1. */ |
6004 | static int | |
2e966e2a | 6005 | make_region_from_loop (class loop *loop) |
e1ab7874 | 6006 | { |
6007 | unsigned int i; | |
6008 | int new_rgn_number = -1; | |
2e966e2a | 6009 | class loop *inner; |
e1ab7874 | 6010 | |
6011 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6012 | int bb_ord_index = 0; | |
6013 | basic_block *loop_blocks; | |
6014 | basic_block preheader_block; | |
6015 | ||
48e1416a | 6016 | if (loop->num_nodes |
e1ab7874 | 6017 | > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS)) |
6018 | return -1; | |
48e1416a | 6019 | |
e1ab7874 | 6020 | /* Don't pipeline loops whose latch belongs to some of its inner loops. */ |
6021 | for (inner = loop->inner; inner; inner = inner->inner) | |
6022 | if (flow_bb_inside_loop_p (inner, loop->latch)) | |
6023 | return -1; | |
6024 | ||
6025 | loop->ninsns = num_loop_insns (loop); | |
6026 | if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS)) | |
6027 | return -1; | |
6028 | ||
6029 | loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator); | |
6030 | ||
6031 | for (i = 0; i < loop->num_nodes; i++) | |
6032 | if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP) | |
6033 | { | |
6034 | free (loop_blocks); | |
6035 | return -1; | |
6036 | } | |
6037 | ||
6038 | preheader_block = loop_preheader_edge (loop)->src; | |
6039 | gcc_assert (preheader_block); | |
6040 | gcc_assert (loop_blocks[0] == loop->header); | |
6041 | ||
6042 | new_rgn_number = sel_create_new_region (); | |
6043 | ||
6044 | sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number); | |
08b7917c | 6045 | bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index); |
e1ab7874 | 6046 | |
6047 | for (i = 0; i < loop->num_nodes; i++) | |
6048 | { | |
6049 | /* Add only those blocks that haven't been scheduled in the inner loop. | |
6050 | The exception is the basic blocks with bookkeeping code - they should | |
48e1416a | 6051 | be added to the region (and they actually don't belong to the loop |
e1ab7874 | 6052 | body, but to the region containing that loop body). */ |
6053 | ||
6054 | gcc_assert (new_rgn_number >= 0); | |
6055 | ||
08b7917c | 6056 | if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index)) |
e1ab7874 | 6057 | { |
48e1416a | 6058 | sel_add_block_to_region (loop_blocks[i], &bb_ord_index, |
e1ab7874 | 6059 | new_rgn_number); |
08b7917c | 6060 | bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index); |
e1ab7874 | 6061 | } |
6062 | } | |
6063 | ||
6064 | free (loop_blocks); | |
6065 | MARK_LOOP_FOR_PIPELINING (loop); | |
6066 | ||
6067 | return new_rgn_number; | |
6068 | } | |
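
/* To summarize the checks above: a loop is rejected for pipelining (-1 is
   returned) when it has more blocks than PARAM_MAX_PIPELINE_REGION_BLOCKS,
   more insns than PARAM_MAX_PIPELINE_REGION_INSNS, a latch belonging to one
   of its inner loops, or an irreducible block in its body.  */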
6069 | ||
6070 | /* Create a new region from preheader blocks LOOP_BLOCKS. */ | |
6071 | void | |
f1f41a6c | 6072 | make_region_from_loop_preheader (vec<basic_block> *&loop_blocks) |
e1ab7874 | 6073 | { |
6074 | unsigned int i; | |
6075 | int new_rgn_number = -1; | |
6076 | basic_block bb; | |
6077 | ||
6078 | /* Basic block index, to be assigned to BLOCK_TO_BB. */ | |
6079 | int bb_ord_index = 0; | |
6080 | ||
6081 | new_rgn_number = sel_create_new_region (); | |
6082 | ||
f1f41a6c | 6083 | FOR_EACH_VEC_ELT (*loop_blocks, i, bb) |
e1ab7874 | 6084 | { |
6085 | gcc_assert (new_rgn_number >= 0); | |
6086 | ||
6087 | sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number); | |
6088 | } | |
6089 | ||
f1f41a6c | 6090 | vec_free (loop_blocks); |
e1ab7874 | 6091 | } |
6092 | ||
6093 | ||
6094 | /* Create region(s) from loop nest LOOP, such that inner loops will be | |
48e1416a | 6095 | pipelined before outer loops. Returns true when a region for LOOP |
e1ab7874 | 6096 | is created. */ |
6097 | static bool | |
2e966e2a | 6098 | make_regions_from_loop_nest (class loop *loop) |
48e1416a | 6099 | { |
2e966e2a | 6100 | class loop *cur_loop; |
e1ab7874 | 6101 | int rgn_number; |
6102 | ||
6103 | /* Traverse all inner nodes of the loop. */ | |
6104 | for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next) | |
08b7917c | 6105 | if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index)) |
e1ab7874 | 6106 | return false; |
6107 | ||
6108 | /* At this moment all regular inner loops should have been pipelined. | |
6109 | Try to create a region from this loop. */ | |
6110 | rgn_number = make_region_from_loop (loop); | |
6111 | ||
6112 | if (rgn_number < 0) | |
6113 | return false; | |
6114 | ||
f1f41a6c | 6115 | loop_nests.safe_push (loop); |
e1ab7874 | 6116 | return true; |
6117 | } | |
6118 | ||
6119 | /* Initialize the data structures needed for pipelining. */ | |
6120 | void | |
6121 | sel_init_pipelining (void) | |
6122 | { | |
6123 | /* Collect loop information to be used in outer loops pipelining. */ | |
6124 | loop_optimizer_init (LOOPS_HAVE_PREHEADERS | |
6125 | | LOOPS_HAVE_FALLTHRU_PREHEADERS | |
6126 | | LOOPS_HAVE_RECORDED_EXITS | |
6127 | | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS); | |
6128 | current_loop_nest = NULL; | |
6129 | ||
fe672ac0 | 6130 | bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun)); |
53c5d9d4 | 6131 | bitmap_clear (bbs_in_loop_rgns); |
e1ab7874 | 6132 | |
6133 | recompute_rev_top_order (); | |
6134 | } | |
6135 | ||
2e966e2a | 6136 | /* Returns a class loop for region RGN. */ |
e1ab7874 | 6137 | loop_p |
6138 | get_loop_nest_for_rgn (unsigned int rgn) | |
6139 | { | |
6140 | /* Regions created with extend_rgns don't have corresponding loop nests, | |
6141 | because they don't represent loops. */ | |
f1f41a6c | 6142 | if (rgn < loop_nests.length ()) |
6143 | return loop_nests[rgn]; | |
e1ab7874 | 6144 | else |
6145 | return NULL; | |
6146 | } | |
6147 | ||
6148 | /* True when LOOP was included into pipelining regions. */ | |
6149 | bool | |
2e966e2a | 6150 | considered_for_pipelining_p (class loop *loop) |
e1ab7874 | 6151 | { |
6152 | if (loop_depth (loop) == 0) | |
6153 | return false; | |
6154 | ||
48e1416a | 6155 | /* Now, the loop could be too large or irreducible. Check whether its |
6156 | region is in LOOP_NESTS. | |
6157 | We determine the region number of LOOP as the region number of its | |
6158 | latch. We can't use the header here, because this header could be | |
e1ab7874 | 6159 | a just-removed preheader and would give us the wrong region number. |
6160 | Latch can't be used because it could be in the inner loop too. */ | |
a2d56a0e | 6161 | if (LOOP_MARKED_FOR_PIPELINING_P (loop)) |
e1ab7874 | 6162 | { |
6163 | int rgn = CONTAINING_RGN (loop->latch->index); | |
6164 | ||
f1f41a6c | 6165 | gcc_assert ((unsigned) rgn < loop_nests.length ()); |
e1ab7874 | 6166 | return true; |
6167 | } | |
48e1416a | 6168 | |
e1ab7874 | 6169 | return false; |
6170 | } | |
6171 | ||
48e1416a | 6172 | /* Makes regions from the rest of the blocks, after loops are chosen |
e1ab7874 | 6173 | for pipelining. */ |
6174 | static void | |
6175 | make_regions_from_the_rest (void) | |
6176 | { | |
6177 | int cur_rgn_blocks; | |
6178 | int *loop_hdr; | |
6179 | int i; | |
6180 | ||
6181 | basic_block bb; | |
6182 | edge e; | |
6183 | edge_iterator ei; | |
6184 | int *degree; | |
e1ab7874 | 6185 | |
6186 | /* Index in rgn_bb_table where to start allocating new regions. */ | |
6187 | cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0; | |
e1ab7874 | 6188 | |
48e1416a | 6189 | /* Make regions from all the remaining basic blocks - those that don't belong to |
e1ab7874 | 6190 | any loop or belong to irreducible loops. Prepare the data structures |
6191 | for extend_rgns. */ | |
6192 | ||
6193 | /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop, | |
6194 | LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same | |
6195 | loop. */ | |
fe672ac0 | 6196 | loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
6197 | degree = XCNEWVEC (int, last_basic_block_for_fn (cfun)); | |
e1ab7874 | 6198 | |
6199 | ||
6200 | /* For each basic block that belongs to some loop assign the number | |
6201 | of innermost loop it belongs to. */ | |
fe672ac0 | 6202 | for (i = 0; i < last_basic_block_for_fn (cfun); i++) |
e1ab7874 | 6203 | loop_hdr[i] = -1; |
6204 | ||
fc00614f | 6205 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6206 | { |
9c26ddef | 6207 | if (bb->loop_father && bb->loop_father->num != 0 |
e1ab7874 | 6208 | && !(bb->flags & BB_IRREDUCIBLE_LOOP)) |
6209 | loop_hdr[bb->index] = bb->loop_father->num; | |
6210 | } | |
6211 | ||
48e1416a | 6212 | /* For each basic block, degree is calculated as the number of incoming |
e1ab7874 | 6213 | edges coming from bbs that are not yet scheduled. |
6214 | The basic blocks that are already scheduled have a degree of zero. */ | |
fc00614f | 6215 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6216 | { |
6217 | degree[bb->index] = 0; | |
6218 | ||
08b7917c | 6219 | if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index)) |
e1ab7874 | 6220 | { |
6221 | FOR_EACH_EDGE (e, ei, bb->preds) | |
08b7917c | 6222 | if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index)) |
e1ab7874 | 6223 | degree[bb->index]++; |
6224 | } | |
6225 | else | |
6226 | degree[bb->index] = -1; | |
6227 | } | |
6228 | ||
6229 | extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr); | |
6230 | ||
6231 | /* Any block that did not end up in a region is placed into a region | |
6232 | by itself. */ | |
fc00614f | 6233 | FOR_EACH_BB_FN (bb, cfun) |
e1ab7874 | 6234 | if (degree[bb->index] >= 0) |
6235 | { | |
6236 | rgn_bb_table[cur_rgn_blocks] = bb->index; | |
6237 | RGN_NR_BLOCKS (nr_regions) = 1; | |
6238 | RGN_BLOCKS (nr_regions) = cur_rgn_blocks++; | |
6239 | RGN_DONT_CALC_DEPS (nr_regions) = 0; | |
6240 | RGN_HAS_REAL_EBB (nr_regions) = 0; | |
6241 | CONTAINING_RGN (bb->index) = nr_regions++; | |
6242 | BLOCK_TO_BB (bb->index) = 0; | |
6243 | } | |
6244 | ||
6245 | free (degree); | |
6246 | free (loop_hdr); | |
6247 | } | |
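/* A simplified standalone sketch of the degree computation above.  The
   edge list and block count are illustrative assumptions; the real code
   walks cfun's CFG and consults the bbs_in_loop_rgns bitmap.  */

#define TOY_NBLOCKS 5

struct toy_edge { int src, dest; };

/* degree[b] is -1 for blocks already placed into loop regions; for the
   remaining blocks it counts predecessors that are also outside loop
   regions, exactly as in make_regions_from_the_rest.  */
static void
toy_compute_degrees (const struct toy_edge *edges, int n_edges,
                     const unsigned char *in_loop_rgn, int *degree)
{
  int i;

  for (i = 0; i < TOY_NBLOCKS; i++)
    degree[i] = in_loop_rgn[i] ? -1 : 0;

  for (i = 0; i < n_edges; i++)
    if (!in_loop_rgn[edges[i].dest] && !in_loop_rgn[edges[i].src])
      degree[edges[i].dest]++;
}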
6248 | ||
6249 | /* Free data structures used in pipelining of loops. */ | |
6250 | void sel_finish_pipelining (void) | |
6251 | { | |
2e966e2a | 6252 | class loop *loop; |
e1ab7874 | 6253 | |
6254 | /* Release aux fields so we don't free them later by mistake. */ | |
f21d4d00 | 6255 | FOR_EACH_LOOP (loop, 0) |
e1ab7874 | 6256 | loop->aux = NULL; |
6257 | ||
6258 | loop_optimizer_finalize (); | |
6259 | ||
f1f41a6c | 6260 | loop_nests.release (); |
e1ab7874 | 6261 | |
6262 | free (rev_top_order_index); | |
6263 | rev_top_order_index = NULL; | |
6264 | } | |
6265 | ||
48e1416a | 6266 | /* This function replaces find_rgns when |
e1ab7874 | 6267 | FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */ |
48e1416a | 6268 | void |
e1ab7874 | 6269 | sel_find_rgns (void) |
6270 | { | |
6271 | sel_init_pipelining (); | |
6272 | extend_regions (); | |
6273 | ||
6274 | if (current_loops) | |
6275 | { | |
6276 | loop_p loop; | |
e1ab7874 | 6277 | |
f21d4d00 | 6278 | FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops |
6279 | ? LI_FROM_INNERMOST | |
6280 | : LI_ONLY_INNERMOST)) | |
e1ab7874 | 6281 | make_regions_from_loop_nest (loop); |
6282 | } | |
6283 | ||
6284 | /* Make regions from all the remaining basic blocks and schedule them. | |
48e1416a | 6285 | These blocks include blocks that don't belong to any loop or belong |
e1ab7874 | 6286 | to irreducible loops. */ |
6287 | make_regions_from_the_rest (); | |
6288 | ||
6289 | /* We don't need bbs_in_loop_rgns anymore. */ | |
6290 | sbitmap_free (bbs_in_loop_rgns); | |
6291 | bbs_in_loop_rgns = NULL; | |
6292 | } | |
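/* A standalone sketch of the two traversal orders selected above, assuming
   a toy loop tree (first-child/next-sibling pointers are illustrative, not
   GCC's struct loop).  With -fsel-sched-pipelining-outer-loops, loops are
   visited children-first (LI_FROM_INNERMOST); otherwise only loops without
   children are visited (LI_ONLY_INNERMOST).  */

struct toy_loop_node
{
  struct toy_loop_node *child; /* First inner loop, or null.  */
  struct toy_loop_node *next;  /* Next sibling, or null.  */
};

static void
toy_visit_loops (struct toy_loop_node *l, int outer_pipelining_p,
                 void (*make_region) (struct toy_loop_node *))
{
  for (; l; l = l->next)
    {
      /* Inner loops are processed before their parent, giving the
         from-innermost order.  */
      toy_visit_loops (l->child, outer_pipelining_p, make_region);

      /* Without outer-loop pipelining, only leaf (innermost) loops
         get a region.  */
      if (outer_pipelining_p || !l->child)
        make_region (l);
    }
}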
6293 | ||
b73edd22 | 6294 | /* Add the preheader blocks from the previous loop to the current region, |
6295 | taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS. | |
e1ab7874 | 6296 | This function is only used with -fsel-sched-pipelining-outer-loops. */ |
6297 | void | |
b73edd22 | 6298 | sel_add_loop_preheaders (bb_vec_t *bbs) |
e1ab7874 | 6299 | { |
6300 | int i; | |
6301 | basic_block bb; | |
f1f41a6c | 6302 | vec<basic_block> *preheader_blocks |
e1ab7874 | 6303 | = LOOP_PREHEADER_BLOCKS (current_loop_nest); |
6304 | ||
f1f41a6c | 6305 | if (!preheader_blocks) |
6306 | return; | |
6307 | ||
6308 | for (i = 0; preheader_blocks->iterate (i, &bb); i++) | |
a2d56a0e | 6309 | { |
f1f41a6c | 6310 | bbs->safe_push (bb); |
6311 | last_added_blocks.safe_push (bb); | |
e1ab7874 | 6312 | sel_add_bb (bb); |
a2d56a0e | 6313 | } |
e1ab7874 | 6314 | |
f1f41a6c | 6315 | vec_free (preheader_blocks); |
e1ab7874 | 6316 | } |
6317 | ||
48e1416a | 6318 | /* While pipelining outer loops, returns TRUE if BB is a loop preheader. |
6319 | Please note that the function should also work when pipelining_p is | |
6320 | false, because it is used when deciding whether we should or should | |
e1ab7874 | 6321 | not reschedule pipelined code. */ |
6322 | bool | |
6323 | sel_is_loop_preheader_p (basic_block bb) | |
6324 | { | |
6325 | if (current_loop_nest) | |
6326 | { | |
2e966e2a | 6327 | class loop *outer; |
e1ab7874 | 6328 | |
6329 | if (preheader_removed) | |
6330 | return false; | |
6331 | ||
6332 | /* Preheader is the first block in the region. */ | |
6333 | if (BLOCK_TO_BB (bb->index) == 0) | |
6334 | return true; | |
6335 | ||
6336 | /* We used to find a preheader with the topological information. | |
6337 | Check that the above code is equivalent to what we did before. */ | |
6338 | ||
6339 | if (in_current_region_p (current_loop_nest->header)) | |
48e1416a | 6340 | gcc_assert (!(BLOCK_TO_BB (bb->index) |
e1ab7874 | 6341 | < BLOCK_TO_BB (current_loop_nest->header->index))); |
6342 | ||
6343 | /* Make sure BB is never the latch block of an outer loop that | |
6344 | was considered for pipelining. */ | |
6345 | for (outer = loop_outer (current_loop_nest); | |
6346 | outer; | |
6347 | outer = loop_outer (outer)) | |
6348 | if (considered_for_pipelining_p (outer) && outer->latch == bb) | |
6349 | gcc_unreachable (); | |
6350 | } | |
6351 | ||
6352 | return false; | |
6353 | } | |
6354 | ||
49087fba | 6355 | /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and |
6356 | can be removed, making the corresponding edge fallthrough (assuming that | |
6357 | all basic blocks between JUMP_BB and DEST_BB are empty). */ | |
6358 | static bool | |
6359 | bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb) | |
e1ab7874 | 6360 | { |
4b816303 | 6361 | if (!onlyjump_p (BB_END (jump_bb)) |
6362 | || tablejump_p (BB_END (jump_bb), NULL, NULL)) | |
e1ab7874 | 6363 | return false; |
6364 | ||
48e1416a | 6365 | /* Reject if there are several outgoing edges, an abnormal or crossing |
e1ab7874 | 6366 | edge, or the jump's destination is not DEST_BB. */ |
6367 | if (EDGE_COUNT (jump_bb->succs) != 1 | |
49087fba | 6368 | || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING) |
e1ab7874 | 6369 | || EDGE_SUCC (jump_bb, 0)->dest != dest_bb) |
6370 | return false; | |
6371 | ||
6372 | /* None of the above applies, so the jump is removable. */ | |
6373 | return true; | |
6374 | } | |
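/* A minimal standalone sketch of the structural part of the test above,
   using toy basic-block and edge types (assumptions of this sketch); the
   onlyjump_p/tablejump_p checks on the actual jump insn are not modeled.  */

#define TOY_EDGE_ABNORMAL 0x1
#define TOY_EDGE_CROSSING 0x2

struct toy_bb;

struct toy_succ_edge
{
  struct toy_bb *dest;
  unsigned flags;
};

struct toy_bb
{
  struct toy_succ_edge succs[2]; /* Outgoing edges (at most 2 in the toy).  */
  int n_succs;
};

static int
toy_bb_has_removable_jump_to_p (const struct toy_bb *jump_bb,
                                const struct toy_bb *dest_bb)
{
  /* Exactly one ordinary outgoing edge, and it must lead to DEST_BB.  */
  return jump_bb->n_succs == 1
         && !(jump_bb->succs[0].flags & (TOY_EDGE_ABNORMAL | TOY_EDGE_CROSSING))
         && jump_bb->succs[0].dest == dest_bb;
}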
6375 | ||
6376 | /* Removes the loop preheader blocks from the current region and saves them in | |
48e1416a | 6377 | PREHEADER_BLOCKS of the parent loop, so they will be added later to the |
e1ab7874 | 6378 | region that represents an outer loop. */ |
6379 | static void | |
6380 | sel_remove_loop_preheader (void) | |
6381 | { | |
6382 | int i, old_len; | |
6383 | int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); | |
6384 | basic_block bb; | |
6385 | bool all_empty_p = true; | |
f1f41a6c | 6386 | vec<basic_block> *preheader_blocks |
e1ab7874 | 6387 | = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest)); |
6388 | ||
f1f41a6c | 6389 | vec_check_alloc (preheader_blocks, 0); |
6390 | ||
e1ab7874 | 6391 | gcc_assert (current_loop_nest); |
f1f41a6c | 6392 | old_len = preheader_blocks->length (); |
e1ab7874 | 6393 | |
6394 | /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */ | |
6395 | for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++) | |
6396 | { | |
f5a6b05f | 6397 | bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
e1ab7874 | 6398 | |
48e1416a | 6399 | /* If the basic block belongs to the region, but doesn't belong to the |
e1ab7874 | 6400 | corresponding loop, then it should be a preheader. */ |
6401 | if (sel_is_loop_preheader_p (bb)) | |
6402 | { | |
f1f41a6c | 6403 | preheader_blocks->safe_push (bb); |
e1ab7874 | 6404 | if (BB_END (bb) != bb_note (bb)) |
6405 | all_empty_p = false; | |
6406 | } | |
6407 | } | |
48e1416a | 6408 | |
e1ab7874 | 6409 | /* Remove these blocks only after iterating over the whole region. */ |
f1f41a6c | 6410 | for (i = preheader_blocks->length () - 1; i >= old_len; i--) |
e1ab7874 | 6411 | { |
f1f41a6c | 6412 | bb = (*preheader_blocks)[i]; |
e1ab7874 | 6413 | sel_remove_bb (bb, false); |
6414 | } | |
6415 | ||
6416 | if (!considered_for_pipelining_p (loop_outer (current_loop_nest))) | |
6417 | { | |
6418 | if (!all_empty_p) | |
6419 | /* Immediately create new region from preheader. */ | |
f1f41a6c | 6420 | make_region_from_loop_preheader (preheader_blocks); |
e1ab7874 | 6421 | else |
6422 | { | |
6423 | /* If all preheader blocks are empty, don't create a new empty region. | |
6424 | Instead, remove them completely. */ | |
f1f41a6c | 6425 | FOR_EACH_VEC_ELT (*preheader_blocks, i, bb) |
e1ab7874 | 6426 | { |
6427 | edge e; | |
6428 | edge_iterator ei; | |
6429 | basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb; | |
6430 | ||
6431 | /* Redirect all incoming edges to the next basic block. */ | |
6432 | for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) | |
6433 | { | |
6434 | if (! (e->flags & EDGE_FALLTHRU)) | |
6435 | redirect_edge_and_branch (e, bb->next_bb); | |
6436 | else | |
6437 | redirect_edge_succ (e, bb->next_bb); | |
6438 | } | |
6439 | gcc_assert (BB_NOTE_LIST (bb) == NULL); | |
6440 | delete_and_free_basic_block (bb); | |
6441 | ||
48e1416a | 6442 | /* Check if, after deleting the preheader, there is an unconditional |
6443 | jump in PREV_BB that leads to the next basic block NEXT_BB. | |
6444 | If so, delete this jump and clear the data sets of its | |
e1ab7874 | 6445 | basic block if it becomes empty. */ |
6446 | if (next_bb->prev_bb == prev_bb | |
34154e27 | 6447 | && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) |
49087fba | 6448 | && bb_has_removable_jump_to_p (prev_bb, next_bb)) |
e1ab7874 | 6449 | { |
6450 | redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb); | |
6451 | if (BB_END (prev_bb) == bb_note (prev_bb)) | |
6452 | free_data_sets (prev_bb); | |
6453 | } | |
1a5dbaab | 6454 | |
6455 | set_immediate_dominator (CDI_DOMINATORS, next_bb, | |
6456 | recompute_dominator (CDI_DOMINATORS, | |
6457 | next_bb)); | |
e1ab7874 | 6458 | } |
6459 | } | |
f1f41a6c | 6460 | vec_free (preheader_blocks); |
e1ab7874 | 6461 | } |
6462 | else | |
6463 | /* Store preheader within the father's loop structure. */ | |
6464 | SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest), | |
6465 | preheader_blocks); | |
6466 | } | |
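/* A simplified standalone sketch of what removing an empty preheader block
   amounts to: every incoming edge is redirected to the block's single
   successor, after which the block itself can be deleted.  The toy CFG
   below (one successor pointer per block) is an assumption of this sketch;
   the real code distinguishes fallthrough edges (redirect_edge_succ) from
   edges whose jump must also be retargeted (redirect_edge_and_branch).  */

#define TOY_MAX_PREDS 4

struct toy_block
{
  struct toy_block *succ;                 /* Single successor.  */
  struct toy_block *preds[TOY_MAX_PREDS]; /* Predecessor blocks.  */
  int n_preds;
};

static void
toy_delete_empty_block (struct toy_block *bb)
{
  int i;
  struct toy_block *next = bb->succ;

  for (i = 0; i < bb->n_preds; i++)
    {
      /* Point the predecessor's outgoing edge at NEXT and record the
         predecessor as an incoming edge of NEXT.  */
      bb->preds[i]->succ = next;
      if (next->n_preds < TOY_MAX_PREDS)
        next->preds[next->n_preds++] = bb->preds[i];
    }

  /* BB is now unreachable and can be freed by the caller.  */
  bb->n_preds = 0;
  bb->succ = 0;
}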
7c5928c3 | 6467 | |
e1ab7874 | 6468 | #endif |