gcc/cfgrtl.c
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and analyze it
21 that are aware of the RTL intermediate language.
22
23 Available functionality:
24 - Basic CFG/RTL manipulation API documented in cfghooks.h
25 - CFG-aware instruction chain manipulation
26 delete_insn, delete_insn_chain
27 - Edge splitting and committing to edges
28 insert_insn_on_edge, commit_edge_insertions
29 - CFG updating after insn simplification
30 purge_dead_edges, purge_all_dead_edges
31 - CFG fixing after coarse manipulation
32 fixup_abnormal_edges
33
34 Functions not intended for generic use:
35 - Infrastructure to determine quickly basic block for insn
36 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
37 - Edge redirection with updating and optimizing of insn chain
38 block_label, tidy_fallthru_edge, force_nonfallthru */
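
/* Editorial sketch (not part of the original source): the usual protocol for
   the edge-insertion API listed above is to build an insn sequence, queue it
   on the edge, and commit all queued insertions at once, e.g.

     start_sequence ();
     ... emit the insns that must execute when control passes along edge E ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     insert_insn_on_edge (seq, e);
     ...
     commit_edge_insertions ();

   commit_edge_insertions splits edges and creates new basic blocks where
   needed so that the queued insns execute exactly when control flows along
   the corresponding edge.  */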
39 \f
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "memmodel.h"
51 #include "emit-rtl.h"
52 #include "cfgrtl.h"
53 #include "cfganal.h"
54 #include "cfgbuild.h"
55 #include "cfgcleanup.h"
56 #include "bb-reorder.h"
57 #include "rtl-error.h"
58 #include "insn-attr.h"
59 #include "dojump.h"
60 #include "expr.h"
61 #include "cfgloop.h"
62 #include "tree-pass.h"
63 #include "print-rtl.h"
64
65 /* Holds the interesting leading and trailing notes for the function.
66 Only applicable if the CFG is in cfglayout mode. */
67 static GTY(()) rtx_insn *cfg_layout_function_footer;
68 static GTY(()) rtx_insn *cfg_layout_function_header;
69
70 static rtx_insn *skip_insns_after_block (basic_block);
71 static void record_effective_endpoints (void);
72 static void fixup_reorder_chain (void);
73
74 void verify_insn_chain (void);
75 static void fixup_fallthru_exit_predecessor (void);
76 static int can_delete_note_p (const rtx_note *);
77 static int can_delete_label_p (const rtx_code_label *);
78 static basic_block rtl_split_edge (edge);
79 static bool rtl_move_block_after (basic_block, basic_block);
80 static int rtl_verify_flow_info (void);
81 static basic_block cfg_layout_split_block (basic_block, void *);
82 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
83 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
84 static void cfg_layout_delete_block (basic_block);
85 static void rtl_delete_block (basic_block);
86 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
87 static edge rtl_redirect_edge_and_branch (edge, basic_block);
88 static basic_block rtl_split_block (basic_block, void *);
89 static void rtl_dump_bb (FILE *, basic_block, int, int);
90 static int rtl_verify_flow_info_1 (void);
91 static void rtl_make_forwarder_block (edge);
92 \f
93 /* Return true if NOTE is not one of the ones that must be kept paired,
94 so that we may simply delete it. */
95
96 static int
97 can_delete_note_p (const rtx_note *note)
98 {
99 switch (NOTE_KIND (note))
100 {
101 case NOTE_INSN_DELETED:
102 case NOTE_INSN_BASIC_BLOCK:
103 case NOTE_INSN_EPILOGUE_BEG:
104 return true;
105
106 default:
107 return false;
108 }
109 }
110
111 /* True if a given label can be deleted. */
112
113 static int
114 can_delete_label_p (const rtx_code_label *label)
115 {
116 return (!LABEL_PRESERVE_P (label)
117 /* User declared labels must be preserved. */
118 && LABEL_NAME (label) == 0
119 && !vec_safe_contains<rtx_insn *> (forced_labels,
120 const_cast<rtx_code_label *> (label)));
121 }
122
123 /* Delete INSN by patching it out. */
124
125 void
126 delete_insn (rtx_insn *insn)
127 {
128 rtx note;
129 bool really_delete = true;
130
131 if (LABEL_P (insn))
132 {
133 /* Some labels can't be directly removed from the INSN chain, as they
134 might be referenced via variables, the constant pool, etc.
135 Convert them to the special NOTE_INSN_DELETED_LABEL note. */
136 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
137 {
138 const char *name = LABEL_NAME (insn);
139 basic_block bb = BLOCK_FOR_INSN (insn);
140 rtx_insn *bb_note = NEXT_INSN (insn);
141
142 really_delete = false;
143 PUT_CODE (insn, NOTE);
144 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
145 NOTE_DELETED_LABEL_NAME (insn) = name;
146
147 /* If the note following the label starts a basic block, and the
148 label is a member of the same basic block, interchange the two. */
149 if (bb_note != NULL_RTX
150 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
151 && bb != NULL
152 && bb == BLOCK_FOR_INSN (bb_note))
153 {
154 reorder_insns_nobb (insn, insn, bb_note);
155 BB_HEAD (bb) = bb_note;
156 if (BB_END (bb) == bb_note)
157 BB_END (bb) = insn;
158 }
159 }
160
161 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
162 }
163
164 if (really_delete)
165 {
166 /* If this insn has already been deleted, something is very wrong. */
167 gcc_assert (!insn->deleted ());
168 if (INSN_P (insn))
169 df_insn_delete (insn);
170 remove_insn (insn);
171 insn->set_deleted ();
172 }
173
174 /* If deleting a jump, decrement the use count of the label. Deleting
175 the label itself should happen in the normal course of block merging. */
176 if (JUMP_P (insn))
177 {
178 if (JUMP_LABEL (insn)
179 && LABEL_P (JUMP_LABEL (insn)))
180 LABEL_NUSES (JUMP_LABEL (insn))--;
181
182 /* If there are more targets, remove them too. */
183 while ((note
184 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
185 && LABEL_P (XEXP (note, 0)))
186 {
187 LABEL_NUSES (XEXP (note, 0))--;
188 remove_note (insn, note);
189 }
190 }
191
192 /* Also if deleting any insn that references a label as an operand. */
193 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
194 && LABEL_P (XEXP (note, 0)))
195 {
196 LABEL_NUSES (XEXP (note, 0))--;
197 remove_note (insn, note);
198 }
199
200 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
201 {
202 rtvec vec = table->get_labels ();
203 int len = GET_NUM_ELEM (vec);
204 int i;
205
206 for (i = 0; i < len; i++)
207 {
208 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
209
210 /* When deleting code in bulk (e.g. removing many unreachable
211 blocks) we can delete a label that's a target of the vector
212 before deleting the vector itself. */
213 if (!NOTE_P (label))
214 LABEL_NUSES (label)--;
215 }
216 }
217 }
218
219 /* Like delete_insn but also purge dead edges from BB.
220 Return true if any edges are eliminated. */
221
222 bool
223 delete_insn_and_edges (rtx_insn *insn)
224 {
225 bool purge = false;
226
227 if (INSN_P (insn)
228 && BLOCK_FOR_INSN (insn)
229 && BB_END (BLOCK_FOR_INSN (insn)) == insn)
230 purge = true;
231 delete_insn (insn);
232 if (purge)
233 return purge_dead_edges (BLOCK_FOR_INSN (insn));
234 return false;
235 }
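
/* Editorial note (not in the original source): a caller that deletes the
   final jump of a block can use the return value to learn whether the CFG
   actually changed, e.g.

     if (delete_insn_and_edges (BB_END (bb)))
       ...at least one successor edge of BB was purged...  */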
236
237 /* Unlink a chain of insns between START and FINISH, leaving notes
238 that must be paired. If CLEAR_BB is true, we set bb field for
239 insns that cannot be removed to NULL. */
240
241 void
242 delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
243 {
244 /* Unchain the insns one by one. It would be quicker to delete all of these
245 with a single unchaining, rather than one at a time, but we need to keep
246 the NOTE's. */
247 rtx_insn *current = finish;
248 while (1)
249 {
250 rtx_insn *prev = PREV_INSN (current);
251 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
252 ;
253 else
254 delete_insn (current);
255
256 if (clear_bb && !current->deleted ())
257 set_block_for_insn (current, NULL);
258
259 if (current == start)
260 break;
261 current = prev;
262 }
263 }
264 \f
265 /* Create a new basic block consisting of the instructions between HEAD and END
266 inclusive. This function is designed to allow fast BB construction - it reuses
267 the note and basic block struct in BB_NOTE, if any, does not grow the
268 BASIC_BLOCK chain, and should be used directly only by CFG construction code.
269 END can be NULL to create a new empty basic block before HEAD. Both END
270 and HEAD can be NULL to create a basic block at the end of the INSN chain.
271 AFTER is the basic block the new block should be placed after. */
272
273 basic_block
274 create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
275 basic_block after)
276 {
277 basic_block bb;
278
279 if (bb_note
280 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
281 && bb->aux == NULL)
282 {
283 /* If we found an existing note, thread it back onto the chain. */
284
285 rtx_insn *after;
286
287 if (LABEL_P (head))
288 after = head;
289 else
290 {
291 after = PREV_INSN (head);
292 head = bb_note;
293 }
294
295 if (after != bb_note && NEXT_INSN (after) != bb_note)
296 reorder_insns_nobb (bb_note, bb_note, after);
297 }
298 else
299 {
300 /* Otherwise we must create a note and a basic block structure. */
301
302 bb = alloc_block ();
303
304 init_rtl_bb_info (bb);
305 if (!head && !end)
306 head = end = bb_note
307 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
308 else if (LABEL_P (head) && end)
309 {
310 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
311 if (head == end)
312 end = bb_note;
313 }
314 else
315 {
316 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
317 head = bb_note;
318 if (!end)
319 end = head;
320 }
321
322 NOTE_BASIC_BLOCK (bb_note) = bb;
323 }
324
325 /* Always include the bb note in the block. */
326 if (NEXT_INSN (end) == bb_note)
327 end = bb_note;
328
329 BB_HEAD (bb) = head;
330 BB_END (bb) = end;
331 bb->index = last_basic_block_for_fn (cfun)++;
332 bb->flags = BB_NEW | BB_RTL;
333 link_block (bb, after);
334 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
335 df_bb_refs_record (bb->index, false);
336 update_bb_for_insn (bb);
337 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
338
339 /* Tag the block so that we know it has been used when considering
340 other basic block notes. */
341 bb->aux = bb;
342
343 return bb;
344 }
345
346 /* Create a new basic block consisting of the instructions between HEAD and END
347 and place it in the BB chain after block AFTER. END can be NULL to
348 create a new empty basic block before HEAD. Both END and HEAD can be
349 NULL to create a basic block at the end of the INSN chain. */
350
351 static basic_block
352 rtl_create_basic_block (void *headp, void *endp, basic_block after)
353 {
354 rtx_insn *head = (rtx_insn *) headp;
355 rtx_insn *end = (rtx_insn *) endp;
356 basic_block bb;
357
358 /* Grow the basic block array if needed. */
359 if ((size_t) last_basic_block_for_fn (cfun)
360 >= basic_block_info_for_fn (cfun)->length ())
361 {
362 size_t new_size =
363 (last_basic_block_for_fn (cfun)
364 + (last_basic_block_for_fn (cfun) + 3) / 4);
365 vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
366 }
367
368 n_basic_blocks_for_fn (cfun)++;
369
370 bb = create_basic_block_structure (head, end, NULL, after);
371 bb->aux = NULL;
372 return bb;
373 }
374
375 static basic_block
376 cfg_layout_create_basic_block (void *head, void *end, basic_block after)
377 {
378 basic_block newbb = rtl_create_basic_block (head, end, after);
379
380 return newbb;
381 }
382 \f
383 /* Delete the insns in a (non-live) block. We physically delete every
384 non-deleted-note insn, and update the flow graph appropriately.
385
386 Return nonzero if we deleted an exception handler. */
387
388 /* ??? Preserving all such notes strikes me as wrong. It would be nice
389 to post-process the stream to remove empty blocks, loops, ranges, etc. */
390
391 static void
392 rtl_delete_block (basic_block b)
393 {
394 rtx_insn *insn, *end;
395
396 /* If the head of this block is a CODE_LABEL, then it might be the
397 label for an exception handler which can't be reached. We need
398 to remove the label from the exception_handler_label list. */
399 insn = BB_HEAD (b);
400
401 end = get_last_bb_insn (b);
402
403 /* Selectively delete the entire chain. */
404 BB_HEAD (b) = NULL;
405 delete_insn_chain (insn, end, true);
406
407
408 if (dump_file)
409 fprintf (dump_file, "deleting block %d\n", b->index);
410 df_bb_delete (b->index);
411 }
412 \f
413 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
414
415 void
416 compute_bb_for_insn (void)
417 {
418 basic_block bb;
419
420 FOR_EACH_BB_FN (bb, cfun)
421 {
422 rtx_insn *end = BB_END (bb);
423 rtx_insn *insn;
424
425 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
426 {
427 BLOCK_FOR_INSN (insn) = bb;
428 if (insn == end)
429 break;
430 }
431 }
432 }
433
434 /* Release the basic_block_for_insn array. */
435
436 unsigned int
437 free_bb_for_insn (void)
438 {
439 rtx_insn *insn;
440 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
441 if (!BARRIER_P (insn))
442 BLOCK_FOR_INSN (insn) = NULL;
443 return 0;
444 }
445
446 namespace {
447
448 const pass_data pass_data_free_cfg =
449 {
450 RTL_PASS, /* type */
451 "*free_cfg", /* name */
452 OPTGROUP_NONE, /* optinfo_flags */
453 TV_NONE, /* tv_id */
454 0, /* properties_required */
455 0, /* properties_provided */
456 PROP_cfg, /* properties_destroyed */
457 0, /* todo_flags_start */
458 0, /* todo_flags_finish */
459 };
460
461 class pass_free_cfg : public rtl_opt_pass
462 {
463 public:
464 pass_free_cfg (gcc::context *ctxt)
465 : rtl_opt_pass (pass_data_free_cfg, ctxt)
466 {}
467
468 /* opt_pass methods: */
469 virtual unsigned int execute (function *);
470
471 }; // class pass_free_cfg
472
473 unsigned int
474 pass_free_cfg::execute (function *)
475 {
476 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
477 valid at that point so it would be too late to call df_analyze. */
478 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
479 {
480 df_note_add_problem ();
481 df_analyze ();
482 }
483
484 if (crtl->has_bb_partition)
485 insert_section_boundary_note ();
486
487 free_bb_for_insn ();
488 return 0;
489 }
490
491 } // anon namespace
492
493 rtl_opt_pass *
494 make_pass_free_cfg (gcc::context *ctxt)
495 {
496 return new pass_free_cfg (ctxt);
497 }
498
499 /* Return the insn after which to emit code when we want to emit code at the entry of the function. */
500 rtx_insn *
501 entry_of_function (void)
502 {
503 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
504 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
505 }
506
507 /* Emit INSN at the entry point of the function, ensuring that it is only
508 executed once per function. */
509 void
510 emit_insn_at_entry (rtx insn)
511 {
512 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
513 edge e = ei_safe_edge (ei);
514 gcc_assert (e->flags & EDGE_FALLTHRU);
515
516 insert_insn_on_edge (insn, e);
517 commit_edge_insertions ();
518 }
519
520 /* Update BLOCK_FOR_INSN of insns between BEGIN and END
521 (or BARRIER if found) and notify df of the bb change.
522 The insn chain range is inclusive
523 (i.e. both BEGIN and END will be updated). */
524
525 static void
526 update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
527 {
528 rtx_insn *insn;
529
530 end = NEXT_INSN (end);
531 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
532 if (!BARRIER_P (insn))
533 df_insn_change_bb (insn, bb);
534 }
535
536 /* Update BLOCK_FOR_INSN of insns in BB to BB,
537 and notify df of the change. */
538
539 void
540 update_bb_for_insn (basic_block bb)
541 {
542 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
543 }
544
545 \f
546 /* Like active_insn_p, except keep the return value clobber around
547 even after reload. */
548
549 static bool
550 flow_active_insn_p (const rtx_insn *insn)
551 {
552 if (active_insn_p (insn))
553 return true;
554
555 /* A clobber of the function return value exists for buggy
556 programs that fail to return a value. Its effect is to
557 keep the return value from being live across the entire
558 function. If we allow it to be skipped, we introduce the
559 possibility for register lifetime confusion. */
560 if (GET_CODE (PATTERN (insn)) == CLOBBER
561 && REG_P (XEXP (PATTERN (insn), 0))
562 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
563 return true;
564
565 return false;
566 }
567
568 /* Return true if the block has no effect and only forwards control flow to
569 its single destination. */
570
571 bool
572 contains_no_active_insn_p (const_basic_block bb)
573 {
574 rtx_insn *insn;
575
576 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
577 || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
578 || !single_succ_p (bb)
579 || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
580 return false;
581
582 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
583 if (INSN_P (insn) && flow_active_insn_p (insn))
584 return false;
585
586 return (!INSN_P (insn)
587 || (JUMP_P (insn) && simplejump_p (insn))
588 || !flow_active_insn_p (insn));
589 }
590
591 /* Likewise, but protect loop latches, headers and preheaders. */
592 /* FIXME: Make this a cfg hook. */
593
594 bool
595 forwarder_block_p (const_basic_block bb)
596 {
597 if (!contains_no_active_insn_p (bb))
598 return false;
599
600 /* Protect loop latches, headers and preheaders. */
601 if (current_loops)
602 {
603 basic_block dest;
604 if (bb->loop_father->header == bb)
605 return false;
606 dest = EDGE_SUCC (bb, 0)->dest;
607 if (dest->loop_father->header == dest)
608 return false;
609 }
610
611 return true;
612 }
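
/* Editorial note (not in the original source): a block accepted by the two
   predicates above typically contains nothing but

     (code_label ...)                          - optional
     (note NOTE_INSN_BASIC_BLOCK)
     (jump_insn (set (pc) (label_ref ...)))    - or a plain fallthru

   i.e. at most a label, the bb note, and a simplejump to its single
   successor.  */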
613
614 /* Return nonzero if we can reach target from src by falling through. */
615 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
616
617 bool
618 can_fallthru (basic_block src, basic_block target)
619 {
620 rtx_insn *insn = BB_END (src);
621 rtx_insn *insn2;
622 edge e;
623 edge_iterator ei;
624
625 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
626 return true;
627 if (src->next_bb != target)
628 return false;
629
630 /* ??? Later we may add code to move jump tables offline. */
631 if (tablejump_p (insn, NULL, NULL))
632 return false;
633
634 FOR_EACH_EDGE (e, ei, src->succs)
635 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
636 && e->flags & EDGE_FALLTHRU)
637 return false;
638
639 insn2 = BB_HEAD (target);
640 if (!active_insn_p (insn2))
641 insn2 = next_active_insn (insn2);
642
643 return next_active_insn (insn) == insn2;
644 }
645
646 /* Return nonzero if we could reach target from src by falling through,
647 if the target was made adjacent. If we already have a fall-through
648 edge to the exit block, we can't do that. */
649 static bool
650 could_fall_through (basic_block src, basic_block target)
651 {
652 edge e;
653 edge_iterator ei;
654
655 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
656 return true;
657 FOR_EACH_EDGE (e, ei, src->succs)
658 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
659 && e->flags & EDGE_FALLTHRU)
660 return false;
661 return true;
662 }
663 \f
664 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
665 rtx_note *
666 bb_note (basic_block bb)
667 {
668 rtx_insn *note;
669
670 note = BB_HEAD (bb);
671 if (LABEL_P (note))
672 note = NEXT_INSN (note);
673
674 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
675 return as_a <rtx_note *> (note);
676 }
677
678 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
679 note associated with the BLOCK. */
680
681 static rtx_insn *
682 first_insn_after_basic_block_note (basic_block block)
683 {
684 rtx_insn *insn;
685
686 /* Get the first instruction in the block. */
687 insn = BB_HEAD (block);
688
689 if (insn == NULL_RTX)
690 return NULL;
691 if (LABEL_P (insn))
692 insn = NEXT_INSN (insn);
693 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
694
695 return NEXT_INSN (insn);
696 }
697
698 /* Creates a new basic block just after basic block BB by splitting
699 everything after specified instruction INSNP. */
700
701 static basic_block
702 rtl_split_block (basic_block bb, void *insnp)
703 {
704 basic_block new_bb;
705 rtx_insn *insn = (rtx_insn *) insnp;
706 edge e;
707 edge_iterator ei;
708
709 if (!insn)
710 {
711 insn = first_insn_after_basic_block_note (bb);
712
713 if (insn)
714 {
715 rtx_insn *next = insn;
716
717 insn = PREV_INSN (insn);
718
719 /* If the block contains only debug insns, insn would have
720 been NULL in a non-debug compilation, and then we'd end
721 up emitting a DELETED note. For -fcompare-debug
722 stability, emit the note too. */
723 if (insn != BB_END (bb)
724 && DEBUG_INSN_P (next)
725 && DEBUG_INSN_P (BB_END (bb)))
726 {
727 while (next != BB_END (bb) && DEBUG_INSN_P (next))
728 next = NEXT_INSN (next);
729
730 if (next == BB_END (bb))
731 emit_note_after (NOTE_INSN_DELETED, next);
732 }
733 }
734 else
735 insn = get_last_insn ();
736 }
737
738 /* We probably should check type of the insn so that we do not create
739 inconsistent cfg. It is checked in verify_flow_info anyway, so do not
740 bother. */
741 if (insn == BB_END (bb))
742 emit_note_after (NOTE_INSN_DELETED, insn);
743
744 /* Create the new basic block. */
745 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
746 BB_COPY_PARTITION (new_bb, bb);
747 BB_END (bb) = insn;
748
749 /* Redirect the outgoing edges. */
750 new_bb->succs = bb->succs;
751 bb->succs = NULL;
752 FOR_EACH_EDGE (e, ei, new_bb->succs)
753 e->src = new_bb;
754
755 /* The new block starts off being dirty. */
756 df_set_bb_dirty (bb);
757 return new_bb;
758 }
759
760 /* Return true if the single edge between blocks A and B is the only place
761 in RTL which holds some unique locus. */
762
763 static bool
764 unique_locus_on_edge_between_p (basic_block a, basic_block b)
765 {
766 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
767 rtx_insn *insn, *end;
768
769 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
770 return false;
771
772 /* First scan block A backward. */
773 insn = BB_END (a);
774 end = PREV_INSN (BB_HEAD (a));
775 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
776 insn = PREV_INSN (insn);
777
778 if (insn != end && INSN_LOCATION (insn) == goto_locus)
779 return false;
780
781 /* Then scan block B forward. */
782 insn = BB_HEAD (b);
783 if (insn)
784 {
785 end = NEXT_INSN (BB_END (b));
786 while (insn != end && !NONDEBUG_INSN_P (insn))
787 insn = NEXT_INSN (insn);
788
789 if (insn != end && INSN_HAS_LOCATION (insn)
790 && INSN_LOCATION (insn) == goto_locus)
791 return false;
792 }
793
794 return true;
795 }
796
797 /* If the single edge between blocks A and B is the only place in RTL which
798 holds some unique locus, emit a nop with that locus between the blocks. */
799
800 static void
801 emit_nop_for_unique_locus_between (basic_block a, basic_block b)
802 {
803 if (!unique_locus_on_edge_between_p (a, b))
804 return;
805
806 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
807 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
808 }
809
810 /* Blocks A and B are to be merged into a single block A. The insns
811 are already contiguous. */
812
813 static void
814 rtl_merge_blocks (basic_block a, basic_block b)
815 {
816 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
817 rtx_insn *del_first = NULL, *del_last = NULL;
818 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
819 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
820 int b_empty = 0;
821
822 if (dump_file)
823 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
824 a->index);
825
826 while (DEBUG_INSN_P (b_end))
827 b_end = PREV_INSN (b_debug_start = b_end);
828
829 /* If there was a CODE_LABEL beginning B, delete it. */
830 if (LABEL_P (b_head))
831 {
832 /* Detect basic blocks with nothing but a label. This can happen
833 in particular at the end of a function. */
834 if (b_head == b_end)
835 b_empty = 1;
836
837 del_first = del_last = b_head;
838 b_head = NEXT_INSN (b_head);
839 }
840
841 /* Delete the basic block note and handle blocks containing just that
842 note. */
843 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
844 {
845 if (b_head == b_end)
846 b_empty = 1;
847 if (! del_last)
848 del_first = b_head;
849
850 del_last = b_head;
851 b_head = NEXT_INSN (b_head);
852 }
853
854 /* If there was a jump out of A, delete it. */
855 if (JUMP_P (a_end))
856 {
857 rtx_insn *prev;
858
859 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
860 if (!NOTE_P (prev)
861 || NOTE_INSN_BASIC_BLOCK_P (prev)
862 || prev == BB_HEAD (a))
863 break;
864
865 del_first = a_end;
866
867 /* If this was a conditional jump, we need to also delete
868 the insn that set cc0. */
869 if (HAVE_cc0 && only_sets_cc0_p (prev))
870 {
871 rtx_insn *tmp = prev;
872
873 prev = prev_nonnote_insn (prev);
874 if (!prev)
875 prev = BB_HEAD (a);
876 del_first = tmp;
877 }
878
879 a_end = PREV_INSN (del_first);
880 }
881 else if (BARRIER_P (NEXT_INSN (a_end)))
882 del_first = NEXT_INSN (a_end);
883
884 /* Delete everything marked above as well as crap that might be
885 hanging out between the two blocks. */
886 BB_END (a) = a_end;
887 BB_HEAD (b) = b_empty ? NULL : b_head;
888 delete_insn_chain (del_first, del_last, true);
889
890 /* When not optimizing and the edge is the only place in RTL which holds
891 some unique locus, emit a nop with that locus in between. */
892 if (!optimize)
893 {
894 emit_nop_for_unique_locus_between (a, b);
895 a_end = BB_END (a);
896 }
897
898 /* Reassociate the insns of B with A. */
899 if (!b_empty)
900 {
901 update_bb_for_insn_chain (a_end, b_debug_end, a);
902
903 BB_END (a) = b_debug_end;
904 BB_HEAD (b) = NULL;
905 }
906 else if (b_end != b_debug_end)
907 {
908 /* Move any deleted labels and other notes between the end of A
909 and the debug insns that make up B after the debug insns,
910 bringing the debug insns into A while keeping the notes after
911 the end of A. */
912 if (NEXT_INSN (a_end) != b_debug_start)
913 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
914 b_debug_end);
915 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
916 BB_END (a) = b_debug_end;
917 }
918
919 df_bb_delete (b->index);
920
921 /* If B was a forwarder block, propagate the locus on the edge. */
922 if (forwarder_p
923 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
924 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
925
926 if (dump_file)
927 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
928 }
929
930
931 /* Return true when blocks A and B can be merged. */
932
933 static bool
934 rtl_can_merge_blocks (basic_block a, basic_block b)
935 {
936 /* If we are partitioning hot/cold basic blocks, we don't want to
937 mess up unconditional or indirect jumps that cross between hot
938 and cold sections.
939
940 Basic block partitioning may result in some jumps that appear to
941 be optimizable (or blocks that appear to be mergeable), but which really
942 must be left untouched (they are required to make it safely across
943 partition boundaries). See the comments at the top of
944 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
945
946 if (BB_PARTITION (a) != BB_PARTITION (b))
947 return false;
948
949 /* Protect the loop latches. */
950 if (current_loops && b->loop_father->latch == b)
951 return false;
952
953 /* There must be exactly one edge in between the blocks. */
954 return (single_succ_p (a)
955 && single_succ (a) == b
956 && single_pred_p (b)
957 && a != b
958 /* Must be simple edge. */
959 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
960 && a->next_bb == b
961 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
962 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
963 /* If the jump insn has side effects,
964 we can't kill the edge. */
965 && (!JUMP_P (BB_END (a))
966 || (reload_completed
967 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
968 }
969 \f
970 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
971 exist. */
972
973 rtx_code_label *
974 block_label (basic_block block)
975 {
976 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
977 return NULL;
978
979 if (!LABEL_P (BB_HEAD (block)))
980 {
981 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
982 }
983
984 return as_a <rtx_code_label *> (BB_HEAD (block));
985 }
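
/* Editorial sketch (not in the original source): callers about to retarget a
   jump typically combine block_label with redirect_jump, as is done further
   down in this file:

     rtx_code_label *l = block_label (target);
     if (l)
       redirect_jump (as_a <rtx_jump_insn *> (insn), l, 0);

   block_label returns NULL only when TARGET is the exit block.  */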
986
987 /* Attempt to perform edge redirection by replacing a possibly complex jump
988 instruction with an unconditional jump, or by removing the jump completely.
989 This can apply only if all edges now point to the same block. The parameters
990 and return values are equivalent to redirect_edge_and_branch. */
991
992 edge
993 try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
994 {
995 basic_block src = e->src;
996 rtx_insn *insn = BB_END (src), *kill_from;
997 rtx set;
998 int fallthru = 0;
999
1000 /* If we are partitioning hot/cold basic blocks, we don't want to
1001 mess up unconditional or indirect jumps that cross between hot
1002 and cold sections.
1003
1004 Basic block partitioning may result in some jumps that appear to
1005 be optimizable (or blocks that appear to be mergeable), but which really
1006 must be left untouched (they are required to make it safely across
1007 partition boundaries). See the comments at the top of
1008 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1009
1010 if (BB_PARTITION (src) != BB_PARTITION (target))
1011 return NULL;
1012
1013 /* We can replace or remove a complex jump only when we have exactly
1014 two edges. Also, if we have exactly one outgoing edge, we can
1015 redirect that. */
1016 if (EDGE_COUNT (src->succs) >= 3
1017 /* Verify that all targets will be TARGET. Specifically, the
1018 edge that is not E must also go to TARGET. */
1019 || (EDGE_COUNT (src->succs) == 2
1020 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1021 return NULL;
1022
1023 if (!onlyjump_p (insn))
1024 return NULL;
1025 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1026 return NULL;
1027
1028 /* Avoid removing branch with side effects. */
1029 set = single_set (insn);
1030 if (!set || side_effects_p (set))
1031 return NULL;
1032
1033 /* In case we zap a conditional jump, we'll need to kill
1034 the cc0 setter too. */
1035 kill_from = insn;
1036 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1037 && only_sets_cc0_p (PREV_INSN (insn)))
1038 kill_from = PREV_INSN (insn);
1039
1040 /* See if we can create the fallthru edge. */
1041 if (in_cfglayout || can_fallthru (src, target))
1042 {
1043 if (dump_file)
1044 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1045 fallthru = 1;
1046
1047 /* Selectively unlink whole insn chain. */
1048 if (in_cfglayout)
1049 {
1050 rtx_insn *insn = BB_FOOTER (src);
1051
1052 delete_insn_chain (kill_from, BB_END (src), false);
1053
1054 /* Remove barriers but keep jumptables. */
1055 while (insn)
1056 {
1057 if (BARRIER_P (insn))
1058 {
1059 if (PREV_INSN (insn))
1060 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1061 else
1062 BB_FOOTER (src) = NEXT_INSN (insn);
1063 if (NEXT_INSN (insn))
1064 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1065 }
1066 if (LABEL_P (insn))
1067 break;
1068 insn = NEXT_INSN (insn);
1069 }
1070 }
1071 else
1072 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1073 false);
1074 }
1075
1076 /* If this already is simplejump, redirect it. */
1077 else if (simplejump_p (insn))
1078 {
1079 if (e->dest == target)
1080 return NULL;
1081 if (dump_file)
1082 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1083 INSN_UID (insn), e->dest->index, target->index);
1084 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1085 block_label (target), 0))
1086 {
1087 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1088 return NULL;
1089 }
1090 }
1091
1092 /* Cannot do anything for target exit block. */
1093 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1094 return NULL;
1095
1096 /* Or replace possibly complicated jump insn by simple jump insn. */
1097 else
1098 {
1099 rtx_code_label *target_label = block_label (target);
1100 rtx_insn *barrier;
1101 rtx_insn *label;
1102 rtx_jump_table_data *table;
1103
1104 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1105 JUMP_LABEL (BB_END (src)) = target_label;
1106 LABEL_NUSES (target_label)++;
1107 if (dump_file)
1108 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1109 INSN_UID (insn), INSN_UID (BB_END (src)));
1110
1111
1112 delete_insn_chain (kill_from, insn, false);
1113
1114 /* Recognize a tablejump that we are converting to a
1115 simple jump and remove its associated CODE_LABEL
1116 and ADDR_VEC or ADDR_DIFF_VEC. */
1117 if (tablejump_p (insn, &label, &table))
1118 delete_insn_chain (label, table, false);
1119
1120 barrier = next_nonnote_insn (BB_END (src));
1121 if (!barrier || !BARRIER_P (barrier))
1122 emit_barrier_after (BB_END (src));
1123 else
1124 {
1125 if (barrier != NEXT_INSN (BB_END (src)))
1126 {
1127 /* Move the jump before the barrier so that the notes
1128 which originally were, or were created, before the jump table end up
1129 inside the basic block. */
1130 rtx_insn *new_insn = BB_END (src);
1131
1132 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1133 PREV_INSN (barrier), src);
1134
1135 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1136 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1137
1138 SET_NEXT_INSN (new_insn) = barrier;
1139 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1140
1141 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1142 SET_PREV_INSN (barrier) = new_insn;
1143 }
1144 }
1145 }
1146
1147 /* Keep only one edge out and set proper flags. */
1148 if (!single_succ_p (src))
1149 remove_edge (e);
1150 gcc_assert (single_succ_p (src));
1151
1152 e = single_succ_edge (src);
1153 if (fallthru)
1154 e->flags = EDGE_FALLTHRU;
1155 else
1156 e->flags = 0;
1157
1158 e->probability = REG_BR_PROB_BASE;
1159 e->count = src->count;
1160
1161 if (e->dest != target)
1162 redirect_edge_succ (e, target);
1163 return e;
1164 }
1165
1166 /* Subroutine of redirect_branch_edge that tries to patch the jump
1167 instruction INSN so that it reaches block NEW_BB. Do this
1168 only when it originally reached the label OLD_LABEL. Return true if this
1169 worked or the original target wasn't OLD_LABEL; return false if redirection
1170 doesn't work. */
1171
1172 static bool
1173 patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1174 {
1175 rtx_jump_table_data *table;
1176 rtx tmp;
1177 /* Recognize a tablejump and adjust all matching cases. */
1178 if (tablejump_p (insn, NULL, &table))
1179 {
1180 rtvec vec;
1181 int j;
1182 rtx_code_label *new_label = block_label (new_bb);
1183
1184 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1185 return false;
1186 vec = table->get_labels ();
1187
1188 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1189 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1190 {
1191 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1192 --LABEL_NUSES (old_label);
1193 ++LABEL_NUSES (new_label);
1194 }
1195
1196 /* Handle casesi dispatch insns. */
1197 if ((tmp = single_set (insn)) != NULL
1198 && SET_DEST (tmp) == pc_rtx
1199 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
1200 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
1201 && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
1202 {
1203 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1204 new_label);
1205 --LABEL_NUSES (old_label);
1206 ++LABEL_NUSES (new_label);
1207 }
1208 }
1209 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1210 {
1211 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1212 rtx note;
1213
1214 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1215 return false;
1216 rtx_code_label *new_label = block_label (new_bb);
1217
1218 for (i = 0; i < n; ++i)
1219 {
1220 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1221 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1222 if (XEXP (old_ref, 0) == old_label)
1223 {
1224 ASM_OPERANDS_LABEL (tmp, i)
1225 = gen_rtx_LABEL_REF (Pmode, new_label);
1226 --LABEL_NUSES (old_label);
1227 ++LABEL_NUSES (new_label);
1228 }
1229 }
1230
1231 if (JUMP_LABEL (insn) == old_label)
1232 {
1233 JUMP_LABEL (insn) = new_label;
1234 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1235 if (note)
1236 remove_note (insn, note);
1237 }
1238 else
1239 {
1240 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1241 if (note)
1242 remove_note (insn, note);
1243 if (JUMP_LABEL (insn) != new_label
1244 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1245 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1246 }
1247 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1248 != NULL_RTX)
1249 XEXP (note, 0) = new_label;
1250 }
1251 else
1252 {
1253 /* ?? We may play the games with moving the named labels from
1254 one basic block to the other in case only one computed_jump is
1255 available. */
1256 if (computed_jump_p (insn)
1257 /* A return instruction can't be redirected. */
1258 || returnjump_p (insn))
1259 return false;
1260
1261 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1262 {
1263 /* If the insn doesn't go where we think, we're confused. */
1264 gcc_assert (JUMP_LABEL (insn) == old_label);
1265
1266 /* If the substitution doesn't succeed, die. This can happen
1267 if the back end emitted unrecognizable instructions or if
1268 target is exit block on some arches. */
1269 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1270 block_label (new_bb), 0))
1271 {
1272 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
1273 return false;
1274 }
1275 }
1276 }
1277 return true;
1278 }
1279
1280
1281 /* Redirect the edge representing a branch of an (un)conditional jump or
1282 tablejump; return NULL on failure. */
1283 static edge
1284 redirect_branch_edge (edge e, basic_block target)
1285 {
1286 rtx_insn *old_label = BB_HEAD (e->dest);
1287 basic_block src = e->src;
1288 rtx_insn *insn = BB_END (src);
1289
1290 /* We can only redirect non-fallthru edges of jump insn. */
1291 if (e->flags & EDGE_FALLTHRU)
1292 return NULL;
1293 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1294 return NULL;
1295
1296 if (!currently_expanding_to_rtl)
1297 {
1298 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1299 return NULL;
1300 }
1301 else
1302 /* When expanding this BB might actually contain multiple
1303 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1304 Redirect all of those that match our label. */
1305 FOR_BB_INSNS (src, insn)
1306 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1307 old_label, target))
1308 return NULL;
1309
1310 if (dump_file)
1311 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1312 e->src->index, e->dest->index, target->index);
1313
1314 if (e->dest != target)
1315 e = redirect_edge_succ_nodup (e, target);
1316
1317 return e;
1318 }
1319
1320 /* Called when edge E has been redirected to a new destination,
1321 in order to update the region crossing flag on the edge and
1322 jump. */
1323
1324 static void
1325 fixup_partition_crossing (edge e)
1326 {
1327 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1328 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1329 return;
1330 /* If we redirected an existing edge, it may already be marked
1331 crossing, even though the new src is missing a reg crossing note.
1332 But make sure reg crossing note doesn't already exist before
1333 inserting. */
1334 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1335 {
1336 e->flags |= EDGE_CROSSING;
1337 if (JUMP_P (BB_END (e->src))
1338 && !CROSSING_JUMP_P (BB_END (e->src)))
1339 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1340 }
1341 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1342 {
1343 e->flags &= ~EDGE_CROSSING;
1344 /* Remove the section crossing note from jump at end of
1345 src if it exists, and if no other successors are
1346 still crossing. */
1347 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1348 {
1349 bool has_crossing_succ = false;
1350 edge e2;
1351 edge_iterator ei;
1352 FOR_EACH_EDGE (e2, ei, e->src->succs)
1353 {
1354 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1355 if (has_crossing_succ)
1356 break;
1357 }
1358 if (!has_crossing_succ)
1359 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1360 }
1361 }
1362 }
1363
1364 /* Called when block BB has been reassigned to the cold partition,
1365 because it is now dominated by another cold block,
1366 to ensure that the region crossing attributes are updated. */
1367
1368 static void
1369 fixup_new_cold_bb (basic_block bb)
1370 {
1371 edge e;
1372 edge_iterator ei;
1373
1374 /* This is called when a hot bb is found to now be dominated
1375 by a cold bb and therefore needs to become cold. Therefore,
1376 its preds will no longer be region crossing. Any non-dominating
1377 preds that were previously hot would also have become cold
1378 in the caller for the same region. Any preds that were previously
1379 region-crossing will be adjusted in fixup_partition_crossing. */
1380 FOR_EACH_EDGE (e, ei, bb->preds)
1381 {
1382 fixup_partition_crossing (e);
1383 }
1384
1385 /* Possibly need to make bb's successor edges region crossing,
1386 or remove stale region crossing. */
1387 FOR_EACH_EDGE (e, ei, bb->succs)
1388 {
1389 /* We can't have fall-through edges across partition boundaries.
1390 Note that force_nonfallthru will do any necessary partition
1391 boundary fixup by calling fixup_partition_crossing itself. */
1392 if ((e->flags & EDGE_FALLTHRU)
1393 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1394 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1395 force_nonfallthru (e);
1396 else
1397 fixup_partition_crossing (e);
1398 }
1399 }
1400
1401 /* Attempt to change code to redirect edge E to TARGET. Don't do that at the
1402 expense of adding new instructions or reordering basic blocks.
1403
1404 The function can also be called with the edge destination already equal to
1405 TARGET. Then it should try the simplifications and do nothing if none is possible.
1406
1407 Return the edge representing the branch if the transformation succeeded. Return
1408 NULL on failure.
1409 We still return NULL in case E already pointed to TARGET and we didn't
1410 manage to simplify the instruction stream. */
1411
1412 static edge
1413 rtl_redirect_edge_and_branch (edge e, basic_block target)
1414 {
1415 edge ret;
1416 basic_block src = e->src;
1417 basic_block dest = e->dest;
1418
1419 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1420 return NULL;
1421
1422 if (dest == target)
1423 return e;
1424
1425 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1426 {
1427 df_set_bb_dirty (src);
1428 fixup_partition_crossing (ret);
1429 return ret;
1430 }
1431
1432 ret = redirect_branch_edge (e, target);
1433 if (!ret)
1434 return NULL;
1435
1436 df_set_bb_dirty (src);
1437 fixup_partition_crossing (ret);
1438 return ret;
1439 }
1440
1441 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1442
1443 void
1444 emit_barrier_after_bb (basic_block bb)
1445 {
1446 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1447 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1448 || current_ir_type () == IR_RTL_CFGLAYOUT);
1449 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1450 {
1451 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1452
1453 if (BB_FOOTER (bb))
1454 {
1455 rtx_insn *footer_tail = BB_FOOTER (bb);
1456
1457 while (NEXT_INSN (footer_tail))
1458 footer_tail = NEXT_INSN (footer_tail);
1459 if (!BARRIER_P (footer_tail))
1460 {
1461 SET_NEXT_INSN (footer_tail) = insn;
1462 SET_PREV_INSN (insn) = footer_tail;
1463 }
1464 }
1465 else
1466 BB_FOOTER (bb) = insn;
1467 }
1468 }
1469
1470 /* Like force_nonfallthru below, but additionally performs redirection.
1471 Used by redirect_edge_and_branch_force. JUMP_LABEL is used only
1472 when redirecting to the EXIT_BLOCK; it is either ret_rtx or
1473 simple_return_rtx, indicating which kind of returnjump to create.
1474 It should be NULL otherwise. */
1475
1476 basic_block
1477 force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1478 {
1479 basic_block jump_block, new_bb = NULL, src = e->src;
1480 rtx note;
1481 edge new_edge;
1482 int abnormal_edge_flags = 0;
1483 bool asm_goto_edge = false;
1484 int loc;
1485
1486 /* In case the last instruction is a conditional jump to the next
1487 instruction, first redirect the jump itself and then continue
1488 by creating a basic block afterwards to redirect the fallthru edge. */
1489 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1490 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1491 && any_condjump_p (BB_END (e->src))
1492 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1493 {
1494 rtx note;
1495 edge b = unchecked_make_edge (e->src, target, 0);
1496 bool redirected;
1497
1498 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1499 block_label (target), 0);
1500 gcc_assert (redirected);
1501
1502 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1503 if (note)
1504 {
1505 int prob = XINT (note, 0);
1506
1507 b->probability = prob;
1508 /* Update this to use GCOV_COMPUTE_SCALE. */
1509 b->count = e->count * prob / REG_BR_PROB_BASE;
1510 e->probability -= e->probability;
1511 e->count -= b->count;
1512 if (e->probability < 0)
1513 e->probability = 0;
1514 if (e->count < 0)
1515 e->count = 0;
1516 }
1517 }
1518
1519 if (e->flags & EDGE_ABNORMAL)
1520 {
1521 /* Irritating special case - a fallthru edge to the same block as an abnormal
1522 edge.
1523 We can't redirect the abnormal edge, but we can still split the fallthru
1524 one and create a separate abnormal edge to the original destination.
1525 This allows bb-reorder to make such an edge non-fallthru. */
1526 gcc_assert (e->dest == target);
1527 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1528 e->flags &= EDGE_FALLTHRU;
1529 }
1530 else
1531 {
1532 gcc_assert (e->flags & EDGE_FALLTHRU);
1533 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1534 {
1535 /* We can't redirect the entry block. Create an empty block
1536 at the start of the function which we use to add the new
1537 jump. */
1538 edge tmp;
1539 edge_iterator ei;
1540 bool found = false;
1541
1542 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1543 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1544
1545 /* Change the existing edge's source to be the new block, and add
1546 a new edge from the entry block to the new block. */
1547 e->src = bb;
1548 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1549 (tmp = ei_safe_edge (ei)); )
1550 {
1551 if (tmp == e)
1552 {
1553 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1554 found = true;
1555 break;
1556 }
1557 else
1558 ei_next (&ei);
1559 }
1560
1561 gcc_assert (found);
1562
1563 vec_safe_push (bb->succs, e);
1564 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1565 EDGE_FALLTHRU);
1566 }
1567 }
1568
1569 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1570 don't point to the target or fallthru label. */
1571 if (JUMP_P (BB_END (e->src))
1572 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1573 && (e->flags & EDGE_FALLTHRU)
1574 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1575 {
1576 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1577 bool adjust_jump_target = false;
1578
1579 for (i = 0; i < n; ++i)
1580 {
1581 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1582 {
1583 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1584 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1585 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1586 adjust_jump_target = true;
1587 }
1588 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1589 asm_goto_edge = true;
1590 }
1591 if (adjust_jump_target)
1592 {
1593 rtx_insn *insn = BB_END (e->src);
1594 rtx note;
1595 rtx_insn *old_label = BB_HEAD (e->dest);
1596 rtx_insn *new_label = BB_HEAD (target);
1597
1598 if (JUMP_LABEL (insn) == old_label)
1599 {
1600 JUMP_LABEL (insn) = new_label;
1601 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1602 if (note)
1603 remove_note (insn, note);
1604 }
1605 else
1606 {
1607 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1608 if (note)
1609 remove_note (insn, note);
1610 if (JUMP_LABEL (insn) != new_label
1611 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1612 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1613 }
1614 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1615 != NULL_RTX)
1616 XEXP (note, 0) = new_label;
1617 }
1618 }
1619
1620 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1621 {
1622 rtx_insn *new_head;
1623 gcov_type count = e->count;
1624 int probability = e->probability;
1625 /* Create the new structures. */
1626
1627 /* If the old block ended with a tablejump, skip its table
1628 by searching forward from there. Otherwise start searching
1629 forward from the last instruction of the old block. */
1630 rtx_jump_table_data *table;
1631 if (tablejump_p (BB_END (e->src), NULL, &table))
1632 new_head = table;
1633 else
1634 new_head = BB_END (e->src);
1635 new_head = NEXT_INSN (new_head);
1636
1637 jump_block = create_basic_block (new_head, NULL, e->src);
1638 jump_block->count = count;
1639 jump_block->frequency = EDGE_FREQUENCY (e);
1640
1641 /* Make sure new block ends up in correct hot/cold section. */
1642
1643 BB_COPY_PARTITION (jump_block, e->src);
1644
1645 /* Wire edge in. */
1646 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1647 new_edge->probability = probability;
1648 new_edge->count = count;
1649
1650 /* Redirect old edge. */
1651 redirect_edge_pred (e, jump_block);
1652 e->probability = REG_BR_PROB_BASE;
1653
1654 /* If e->src was previously region crossing, it no longer is
1655 and the reg crossing note should be removed. */
1656 fixup_partition_crossing (new_edge);
1657
1658 /* If asm goto has any label refs to target's label,
1659 add also edge from asm goto bb to target. */
1660 if (asm_goto_edge)
1661 {
1662 new_edge->probability /= 2;
1663 new_edge->count /= 2;
1664 jump_block->count /= 2;
1665 jump_block->frequency /= 2;
1666 new_edge = make_edge (new_edge->src, target,
1667 e->flags & ~EDGE_FALLTHRU);
1668 new_edge->probability = probability - probability / 2;
1669 new_edge->count = count - count / 2;
1670 }
1671
1672 new_bb = jump_block;
1673 }
1674 else
1675 jump_block = e->src;
1676
1677 loc = e->goto_locus;
1678 e->flags &= ~EDGE_FALLTHRU;
1679 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1680 {
1681 if (jump_label == ret_rtx)
1682 emit_jump_insn_after_setloc (targetm.gen_return (),
1683 BB_END (jump_block), loc);
1684 else
1685 {
1686 gcc_assert (jump_label == simple_return_rtx);
1687 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1688 BB_END (jump_block), loc);
1689 }
1690 set_return_jump_label (BB_END (jump_block));
1691 }
1692 else
1693 {
1694 rtx_code_label *label = block_label (target);
1695 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1696 BB_END (jump_block), loc);
1697 JUMP_LABEL (BB_END (jump_block)) = label;
1698 LABEL_NUSES (label)++;
1699 }
1700
1701 /* We might be in cfg layout mode, and if so, the following routine will
1702 insert the barrier correctly. */
1703 emit_barrier_after_bb (jump_block);
1704 redirect_edge_succ_nodup (e, target);
1705
1706 if (abnormal_edge_flags)
1707 make_edge (src, target, abnormal_edge_flags);
1708
1709 df_mark_solutions_dirty ();
1710 fixup_partition_crossing (e);
1711 return new_bb;
1712 }
1713
1714 /* Edge E is assumed to be a fallthru edge. Emit the needed jump instruction
1715 (and possibly create a new basic block) to make the edge non-fallthru.
1716 Return the newly created BB, or NULL if none. */
1717
1718 static basic_block
1719 rtl_force_nonfallthru (edge e)
1720 {
1721 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1722 }
1723
1724 /* Redirect edge even at the expense of creating new jump insn or
1725 basic block. Return new basic block if created, NULL otherwise.
1726 Conversion must be possible. */
1727
1728 static basic_block
1729 rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1730 {
1731 if (redirect_edge_and_branch (e, target)
1732 || e->dest == target)
1733 return NULL;
1734
1735 /* In case the edge redirection failed, try to force it to be non-fallthru
1736 and redirect newly created simplejump. */
1737 df_set_bb_dirty (e->src);
1738 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1739 }
1740
1741 /* The given edge should potentially be a fallthru edge. If that is in
1742 fact true, delete the jump and barriers that are in the way. */
1743
1744 static void
1745 rtl_tidy_fallthru_edge (edge e)
1746 {
1747 rtx_insn *q;
1748 basic_block b = e->src, c = b->next_bb;
1749
1750 /* ??? In a late-running flow pass, other folks may have deleted basic
1751 blocks by nopping out blocks, leaving multiple BARRIERs between here
1752 and the target label. They ought to be chastised and fixed.
1753
1754 We can also wind up with a sequence of undeletable labels between
1755 one block and the next.
1756
1757 So search through a sequence of barriers, labels, and notes for
1758 the head of block C and assert that we really do fall through. */
1759
1760 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1761 if (INSN_P (q))
1762 return;
1763
1764 /* Remove what will soon cease being the jump insn from the source block.
1765 If block B consisted only of this single jump, turn it into a deleted
1766 note. */
1767 q = BB_END (b);
1768 if (JUMP_P (q)
1769 && onlyjump_p (q)
1770 && (any_uncondjump_p (q)
1771 || single_succ_p (b)))
1772 {
1773 rtx_insn *label;
1774 rtx_jump_table_data *table;
1775
1776 if (tablejump_p (q, &label, &table))
1777 {
1778 /* The label is likely mentioned in some instruction before
1779 the tablejump and might not be DCEd, so turn it into
1780 a note instead and move before the tablejump that is going to
1781 be deleted. */
1782 const char *name = LABEL_NAME (label);
1783 PUT_CODE (label, NOTE);
1784 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1785 NOTE_DELETED_LABEL_NAME (label) = name;
1786 reorder_insns (label, label, PREV_INSN (q));
1787 delete_insn (table);
1788 }
1789
1790 /* If this was a conditional jump, we need to also delete
1791 the insn that set cc0. */
1792 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1793 q = PREV_INSN (q);
1794
1795 q = PREV_INSN (q);
1796 }
1797 /* Unconditional jumps with side-effects (i.e. which we can't just delete
1798 together with the barrier) should never have a fallthru edge. */
1799 else if (JUMP_P (q) && any_uncondjump_p (q))
1800 return;
1801
1802 /* Selectively unlink the sequence. */
1803 if (q != PREV_INSN (BB_HEAD (c)))
1804 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1805
1806 e->flags |= EDGE_FALLTHRU;
1807 }
1808 \f
1809 /* Should move basic block BB after basic block AFTER. NIY. */
1810
1811 static bool
1812 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1813 basic_block after ATTRIBUTE_UNUSED)
1814 {
1815 return false;
1816 }
1817
1818 /* Locate the last bb in the same partition as START_BB. */
1819
1820 static basic_block
1821 last_bb_in_partition (basic_block start_bb)
1822 {
1823 basic_block bb;
1824 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1825 {
1826 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1827 return bb;
1828 }
1829 /* Return bb before the exit block. */
1830 return bb->prev_bb;
1831 }
1832
1833 /* Split a (typically critical) edge. Return the new block.
1834 The edge must not be abnormal.
1835
1836 ??? The code generally expects to be called on critical edges.
1837 The case of a block ending in an unconditional jump to a
1838 block with multiple predecessors is not handled optimally. */
1839
1840 static basic_block
1841 rtl_split_edge (edge edge_in)
1842 {
1843 basic_block bb, new_bb;
1844 rtx_insn *before;
1845
1846 /* Abnormal edges cannot be split. */
1847 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1848
1849 /* We are going to place the new block in front of edge destination.
1850 Avoid existence of fallthru predecessors. */
1851 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1852 {
1853 edge e = find_fallthru_edge (edge_in->dest->preds);
1854
1855 if (e)
1856 force_nonfallthru (e);
1857 }
1858
1859 /* Create the basic block note. */
1860 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1861 before = BB_HEAD (edge_in->dest);
1862 else
1863 before = NULL;
1864
1865 /* If this is a fall through edge to the exit block, the blocks might
1866 not be adjacent, and the right place is after the source. */
1867 if ((edge_in->flags & EDGE_FALLTHRU)
1868 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1869 {
1870 before = NEXT_INSN (BB_END (edge_in->src));
1871 bb = create_basic_block (before, NULL, edge_in->src);
1872 BB_COPY_PARTITION (bb, edge_in->src);
1873 }
1874 else
1875 {
1876 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1877 {
1878 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1879 BB_COPY_PARTITION (bb, edge_in->dest);
1880 }
1881 else
1882 {
1883 basic_block after = edge_in->dest->prev_bb;
1884 /* If this is post-bb reordering, and the edge crosses a partition
1885 boundary, the new block needs to be inserted in the bb chain
1886 at the end of the src partition (since we put the new bb into
1887 that partition, see below). Otherwise we may end up creating
1888 an extra partition crossing in the chain, which is illegal.
1889 It can't go after the src, because src may have a fall-through
1890 to a different block. */
1891 if (crtl->bb_reorder_complete
1892 && (edge_in->flags & EDGE_CROSSING))
1893 {
1894 after = last_bb_in_partition (edge_in->src);
1895 before = get_last_bb_insn (after);
1896 /* The instruction following the last bb in partition should
1897 be a barrier, since it cannot end in a fall-through. */
1898 gcc_checking_assert (BARRIER_P (before));
1899 before = NEXT_INSN (before);
1900 }
1901 bb = create_basic_block (before, NULL, after);
1902 /* Put the split bb into the src partition, to avoid creating
1903 a situation where a cold bb dominates a hot bb, in the case
1904 where src is cold and dest is hot. The src will dominate
1905 the new bb (whereas it might not have dominated dest). */
1906 BB_COPY_PARTITION (bb, edge_in->src);
1907 }
1908 }
1909
1910 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1911
1912 /* Can't allow a region crossing edge to be fallthrough. */
1913 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1914 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1915 {
1916 new_bb = force_nonfallthru (single_succ_edge (bb));
1917 gcc_assert (!new_bb);
1918 }
1919
1920 /* For non-fallthru edges, we must adjust the predecessor's
1921 jump instruction to target our new block. */
1922 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1923 {
1924 edge redirected = redirect_edge_and_branch (edge_in, bb);
1925 gcc_assert (redirected);
1926 }
1927 else
1928 {
1929 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1930 {
1931 /* For asm goto even splitting of fallthru edge might
1932 need insn patching, as other labels might point to the
1933 old label. */
1934 rtx_insn *last = BB_END (edge_in->src);
1935 if (last
1936 && JUMP_P (last)
1937 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1938 && extract_asm_operands (PATTERN (last)) != NULL_RTX
1939 && patch_jump_insn (last, before, bb))
1940 df_set_bb_dirty (edge_in->src);
1941 }
1942 redirect_edge_succ (edge_in, bb);
1943 }
1944
1945 return bb;
1946 }
1947
1948 /* Queue instructions for insertion on an edge between two basic blocks.
1949 The new instructions and basic blocks (if any) will not appear in the
1950 CFG until commit_edge_insertions is called. */
1951
1952 void
1953 insert_insn_on_edge (rtx pattern, edge e)
1954 {
1955 /* We cannot insert instructions on an abnormal critical edge.
1956 It will be easier to find the culprit if we die now. */
1957 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1958
1959 if (e->insns.r == NULL_RTX)
1960 start_sequence ();
1961 else
1962 push_to_sequence (e->insns.r);
1963
1964 emit_insn (pattern);
1965
1966 e->insns.r = get_insns ();
1967 end_sequence ();
1968 }
1969
1970 /* Update the CFG for the instructions queued on edge E. */
1971
1972 void
1973 commit_one_edge_insertion (edge e)
1974 {
1975 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1976 basic_block bb;
1977
1978 /* Pull the insns off the edge now since the edge might go away. */
1979 insns = e->insns.r;
1980 e->insns.r = NULL;
1981
1982 /* Figure out where to put these insns. If the destination has
1983 one predecessor, insert there. Except for the exit block. */
1984 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1985 {
1986 bb = e->dest;
1987
1988 /* Get the location correct wrt a code label, and "nice" wrt
1989 a basic block note, and before everything else. */
1990 tmp = BB_HEAD (bb);
1991 if (LABEL_P (tmp))
1992 tmp = NEXT_INSN (tmp);
1993 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1994 tmp = NEXT_INSN (tmp);
1995 if (tmp == BB_HEAD (bb))
1996 before = tmp;
1997 else if (tmp)
1998 after = PREV_INSN (tmp);
1999 else
2000 after = get_last_insn ();
2001 }
2002
2003 /* If the source has one successor and the edge is not abnormal,
2004 insert there. Except for the entry block.
2005 Don't do this if the predecessor ends in a jump other than an
2006 unconditional simple jump. E.g. for an asm goto that points all
2007 its labels at the fallthru basic block, we can't insert instructions
2008 before the asm goto, as the asm goto can have various side effects,
2009 and we can't emit instructions after the asm goto, as it must end
2010 the basic block. */
2011 else if ((e->flags & EDGE_ABNORMAL) == 0
2012 && single_succ_p (e->src)
2013 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2014 && (!JUMP_P (BB_END (e->src))
2015 || simplejump_p (BB_END (e->src))))
2016 {
2017 bb = e->src;
2018
2019 /* It is possible to have a non-simple jump here. Consider a target
2020 where some forms of unconditional jumps clobber a register. This
2021 happens on the fr30 for example.
2022
2023 We know this block has a single successor, so we can just emit
2024 the queued insns before the jump. */
2025 if (JUMP_P (BB_END (bb)))
2026 before = BB_END (bb);
2027 else
2028 {
2029 /* We'd better be fallthru, or we've lost track of what's what. */
2030 gcc_assert (e->flags & EDGE_FALLTHRU);
2031
2032 after = BB_END (bb);
2033 }
2034 }
2035
2036 /* Otherwise we must split the edge. */
2037 else
2038 {
2039 bb = split_edge (e);
2040
2041 /* If E crossed a partition boundary, we needed to make bb end in
2042 a region-crossing jump, even though it was originally fallthru. */
2043 if (JUMP_P (BB_END (bb)))
2044 before = BB_END (bb);
2045 else
2046 after = BB_END (bb);
2047 }
2048
2049 /* Now that we've found the spot, do the insertion. */
2050 if (before)
2051 {
2052 emit_insn_before_noloc (insns, before, bb);
2053 last = prev_nonnote_insn (before);
2054 }
2055 else
2056 last = emit_insn_after_noloc (insns, after, bb);
2057
2058 if (returnjump_p (last))
2059 {
2060 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2061 This is not currently a problem because this only happens
2062 for the (single) epilogue, which already has a fallthru edge
2063 to EXIT. */
2064
2065 e = single_succ_edge (bb);
2066 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2067 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2068
2069 e->flags &= ~EDGE_FALLTHRU;
2070 emit_barrier_after (last);
2071
2072 if (before)
2073 delete_insn (before);
2074 }
2075 else
2076 gcc_assert (!JUMP_P (last));
2077 }
2078
2079 /* Update the CFG for all queued instructions. */
2080
2081 void
2082 commit_edge_insertions (void)
2083 {
2084 basic_block bb;
2085
2086 /* Optimization passes that invoke this routine can cause hot blocks
2087 previously reached by both hot and cold blocks to become dominated only
2088 by cold blocks. This will cause the verification below to fail,
2089 and lead to now-cold code in the hot section. In some cases this
2090 may only be visible after newly unreachable blocks are deleted,
2091 which will be done by fixup_partitions. */
2092 fixup_partitions ();
2093
2094 checking_verify_flow_info ();
2095
2096 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2097 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2098 {
2099 edge e;
2100 edge_iterator ei;
2101
2102 FOR_EACH_EDGE (e, ei, bb->succs)
2103 if (e->insns.r)
2104 commit_one_edge_insertion (e);
2105 }
2106 }
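/* Illustrative sketch, not compiled into GCC: a typical use of the two
   entry points above. A pass queues a (hypothetical) register copy on
   every ordinary successor edge of BB and then flushes everything at
   once; edge splitting and all CFG updates happen only inside
   commit_edge_insertions. */
#if 0
static void
example_insert_copy_on_succs (basic_block bb, rtx dest, rtx src)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    if (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)))
      insert_insn_on_edge (gen_move_insn (dest, src), e);

  commit_edge_insertions ();
}
#endif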
2107 \f
2108
2109 /* Print out RTL-specific basic block information (live information
2110 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2111 documented in dumpfile.h. */
2112
2113 static void
2114 rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
2115 {
2116 rtx_insn *insn;
2117 rtx_insn *last;
2118 char *s_indent;
2119
2120 s_indent = (char *) alloca ((size_t) indent + 1);
2121 memset (s_indent, ' ', (size_t) indent);
2122 s_indent[indent] = '\0';
2123
2124 if (df && (flags & TDF_DETAILS))
2125 {
2126 df_dump_top (bb, outf);
2127 putc ('\n', outf);
2128 }
2129
2130 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2131 for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
2132 insn = NEXT_INSN (insn))
2133 {
2134 if (flags & TDF_DETAILS)
2135 df_dump_insn_top (insn, outf);
2136 if (! (flags & TDF_SLIM))
2137 print_rtl_single (outf, insn);
2138 else
2139 dump_insn_slim (outf, insn);
2140 if (flags & TDF_DETAILS)
2141 df_dump_insn_bottom (insn, outf);
2142 }
2143
2144 if (df && (flags & TDF_DETAILS))
2145 {
2146 df_dump_bottom (bb, outf);
2147 putc ('\n', outf);
2148 }
2149
2150 }
2151 \f
2152 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2153 for the start of each basic block. FLAGS are the TDF_* masks documented
2154 in dumpfile.h. */
2155
2156 void
2157 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, int flags)
2158 {
2159 const rtx_insn *tmp_rtx;
2160 if (rtx_first == 0)
2161 fprintf (outf, "(nil)\n");
2162 else
2163 {
2164 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2165 int max_uid = get_max_uid ();
2166 basic_block *start = XCNEWVEC (basic_block, max_uid);
2167 basic_block *end = XCNEWVEC (basic_block, max_uid);
2168 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2169 basic_block bb;
2170
2171 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2172 insns, but the CFG is not maintained so the basic block info
2173 is not reliable. Therefore it's omitted from the dumps. */
2174 if (! (cfun->curr_properties & PROP_cfg))
2175 flags &= ~TDF_BLOCKS;
2176
2177 if (df)
2178 df_dump_start (outf);
2179
2180 if (flags & TDF_BLOCKS)
2181 {
2182 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2183 {
2184 rtx_insn *x;
2185
2186 start[INSN_UID (BB_HEAD (bb))] = bb;
2187 end[INSN_UID (BB_END (bb))] = bb;
2188 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2189 {
2190 enum bb_state state = IN_MULTIPLE_BB;
2191
2192 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2193 state = IN_ONE_BB;
2194 in_bb_p[INSN_UID (x)] = state;
2195
2196 if (x == BB_END (bb))
2197 break;
2198 }
2199 }
2200 }
2201
2202 for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
2203 {
2204 if (flags & TDF_BLOCKS)
2205 {
2206 bb = start[INSN_UID (tmp_rtx)];
2207 if (bb != NULL)
2208 {
2209 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
2210 if (df && (flags & TDF_DETAILS))
2211 df_dump_top (bb, outf);
2212 }
2213
2214 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2215 && !NOTE_P (tmp_rtx)
2216 && !BARRIER_P (tmp_rtx))
2217 fprintf (outf, ";; Insn is not within a basic block\n");
2218 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2219 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2220 }
2221
2222 if (flags & TDF_DETAILS)
2223 df_dump_insn_top (tmp_rtx, outf);
2224 if (! (flags & TDF_SLIM))
2225 print_rtl_single (outf, tmp_rtx);
2226 else
2227 dump_insn_slim (outf, tmp_rtx);
2228 if (flags & TDF_DETAILS)
2229 df_dump_insn_bottom (tmp_rtx, outf);
2230
2231 if (flags & TDF_BLOCKS)
2232 {
2233 bb = end[INSN_UID (tmp_rtx)];
2234 if (bb != NULL)
2235 {
2236 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
2237 if (df && (flags & TDF_DETAILS))
2238 df_dump_bottom (bb, outf);
2239 putc ('\n', outf);
2240 }
2241 }
2242 }
2243
2244 free (start);
2245 free (end);
2246 free (in_bb_p);
2247 }
2248 }
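/* Illustrative sketch, not compiled in: dumping the whole insn chain with
   basic block markers and dataflow details, e.g. from a quick debugging
   hack or a gdb call while a pass is running. The flag combination is
   only an example. */
#if 0
static void
example_debug_dump_with_bb (void)
{
  print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
}
#endif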
2249 \f
2250 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2251
2252 void
2253 update_br_prob_note (basic_block bb)
2254 {
2255 rtx note;
2256 if (!JUMP_P (BB_END (bb)))
2257 return;
2258 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2259 if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
2260 return;
2261 XINT (note, 0) = BRANCH_EDGE (bb)->probability;
2262 }
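/* Illustrative sketch, not compiled in: a pass that rescales the outgoing
   probabilities of a block ending in a conditional jump keeps the
   REG_BR_PROB note in sync by calling update_br_prob_note afterwards.
   PROB is a hypothetical value scaled by REG_BR_PROB_BASE. */
#if 0
static void
example_set_branch_probability (basic_block bb, int prob)
{
  BRANCH_EDGE (bb)->probability = prob;
  FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
  update_br_prob_note (bb);
}
#endif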
2263
2264 /* Get the last insn associated with block BB (that includes barriers and
2265 tablejumps after BB). */
2266 rtx_insn *
2267 get_last_bb_insn (basic_block bb)
2268 {
2269 rtx_jump_table_data *table;
2270 rtx_insn *tmp;
2271 rtx_insn *end = BB_END (bb);
2272
2273 /* Include any jump table following the basic block. */
2274 if (tablejump_p (end, NULL, &table))
2275 end = table;
2276
2277 /* Include any barriers that may follow the basic block. */
2278 tmp = next_nonnote_insn_bb (end);
2279 while (tmp && BARRIER_P (tmp))
2280 {
2281 end = tmp;
2282 tmp = next_nonnote_insn_bb (end);
2283 }
2284
2285 return end;
2286 }
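/* Illustrative sketch, not compiled in: deleting a block together with any
   jump table and barriers that trail it, which is the intended use of
   get_last_bb_insn; rtl_delete_block elsewhere in this file does
   essentially this. */
#if 0
static void
example_delete_block_and_trailer (basic_block bb)
{
  delete_insn_chain (BB_HEAD (bb), get_last_bb_insn (bb), true);
}
#endif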
2287
2288 /* Sanity check partition hotness to ensure that basic blocks in
2289 the cold partition don't dominate basic blocks in the hot partition.
2290 If FLAG_ONLY is true, report violations as errors. Otherwise
2291 re-mark the dominated blocks as cold, since this is run after
2292 cfg optimizations that may make hot blocks previously reached
2293 by both hot and cold blocks now only reachable along cold paths. */
2294
2295 static vec<basic_block>
2296 find_partition_fixes (bool flag_only)
2297 {
2298 basic_block bb;
2299 vec<basic_block> bbs_in_cold_partition = vNULL;
2300 vec<basic_block> bbs_to_fix = vNULL;
2301
2302 /* Callers check this. */
2303 gcc_checking_assert (crtl->has_bb_partition);
2304
2305 FOR_EACH_BB_FN (bb, cfun)
2306 if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
2307 bbs_in_cold_partition.safe_push (bb);
2308
2309 if (bbs_in_cold_partition.is_empty ())
2310 return vNULL;
2311
2312 bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
2313
2314 if (dom_calculated_here)
2315 calculate_dominance_info (CDI_DOMINATORS);
2316
2317 while (! bbs_in_cold_partition.is_empty ())
2318 {
2319 bb = bbs_in_cold_partition.pop ();
2320 /* Any blocks dominated by a block in the cold section
2321 must also be cold. */
2322 basic_block son;
2323 for (son = first_dom_son (CDI_DOMINATORS, bb);
2324 son;
2325 son = next_dom_son (CDI_DOMINATORS, son))
2326 {
2327 /* If son is not yet cold, then mark it cold here and
2328 enqueue it for further processing. */
2329 if ((BB_PARTITION (son) != BB_COLD_PARTITION))
2330 {
2331 if (flag_only)
2332 error ("non-cold basic block %d dominated "
2333 "by a block in the cold partition (%d)", son->index, bb->index);
2334 else
2335 BB_SET_PARTITION (son, BB_COLD_PARTITION);
2336 bbs_to_fix.safe_push (son);
2337 bbs_in_cold_partition.safe_push (son);
2338 }
2339 }
2340 }
2341
2342 if (dom_calculated_here)
2343 free_dominance_info (CDI_DOMINATORS);
2344
2345 return bbs_to_fix;
2346 }
2347
2348 /* Perform cleanup on the hot/cold bb partitioning after optimization
2349 passes that modify the cfg. */
2350
2351 void
2352 fixup_partitions (void)
2353 {
2354 basic_block bb;
2355
2356 if (!crtl->has_bb_partition)
2357 return;
2358
2359 /* Delete any blocks that became unreachable and weren't
2360 already cleaned up, for example during edge forwarding
2361 and convert_jumps_to_returns. This will expose more
2362 opportunities for fixing the partition boundaries here.
2363 Also, the calculation of the dominance graph during verification
2364 will assert if there are unreachable nodes. */
2365 delete_unreachable_blocks ();
2366
2367 /* If there are partitions, do a sanity check on them: A basic block in
2368 a cold partition cannot dominate a basic block in a hot partition.
2369 Fixup any that now violate this requirement, as a result of edge
2370 forwarding and unreachable block deletion.  */
2371 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2372
2373 /* Do the partition fixup after all necessary blocks have been converted to
2374 cold, so that we only update the region crossings the minimum number of
2375 places, which can require forcing edges to be non fallthru. */
2376 while (! bbs_to_fix.is_empty ())
2377 {
2378 bb = bbs_to_fix.pop ();
2379 fixup_new_cold_bb (bb);
2380 }
2381 }
2382
2383 /* Verify, in the basic block chain, that there is at most one switch
2384 between hot/cold partitions. This condition will not be true until
2385 after reorder_basic_blocks is called. */
2386
2387 static int
2388 verify_hot_cold_block_grouping (void)
2389 {
2390 basic_block bb;
2391 int err = 0;
2392 bool switched_sections = false;
2393 int current_partition = BB_UNPARTITIONED;
2394
2395 /* Even after bb reordering is complete, we go into cfglayout mode
2396 again (in compgoto). Ensure we don't call this before going back
2397 into linearized RTL when any layout fixes would have been committed. */
2398 if (!crtl->bb_reorder_complete
2399 || current_ir_type () != IR_RTL_CFGRTL)
2400 return err;
2401
2402 FOR_EACH_BB_FN (bb, cfun)
2403 {
2404 if (current_partition != BB_UNPARTITIONED
2405 && BB_PARTITION (bb) != current_partition)
2406 {
2407 if (switched_sections)
2408 {
2409 error ("multiple hot/cold transitions found (bb %i)",
2410 bb->index);
2411 err = 1;
2412 }
2413 else
2414 switched_sections = true;
2415
2416 if (!crtl->has_bb_partition)
2417 error ("partition found but function partition flag not set");
2418 }
2419 current_partition = BB_PARTITION (bb);
2420 }
2421
2422 return err;
2423 }
2424 \f
2425
2426 /* Perform several checks on the edges out of each block, such as
2427 the consistency of the branch probabilities, the correctness
2428 of hot/cold partition crossing edges, and the number of expected
2429 successor edges. Also verify that the dominance relationship
2430 between hot/cold blocks is sane. */
2431
2432 static int
2433 rtl_verify_edges (void)
2434 {
2435 int err = 0;
2436 basic_block bb;
2437
2438 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2439 {
2440 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2441 int n_eh = 0, n_abnormal = 0;
2442 edge e, fallthru = NULL;
2443 edge_iterator ei;
2444 rtx note;
2445 bool has_crossing_edge = false;
2446
2447 if (JUMP_P (BB_END (bb))
2448 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2449 && EDGE_COUNT (bb->succs) >= 2
2450 && any_condjump_p (BB_END (bb)))
2451 {
2452 if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
2453 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2454 {
2455 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2456 XINT (note, 0), BRANCH_EDGE (bb)->probability);
2457 err = 1;
2458 }
2459 }
2460
2461 FOR_EACH_EDGE (e, ei, bb->succs)
2462 {
2463 bool is_crossing;
2464
2465 if (e->flags & EDGE_FALLTHRU)
2466 n_fallthru++, fallthru = e;
2467
2468 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2469 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2470 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2471 has_crossing_edge |= is_crossing;
2472 if (e->flags & EDGE_CROSSING)
2473 {
2474 if (!is_crossing)
2475 {
2476 error ("EDGE_CROSSING incorrectly set across same section");
2477 err = 1;
2478 }
2479 if (e->flags & EDGE_FALLTHRU)
2480 {
2481 error ("fallthru edge crosses section boundary in bb %i",
2482 e->src->index);
2483 err = 1;
2484 }
2485 if (e->flags & EDGE_EH)
2486 {
2487 error ("EH edge crosses section boundary in bb %i",
2488 e->src->index);
2489 err = 1;
2490 }
2491 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2492 {
2493 error ("No region crossing jump at section boundary in bb %i",
2494 bb->index);
2495 err = 1;
2496 }
2497 }
2498 else if (is_crossing)
2499 {
2500 error ("EDGE_CROSSING missing across section boundary");
2501 err = 1;
2502 }
2503
2504 if ((e->flags & ~(EDGE_DFS_BACK
2505 | EDGE_CAN_FALLTHRU
2506 | EDGE_IRREDUCIBLE_LOOP
2507 | EDGE_LOOP_EXIT
2508 | EDGE_CROSSING
2509 | EDGE_PRESERVE)) == 0)
2510 n_branch++;
2511
2512 if (e->flags & EDGE_ABNORMAL_CALL)
2513 n_abnormal_call++;
2514
2515 if (e->flags & EDGE_SIBCALL)
2516 n_sibcall++;
2517
2518 if (e->flags & EDGE_EH)
2519 n_eh++;
2520
2521 if (e->flags & EDGE_ABNORMAL)
2522 n_abnormal++;
2523 }
2524
2525 if (!has_crossing_edge
2526 && JUMP_P (BB_END (bb))
2527 && CROSSING_JUMP_P (BB_END (bb)))
2528 {
2529 print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
2530 error ("Region crossing jump across same section in bb %i",
2531 bb->index);
2532 err = 1;
2533 }
2534
2535 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2536 {
2537 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2538 err = 1;
2539 }
2540 if (n_eh > 1)
2541 {
2542 error ("too many exception handling edges in bb %i", bb->index);
2543 err = 1;
2544 }
2545 if (n_branch
2546 && (!JUMP_P (BB_END (bb))
2547 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2548 || any_condjump_p (BB_END (bb))))))
2549 {
2550 error ("too many outgoing branch edges from bb %i", bb->index);
2551 err = 1;
2552 }
2553 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2554 {
2555 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2556 err = 1;
2557 }
2558 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2559 {
2560 error ("wrong number of branch edges after unconditional jump"
2561 " in bb %i", bb->index);
2562 err = 1;
2563 }
2564 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2565 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2566 {
2567 error ("wrong amount of branch edges after conditional jump"
2568 " in bb %i", bb->index);
2569 err = 1;
2570 }
2571 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2572 {
2573 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2574 err = 1;
2575 }
2576 if (n_sibcall && !CALL_P (BB_END (bb)))
2577 {
2578 error ("sibcall edges for non-call insn in bb %i", bb->index);
2579 err = 1;
2580 }
2581 if (n_abnormal > n_eh
2582 && !(CALL_P (BB_END (bb))
2583 && n_abnormal == n_abnormal_call + n_sibcall)
2584 && (!JUMP_P (BB_END (bb))
2585 || any_condjump_p (BB_END (bb))
2586 || any_uncondjump_p (BB_END (bb))))
2587 {
2588 error ("abnormal edges for no purpose in bb %i", bb->index);
2589 err = 1;
2590 }
2591 }
2592
2593 /* If there are partitions, do a sanity check on them: A basic block in
2594 a cold partition cannot dominate a basic block in a hot partition. */
2595 if (crtl->has_bb_partition && !err)
2596 {
2597 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2598 err = !bbs_to_fix.is_empty ();
2599 }
2600
2601 /* Clean up. */
2602 return err;
2603 }
2604
2605 /* Checks on the instructions within blocks. Currently checks that each
2606 block starts with a basic block note, and that basic block notes and
2607 control flow jumps are not found in the middle of the block. */
2608
2609 static int
2610 rtl_verify_bb_insns (void)
2611 {
2612 rtx_insn *x;
2613 int err = 0;
2614 basic_block bb;
2615
2616 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2617 {
2618 /* Now check the header of the basic
2619 block. It ought to contain an optional CODE_LABEL followed
2620 by NOTE_BASIC_BLOCK. */
2621 x = BB_HEAD (bb);
2622 if (LABEL_P (x))
2623 {
2624 if (BB_END (bb) == x)
2625 {
2626 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2627 bb->index);
2628 err = 1;
2629 }
2630
2631 x = NEXT_INSN (x);
2632 }
2633
2634 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2635 {
2636 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2637 bb->index);
2638 err = 1;
2639 }
2640
2641 if (BB_END (bb) == x)
2642 /* Do checks for empty blocks here. */
2643 ;
2644 else
2645 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2646 {
2647 if (NOTE_INSN_BASIC_BLOCK_P (x))
2648 {
2649 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2650 INSN_UID (x), bb->index);
2651 err = 1;
2652 }
2653
2654 if (x == BB_END (bb))
2655 break;
2656
2657 if (control_flow_insn_p (x))
2658 {
2659 error ("in basic block %d:", bb->index);
2660 fatal_insn ("flow control insn inside a basic block", x);
2661 }
2662 }
2663 }
2664
2665 /* Clean up. */
2666 return err;
2667 }
2668
2669 /* Verify that block pointers for instructions in basic blocks, headers and
2670 footers are set appropriately. */
2671
2672 static int
2673 rtl_verify_bb_pointers (void)
2674 {
2675 int err = 0;
2676 basic_block bb;
2677
2678 /* Check the general integrity of the basic blocks. */
2679 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2680 {
2681 rtx_insn *insn;
2682
2683 if (!(bb->flags & BB_RTL))
2684 {
2685 error ("BB_RTL flag not set for block %d", bb->index);
2686 err = 1;
2687 }
2688
2689 FOR_BB_INSNS (bb, insn)
2690 if (BLOCK_FOR_INSN (insn) != bb)
2691 {
2692 error ("insn %d basic block pointer is %d, should be %d",
2693 INSN_UID (insn),
2694 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2695 bb->index);
2696 err = 1;
2697 }
2698
2699 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2700 if (!BARRIER_P (insn)
2701 && BLOCK_FOR_INSN (insn) != NULL)
2702 {
2703 error ("insn %d in header of bb %d has non-NULL basic block",
2704 INSN_UID (insn), bb->index);
2705 err = 1;
2706 }
2707 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2708 if (!BARRIER_P (insn)
2709 && BLOCK_FOR_INSN (insn) != NULL)
2710 {
2711 error ("insn %d in footer of bb %d has non-NULL basic block",
2712 INSN_UID (insn), bb->index);
2713 err = 1;
2714 }
2715 }
2716
2717 /* Clean up. */
2718 return err;
2719 }
2720
2721 /* Verify the CFG and RTL consistency common for both underlying RTL and
2722 cfglayout RTL.
2723
2724 Currently it does following checks:
2725
2726 - overlapping of basic blocks
2727 - insns with wrong BLOCK_FOR_INSN pointers
2728 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2729 - tails of basic blocks (ensure that boundary is necessary)
2730 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2731 and NOTE_INSN_BASIC_BLOCK
2732 - verify that no fall_thru edge crosses hot/cold partition boundaries
2733 - verify that there are no pending RTL branch predictions
2734 - verify that hot blocks are not dominated by cold blocks
2735
2736 In the future it can be extended to check a lot of other stuff as well
2737 (reachability of basic blocks, life information, etc. etc.). */
2738
2739 static int
2740 rtl_verify_flow_info_1 (void)
2741 {
2742 int err = 0;
2743
2744 err |= rtl_verify_bb_pointers ();
2745
2746 err |= rtl_verify_bb_insns ();
2747
2748 err |= rtl_verify_edges ();
2749
2750 return err;
2751 }
2752
2753 /* Walk the instruction chain and verify that bb head/end pointers
2754 are correct, and that instructions are in exactly one bb and have
2755 correct block pointers. */
2756
2757 static int
2758 rtl_verify_bb_insn_chain (void)
2759 {
2760 basic_block bb;
2761 int err = 0;
2762 rtx_insn *x;
2763 rtx_insn *last_head = get_last_insn ();
2764 basic_block *bb_info;
2765 const int max_uid = get_max_uid ();
2766
2767 bb_info = XCNEWVEC (basic_block, max_uid);
2768
2769 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2770 {
2771 rtx_insn *head = BB_HEAD (bb);
2772 rtx_insn *end = BB_END (bb);
2773
2774 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2775 {
2776 /* Verify the end of the basic block is in the INSN chain. */
2777 if (x == end)
2778 break;
2779
2780 /* And that the code outside of basic blocks has NULL bb field. */
2781 if (!BARRIER_P (x)
2782 && BLOCK_FOR_INSN (x) != NULL)
2783 {
2784 error ("insn %d outside of basic blocks has non-NULL bb field",
2785 INSN_UID (x));
2786 err = 1;
2787 }
2788 }
2789
2790 if (!x)
2791 {
2792 error ("end insn %d for block %d not found in the insn stream",
2793 INSN_UID (end), bb->index);
2794 err = 1;
2795 }
2796
2797 /* Work backwards from the end to the head of the basic block
2798 to verify the head is in the RTL chain. */
2799 for (; x != NULL_RTX; x = PREV_INSN (x))
2800 {
2801 /* While walking over the insn chain, verify insns appear
2802 in only one basic block. */
2803 if (bb_info[INSN_UID (x)] != NULL)
2804 {
2805 error ("insn %d is in multiple basic blocks (%d and %d)",
2806 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2807 err = 1;
2808 }
2809
2810 bb_info[INSN_UID (x)] = bb;
2811
2812 if (x == head)
2813 break;
2814 }
2815 if (!x)
2816 {
2817 error ("head insn %d for block %d not found in the insn stream",
2818 INSN_UID (head), bb->index);
2819 err = 1;
2820 }
2821
2822 last_head = PREV_INSN (x);
2823 }
2824
2825 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2826 {
2827 /* Check that the code before the first basic block has NULL
2828 bb field. */
2829 if (!BARRIER_P (x)
2830 && BLOCK_FOR_INSN (x) != NULL)
2831 {
2832 error ("insn %d outside of basic blocks has non-NULL bb field",
2833 INSN_UID (x));
2834 err = 1;
2835 }
2836 }
2837 free (bb_info);
2838
2839 return err;
2840 }
2841
2842 /* Verify that fallthru edges point to adjacent blocks in layout order and
2843 that barriers exist after non-fallthru blocks. */
2844
2845 static int
2846 rtl_verify_fallthru (void)
2847 {
2848 basic_block bb;
2849 int err = 0;
2850
2851 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2852 {
2853 edge e;
2854
2855 e = find_fallthru_edge (bb->succs);
2856 if (!e)
2857 {
2858 rtx_insn *insn;
2859
2860 /* Ensure existence of barrier in BB with no fallthru edges. */
2861 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2862 {
2863 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2864 {
2865 error ("missing barrier after block %i", bb->index);
2866 err = 1;
2867 break;
2868 }
2869 if (BARRIER_P (insn))
2870 break;
2871 }
2872 }
2873 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2874 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2875 {
2876 rtx_insn *insn;
2877
2878 if (e->src->next_bb != e->dest)
2879 {
2880 error
2881 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2882 e->src->index, e->dest->index);
2883 err = 1;
2884 }
2885 else
2886 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2887 insn = NEXT_INSN (insn))
2888 if (BARRIER_P (insn) || INSN_P (insn))
2889 {
2890 error ("verify_flow_info: Incorrect fallthru %i->%i",
2891 e->src->index, e->dest->index);
2892 fatal_insn ("wrong insn in the fallthru edge", insn);
2893 err = 1;
2894 }
2895 }
2896 }
2897
2898 return err;
2899 }
2900
2901 /* Verify that blocks are laid out in consecutive order. While walking the
2902 instructions, verify that all expected instructions are inside the basic
2903 blocks, and that all returns are followed by barriers. */
2904
2905 static int
2906 rtl_verify_bb_layout (void)
2907 {
2908 basic_block bb;
2909 int err = 0;
2910 rtx_insn *x;
2911 int num_bb_notes;
2912 rtx_insn * const rtx_first = get_insns ();
2913 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2914
2915 num_bb_notes = 0;
2916 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2917
2918 for (x = rtx_first; x; x = NEXT_INSN (x))
2919 {
2920 if (NOTE_INSN_BASIC_BLOCK_P (x))
2921 {
2922 bb = NOTE_BASIC_BLOCK (x);
2923
2924 num_bb_notes++;
2925 if (bb != last_bb_seen->next_bb)
2926 internal_error ("basic blocks not laid down consecutively");
2927
2928 curr_bb = last_bb_seen = bb;
2929 }
2930
2931 if (!curr_bb)
2932 {
2933 switch (GET_CODE (x))
2934 {
2935 case BARRIER:
2936 case NOTE:
2937 break;
2938
2939 case CODE_LABEL:
2940 /* An ADDR_VEC is placed outside any basic block. */
2941 if (NEXT_INSN (x)
2942 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2943 x = NEXT_INSN (x);
2944
2945 /* But in any case, non-deletable labels can appear anywhere. */
2946 break;
2947
2948 default:
2949 fatal_insn ("insn outside basic block", x);
2950 }
2951 }
2952
2953 if (JUMP_P (x)
2954 && returnjump_p (x) && ! condjump_p (x)
2955 && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
2956 fatal_insn ("return not followed by barrier", x);
2957
2958 if (curr_bb && x == BB_END (curr_bb))
2959 curr_bb = NULL;
2960 }
2961
2962 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2963 internal_error
2964 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2965 num_bb_notes, n_basic_blocks_for_fn (cfun));
2966
2967 return err;
2968 }
2969
2970 /* Verify the CFG and RTL consistency common for both underlying RTL and
2971 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2972
2973 Currently it does following checks:
2974 - all checks of rtl_verify_flow_info_1
2975 - test head/end pointers
2976 - check that blocks are laid out in consecutive order
2977 - check that all insns are in the basic blocks
2978 (except the switch handling code, barriers and notes)
2979 - check that all returns are followed by barriers
2980 - check that all fallthru edges point to the adjacent blocks
2981 - verify that there is a single hot/cold partition boundary after bbro */
2982
2983 static int
2984 rtl_verify_flow_info (void)
2985 {
2986 int err = 0;
2987
2988 err |= rtl_verify_flow_info_1 ();
2989
2990 err |= rtl_verify_bb_insn_chain ();
2991
2992 err |= rtl_verify_fallthru ();
2993
2994 err |= rtl_verify_bb_layout ();
2995
2996 err |= verify_hot_cold_block_grouping ();
2997
2998 return err;
2999 }
3000 \f
3001 /* Assume that the preceding pass has possibly eliminated jump instructions
3002 or converted the unconditional jumps. Eliminate the edges from CFG.
3003 Return true if any edges are eliminated. */
3004
3005 bool
3006 purge_dead_edges (basic_block bb)
3007 {
3008 edge e;
3009 rtx_insn *insn = BB_END (bb);
3010 rtx note;
3011 bool purged = false;
3012 bool found;
3013 edge_iterator ei;
3014
3015 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3016 do
3017 insn = PREV_INSN (insn);
3018 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3019
3020 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3021 if (NONJUMP_INSN_P (insn)
3022 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3023 {
3024 rtx eqnote;
3025
3026 if (! may_trap_p (PATTERN (insn))
3027 || ((eqnote = find_reg_equal_equiv_note (insn))
3028 && ! may_trap_p (XEXP (eqnote, 0))))
3029 remove_note (insn, note);
3030 }
3031
3032 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3033 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3034 {
3035 bool remove = false;
3036
3037 /* There are three types of edges we need to handle correctly here: EH
3038 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3039 latter can appear when nonlocal gotos are used. */
3040 if (e->flags & EDGE_ABNORMAL_CALL)
3041 {
3042 if (!CALL_P (insn))
3043 remove = true;
3044 else if (can_nonlocal_goto (insn))
3045 ;
3046 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3047 ;
3048 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3049 ;
3050 else
3051 remove = true;
3052 }
3053 else if (e->flags & EDGE_EH)
3054 remove = !can_throw_internal (insn);
3055
3056 if (remove)
3057 {
3058 remove_edge (e);
3059 df_set_bb_dirty (bb);
3060 purged = true;
3061 }
3062 else
3063 ei_next (&ei);
3064 }
3065
3066 if (JUMP_P (insn))
3067 {
3068 rtx note;
3069 edge b,f;
3070 edge_iterator ei;
3071
3072 /* We only care about conditional jumps and simplejumps. */
3073 if (!any_condjump_p (insn)
3074 && !returnjump_p (insn)
3075 && !simplejump_p (insn))
3076 return purged;
3077
3078 /* Branch probability/prediction notes are defined only for
3079 condjumps. We've possibly turned condjump into simplejump. */
3080 if (simplejump_p (insn))
3081 {
3082 note = find_reg_note (insn, REG_BR_PROB, NULL);
3083 if (note)
3084 remove_note (insn, note);
3085 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3086 remove_note (insn, note);
3087 }
3088
3089 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3090 {
3091 /* Avoid letting abnormal flags leak from computed jumps turned
3092 into simplejumps. */
3093
3094 e->flags &= ~EDGE_ABNORMAL;
3095
3096 /* See if this edge is one we should keep. */
3097 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3098 /* A conditional jump can fall through into the next
3099 block, so we should keep the edge. */
3100 {
3101 ei_next (&ei);
3102 continue;
3103 }
3104 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3105 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3106 /* If the destination block is the target of the jump,
3107 keep the edge. */
3108 {
3109 ei_next (&ei);
3110 continue;
3111 }
3112 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3113 && returnjump_p (insn))
3114 /* If the destination block is the exit block, and this
3115 instruction is a return, then keep the edge. */
3116 {
3117 ei_next (&ei);
3118 continue;
3119 }
3120 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3121 /* Keep the edges that correspond to exceptions thrown by
3122 this instruction and rematerialize the EDGE_ABNORMAL
3123 flag we just cleared above. */
3124 {
3125 e->flags |= EDGE_ABNORMAL;
3126 ei_next (&ei);
3127 continue;
3128 }
3129
3130 /* We do not need this edge. */
3131 df_set_bb_dirty (bb);
3132 purged = true;
3133 remove_edge (e);
3134 }
3135
3136 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3137 return purged;
3138
3139 if (dump_file)
3140 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3141
3142 if (!optimize)
3143 return purged;
3144
3145 /* Redistribute probabilities. */
3146 if (single_succ_p (bb))
3147 {
3148 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3149 single_succ_edge (bb)->count = bb->count;
3150 }
3151 else
3152 {
3153 note = find_reg_note (insn, REG_BR_PROB, NULL);
3154 if (!note)
3155 return purged;
3156
3157 b = BRANCH_EDGE (bb);
3158 f = FALLTHRU_EDGE (bb);
3159 b->probability = XINT (note, 0);
3160 f->probability = REG_BR_PROB_BASE - b->probability;
3161 /* Update these to use GCOV_COMPUTE_SCALE. */
3162 b->count = bb->count * b->probability / REG_BR_PROB_BASE;
3163 f->count = bb->count * f->probability / REG_BR_PROB_BASE;
3164 }
3165
3166 return purged;
3167 }
3168 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3169 {
3170 /* First, there should not be any EH or ABCALL edges resulting
3171 from non-local gotos and the like. If there were, we shouldn't
3172 have created the sibcall in the first place. Second, there
3173 should of course never have been a fallthru edge. */
3174 gcc_assert (single_succ_p (bb));
3175 gcc_assert (single_succ_edge (bb)->flags
3176 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3177
3178 return false;
3179 }
3180
3181 /* If we don't see a jump insn, we don't know exactly why the block would
3182 have been broken at this point. Look for a simple, non-fallthru edge,
3183 as these are only created by conditional branches. If we find such an
3184 edge we know that there used to be a jump here and can then safely
3185 remove all non-fallthru edges. */
3186 found = false;
3187 FOR_EACH_EDGE (e, ei, bb->succs)
3188 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3189 {
3190 found = true;
3191 break;
3192 }
3193
3194 if (!found)
3195 return purged;
3196
3197 /* Remove all but the fake and fallthru edges. The fake edge may be
3198 the only successor for this block in the case of noreturn
3199 calls. */
3200 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3201 {
3202 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3203 {
3204 df_set_bb_dirty (bb);
3205 remove_edge (e);
3206 purged = true;
3207 }
3208 else
3209 ei_next (&ei);
3210 }
3211
3212 gcc_assert (single_succ_p (bb));
3213
3214 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3215 single_succ_edge (bb)->count = bb->count;
3216
3217 if (dump_file)
3218 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3219 bb->index);
3220 return purged;
3221 }
3222
3223 /* Search all basic blocks for potentially dead edges and purge them. Return
3224 true if some edge has been eliminated. */
3225
3226 bool
3227 purge_all_dead_edges (void)
3228 {
3229 bool purged = false;
3230 basic_block bb;
3231
3232 FOR_EACH_BB_FN (bb, cfun)
3233 {
3234 bool purged_here = purge_dead_edges (bb);
3235
3236 purged |= purged_here;
3237 }
3238
3239 return purged;
3240 }
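/* Illustrative sketch, not compiled in: a pass that has simplified or
   deleted jump insns lets purge_dead_edges drop the successor edges that
   can no longer be taken. example_simplify_jumps_in_bb is a hypothetical
   helper standing in for the pass's own transformation; a pass that does
   not track which blocks it touched can call purge_all_dead_edges once
   instead. */
#if 0
static void
example_cleanup_after_jump_simplification (void)
{
  basic_block bb;
  bool purged = false;

  FOR_EACH_BB_FN (bb, cfun)
    if (example_simplify_jumps_in_bb (bb))
      purged |= purge_dead_edges (bb);

  if (purged)
    /* Removing edges can leave whole blocks unreachable. */
    delete_unreachable_blocks ();
}
#endif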
3241
3242 /* This is used by a few passes that emit some instructions after abnormal
3243 calls, moving the basic block's end, while they in fact do want to emit
3244 them on the fallthru edge. Look for abnormal call edges, search backward
3245 for the call in the block, and insert the instructions on the edge instead.
3246
3247 Similarly, handle instructions throwing exceptions internally.
3248
3249 Return true when instructions have been found and inserted on edges. */
3250
3251 bool
3252 fixup_abnormal_edges (void)
3253 {
3254 bool inserted = false;
3255 basic_block bb;
3256
3257 FOR_EACH_BB_FN (bb, cfun)
3258 {
3259 edge e;
3260 edge_iterator ei;
3261
3262 /* Look for cases we are interested in - calls or instructions causing
3263 exceptions. */
3264 FOR_EACH_EDGE (e, ei, bb->succs)
3265 if ((e->flags & EDGE_ABNORMAL_CALL)
3266 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3267 == (EDGE_ABNORMAL | EDGE_EH)))
3268 break;
3269
3270 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3271 {
3272 rtx_insn *insn;
3273
3274 /* Get past the new insns generated. Allow notes, as the insns
3275 may already be deleted. */
3276 insn = BB_END (bb);
3277 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3278 && !can_throw_internal (insn)
3279 && insn != BB_HEAD (bb))
3280 insn = PREV_INSN (insn);
3281
3282 if (CALL_P (insn) || can_throw_internal (insn))
3283 {
3284 rtx_insn *stop, *next;
3285
3286 e = find_fallthru_edge (bb->succs);
3287
3288 stop = NEXT_INSN (BB_END (bb));
3289 BB_END (bb) = insn;
3290
3291 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3292 {
3293 next = NEXT_INSN (insn);
3294 if (INSN_P (insn))
3295 {
3296 delete_insn (insn);
3297
3298 /* Sometimes there's still the return value USE.
3299 If it's placed after a trapping call (i.e. that
3300 call is the last insn anyway), we have no fallthru
3301 edge. Simply delete this use and don't try to insert
3302 on the non-existent edge. */
3303 if (GET_CODE (PATTERN (insn)) != USE)
3304 {
3305 /* We're not deleting it, we're moving it. */
3306 insn->set_undeleted ();
3307 SET_PREV_INSN (insn) = NULL_RTX;
3308 SET_NEXT_INSN (insn) = NULL_RTX;
3309
3310 insert_insn_on_edge (insn, e);
3311 inserted = true;
3312 }
3313 }
3314 else if (!BARRIER_P (insn))
3315 set_block_for_insn (insn, NULL);
3316 }
3317 }
3318
3319 /* It may be that we don't find any trapping insn. In this
3320 case we discovered quite late that the insn that had been
3321 marked as can_throw_internal in fact couldn't trap at all.
3322 So we should in fact delete the EH edges out of the block. */
3323 else
3324 purge_dead_edges (bb);
3325 }
3326 }
3327
3328 return inserted;
3329 }
3330 \f
3331 /* Cut the insns from FIRST to LAST out of the insns stream. */
3332
3333 rtx_insn *
3334 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3335 {
3336 rtx_insn *prevfirst = PREV_INSN (first);
3337 rtx_insn *nextlast = NEXT_INSN (last);
3338
3339 SET_PREV_INSN (first) = NULL;
3340 SET_NEXT_INSN (last) = NULL;
3341 if (prevfirst)
3342 SET_NEXT_INSN (prevfirst) = nextlast;
3343 if (nextlast)
3344 SET_PREV_INSN (nextlast) = prevfirst;
3345 else
3346 set_last_insn (prevfirst);
3347 if (!prevfirst)
3348 set_first_insn (nextlast);
3349 return first;
3350 }
3351 \f
3352 /* Skip over inter-block insns occurring after BB which are typically
3353 associated with BB (e.g., barriers). If there are any such insns,
3354 we return the last one. Otherwise, we return the end of BB. */
3355
3356 static rtx_insn *
3357 skip_insns_after_block (basic_block bb)
3358 {
3359 rtx_insn *insn, *last_insn, *next_head, *prev;
3360
3361 next_head = NULL;
3362 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3363 next_head = BB_HEAD (bb->next_bb);
3364
3365 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3366 {
3367 if (insn == next_head)
3368 break;
3369
3370 switch (GET_CODE (insn))
3371 {
3372 case BARRIER:
3373 last_insn = insn;
3374 continue;
3375
3376 case NOTE:
3377 switch (NOTE_KIND (insn))
3378 {
3379 case NOTE_INSN_BLOCK_END:
3380 gcc_unreachable ();
3381 continue;
3382 default:
3383 continue;
3384 break;
3385 }
3386 break;
3387
3388 case CODE_LABEL:
3389 if (NEXT_INSN (insn)
3390 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3391 {
3392 insn = NEXT_INSN (insn);
3393 last_insn = insn;
3394 continue;
3395 }
3396 break;
3397
3398 default:
3399 break;
3400 }
3401
3402 break;
3403 }
3404
3405 /* It is possible to hit a contradictory sequence. For instance:
3406
3407 jump_insn
3408 NOTE_INSN_BLOCK_BEG
3409 barrier
3410
3411 where the barrier belongs to the jump_insn, but the note does not. This can be
3412 created by removing the basic block originally following
3413 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3414
3415 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3416 {
3417 prev = PREV_INSN (insn);
3418 if (NOTE_P (insn))
3419 switch (NOTE_KIND (insn))
3420 {
3421 case NOTE_INSN_BLOCK_END:
3422 gcc_unreachable ();
3423 break;
3424 case NOTE_INSN_DELETED:
3425 case NOTE_INSN_DELETED_LABEL:
3426 case NOTE_INSN_DELETED_DEBUG_LABEL:
3427 continue;
3428 default:
3429 reorder_insns (insn, insn, last_insn);
3430 }
3431 }
3432
3433 return last_insn;
3434 }
3435
3436 /* Locate or create a label for a given basic block. */
3437
3438 static rtx_insn *
3439 label_for_bb (basic_block bb)
3440 {
3441 rtx_insn *label = BB_HEAD (bb);
3442
3443 if (!LABEL_P (label))
3444 {
3445 if (dump_file)
3446 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3447
3448 label = block_label (bb);
3449 }
3450
3451 return label;
3452 }
3453
3454 /* Locate the effective beginning and end of the insn chain for each
3455 block, as defined by skip_insns_after_block above. */
3456
3457 static void
3458 record_effective_endpoints (void)
3459 {
3460 rtx_insn *next_insn;
3461 basic_block bb;
3462 rtx_insn *insn;
3463
3464 for (insn = get_insns ();
3465 insn
3466 && NOTE_P (insn)
3467 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3468 insn = NEXT_INSN (insn))
3469 continue;
3470 /* No basic blocks at all? */
3471 gcc_assert (insn);
3472
3473 if (PREV_INSN (insn))
3474 cfg_layout_function_header =
3475 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3476 else
3477 cfg_layout_function_header = NULL;
3478
3479 next_insn = get_insns ();
3480 FOR_EACH_BB_FN (bb, cfun)
3481 {
3482 rtx_insn *end;
3483
3484 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3485 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3486 PREV_INSN (BB_HEAD (bb)));
3487 end = skip_insns_after_block (bb);
3488 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3489 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3490 next_insn = NEXT_INSN (BB_END (bb));
3491 }
3492
3493 cfg_layout_function_footer = next_insn;
3494 if (cfg_layout_function_footer)
3495 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3496 }
3497 \f
3498 namespace {
3499
3500 const pass_data pass_data_into_cfg_layout_mode =
3501 {
3502 RTL_PASS, /* type */
3503 "into_cfglayout", /* name */
3504 OPTGROUP_NONE, /* optinfo_flags */
3505 TV_CFG, /* tv_id */
3506 0, /* properties_required */
3507 PROP_cfglayout, /* properties_provided */
3508 0, /* properties_destroyed */
3509 0, /* todo_flags_start */
3510 0, /* todo_flags_finish */
3511 };
3512
3513 class pass_into_cfg_layout_mode : public rtl_opt_pass
3514 {
3515 public:
3516 pass_into_cfg_layout_mode (gcc::context *ctxt)
3517 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3518 {}
3519
3520 /* opt_pass methods: */
3521 virtual unsigned int execute (function *)
3522 {
3523 cfg_layout_initialize (0);
3524 return 0;
3525 }
3526
3527 }; // class pass_into_cfg_layout_mode
3528
3529 } // anon namespace
3530
3531 rtl_opt_pass *
3532 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3533 {
3534 return new pass_into_cfg_layout_mode (ctxt);
3535 }
3536
3537 namespace {
3538
3539 const pass_data pass_data_outof_cfg_layout_mode =
3540 {
3541 RTL_PASS, /* type */
3542 "outof_cfglayout", /* name */
3543 OPTGROUP_NONE, /* optinfo_flags */
3544 TV_CFG, /* tv_id */
3545 0, /* properties_required */
3546 0, /* properties_provided */
3547 PROP_cfglayout, /* properties_destroyed */
3548 0, /* todo_flags_start */
3549 0, /* todo_flags_finish */
3550 };
3551
3552 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3553 {
3554 public:
3555 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3556 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3557 {}
3558
3559 /* opt_pass methods: */
3560 virtual unsigned int execute (function *);
3561
3562 }; // class pass_outof_cfg_layout_mode
3563
3564 unsigned int
3565 pass_outof_cfg_layout_mode::execute (function *fun)
3566 {
3567 basic_block bb;
3568
3569 FOR_EACH_BB_FN (bb, fun)
3570 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3571 bb->aux = bb->next_bb;
3572
3573 cfg_layout_finalize ();
3574
3575 return 0;
3576 }
3577
3578 } // anon namespace
3579
3580 rtl_opt_pass *
3581 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3582 {
3583 return new pass_outof_cfg_layout_mode (ctxt);
3584 }
3585 \f
3586
3587 /* Link the basic blocks in the correct order, compacting the basic
3588 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3589 function also clears the basic block header and footer fields.
3590
3591 This function is usually called after a pass (e.g. tracer) finishes
3592 some transformations while in cfglayout mode. The required sequence
3593 of the basic blocks is in a linked list along the bb->aux field.
3594 This function re-links the basic block prev_bb and next_bb pointers
3595 accordingly, and it compacts and renumbers the blocks.
3596
3597 FIXME: This currently works only for RTL, but the only RTL-specific
3598 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3599 to GIMPLE a long time ago, but it doesn't relink the basic block
3600 chain. It could do that (to give better initial RTL) if this function
3601 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3602
3603 void
3604 relink_block_chain (bool stay_in_cfglayout_mode)
3605 {
3606 basic_block bb, prev_bb;
3607 int index;
3608
3609 /* Maybe dump the re-ordered sequence. */
3610 if (dump_file)
3611 {
3612 fprintf (dump_file, "Reordered sequence:\n");
3613 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3614 NUM_FIXED_BLOCKS;
3615 bb;
3616 bb = (basic_block) bb->aux, index++)
3617 {
3618 fprintf (dump_file, " %i ", index);
3619 if (get_bb_original (bb))
3620 fprintf (dump_file, "duplicate of %i ",
3621 get_bb_original (bb)->index);
3622 else if (forwarder_block_p (bb)
3623 && !LABEL_P (BB_HEAD (bb)))
3624 fprintf (dump_file, "compensation ");
3625 else
3626 fprintf (dump_file, "bb %i ", bb->index);
3627 fprintf (dump_file, " [%i]\n", bb->frequency);
3628 }
3629 }
3630
3631 /* Now reorder the blocks. */
3632 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3633 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3634 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3635 {
3636 bb->prev_bb = prev_bb;
3637 prev_bb->next_bb = bb;
3638 }
3639 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3640 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3641
3642 /* Then, clean up the aux fields. */
3643 FOR_ALL_BB_FN (bb, cfun)
3644 {
3645 bb->aux = NULL;
3646 if (!stay_in_cfglayout_mode)
3647 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3648 }
3649
3650 /* Maybe reset the original copy tables; they are not valid anymore
3651 when we renumber the basic blocks in compact_blocks. If we are
3652 going out of cfglayout mode, don't re-allocate the tables. */
3653 free_original_copy_tables ();
3654 if (stay_in_cfglayout_mode)
3655 initialize_original_copy_tables ();
3656
3657 /* Finally, put basic_block_info in the new order. */
3658 compact_blocks ();
3659 }
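/* Illustrative sketch, not compiled in: a cfglayout-mode pass reorders
   blocks by chaining them through bb->aux in the desired order and then
   calling relink_block_chain. Here the "desired" order is simply the
   current one, so the call only renumbers and compacts the blocks. */
#if 0
static void
example_relink_in_current_order (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
      bb->aux = bb->next_bb;

  relink_block_chain (/*stay_in_cfglayout_mode=*/true);
}
#endif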
3660 \f
3661
3662 /* Given a reorder chain, rearrange the code to match. */
3663
3664 static void
3665 fixup_reorder_chain (void)
3666 {
3667 basic_block bb;
3668 rtx_insn *insn = NULL;
3669
3670 if (cfg_layout_function_header)
3671 {
3672 set_first_insn (cfg_layout_function_header);
3673 insn = cfg_layout_function_header;
3674 while (NEXT_INSN (insn))
3675 insn = NEXT_INSN (insn);
3676 }
3677
3678 /* First do the bulk reordering -- rechain the blocks without regard to
3679 the needed changes to jumps and labels. */
3680
3681 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3682 bb->aux)
3683 {
3684 if (BB_HEADER (bb))
3685 {
3686 if (insn)
3687 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3688 else
3689 set_first_insn (BB_HEADER (bb));
3690 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3691 insn = BB_HEADER (bb);
3692 while (NEXT_INSN (insn))
3693 insn = NEXT_INSN (insn);
3694 }
3695 if (insn)
3696 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3697 else
3698 set_first_insn (BB_HEAD (bb));
3699 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3700 insn = BB_END (bb);
3701 if (BB_FOOTER (bb))
3702 {
3703 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3704 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3705 while (NEXT_INSN (insn))
3706 insn = NEXT_INSN (insn);
3707 }
3708 }
3709
3710 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3711 if (cfg_layout_function_footer)
3712 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3713
3714 while (NEXT_INSN (insn))
3715 insn = NEXT_INSN (insn);
3716
3717 set_last_insn (insn);
3718 if (flag_checking)
3719 verify_insn_chain ();
3720
3721 /* Now add jumps and labels as needed to match the blocks' new
3722 outgoing edges. */
3723
3724 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3725 bb->aux)
3726 {
3727 edge e_fall, e_taken, e;
3728 rtx_insn *bb_end_insn;
3729 rtx ret_label = NULL_RTX;
3730 basic_block nb;
3731 edge_iterator ei;
3732
3733 if (EDGE_COUNT (bb->succs) == 0)
3734 continue;
3735
3736 /* Find the old fallthru edge, and another non-EH edge for
3737 a taken jump. */
3738 e_taken = e_fall = NULL;
3739
3740 FOR_EACH_EDGE (e, ei, bb->succs)
3741 if (e->flags & EDGE_FALLTHRU)
3742 e_fall = e;
3743 else if (! (e->flags & EDGE_EH))
3744 e_taken = e;
3745
3746 bb_end_insn = BB_END (bb);
3747 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3748 {
3749 ret_label = JUMP_LABEL (bb_end_jump);
3750 if (any_condjump_p (bb_end_jump))
3751 {
3752 /* This might happen if the conditional jump has side
3753 effects and could therefore not be optimized away.
3754 Make the basic block end with a barrier in order
3755 to prevent rtl_verify_flow_info from complaining. */
3756 if (!e_fall)
3757 {
3758 gcc_assert (!onlyjump_p (bb_end_jump)
3759 || returnjump_p (bb_end_jump)
3760 || (e_taken->flags & EDGE_CROSSING));
3761 emit_barrier_after (bb_end_jump);
3762 continue;
3763 }
3764
3765 /* If the old fallthru is still next, nothing to do. */
3766 if (bb->aux == e_fall->dest
3767 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3768 continue;
3769
3770 /* The degenerate case of a conditional jump jumping to the next
3771 instruction can happen for jumps with side effects. We need
3772 to construct a forwarder block and this will be done just
3773 fine by force_nonfallthru below. */
3774 if (!e_taken)
3775 ;
3776
3777 /* There is another special case: if *neither* block is next,
3778 such as happens at the very end of a function, then we'll
3779 need to add a new unconditional jump. Choose the taken
3780 edge based on known or assumed probability. */
3781 else if (bb->aux != e_taken->dest)
3782 {
3783 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3784
3785 if (note
3786 && XINT (note, 0) < REG_BR_PROB_BASE / 2
3787 && invert_jump (bb_end_jump,
3788 (e_fall->dest
3789 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3790 ? NULL_RTX
3791 : label_for_bb (e_fall->dest)), 0))
3792 {
3793 e_fall->flags &= ~EDGE_FALLTHRU;
3794 gcc_checking_assert (could_fall_through
3795 (e_taken->src, e_taken->dest));
3796 e_taken->flags |= EDGE_FALLTHRU;
3797 update_br_prob_note (bb);
3798 e = e_fall, e_fall = e_taken, e_taken = e;
3799 }
3800 }
3801
3802 /* If the "jumping" edge is a crossing edge, and the fall
3803 through edge is non-crossing, leave things as they are. */
3804 else if ((e_taken->flags & EDGE_CROSSING)
3805 && !(e_fall->flags & EDGE_CROSSING))
3806 continue;
3807
3808 /* Otherwise we can try to invert the jump. This will
3809 basically never fail; however, keep up the pretense. */
3810 else if (invert_jump (bb_end_jump,
3811 (e_fall->dest
3812 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3813 ? NULL_RTX
3814 : label_for_bb (e_fall->dest)), 0))
3815 {
3816 e_fall->flags &= ~EDGE_FALLTHRU;
3817 gcc_checking_assert (could_fall_through
3818 (e_taken->src, e_taken->dest));
3819 e_taken->flags |= EDGE_FALLTHRU;
3820 update_br_prob_note (bb);
3821 if (LABEL_NUSES (ret_label) == 0
3822 && single_pred_p (e_taken->dest))
3823 delete_insn (as_a<rtx_insn *> (ret_label));
3824 continue;
3825 }
3826 }
3827 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3828 {
3829 /* If the old fallthru is still next or if
3830 the asm goto doesn't have a fallthru (e.g. when followed by
3831 __builtin_unreachable ()), nothing to do. */
3832 if (! e_fall
3833 || bb->aux == e_fall->dest
3834 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3835 continue;
3836
3837 /* Otherwise we'll have to use the fallthru fixup below. */
3838 }
3839 else
3840 {
3841 /* Otherwise we have some return, switch or computed
3842 jump. In the 99% case, there should not have been a
3843 fallthru edge. */
3844 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3845 continue;
3846 }
3847 }
3848 else
3849 {
3850 /* No fallthru implies a noreturn function with EH edges, or
3851 something similarly bizarre. In any case, we don't need to
3852 do anything. */
3853 if (! e_fall)
3854 continue;
3855
3856 /* If the fallthru block is still next, nothing to do. */
3857 if (bb->aux == e_fall->dest)
3858 continue;
3859
3860 /* A fallthru to exit block. */
3861 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3862 continue;
3863 }
3864
3865 /* We got here if we need to add a new jump insn.
3866 Note force_nonfallthru can delete E_FALL and thus we have to
3867 save E_FALL->src prior to the call to force_nonfallthru. */
3868 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3869 if (nb)
3870 {
3871 nb->aux = bb->aux;
3872 bb->aux = nb;
3873 /* Don't process this new block. */
3874 bb = nb;
3875 }
3876 }
3877
3878 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3879
3880 /* Annoying special case - jump around dead jumptables left in the code. */
3881 FOR_EACH_BB_FN (bb, cfun)
3882 {
3883 edge e = find_fallthru_edge (bb->succs);
3884
3885 if (e && !can_fallthru (e->src, e->dest))
3886 force_nonfallthru (e);
3887 }
3888
3889 /* Ensure that each edge's goto_locus is represented by some instruction
3890 with that locus in the RTL. */
3891 if (!optimize)
3892 FOR_EACH_BB_FN (bb, cfun)
3893 {
3894 edge e;
3895 edge_iterator ei;
3896
3897 FOR_EACH_EDGE (e, ei, bb->succs)
3898 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3899 && !(e->flags & EDGE_ABNORMAL))
3900 {
3901 edge e2;
3902 edge_iterator ei2;
3903 basic_block dest, nb;
3904 rtx_insn *end;
3905
3906 insn = BB_END (e->src);
3907 end = PREV_INSN (BB_HEAD (e->src));
3908 while (insn != end
3909 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3910 insn = PREV_INSN (insn);
3911 if (insn != end
3912 && INSN_LOCATION (insn) == e->goto_locus)
3913 continue;
3914 if (simplejump_p (BB_END (e->src))
3915 && !INSN_HAS_LOCATION (BB_END (e->src)))
3916 {
3917 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3918 continue;
3919 }
3920 dest = e->dest;
3921 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3922 {
3923 /* Non-fallthru edges to the exit block cannot be split. */
3924 if (!(e->flags & EDGE_FALLTHRU))
3925 continue;
3926 }
3927 else
3928 {
3929 insn = BB_HEAD (dest);
3930 end = NEXT_INSN (BB_END (dest));
3931 while (insn != end && !NONDEBUG_INSN_P (insn))
3932 insn = NEXT_INSN (insn);
3933 if (insn != end && INSN_HAS_LOCATION (insn)
3934 && INSN_LOCATION (insn) == e->goto_locus)
3935 continue;
3936 }
3937 nb = split_edge (e);
3938 if (!INSN_P (BB_END (nb)))
3939 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3940 nb);
3941 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3942
3943 /* If there are other incoming edges to the destination block
3944 with the same goto locus, redirect them to the new block as
3945 well, this can prevent other such blocks from being created
3946 in subsequent iterations of the loop. */
3947 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3948 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3949 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3950 && e->goto_locus == e2->goto_locus)
3951 redirect_edge_and_branch (e2, nb);
3952 else
3953 ei_next (&ei2);
3954 }
3955 }
3956 }
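/* Illustrative example (hypothetical blocks, not taken from a real
   function): if block A ends in "if (cc) goto C" with a fallthru to B,
   but the new layout places C directly after A, the loop above inverts
   the jump to "if (!cc) goto B" so that C is reached by falling through.
   When the fallthru destination no longer follows its source at all,
   force_nonfallthru_and_redirect inserts the required unconditional jump,
   possibly in a new forwarder block.  */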
3957 \f
3958 /* Perform sanity checks on the insn chain.
3959 1. Check that next/prev pointers are consistent in both the forward and
3960 reverse direction.
3961 2. Count insns in chain, going both directions, and check if equal.
3962 3. Check that get_last_insn () returns the actual end of chain. */
3963
3964 DEBUG_FUNCTION void
3965 verify_insn_chain (void)
3966 {
3967 rtx_insn *x, *prevx, *nextx;
3968 int insn_cnt1, insn_cnt2;
3969
3970 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3971 x != 0;
3972 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3973 gcc_assert (PREV_INSN (x) == prevx);
3974
3975 gcc_assert (prevx == get_last_insn ());
3976
3977 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3978 x != 0;
3979 nextx = x, insn_cnt2++, x = PREV_INSN (x))
3980 gcc_assert (NEXT_INSN (x) == nextx);
3981
3982 gcc_assert (insn_cnt1 == insn_cnt2);
3983 }
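/* Illustrative note: the checks above amount to the usual doubly-linked
   list invariants, i.e. for every insn X in the chain
   PREV_INSN (NEXT_INSN (X)) == X and NEXT_INSN (PREV_INSN (X)) == X, with
   get_last_insn () naming the final element.  Callers in this file guard
   the (potentially expensive) walk behind flag_checking:

       if (flag_checking)
         verify_insn_chain ();
*/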
3984 \f
3985 /* If we have assembler epilogues, the block falling through to exit must
3986 be the last one in the reordered chain when we reach final. Ensure
3987 that this condition is met. */
3988 static void
3989 fixup_fallthru_exit_predecessor (void)
3990 {
3991 edge e;
3992 basic_block bb = NULL;
3993
3994 /* This transformation is not valid before reload, because we might
3995 separate a call from the instruction that copies the return
3996 value. */
3997 gcc_assert (reload_completed);
3998
3999 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4000 if (e)
4001 bb = e->src;
4002
4003 if (bb && bb->aux)
4004 {
4005 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4006
4007 /* If the very first block is the one with the fall-through exit
4008 edge, we have to split that block. */
4009 if (c == bb)
4010 {
4011 bb = split_block_after_labels (bb)->dest;
4012 bb->aux = c->aux;
4013 c->aux = bb;
4014 BB_FOOTER (bb) = BB_FOOTER (c);
4015 BB_FOOTER (c) = NULL;
4016 }
4017
4018 while (c->aux != bb)
4019 c = (basic_block) c->aux;
4020
4021 c->aux = bb->aux;
4022 while (c->aux)
4023 c = (basic_block) c->aux;
4024
4025 c->aux = bb;
4026 bb->aux = NULL;
4027 }
4028 }
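/* Illustrative example (hypothetical chain): the ->aux pointers encode the
   desired block order, so for a chain A -> BB -> C -> D where BB is the
   fallthru predecessor of the exit block, the function above unlinks BB
   and re-links it at the end, giving A -> C -> D -> BB.  */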
4029
4030 /* If the exit block has more than one fallthru predecessor, force it to
4031 have only one. */
4032
4033 static void
4034 force_one_exit_fallthru (void)
4035 {
4036 edge e, predecessor = NULL;
4037 bool more = false;
4038 edge_iterator ei;
4039 basic_block forwarder, bb;
4040
4041 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4042 if (e->flags & EDGE_FALLTHRU)
4043 {
4044 if (predecessor == NULL)
4045 predecessor = e;
4046 else
4047 {
4048 more = true;
4049 break;
4050 }
4051 }
4052
4053 if (!more)
4054 return;
4055
4056 /* Exit has several fallthru predecessors. Create a forwarder block for
4057 them. */
4058 forwarder = split_edge (predecessor);
4059 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4060 (e = ei_safe_edge (ei)); )
4061 {
4062 if (e->src == forwarder
4063 || !(e->flags & EDGE_FALLTHRU))
4064 ei_next (&ei);
4065 else
4066 redirect_edge_and_branch_force (e, forwarder);
4067 }
4068
4069 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4070 exit block. */
4071 FOR_EACH_BB_FN (bb, cfun)
4072 {
4073 if (bb->aux == NULL && bb != forwarder)
4074 {
4075 bb->aux = forwarder;
4076 break;
4077 }
4078 }
4079 }
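/* Illustrative example (hypothetical blocks): if B1 and B2 both fall
   through to EXIT, the function above splits one of those edges to create
   a forwarder block F and redirects the remaining fallthru edges to F:

       B1 -fallthru-> EXIT              B1 -fallthru-> F
       B2 -fallthru-> EXIT    becomes   B2 ----jump--> F -fallthru-> EXIT

   F is then chained via ->aux so that it immediately precedes EXIT.  */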
4080 \f
4081 /* Return true if it is possible to duplicate the basic block BB. */
4082
4083 static bool
4084 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4085 {
4086 /* Do not attempt to duplicate tablejumps, as we need to unshare
4087 the dispatch table. This is difficult to do, as the instructions
4088 computing the jump destination may be hoisted outside the basic block. */
4089 if (tablejump_p (BB_END (bb), NULL, NULL))
4090 return false;
4091
4092 /* Do not duplicate blocks containing insns that can't be copied. */
4093 if (targetm.cannot_copy_insn_p)
4094 {
4095 rtx_insn *insn = BB_HEAD (bb);
4096 while (1)
4097 {
4098 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4099 return false;
4100 if (insn == BB_END (bb))
4101 break;
4102 insn = NEXT_INSN (insn);
4103 }
4104 }
4105
4106 return true;
4107 }
4108
4109 rtx_insn *
4110 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4111 {
4112 rtx_insn *insn, *next, *copy;
4113 rtx_note *last;
4114
4115 /* Avoid updating the boundaries of the previous basic block. The
4116 note will be removed from the insn stream during fixup. */
4117 last = emit_note (NOTE_INSN_DELETED);
4118
4119 /* Create copy at the end of INSN chain. The chain will
4120 be reordered later. */
4121 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4122 {
4123 switch (GET_CODE (insn))
4124 {
4125 case DEBUG_INSN:
4126 /* Don't duplicate label debug insns. */
4127 if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4128 break;
4129 /* FALLTHRU */
4130 case INSN:
4131 case CALL_INSN:
4132 case JUMP_INSN:
4133 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4134 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4135 && ANY_RETURN_P (JUMP_LABEL (insn)))
4136 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4137 maybe_copy_prologue_epilogue_insn (insn, copy);
4138 break;
4139
4140 case JUMP_TABLE_DATA:
4141 /* Avoid copying dispatch tables. We never duplicate
4142 tablejumps, so this can only happen if the table got
4143 moved far away from the original jump.
4144 Also avoid copying the following barrier, if any
4145 (and any debug insns in between). */
4146 for (next = NEXT_INSN (insn);
4147 next != NEXT_INSN (to);
4148 next = NEXT_INSN (next))
4149 if (!DEBUG_INSN_P (next))
4150 break;
4151 if (next != NEXT_INSN (to) && BARRIER_P (next))
4152 insn = next;
4153 break;
4154
4155 case CODE_LABEL:
4156 break;
4157
4158 case BARRIER:
4159 emit_barrier ();
4160 break;
4161
4162 case NOTE:
4163 switch (NOTE_KIND (insn))
4164 {
4165 /* If the prologue is empty and the function contains a label
4166 in its first BB, we may want to copy the block. */
4167 case NOTE_INSN_PROLOGUE_END:
4168
4169 case NOTE_INSN_DELETED:
4170 case NOTE_INSN_DELETED_LABEL:
4171 case NOTE_INSN_DELETED_DEBUG_LABEL:
4172 /* These are fine to strip. */
4173 case NOTE_INSN_FUNCTION_BEG:
4174 /* There is always just a single entry to the function. */
4175 case NOTE_INSN_BASIC_BLOCK:
4176 /* We should only switch text sections once. */
4177 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4178 break;
4179
4180 case NOTE_INSN_EPILOGUE_BEG:
4181 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4182 emit_note_copy (as_a <rtx_note *> (insn));
4183 break;
4184
4185 default:
4186 /* All other notes should have already been eliminated. */
4187 gcc_unreachable ();
4188 }
4189 break;
4190 default:
4191 gcc_unreachable ();
4192 }
4193 }
4194 insn = NEXT_INSN (last);
4195 delete_insn (last);
4196 return insn;
4197 }
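/* Illustrative usage sketch (mirroring the caller just below, not a new
   API): the copies are emitted at the end of the current insn chain, so a
   caller typically turns them into a fresh block or unlinks them again:

       insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
       new_bb = create_basic_block (insn, insn ? get_last_insn () : NULL,
                                    EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
*/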
4198
4199 /* Create a duplicate of the basic block BB. */
4200
4201 static basic_block
4202 cfg_layout_duplicate_bb (basic_block bb)
4203 {
4204 rtx_insn *insn;
4205 basic_block new_bb;
4206
4207 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4208 new_bb = create_basic_block (insn,
4209 insn ? get_last_insn () : NULL,
4210 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4211
4212 BB_COPY_PARTITION (new_bb, bb);
4213 if (BB_HEADER (bb))
4214 {
4215 insn = BB_HEADER (bb);
4216 while (NEXT_INSN (insn))
4217 insn = NEXT_INSN (insn);
4218 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4219 if (insn)
4220 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4221 }
4222
4223 if (BB_FOOTER (bb))
4224 {
4225 insn = BB_FOOTER (bb);
4226 while (NEXT_INSN (insn))
4227 insn = NEXT_INSN (insn);
4228 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4229 if (insn)
4230 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4231 }
4232
4233 return new_bb;
4234 }
4235
4236 \f
4237 /* Main entry point to this module - initialize the data structures for
4238 CFG layout changes. It keeps LOOPS up to date if not null.
4239
4240 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4241
4242 void
4243 cfg_layout_initialize (unsigned int flags)
4244 {
4245 rtx_insn_list *x;
4246 basic_block bb;
4247
4248 /* Once bb partitioning is complete, cfg layout mode should not be
4249 re-entered. Entering cfg layout mode may require fixups. For
4250 example, edge forwarding performed while optimizing the cfg
4251 layout might require moving a block from the hot to the cold
4252 section, which would create an illegal partitioning unless some
4253 manual fixup were performed. */
4254 gcc_assert (!(crtl->bb_reorder_complete
4255 && flag_reorder_blocks_and_partition));
4256
4257 initialize_original_copy_tables ();
4258
4259 cfg_layout_rtl_register_cfg_hooks ();
4260
4261 record_effective_endpoints ();
4262
4263 /* Make sure that the targets of non local gotos are marked. */
4264 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4265 {
4266 bb = BLOCK_FOR_INSN (x->insn ());
4267 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4268 }
4269
4270 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4271 }
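/* Illustrative usage sketch (a minimal pattern, with hypothetical pass
   code elided): a pass that wants to rearrange blocks typically brackets
   its work as

       cfg_layout_initialize (0);
       ...  rewrite the bb->aux chain to describe the new block order  ...
       cfg_layout_finalize ();

   cfg_layout_finalize () below then re-linearizes the insn stream.  */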
4272
4273 /* Splits superblocks. */
4274 void
4275 break_superblocks (void)
4276 {
4277 bool need = false;
4278 basic_block bb;
4279
4280 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4281 bitmap_clear (superblocks);
4282
4283 FOR_EACH_BB_FN (bb, cfun)
4284 if (bb->flags & BB_SUPERBLOCK)
4285 {
4286 bb->flags &= ~BB_SUPERBLOCK;
4287 bitmap_set_bit (superblocks, bb->index);
4288 need = true;
4289 }
4290
4291 if (need)
4292 {
4293 rebuild_jump_labels (get_insns ());
4294 find_many_sub_basic_blocks (superblocks);
4295 }
4296 }
4297
4298 /* Finalize the changes: reorder the insn list according to the sequence
4299 specified by the aux pointers, insert compensation code, rebuild the scope forest. */
4300
4301 void
4302 cfg_layout_finalize (void)
4303 {
4304 checking_verify_flow_info ();
4305 free_dominance_info (CDI_DOMINATORS);
4306 force_one_exit_fallthru ();
4307 rtl_register_cfg_hooks ();
4308 if (reload_completed && !targetm.have_epilogue ())
4309 fixup_fallthru_exit_predecessor ();
4310 fixup_reorder_chain ();
4311
4312 rebuild_jump_labels (get_insns ());
4313 delete_dead_jumptables ();
4314
4315 if (flag_checking)
4316 verify_insn_chain ();
4317 checking_verify_flow_info ();
4318 }
4319
4320
4321 /* Same as split_block but update cfg_layout structures. */
4322
4323 static basic_block
4324 cfg_layout_split_block (basic_block bb, void *insnp)
4325 {
4326 rtx insn = (rtx) insnp;
4327 basic_block new_bb = rtl_split_block (bb, insn);
4328
4329 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4330 BB_FOOTER (bb) = NULL;
4331
4332 return new_bb;
4333 }
4334
4335 /* Redirect edge E to DEST. */
4336 static edge
4337 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4338 {
4339 basic_block src = e->src;
4340 edge ret;
4341
4342 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4343 return NULL;
4344
4345 if (e->dest == dest)
4346 return e;
4347
4348 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4349 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4350 {
4351 df_set_bb_dirty (src);
4352 return ret;
4353 }
4354
4355 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4356 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4357 {
4358 if (dump_file)
4359 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4360 e->src->index, dest->index);
4361
4362 df_set_bb_dirty (e->src);
4363 redirect_edge_succ (e, dest);
4364 return e;
4365 }
4366
4367 /* redirect_edge_and_branch may decide to turn the branch into a fallthru
4368 edge when the basic blocks appear to be in sequence. Avoid this
4369 transformation. */
4370
4371 if (e->flags & EDGE_FALLTHRU)
4372 {
4373 /* Redirect any branch edges unified with the fallthru one. */
4374 if (JUMP_P (BB_END (src))
4375 && label_is_jump_target_p (BB_HEAD (e->dest),
4376 BB_END (src)))
4377 {
4378 edge redirected;
4379
4380 if (dump_file)
4381 fprintf (dump_file, "Fallthru edge unified with branch "
4382 "%i->%i redirected to %i\n",
4383 e->src->index, e->dest->index, dest->index);
4384 e->flags &= ~EDGE_FALLTHRU;
4385 redirected = redirect_branch_edge (e, dest);
4386 gcc_assert (redirected);
4387 redirected->flags |= EDGE_FALLTHRU;
4388 df_set_bb_dirty (redirected->src);
4389 return redirected;
4390 }
4391 /* If we are redirecting the fallthru edge to the destination of the
4392 conditional jump's branch edge, the jump becomes redundant; remove it. */
4393 if (EDGE_COUNT (src->succs) == 2)
4394 {
4395 /* Find the edge that is different from E. */
4396 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4397
4398 if (s->dest == dest
4399 && any_condjump_p (BB_END (src))
4400 && onlyjump_p (BB_END (src)))
4401 delete_insn (BB_END (src));
4402 }
4403 if (dump_file)
4404 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4405 e->src->index, e->dest->index, dest->index);
4406 ret = redirect_edge_succ_nodup (e, dest);
4407 }
4408 else
4409 ret = redirect_branch_edge (e, dest);
4410
4411 /* We don't want simplejumps in the insn stream during cfglayout. */
4412 gcc_assert (!simplejump_p (BB_END (src)));
4413
4414 df_set_bb_dirty (src);
4415 return ret;
4416 }
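/* Illustrative example (hypothetical blocks): if SRC ends in
   "if (cc) goto L" and L is also the fallthru destination, the branch is
   unified with the fallthru edge; the code above redirects it as a branch
   edge and moves the EDGE_FALLTHRU flag to the resulting edge.  Conversely,
   redirecting a fallthru edge to the destination of the other (branch)
   edge of a simple conditional jump makes that jump redundant, so it is
   deleted.  */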
4417
4418 /* Simple wrapper, as we can always redirect fallthru edges. */
4419 static basic_block
4420 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4421 {
4422 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4423
4424 gcc_assert (redirected);
4425 return NULL;
4426 }
4427
4428 /* Same as delete_basic_block but update cfg_layout structures. */
4429
4430 static void
4431 cfg_layout_delete_block (basic_block bb)
4432 {
4433 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remains;
4434 rtx_insn **to;
4435
4436 if (BB_HEADER (bb))
4437 {
4438 next = BB_HEAD (bb);
4439 if (prev)
4440 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4441 else
4442 set_first_insn (BB_HEADER (bb));
4443 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4444 insn = BB_HEADER (bb);
4445 while (NEXT_INSN (insn))
4446 insn = NEXT_INSN (insn);
4447 SET_NEXT_INSN (insn) = next;
4448 SET_PREV_INSN (next) = insn;
4449 }
4450 next = NEXT_INSN (BB_END (bb));
4451 if (BB_FOOTER (bb))
4452 {
4453 insn = BB_FOOTER (bb);
4454 while (insn)
4455 {
4456 if (BARRIER_P (insn))
4457 {
4458 if (PREV_INSN (insn))
4459 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4460 else
4461 BB_FOOTER (bb) = NEXT_INSN (insn);
4462 if (NEXT_INSN (insn))
4463 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4464 }
4465 if (LABEL_P (insn))
4466 break;
4467 insn = NEXT_INSN (insn);
4468 }
4469 if (BB_FOOTER (bb))
4470 {
4471 insn = BB_END (bb);
4472 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4473 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4474 while (NEXT_INSN (insn))
4475 insn = NEXT_INSN (insn);
4476 SET_NEXT_INSN (insn) = next;
4477 if (next)
4478 SET_PREV_INSN (next) = insn;
4479 else
4480 set_last_insn (insn);
4481 }
4482 }
4483 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4484 to = &BB_HEADER (bb->next_bb);
4485 else
4486 to = &cfg_layout_function_footer;
4487
4488 rtl_delete_block (bb);
4489
4490 if (prev)
4491 prev = NEXT_INSN (prev);
4492 else
4493 prev = get_insns ();
4494 if (next)
4495 next = PREV_INSN (next);
4496 else
4497 next = get_last_insn ();
4498
4499 if (next && NEXT_INSN (next) != prev)
4500 {
4501 remains = unlink_insn_chain (prev, next);
4502 insn = remains;
4503 while (NEXT_INSN (insn))
4504 insn = NEXT_INSN (insn);
4505 SET_NEXT_INSN (insn) = *to;
4506 if (*to)
4507 SET_PREV_INSN (*to) = insn;
4508 *to = remains;
4509 }
4510 }
4511
4512 /* Return true when blocks A and B can be safely merged. */
4513
4514 static bool
4515 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4516 {
4517 /* If we are partitioning hot/cold basic blocks, we don't want to
4518 mess up unconditional or indirect jumps that cross between hot
4519 and cold sections.
4520
4521 Basic block partitioning may result in some jumps that appear to
4522 be optimizable (or blocks that appear to be mergeable), but which really
4523 must be left untouched (they are required to make it safely across
4524 partition boundaries). See the comments at the top of
4525 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4526
4527 if (BB_PARTITION (a) != BB_PARTITION (b))
4528 return false;
4529
4530 /* Protect the loop latches. */
4531 if (current_loops && b->loop_father->latch == b)
4532 return false;
4533
4534 /* If we would end up moving B's instructions, make sure it doesn't fall
4535 through into the exit block, since we cannot recover from a fallthrough
4536 edge into the exit block occurring in the middle of a function. */
4537 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4538 {
4539 edge e = find_fallthru_edge (b->succs);
4540 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4541 return false;
4542 }
4543
4544 /* There must be exactly one edge in between the blocks. */
4545 return (single_succ_p (a)
4546 && single_succ (a) == b
4547 && single_pred_p (b)
4548 && a != b
4549 /* Must be simple edge. */
4550 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4551 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4552 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4553 /* If the jump insn has side effects, we can't kill the edge.
4554 When not optimizing, try_redirect_by_replacing_jump will
4555 not allow us to redirect an edge by replacing a table jump. */
4556 && (!JUMP_P (BB_END (a))
4557 || ((!optimize || reload_completed)
4558 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4559 }
4560
4561 /* Merge block A and B. The blocks must be mergeable. */
4562
4563 static void
4564 cfg_layout_merge_blocks (basic_block a, basic_block b)
4565 {
4566 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4567 rtx_insn *insn;
4568
4569 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4570
4571 if (dump_file)
4572 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4573 a->index);
4574
4575 /* If there was a CODE_LABEL beginning B, delete it. */
4576 if (LABEL_P (BB_HEAD (b)))
4577 {
4578 delete_insn (BB_HEAD (b));
4579 }
4580
4581 /* We should have a fallthru edge in A, or we can do a dummy redirection to
4582 get it cleaned up. */
4583 if (JUMP_P (BB_END (a)))
4584 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4585 gcc_assert (!JUMP_P (BB_END (a)));
4586
4587 /* When not optimizing and the edge is the only place in RTL which holds
4588 some unique locus, emit a nop with that locus in between. */
4589 if (!optimize)
4590 emit_nop_for_unique_locus_between (a, b);
4591
4592 /* Move things from b->footer after a->footer. */
4593 if (BB_FOOTER (b))
4594 {
4595 if (!BB_FOOTER (a))
4596 BB_FOOTER (a) = BB_FOOTER (b);
4597 else
4598 {
4599 rtx_insn *last = BB_FOOTER (a);
4600
4601 while (NEXT_INSN (last))
4602 last = NEXT_INSN (last);
4603 SET_NEXT_INSN (last) = BB_FOOTER (b);
4604 SET_PREV_INSN (BB_FOOTER (b)) = last;
4605 }
4606 BB_FOOTER (b) = NULL;
4607 }
4608
4609 /* Move things from b->header before a->footer.
4610 Note that this may include dead tablejump data, but we don't clean
4611 those up until we go out of cfglayout mode. */
4612 if (BB_HEADER (b))
4613 {
4614 if (! BB_FOOTER (a))
4615 BB_FOOTER (a) = BB_HEADER (b);
4616 else
4617 {
4618 rtx_insn *last = BB_HEADER (b);
4619
4620 while (NEXT_INSN (last))
4621 last = NEXT_INSN (last);
4622 SET_NEXT_INSN (last) = BB_FOOTER (a);
4623 SET_PREV_INSN (BB_FOOTER (a)) = last;
4624 BB_FOOTER (a) = BB_HEADER (b);
4625 }
4626 BB_HEADER (b) = NULL;
4627 }
4628
4629 /* If the basic blocks are not adjacent, move them around. */
4630 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4631 {
4632 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4633
4634 emit_insn_after_noloc (insn, BB_END (a), a);
4635 }
4636 /* Otherwise just re-associate the instructions. */
4637 else
4638 {
4639 insn = BB_HEAD (b);
4640 BB_END (a) = BB_END (b);
4641 }
4642
4643 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4644 We need to call it explicitly. */
4645 update_bb_for_insn_chain (insn, BB_END (b), a);
4646
4647 /* Skip possible DELETED_LABEL insn. */
4648 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4649 insn = NEXT_INSN (insn);
4650 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4651 BB_HEAD (b) = BB_END (b) = NULL;
4652 delete_insn (insn);
4653
4654 df_bb_delete (b->index);
4655
4656 /* If B was a forwarder block, propagate the locus on the edge. */
4657 if (forwarder_p
4658 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4659 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4660
4661 if (dump_file)
4662 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4663 }
4664
4665 /* Split edge E. */
4666
4667 static basic_block
4668 cfg_layout_split_edge (edge e)
4669 {
4670 basic_block new_bb =
4671 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4672 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4673 NULL_RTX, e->src);
4674
4675 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4676 BB_COPY_PARTITION (new_bb, e->src);
4677 else
4678 BB_COPY_PARTITION (new_bb, e->dest);
4679 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4680 redirect_edge_and_branch_force (e, new_bb);
4681
4682 return new_bb;
4683 }
4684
4685 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4686
4687 static void
4688 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4689 {
4690 }
4691
4692 /* Return true if BB contains only labels or non-executable
4693 instructions. */
4694
4695 static bool
4696 rtl_block_empty_p (basic_block bb)
4697 {
4698 rtx_insn *insn;
4699
4700 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4701 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4702 return true;
4703
4704 FOR_BB_INSNS (bb, insn)
4705 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4706 return false;
4707
4708 return true;
4709 }
4710
4711 /* Split a basic block if it ends with a conditional branch and if
4712 the other part of the block is not empty. */
4713
4714 static basic_block
4715 rtl_split_block_before_cond_jump (basic_block bb)
4716 {
4717 rtx_insn *insn;
4718 rtx_insn *split_point = NULL;
4719 rtx_insn *last = NULL;
4720 bool found_code = false;
4721
4722 FOR_BB_INSNS (bb, insn)
4723 {
4724 if (any_condjump_p (insn))
4725 split_point = last;
4726 else if (NONDEBUG_INSN_P (insn))
4727 found_code = true;
4728 last = insn;
4729 }
4730
4731 /* Only split if we found both a conditional jump and some other non-debug code. */
4732 if (found_code && split_point)
4733 return split_block (bb, split_point)->dest;
4734 else
4735 return NULL;
4736 }
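/* Illustrative example (hypothetical block): for a block containing

       I1; I2; if (cc) goto L;

   the loop above records I2 as the split point, so the block is split
   into { I1; I2 } and { if (cc) goto L; }, and the block holding the
   conditional jump is returned.  */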
4737
4738 /* Return 1 if BB ends with a call, possibly followed by some
4739 instructions that must stay with the call, 0 otherwise. */
4740
4741 static bool
4742 rtl_block_ends_with_call_p (basic_block bb)
4743 {
4744 rtx_insn *insn = BB_END (bb);
4745
4746 while (!CALL_P (insn)
4747 && insn != BB_HEAD (bb)
4748 && (keep_with_call_p (insn)
4749 || NOTE_P (insn)
4750 || DEBUG_INSN_P (insn)))
4751 insn = PREV_INSN (insn);
4752 return (CALL_P (insn));
4753 }
4754
4755 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4756
4757 static bool
4758 rtl_block_ends_with_condjump_p (const_basic_block bb)
4759 {
4760 return any_condjump_p (BB_END (bb));
4761 }
4762
4763 /* Return true if we need to add a fake edge to the exit block.
4764 Helper function for rtl_flow_call_edges_add. */
4765
4766 static bool
4767 need_fake_edge_p (const rtx_insn *insn)
4768 {
4769 if (!INSN_P (insn))
4770 return false;
4771
4772 if ((CALL_P (insn)
4773 && !SIBLING_CALL_P (insn)
4774 && !find_reg_note (insn, REG_NORETURN, NULL)
4775 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4776 return true;
4777
4778 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4779 && MEM_VOLATILE_P (PATTERN (insn)))
4780 || (GET_CODE (PATTERN (insn)) == PARALLEL
4781 && asm_noperands (insn) != -1
4782 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4783 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4784 }
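/* Illustrative example (hypothetical insns): given

       call foo      <- not known to be noreturn, const or pure
       x = 1

   reaching the call does not guarantee that "x = 1" executes (foo might
   call exit () or longjmp ()), so need_fake_edge_p returns true and a fake
   edge to the exit block is added below.  */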
4785
4786 /* Add fake edges to the function exit for any non-constant, non-noreturn
4787 calls and volatile inline assembly in the bitmap of blocks specified by
4788 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks
4789 that were split.
4790
4791 The goal is to expose cases in which entering a basic block does not imply
4792 that all subsequent instructions must be executed. */
4793
4794 static int
4795 rtl_flow_call_edges_add (sbitmap blocks)
4796 {
4797 int i;
4798 int blocks_split = 0;
4799 int last_bb = last_basic_block_for_fn (cfun);
4800 bool check_last_block = false;
4801
4802 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4803 return 0;
4804
4805 if (! blocks)
4806 check_last_block = true;
4807 else
4808 check_last_block = bitmap_bit_p (blocks,
4809 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4810
4811 /* In the last basic block, before epilogue generation, there will be
4812 a fallthru edge to EXIT. Special care is required if the last insn
4813 of the last basic block is a call because make_edge folds duplicate
4814 edges, which would result in the fallthru edge also being marked
4815 fake, which would result in the fallthru edge being removed by
4816 remove_fake_edges, which would result in an invalid CFG.
4817
4818 Moreover, we can't elide the outgoing fake edge, since the block
4819 profiler needs to take this into account in order to solve the minimal
4820 spanning tree in the case that the call doesn't return.
4821
4822 Handle this by adding a dummy instruction in a new last basic block. */
4823 if (check_last_block)
4824 {
4825 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4826 rtx_insn *insn = BB_END (bb);
4827
4828 /* Back up past insns that must be kept in the same block as a call. */
4829 while (insn != BB_HEAD (bb)
4830 && keep_with_call_p (insn))
4831 insn = PREV_INSN (insn);
4832
4833 if (need_fake_edge_p (insn))
4834 {
4835 edge e;
4836
4837 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4838 if (e)
4839 {
4840 insert_insn_on_edge (gen_use (const0_rtx), e);
4841 commit_edge_insertions ();
4842 }
4843 }
4844 }
4845
4846 /* Now add fake edges to the function exit for any non-constant
4847 calls, since there is no way to determine whether they will
4848 return or not... */
4849
4850 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4851 {
4852 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4853 rtx_insn *insn;
4854 rtx_insn *prev_insn;
4855
4856 if (!bb)
4857 continue;
4858
4859 if (blocks && !bitmap_bit_p (blocks, i))
4860 continue;
4861
4862 for (insn = BB_END (bb); ; insn = prev_insn)
4863 {
4864 prev_insn = PREV_INSN (insn);
4865 if (need_fake_edge_p (insn))
4866 {
4867 edge e;
4868 rtx_insn *split_at_insn = insn;
4869
4870 /* Don't split the block between a call and an insn that should
4871 remain in the same block as the call. */
4872 if (CALL_P (insn))
4873 while (split_at_insn != BB_END (bb)
4874 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4875 split_at_insn = NEXT_INSN (split_at_insn);
4876
4877 /* The handling above of the final block before the epilogue
4878 should be enough to verify that there is no edge to the exit
4879 block in CFG already. Calling make_edge in such case would
4880 cause us to mark that edge as fake and remove it later. */
4881
4882 if (flag_checking && split_at_insn == BB_END (bb))
4883 {
4884 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4885 gcc_assert (e == NULL);
4886 }
4887
4888 /* Note that the following may create a new basic block
4889 and renumber the existing basic blocks. */
4890 if (split_at_insn != BB_END (bb))
4891 {
4892 e = split_block (bb, split_at_insn);
4893 if (e)
4894 blocks_split++;
4895 }
4896
4897 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4898 }
4899
4900 if (insn == BB_HEAD (bb))
4901 break;
4902 }
4903 }
4904
4905 if (blocks_split)
4906 verify_flow_info ();
4907
4908 return blocks_split;
4909 }
4910
4911 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4912 the conditional branch target, SECOND_HEAD the fall-thru target; there
4913 is no need to handle the fall-thru here, the loop versioning code
4914 handles it. SECOND_HEAD exists because it is needed for the condition
4915 in trees, and this hook should have the same signature as the tree one. */
4916 static void
4917 rtl_lv_add_condition_to_bb (basic_block first_head ,
4918 basic_block second_head ATTRIBUTE_UNUSED,
4919 basic_block cond_bb, void *comp_rtx)
4920 {
4921 rtx_code_label *label;
4922 rtx_insn *seq, *jump;
4923 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4924 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4925 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4926 machine_mode mode;
4927
4928
4929 label = block_label (first_head);
4930 mode = GET_MODE (op0);
4931 if (mode == VOIDmode)
4932 mode = GET_MODE (op1);
4933
4934 start_sequence ();
4935 op0 = force_operand (op0, NULL_RTX);
4936 op1 = force_operand (op1, NULL_RTX);
4937 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label, -1);
4938 jump = get_last_insn ();
4939 JUMP_LABEL (jump) = label;
4940 LABEL_NUSES (label)++;
4941 seq = get_insns ();
4942 end_sequence ();
4943
4944 /* Add the new cond, in the new head. */
4945 emit_insn_after (seq, BB_END (cond_bb));
4946 }
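/* Illustrative example (hypothetical COMP_RTX): for
   COMP_RTX = (gt (reg:SI x) (const_int 10)), the hook above emits at the
   end of COND_BB a sequence equivalent to "compare x with 10 and jump to
   the label of FIRST_HEAD if x > 10"; otherwise execution falls through
   towards SECOND_HEAD.  */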
4947
4948
4949 /* Given a block B with a conditional branch at its end, store the
4950 branch edge and the fall-thru edge in
4951 BRANCH_EDGE and FALLTHRU_EDGE respectively. */
4952 static void
4953 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4954 edge *fallthru_edge)
4955 {
4956 edge e = EDGE_SUCC (b, 0);
4957
4958 if (e->flags & EDGE_FALLTHRU)
4959 {
4960 *fallthru_edge = e;
4961 *branch_edge = EDGE_SUCC (b, 1);
4962 }
4963 else
4964 {
4965 *branch_edge = e;
4966 *fallthru_edge = EDGE_SUCC (b, 1);
4967 }
4968 }
4969
4970 void
4971 init_rtl_bb_info (basic_block bb)
4972 {
4973 gcc_assert (!bb->il.x.rtl);
4974 bb->il.x.head_ = NULL;
4975 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
4976 }
4977
4978 /* Returns true if it is possible to remove edge E by redirecting
4979 it to the destination of the other edge from E->src. */
4980
4981 static bool
4982 rtl_can_remove_branch_p (const_edge e)
4983 {
4984 const_basic_block src = e->src;
4985 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
4986 const rtx_insn *insn = BB_END (src);
4987 rtx set;
4988
4989 /* The conditions are taken from try_redirect_by_replacing_jump. */
4990 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
4991 return false;
4992
4993 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4994 return false;
4995
4996 if (BB_PARTITION (src) != BB_PARTITION (target))
4997 return false;
4998
4999 if (!onlyjump_p (insn)
5000 || tablejump_p (insn, NULL, NULL))
5001 return false;
5002
5003 set = single_set (insn);
5004 if (!set || side_effects_p (set))
5005 return false;
5006
5007 return true;
5008 }
5009
5010 static basic_block
5011 rtl_duplicate_bb (basic_block bb)
5012 {
5013 bb = cfg_layout_duplicate_bb (bb);
5014 bb->aux = NULL;
5015 return bb;
5016 }
5017
5018 /* Do book-keeping of basic block BB for the profile consistency checker.
5019 If AFTER_PASS is 0, do pre-pass accounting; if AFTER_PASS is 1,
5020 do post-pass accounting. Store the counts in RECORD. */
5021 static void
5022 rtl_account_profile_record (basic_block bb, int after_pass,
5023 struct profile_record *record)
5024 {
5025 rtx_insn *insn;
5026 FOR_BB_INSNS (bb, insn)
5027 if (INSN_P (insn))
5028 {
5029 record->size[after_pass]
5030 += insn_rtx_cost (PATTERN (insn), false);
5031 if (profile_status_for_fn (cfun) == PROFILE_READ)
5032 record->time[after_pass]
5033 += insn_rtx_cost (PATTERN (insn), true) * bb->count;
5034 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5035 record->time[after_pass]
5036 += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
5037 }
5038 }
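/* Illustrative note: the bookkeeping above amounts to

       size[after_pass] += insn_rtx_cost (PATTERN (insn), false)
       time[after_pass] += insn_rtx_cost (PATTERN (insn), true)
                           * (bb->count or bb->frequency)

   summed over all insns, so the consistency checker can compare the
   numbers before and after a pass.  */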
5039
5040 /* Implementation of CFG manipulation for linearized RTL. */
5041 struct cfg_hooks rtl_cfg_hooks = {
5042 "rtl",
5043 rtl_verify_flow_info,
5044 rtl_dump_bb,
5045 rtl_dump_bb_for_graph,
5046 rtl_create_basic_block,
5047 rtl_redirect_edge_and_branch,
5048 rtl_redirect_edge_and_branch_force,
5049 rtl_can_remove_branch_p,
5050 rtl_delete_block,
5051 rtl_split_block,
5052 rtl_move_block_after,
5053 rtl_can_merge_blocks, /* can_merge_blocks_p */
5054 rtl_merge_blocks,
5055 rtl_predict_edge,
5056 rtl_predicted_by_p,
5057 cfg_layout_can_duplicate_bb_p,
5058 rtl_duplicate_bb,
5059 rtl_split_edge,
5060 rtl_make_forwarder_block,
5061 rtl_tidy_fallthru_edge,
5062 rtl_force_nonfallthru,
5063 rtl_block_ends_with_call_p,
5064 rtl_block_ends_with_condjump_p,
5065 rtl_flow_call_edges_add,
5066 NULL, /* execute_on_growing_pred */
5067 NULL, /* execute_on_shrinking_pred */
5068 NULL, /* duplicate loop for trees */
5069 NULL, /* lv_add_condition_to_bb */
5070 NULL, /* lv_adjust_loop_header_phi*/
5071 NULL, /* extract_cond_bb_edges */
5072 NULL, /* flush_pending_stmts */
5073 rtl_block_empty_p, /* block_empty_p */
5074 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5075 rtl_account_profile_record,
5076 };
5077
5078 /* Implementation of CFG manipulation for cfg layout RTL, where
5079 basic blocks connected via fallthru edges do not have to be adjacent.
5080 This representation will hopefully become the default one in a future
5081 version of the compiler. */
5082
5083 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5084 "cfglayout mode",
5085 rtl_verify_flow_info_1,
5086 rtl_dump_bb,
5087 rtl_dump_bb_for_graph,
5088 cfg_layout_create_basic_block,
5089 cfg_layout_redirect_edge_and_branch,
5090 cfg_layout_redirect_edge_and_branch_force,
5091 rtl_can_remove_branch_p,
5092 cfg_layout_delete_block,
5093 cfg_layout_split_block,
5094 rtl_move_block_after,
5095 cfg_layout_can_merge_blocks_p,
5096 cfg_layout_merge_blocks,
5097 rtl_predict_edge,
5098 rtl_predicted_by_p,
5099 cfg_layout_can_duplicate_bb_p,
5100 cfg_layout_duplicate_bb,
5101 cfg_layout_split_edge,
5102 rtl_make_forwarder_block,
5103 NULL, /* tidy_fallthru_edge */
5104 rtl_force_nonfallthru,
5105 rtl_block_ends_with_call_p,
5106 rtl_block_ends_with_condjump_p,
5107 rtl_flow_call_edges_add,
5108 NULL, /* execute_on_growing_pred */
5109 NULL, /* execute_on_shrinking_pred */
5110 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5111 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5112 NULL, /* lv_adjust_loop_header_phi*/
5113 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5114 NULL, /* flush_pending_stmts */
5115 rtl_block_empty_p, /* block_empty_p */
5116 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5117 rtl_account_profile_record,
5118 };
5119
5120 #include "gt-cfgrtl.h"