gcc/cfgrtl.c
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and analyze it
21 that are aware of the RTL intermediate language.
22
23 Available functionality:
24 - Basic CFG/RTL manipulation API documented in cfghooks.h
25 - CFG-aware instruction chain manipulation
26 delete_insn, delete_insn_chain
27 - Edge splitting and committing to edges
28 insert_insn_on_edge, commit_edge_insertions
29 - CFG updating after insn simplification
30 purge_dead_edges, purge_all_dead_edges
31 - CFG fixing after coarse manipulation
32 fixup_abnormal_edges
33
34 Functions not intended for generic use:
35 - Infrastructure to determine quickly basic block for insn
36 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
37 - Edge redirection with updating and optimizing of insn chain
38 block_label, tidy_fallthru_edge, force_nonfallthru */
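/* Illustrative sketch, not part of the original file: a typical use of the
   edge-insertion API listed above.  Code is queued on an edge with
   insert_insn_on_edge and only materialized into the insn chain once
   commit_edge_insertions runs (splitting edges where needed).  E stands
   for an edge owned by the caller:

     start_sequence ();
     emit_insn (gen_nop ());            // whatever should run on the edge
     rtx_insn *seq = get_insns ();
     end_sequence ();

     insert_insn_on_edge (seq, e);      // queue the sequence on edge E
     // ... other CFG transformations ...
     commit_edge_insertions ();         // emit all queued code into the CFG
*/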
39 \f
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "memmodel.h"
51 #include "emit-rtl.h"
52 #include "cfgrtl.h"
53 #include "cfganal.h"
54 #include "cfgbuild.h"
55 #include "cfgcleanup.h"
56 #include "bb-reorder.h"
57 #include "rtl-error.h"
58 #include "insn-attr.h"
59 #include "dojump.h"
60 #include "expr.h"
61 #include "cfgloop.h"
62 #include "tree-pass.h"
63 #include "print-rtl.h"
64
65 /* Disable warnings about missing quoting in GCC diagnostics. */
66 #if __GNUC__ >= 10
67 # pragma GCC diagnostic push
68 # pragma GCC diagnostic ignored "-Wformat-diag"
69 #endif
70
71 /* Holds the interesting leading and trailing notes for the function.
72 Only applicable if the CFG is in cfglayout mode. */
73 static GTY(()) rtx_insn *cfg_layout_function_footer;
74 static GTY(()) rtx_insn *cfg_layout_function_header;
75
76 static rtx_insn *skip_insns_after_block (basic_block);
77 static void record_effective_endpoints (void);
78 static void fixup_reorder_chain (void);
79
80 void verify_insn_chain (void);
81 static void fixup_fallthru_exit_predecessor (void);
82 static int can_delete_note_p (const rtx_note *);
83 static int can_delete_label_p (const rtx_code_label *);
84 static basic_block rtl_split_edge (edge);
85 static bool rtl_move_block_after (basic_block, basic_block);
86 static int rtl_verify_flow_info (void);
87 static basic_block cfg_layout_split_block (basic_block, void *);
88 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
89 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
90 static void cfg_layout_delete_block (basic_block);
91 static void rtl_delete_block (basic_block);
92 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
93 static edge rtl_redirect_edge_and_branch (edge, basic_block);
94 static basic_block rtl_split_block (basic_block, void *);
95 static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
96 static int rtl_verify_flow_info_1 (void);
97 static void rtl_make_forwarder_block (edge);
98 \f
99 /* Return true if NOTE is not one of the ones that must be kept paired,
100 so that we may simply delete it. */
101
102 static int
103 can_delete_note_p (const rtx_note *note)
104 {
105 switch (NOTE_KIND (note))
106 {
107 case NOTE_INSN_DELETED:
108 case NOTE_INSN_BASIC_BLOCK:
109 case NOTE_INSN_EPILOGUE_BEG:
110 return true;
111
112 default:
113 return false;
114 }
115 }
116
117 /* True if a given label can be deleted. */
118
119 static int
120 can_delete_label_p (const rtx_code_label *label)
121 {
122 return (!LABEL_PRESERVE_P (label)
123 /* User declared labels must be preserved. */
124 && LABEL_NAME (label) == 0
125 && !vec_safe_contains<rtx_insn *> (forced_labels,
126 const_cast<rtx_code_label *> (label)));
127 }
128
129 /* Delete INSN by patching it out. */
130
131 void
132 delete_insn (rtx_insn *insn)
133 {
134 rtx note;
135 bool really_delete = true;
136
137 if (LABEL_P (insn))
138 {
139 /* Some labels can't be directly removed from the INSN chain, as they
140 might be referenced via variables, constant pool etc.
141 Convert them to the special NOTE_INSN_DELETED_LABEL note. */
142 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
143 {
144 const char *name = LABEL_NAME (insn);
145 basic_block bb = BLOCK_FOR_INSN (insn);
146 rtx_insn *bb_note = NEXT_INSN (insn);
147
148 really_delete = false;
149 PUT_CODE (insn, NOTE);
150 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
151 NOTE_DELETED_LABEL_NAME (insn) = name;
152
153 /* If the note following the label starts a basic block, and the
154 label is a member of the same basic block, interchange the two. */
155 if (bb_note != NULL_RTX
156 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
157 && bb != NULL
158 && bb == BLOCK_FOR_INSN (bb_note))
159 {
160 reorder_insns_nobb (insn, insn, bb_note);
161 BB_HEAD (bb) = bb_note;
162 if (BB_END (bb) == bb_note)
163 BB_END (bb) = insn;
164 }
165 }
166
167 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
168 }
169
170 if (really_delete)
171 {
172 /* If this insn has already been deleted, something is very wrong. */
173 gcc_assert (!insn->deleted ());
174 if (INSN_P (insn))
175 df_insn_delete (insn);
176 remove_insn (insn);
177 insn->set_deleted ();
178 }
179
180 /* If deleting a jump, decrement the use count of the label. Deleting
181 the label itself should happen in the normal course of block merging. */
182 if (JUMP_P (insn))
183 {
184 if (JUMP_LABEL (insn)
185 && LABEL_P (JUMP_LABEL (insn)))
186 LABEL_NUSES (JUMP_LABEL (insn))--;
187
188 /* If there are more targets, remove them too. */
189 while ((note
190 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
191 && LABEL_P (XEXP (note, 0)))
192 {
193 LABEL_NUSES (XEXP (note, 0))--;
194 remove_note (insn, note);
195 }
196 }
197
198 /* Also if deleting any insn that references a label as an operand. */
199 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
200 && LABEL_P (XEXP (note, 0)))
201 {
202 LABEL_NUSES (XEXP (note, 0))--;
203 remove_note (insn, note);
204 }
205
206 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
207 {
208 rtvec vec = table->get_labels ();
209 int len = GET_NUM_ELEM (vec);
210 int i;
211
212 for (i = 0; i < len; i++)
213 {
214 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
215
216 /* When deleting code in bulk (e.g. removing many unreachable
217 blocks) we can delete a label that's a target of the vector
218 before deleting the vector itself. */
219 if (!NOTE_P (label))
220 LABEL_NUSES (label)--;
221 }
222 }
223 }
224
225 /* Like delete_insn but also purge dead edges from BB.
226 Return true if any edges are eliminated. */
227
228 bool
229 delete_insn_and_edges (rtx_insn *insn)
230 {
231 bool purge = false;
232
233 if (INSN_P (insn)
234 && BLOCK_FOR_INSN (insn)
235 && BB_END (BLOCK_FOR_INSN (insn)) == insn)
236 purge = true;
237 delete_insn (insn);
238 if (purge)
239 return purge_dead_edges (BLOCK_FOR_INSN (insn));
240 return false;
241 }
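/* Illustrative sketch, an assumption rather than part of the original file:
   when a transformation makes the jump ending BB redundant, deleting it
   with delete_insn_and_edges (rather than plain delete_insn) also purges
   the successor edges that the deletion made dead:

     rtx_insn *jump = BB_END (bb);
     if (JUMP_P (jump) && onlyjump_p (jump))
       delete_insn_and_edges (jump);    // also runs purge_dead_edges (bb)

   The purge only happens when the deleted insn was the last insn of its
   block, as implemented above.  */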
242
243 /* Unlink a chain of insns between START and FINISH, leaving notes
244 that must be paired. If CLEAR_BB is true, we set bb field for
245 insns that cannot be removed to NULL. */
246
247 void
248 delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
249 {
250 /* Unchain the insns one by one. It would be quicker to delete all of these
251 with a single unchaining, rather than one at a time, but we need to keep
252 the NOTE's. */
253 rtx_insn *current = finish;
254 while (1)
255 {
256 rtx_insn *prev = PREV_INSN (current);
257 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
258 ;
259 else
260 delete_insn (current);
261
262 if (clear_bb && !current->deleted ())
263 set_block_for_insn (current, NULL);
264
265 if (current == start)
266 break;
267 current = prev;
268 }
269 }
270 \f
271 /* Create a new basic block consisting of the instructions between HEAD and END
272 inclusive.  This function is designed to allow fast BB construction - it
273 reuses the note and basic block struct in BB_NOTE, if any, does not grow the
274 BASIC_BLOCK chain, and should be used directly only by CFG construction code.
275 END can be NULL to create a new empty basic block before HEAD.  Both END
276 and HEAD can be NULL to create a basic block at the end of the INSN chain.
277 AFTER is the basic block the new block should be placed after.  */
278
279 basic_block
280 create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
281 basic_block after)
282 {
283 basic_block bb;
284
285 if (bb_note
286 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
287 && bb->aux == NULL)
288 {
289 /* If we found an existing note, thread it back onto the chain. */
290
291 rtx_insn *after;
292
293 if (LABEL_P (head))
294 after = head;
295 else
296 {
297 after = PREV_INSN (head);
298 head = bb_note;
299 }
300
301 if (after != bb_note && NEXT_INSN (after) != bb_note)
302 reorder_insns_nobb (bb_note, bb_note, after);
303 }
304 else
305 {
306 /* Otherwise we must create a note and a basic block structure. */
307
308 bb = alloc_block ();
309
310 init_rtl_bb_info (bb);
311 if (!head && !end)
312 head = end = bb_note
313 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
314 else if (LABEL_P (head) && end)
315 {
316 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
317 if (head == end)
318 end = bb_note;
319 }
320 else
321 {
322 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
323 head = bb_note;
324 if (!end)
325 end = head;
326 }
327
328 NOTE_BASIC_BLOCK (bb_note) = bb;
329 }
330
331 /* Always include the bb note in the block. */
332 if (NEXT_INSN (end) == bb_note)
333 end = bb_note;
334
335 BB_HEAD (bb) = head;
336 BB_END (bb) = end;
337 bb->index = last_basic_block_for_fn (cfun)++;
338 bb->flags = BB_NEW | BB_RTL;
339 link_block (bb, after);
340 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
341 df_bb_refs_record (bb->index, false);
342 update_bb_for_insn (bb);
343 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
344
345 /* Tag the block so that we know it has been used when considering
346 other basic block notes. */
347 bb->aux = bb;
348
349 return bb;
350 }
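/* Illustrative sketch, not part of the original file, of the HEAD/END
   conventions described above, via the create_basic_block hook that ends
   up in rtl_create_basic_block below.  FIRST, LAST, HEAD and AFTER are
   placeholder insns/blocks owned by the caller:

     // Block covering the existing insns FIRST..LAST, placed after AFTER.
     basic_block b1 = create_basic_block (first, last, after);

     // New empty block in front of insn HEAD (END is NULL).
     basic_block b2 = create_basic_block (head, NULL, after);

     // New empty block at the end of the insn chain (both are NULL).
     basic_block b3 = create_basic_block (NULL, NULL, after);
*/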
351
352 /* Create a new basic block consisting of the instructions between HEAD and END
353 and place it in the BB chain after block AFTER.  END can be NULL to
354 create a new empty basic block before HEAD.  Both END and HEAD can be
355 NULL to create a basic block at the end of the INSN chain. */
356
357 static basic_block
358 rtl_create_basic_block (void *headp, void *endp, basic_block after)
359 {
360 rtx_insn *head = (rtx_insn *) headp;
361 rtx_insn *end = (rtx_insn *) endp;
362 basic_block bb;
363
364 /* Grow the basic block array if needed. */
365 if ((size_t) last_basic_block_for_fn (cfun)
366 >= basic_block_info_for_fn (cfun)->length ())
367 {
368 size_t new_size =
369 (last_basic_block_for_fn (cfun)
370 + (last_basic_block_for_fn (cfun) + 3) / 4);
371 vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
372 }
373
374 n_basic_blocks_for_fn (cfun)++;
375
376 bb = create_basic_block_structure (head, end, NULL, after);
377 bb->aux = NULL;
378 return bb;
379 }
380
381 static basic_block
382 cfg_layout_create_basic_block (void *head, void *end, basic_block after)
383 {
384 basic_block newbb = rtl_create_basic_block (head, end, after);
385
386 return newbb;
387 }
388 \f
389 /* Delete the insns in a (non-live) block.  We physically delete every
390 non-deleted-note insn, and update the flow graph appropriately. */
393
394 /* ??? Preserving all such notes strikes me as wrong. It would be nice
395 to post-process the stream to remove empty blocks, loops, ranges, etc. */
396
397 static void
398 rtl_delete_block (basic_block b)
399 {
400 rtx_insn *insn, *end;
401
402 /* If the head of this block is a CODE_LABEL, then it might be the
403 label for an exception handler which can't be reached. We need
404 to remove the label from the exception_handler_label list. */
405 insn = BB_HEAD (b);
406
407 end = get_last_bb_insn (b);
408
409 /* Selectively delete the entire chain. */
410 BB_HEAD (b) = NULL;
411 delete_insn_chain (insn, end, true);
412
413
414 if (dump_file)
415 fprintf (dump_file, "deleting block %d\n", b->index);
416 df_bb_delete (b->index);
417 }
418 \f
419 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
420
421 void
422 compute_bb_for_insn (void)
423 {
424 basic_block bb;
425
426 FOR_EACH_BB_FN (bb, cfun)
427 {
428 rtx_insn *end = BB_END (bb);
429 rtx_insn *insn;
430
431 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
432 {
433 BLOCK_FOR_INSN (insn) = bb;
434 if (insn == end)
435 break;
436 }
437 }
438 }
439
440 /* Release the basic_block_for_insn array. */
441
442 unsigned int
443 free_bb_for_insn (void)
444 {
445 rtx_insn *insn;
446 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
447 if (!BARRIER_P (insn))
448 BLOCK_FOR_INSN (insn) = NULL;
449 return 0;
450 }
451
452 namespace {
453
454 const pass_data pass_data_free_cfg =
455 {
456 RTL_PASS, /* type */
457 "*free_cfg", /* name */
458 OPTGROUP_NONE, /* optinfo_flags */
459 TV_NONE, /* tv_id */
460 0, /* properties_required */
461 0, /* properties_provided */
462 PROP_cfg, /* properties_destroyed */
463 0, /* todo_flags_start */
464 0, /* todo_flags_finish */
465 };
466
467 class pass_free_cfg : public rtl_opt_pass
468 {
469 public:
470 pass_free_cfg (gcc::context *ctxt)
471 : rtl_opt_pass (pass_data_free_cfg, ctxt)
472 {}
473
474 /* opt_pass methods: */
475 virtual unsigned int execute (function *);
476
477 }; // class pass_free_cfg
478
479 unsigned int
480 pass_free_cfg::execute (function *)
481 {
482 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
483 valid at that point so it would be too late to call df_analyze. */
484 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
485 {
486 df_note_add_problem ();
487 df_analyze ();
488 }
489
490 if (crtl->has_bb_partition)
491 insert_section_boundary_note ();
492
493 free_bb_for_insn ();
494 return 0;
495 }
496
497 } // anon namespace
498
499 rtl_opt_pass *
500 make_pass_free_cfg (gcc::context *ctxt)
501 {
502 return new pass_free_cfg (ctxt);
503 }
504
505 /* Return the RTX to emit after when we want to emit code at the entry of the function. */
506 rtx_insn *
507 entry_of_function (void)
508 {
509 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
510 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
511 }
512
513 /* Emit INSN at the entry point of the function, ensuring that it is only
514 executed once per function. */
515 void
516 emit_insn_at_entry (rtx insn)
517 {
518 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
519 edge e = ei_safe_edge (ei);
520 gcc_assert (e->flags & EDGE_FALLTHRU);
521
522 insert_insn_on_edge (insn, e);
523 commit_edge_insertions ();
524 }
525
526 /* Update BLOCK_FOR_INSN of insns between BEGIN and END
527 (or BARRIER if found) and notify df of the bb change.
528 The insn chain range is inclusive
529 (i.e. both BEGIN and END will be updated). */
530
531 static void
532 update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
533 {
534 rtx_insn *insn;
535
536 end = NEXT_INSN (end);
537 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
538 if (!BARRIER_P (insn))
539 df_insn_change_bb (insn, bb);
540 }
541
542 /* Update BLOCK_FOR_INSN of insns in BB to BB,
543 and notify df of the change. */
544
545 void
546 update_bb_for_insn (basic_block bb)
547 {
548 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
549 }
550
551 \f
552 /* Like active_insn_p, except keep the return value use or clobber around
553 even after reload. */
554
555 static bool
556 flow_active_insn_p (const rtx_insn *insn)
557 {
558 if (active_insn_p (insn))
559 return true;
560
561 /* A clobber of the function return value exists for buggy
562 programs that fail to return a value. Its effect is to
563 keep the return value from being live across the entire
564 function. If we allow it to be skipped, we introduce the
565 possibility for register lifetime confusion.
566 Similarly, keep a USE of the function return value, otherwise
567 the USE is dropped and we could fail to thread jump if USE
568 appears on some paths and not on others, see PR90257. */
569 if ((GET_CODE (PATTERN (insn)) == CLOBBER
570 || GET_CODE (PATTERN (insn)) == USE)
571 && REG_P (XEXP (PATTERN (insn), 0))
572 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
573 return true;
574
575 return false;
576 }
577
578 /* Return true if the block has no effect and only forwards control flow to
579 its single destination. */
580
581 bool
582 contains_no_active_insn_p (const_basic_block bb)
583 {
584 rtx_insn *insn;
585
586 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
587 || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
588 || !single_succ_p (bb)
589 || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
590 return false;
591
592 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
593 if (INSN_P (insn) && flow_active_insn_p (insn))
594 return false;
595
596 return (!INSN_P (insn)
597 || (JUMP_P (insn) && simplejump_p (insn))
598 || !flow_active_insn_p (insn));
599 }
600
601 /* Likewise, but protect loop latches, headers and preheaders. */
602 /* FIXME: Make this a cfg hook. */
603
604 bool
605 forwarder_block_p (const_basic_block bb)
606 {
607 if (!contains_no_active_insn_p (bb))
608 return false;
609
610 /* Protect loop latches, headers and preheaders. */
611 if (current_loops)
612 {
613 basic_block dest;
614 if (bb->loop_father->header == bb)
615 return false;
616 dest = EDGE_SUCC (bb, 0)->dest;
617 if (dest->loop_father->header == dest)
618 return false;
619 }
620
621 return true;
622 }
623
624 /* Return nonzero if we can reach target from src by falling through. */
625 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
626
627 bool
628 can_fallthru (basic_block src, basic_block target)
629 {
630 rtx_insn *insn = BB_END (src);
631 rtx_insn *insn2;
632 edge e;
633 edge_iterator ei;
634
635 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
636 return true;
637 if (src->next_bb != target)
638 return false;
639
640 /* ??? Later we may add code to move jump tables offline. */
641 if (tablejump_p (insn, NULL, NULL))
642 return false;
643
644 FOR_EACH_EDGE (e, ei, src->succs)
645 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
646 && e->flags & EDGE_FALLTHRU)
647 return false;
648
649 insn2 = BB_HEAD (target);
650 if (!active_insn_p (insn2))
651 insn2 = next_active_insn (insn2);
652
653 return next_active_insn (insn) == insn2;
654 }
655
656 /* Return nonzero if we could reach target from src by falling through,
657 if the target was made adjacent. If we already have a fall-through
658 edge to the exit block, we can't do that. */
659 static bool
660 could_fall_through (basic_block src, basic_block target)
661 {
662 edge e;
663 edge_iterator ei;
664
665 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
666 return true;
667 FOR_EACH_EDGE (e, ei, src->succs)
668 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
669 && e->flags & EDGE_FALLTHRU)
670 return false;
671 return true;
672 }
673 \f
674 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
675 rtx_note *
676 bb_note (basic_block bb)
677 {
678 rtx_insn *note;
679
680 note = BB_HEAD (bb);
681 if (LABEL_P (note))
682 note = NEXT_INSN (note);
683
684 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
685 return as_a <rtx_note *> (note);
686 }
687
688 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
689 note associated with the BLOCK. */
690
691 static rtx_insn *
692 first_insn_after_basic_block_note (basic_block block)
693 {
694 rtx_insn *insn;
695
696 /* Get the first instruction in the block. */
697 insn = BB_HEAD (block);
698
699 if (insn == NULL_RTX)
700 return NULL;
701 if (LABEL_P (insn))
702 insn = NEXT_INSN (insn);
703 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
704
705 return NEXT_INSN (insn);
706 }
707
708 /* Creates a new basic block just after basic block BB by splitting
709 everything after the specified instruction INSNP. */
710
711 static basic_block
712 rtl_split_block (basic_block bb, void *insnp)
713 {
714 basic_block new_bb;
715 rtx_insn *insn = (rtx_insn *) insnp;
716 edge e;
717 edge_iterator ei;
718
719 if (!insn)
720 {
721 insn = first_insn_after_basic_block_note (bb);
722
723 if (insn)
724 {
725 rtx_insn *next = insn;
726
727 insn = PREV_INSN (insn);
728
729 /* If the block contains only debug insns, insn would have
730 been NULL in a non-debug compilation, and then we'd end
731 up emitting a DELETED note. For -fcompare-debug
732 stability, emit the note too. */
733 if (insn != BB_END (bb)
734 && DEBUG_INSN_P (next)
735 && DEBUG_INSN_P (BB_END (bb)))
736 {
737 while (next != BB_END (bb) && DEBUG_INSN_P (next))
738 next = NEXT_INSN (next);
739
740 if (next == BB_END (bb))
741 emit_note_after (NOTE_INSN_DELETED, next);
742 }
743 }
744 else
745 insn = get_last_insn ();
746 }
747
748 /* We probably should check the type of the insn so that we do not create
749 an inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
750 bother. */
751 if (insn == BB_END (bb))
752 emit_note_after (NOTE_INSN_DELETED, insn);
753
754 /* Create the new basic block. */
755 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
756 BB_COPY_PARTITION (new_bb, bb);
757 BB_END (bb) = insn;
758
759 /* Redirect the outgoing edges. */
760 new_bb->succs = bb->succs;
761 bb->succs = NULL;
762 FOR_EACH_EDGE (e, ei, new_bb->succs)
763 e->src = new_bb;
764
765 /* The new block starts off being dirty. */
766 df_set_bb_dirty (bb);
767 return new_bb;
768 }
769
770 /* Return true if the single edge between blocks A and B is the only place
771 in RTL which holds some unique locus. */
772
773 static bool
774 unique_locus_on_edge_between_p (basic_block a, basic_block b)
775 {
776 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
777 rtx_insn *insn, *end;
778
779 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
780 return false;
781
782 /* First scan block A backward. */
783 insn = BB_END (a);
784 end = PREV_INSN (BB_HEAD (a));
785 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
786 insn = PREV_INSN (insn);
787
788 if (insn != end && INSN_LOCATION (insn) == goto_locus)
789 return false;
790
791 /* Then scan block B forward. */
792 insn = BB_HEAD (b);
793 if (insn)
794 {
795 end = NEXT_INSN (BB_END (b));
796 while (insn != end && !NONDEBUG_INSN_P (insn))
797 insn = NEXT_INSN (insn);
798
799 if (insn != end && INSN_HAS_LOCATION (insn)
800 && INSN_LOCATION (insn) == goto_locus)
801 return false;
802 }
803
804 return true;
805 }
806
807 /* If the single edge between blocks A and B is the only place in RTL which
808 holds some unique locus, emit a nop with that locus between the blocks. */
809
810 static void
811 emit_nop_for_unique_locus_between (basic_block a, basic_block b)
812 {
813 if (!unique_locus_on_edge_between_p (a, b))
814 return;
815
816 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
817 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
818 }
819
820 /* Blocks A and B are to be merged into a single block A. The insns
821 are already contiguous. */
822
823 static void
824 rtl_merge_blocks (basic_block a, basic_block b)
825 {
826 /* If B is a forwarder block whose outgoing edge has no location, we'll
827 propagate the locus of the edge between A and B onto it. */
828 const bool forward_edge_locus
829 = (b->flags & BB_FORWARDER_BLOCK) != 0
830 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
831 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
832 rtx_insn *del_first = NULL, *del_last = NULL;
833 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
834 int b_empty = 0;
835
836 if (dump_file)
837 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
838 a->index);
839
840 while (DEBUG_INSN_P (b_end))
841 b_end = PREV_INSN (b_debug_start = b_end);
842
843 /* If there was a CODE_LABEL beginning B, delete it. */
844 if (LABEL_P (b_head))
845 {
846 /* Detect basic blocks with nothing but a label. This can happen
847 in particular at the end of a function. */
848 if (b_head == b_end)
849 b_empty = 1;
850
851 del_first = del_last = b_head;
852 b_head = NEXT_INSN (b_head);
853 }
854
855 /* Delete the basic block note and handle blocks containing just that
856 note. */
857 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
858 {
859 if (b_head == b_end)
860 b_empty = 1;
861 if (! del_last)
862 del_first = b_head;
863
864 del_last = b_head;
865 b_head = NEXT_INSN (b_head);
866 }
867
868 /* If there was a jump out of A, delete it. */
869 if (JUMP_P (a_end))
870 {
871 rtx_insn *prev;
872
873 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
874 if (!NOTE_P (prev)
875 || NOTE_INSN_BASIC_BLOCK_P (prev)
876 || prev == BB_HEAD (a))
877 break;
878
879 del_first = a_end;
880
881 /* If this was a conditional jump, we need to also delete
882 the insn that set cc0. */
883 if (HAVE_cc0 && only_sets_cc0_p (prev))
884 {
885 rtx_insn *tmp = prev;
886
887 prev = prev_nonnote_insn (prev);
888 if (!prev)
889 prev = BB_HEAD (a);
890 del_first = tmp;
891 }
892
893 a_end = PREV_INSN (del_first);
894 }
895 else if (BARRIER_P (NEXT_INSN (a_end)))
896 del_first = NEXT_INSN (a_end);
897
898 /* Delete everything marked above as well as crap that might be
899 hanging out between the two blocks. */
900 BB_END (a) = a_end;
901 BB_HEAD (b) = b_empty ? NULL : b_head;
902 delete_insn_chain (del_first, del_last, true);
903
904 /* If not optimizing, preserve the locus of the single edge between
905 blocks A and B if necessary by emitting a nop. */
906 if (!optimize
907 && !forward_edge_locus
908 && !DECL_IGNORED_P (current_function_decl))
909 {
910 emit_nop_for_unique_locus_between (a, b);
911 a_end = BB_END (a);
912 }
913
914 /* Reassociate the insns of B with A. */
915 if (!b_empty)
916 {
917 update_bb_for_insn_chain (a_end, b_debug_end, a);
918
919 BB_END (a) = b_debug_end;
920 BB_HEAD (b) = NULL;
921 }
922 else if (b_end != b_debug_end)
923 {
924 /* Move any deleted labels and other notes between the end of A
925 and the debug insns that make up B after the debug insns,
926 bringing the debug insns into A while keeping the notes after
927 the end of A. */
928 if (NEXT_INSN (a_end) != b_debug_start)
929 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
930 b_debug_end);
931 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
932 BB_END (a) = b_debug_end;
933 }
934
935 df_bb_delete (b->index);
936
937 if (forward_edge_locus)
938 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
939
940 if (dump_file)
941 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
942 }
943
944
945 /* Return true when blocks A and B can be merged. */
946
947 static bool
948 rtl_can_merge_blocks (basic_block a, basic_block b)
949 {
950 /* If we are partitioning hot/cold basic blocks, we don't want to
951 mess up unconditional or indirect jumps that cross between hot
952 and cold sections.
953
954 Basic block partitioning may result in some jumps that appear to
955 be optimizable (or blocks that appear to be mergeable), but which really
956 must be left untouched (they are required to make it safely across
957 partition boundaries). See the comments at the top of
958 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
959
960 if (BB_PARTITION (a) != BB_PARTITION (b))
961 return false;
962
963 /* Protect the loop latches. */
964 if (current_loops && b->loop_father->latch == b)
965 return false;
966
967 /* There must be exactly one edge in between the blocks. */
968 return (single_succ_p (a)
969 && single_succ (a) == b
970 && single_pred_p (b)
971 && a != b
972 /* Must be simple edge. */
973 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
974 && a->next_bb == b
975 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
976 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
977 /* If the jump insn has side effects,
978 we can't kill the edge. */
979 && (!JUMP_P (BB_END (a))
980 || (reload_completed
981 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
982 }
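/* Illustrative sketch, an assumption rather than part of the original file:
   callers normally go through the cfghooks wrappers, which dispatch to the
   two routines above when the CFG is in plain RTL mode:

     if (can_merge_blocks_p (a, b))     // ends up in rtl_can_merge_blocks
       merge_blocks (a, b);             // ends up in rtl_merge_blocks

   Every structural condition listed above (single simple edge, adjacency,
   same partition, no side-effect jump) must hold for the predicate to
   allow the merge.  */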
983 \f
984 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
985 exist. */
986
987 rtx_code_label *
988 block_label (basic_block block)
989 {
990 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
991 return NULL;
992
993 if (!LABEL_P (BB_HEAD (block)))
994 {
995 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
996 }
997
998 return as_a <rtx_code_label *> (BB_HEAD (block));
999 }
1000
1001 /* Remove all barriers from BB_FOOTER of a BB. */
1002
1003 static void
1004 remove_barriers_from_footer (basic_block bb)
1005 {
1006 rtx_insn *insn = BB_FOOTER (bb);
1007
1008 /* Remove barriers but keep jumptables. */
1009 while (insn)
1010 {
1011 if (BARRIER_P (insn))
1012 {
1013 if (PREV_INSN (insn))
1014 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1015 else
1016 BB_FOOTER (bb) = NEXT_INSN (insn);
1017 if (NEXT_INSN (insn))
1018 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1019 }
1020 if (LABEL_P (insn))
1021 return;
1022 insn = NEXT_INSN (insn);
1023 }
1024 }
1025
1026 /* Attempt to perform edge redirection by replacing a possibly complex jump
1027 instruction with an unconditional jump, or by removing the jump completely.
1028 This can apply only if all edges now point to the same block.  The
1029 parameters and return values are equivalent to redirect_edge_and_branch. */
1030
1031 edge
1032 try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
1033 {
1034 basic_block src = e->src;
1035 rtx_insn *insn = BB_END (src), *kill_from;
1036 rtx set;
1037 int fallthru = 0;
1038
1039 /* If we are partitioning hot/cold basic blocks, we don't want to
1040 mess up unconditional or indirect jumps that cross between hot
1041 and cold sections.
1042
1043 Basic block partitioning may result in some jumps that appear to
1044 be optimizable (or blocks that appear to be mergeable), but which really
1045 must be left untouched (they are required to make it safely across
1046 partition boundaries). See the comments at the top of
1047 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1048
1049 if (BB_PARTITION (src) != BB_PARTITION (target))
1050 return NULL;
1051
1052 /* We can replace or remove a complex jump only when we have exactly
1053 two edges. Also, if we have exactly one outgoing edge, we can
1054 redirect that. */
1055 if (EDGE_COUNT (src->succs) >= 3
1056 /* Verify that all targets will be TARGET. Specifically, the
1057 edge that is not E must also go to TARGET. */
1058 || (EDGE_COUNT (src->succs) == 2
1059 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1060 return NULL;
1061
1062 if (!onlyjump_p (insn))
1063 return NULL;
1064 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1065 return NULL;
1066
1067 /* Avoid removing branch with side effects. */
1068 set = single_set (insn);
1069 if (!set || side_effects_p (set))
1070 return NULL;
1071
1072 /* In case we zap a conditional jump, we'll need to kill
1073 the cc0 setter too. */
1074 kill_from = insn;
1075 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1076 && only_sets_cc0_p (PREV_INSN (insn)))
1077 kill_from = PREV_INSN (insn);
1078
1079 /* See if we can create the fallthru edge. */
1080 if (in_cfglayout || can_fallthru (src, target))
1081 {
1082 if (dump_file)
1083 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1084 fallthru = 1;
1085
1086 /* Selectively unlink whole insn chain. */
1087 if (in_cfglayout)
1088 {
1089 delete_insn_chain (kill_from, BB_END (src), false);
1090 remove_barriers_from_footer (src);
1091 }
1092 else
1093 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1094 false);
1095 }
1096
1097 /* If this already is simplejump, redirect it. */
1098 else if (simplejump_p (insn))
1099 {
1100 if (e->dest == target)
1101 return NULL;
1102 if (dump_file)
1103 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1104 INSN_UID (insn), e->dest->index, target->index);
1105 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1106 block_label (target), 0))
1107 {
1108 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1109 return NULL;
1110 }
1111 }
1112
1113 /* Cannot do anything for target exit block. */
1114 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1115 return NULL;
1116
1117 /* Or replace possibly complicated jump insn by simple jump insn. */
1118 else
1119 {
1120 rtx_code_label *target_label = block_label (target);
1121 rtx_insn *barrier;
1122 rtx_insn *label;
1123 rtx_jump_table_data *table;
1124
1125 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1126 JUMP_LABEL (BB_END (src)) = target_label;
1127 LABEL_NUSES (target_label)++;
1128 if (dump_file)
1129 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1130 INSN_UID (insn), INSN_UID (BB_END (src)));
1131
1132
1133 delete_insn_chain (kill_from, insn, false);
1134
1135 /* Recognize a tablejump that we are converting to a
1136 simple jump and remove its associated CODE_LABEL
1137 and ADDR_VEC or ADDR_DIFF_VEC. */
1138 if (tablejump_p (insn, &label, &table))
1139 delete_insn_chain (label, table, false);
1140
1141 barrier = next_nonnote_nondebug_insn (BB_END (src));
1142 if (!barrier || !BARRIER_P (barrier))
1143 emit_barrier_after (BB_END (src));
1144 else
1145 {
1146 if (barrier != NEXT_INSN (BB_END (src)))
1147 {
1148 /* Move the jump before barrier so that the notes
1149 which originally were or were created before jump table are
1150 inside the basic block. */
1151 rtx_insn *new_insn = BB_END (src);
1152
1153 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1154 PREV_INSN (barrier), src);
1155
1156 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1157 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1158
1159 SET_NEXT_INSN (new_insn) = barrier;
1160 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1161
1162 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1163 SET_PREV_INSN (barrier) = new_insn;
1164 }
1165 }
1166 }
1167
1168 /* Keep only one edge out and set proper flags. */
1169 if (!single_succ_p (src))
1170 remove_edge (e);
1171 gcc_assert (single_succ_p (src));
1172
1173 e = single_succ_edge (src);
1174 if (fallthru)
1175 e->flags = EDGE_FALLTHRU;
1176 else
1177 e->flags = 0;
1178
1179 e->probability = profile_probability::always ();
1180
1181 if (e->dest != target)
1182 redirect_edge_succ (e, target);
1183 return e;
1184 }
1185
1186 /* Subroutine of redirect_branch_edge that tries to patch the jump
1187 instruction INSN so that it reaches block NEW_BB.  Do this
1188 only when it originally reached the block whose label is OLD_LABEL.
1189 Return true if this worked or the original target wasn't OLD_LABEL;
1190 return false if redirection doesn't work. */
1191
1192 static bool
1193 patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1194 {
1195 rtx_jump_table_data *table;
1196 rtx tmp;
1197 /* Recognize a tablejump and adjust all matching cases. */
1198 if (tablejump_p (insn, NULL, &table))
1199 {
1200 rtvec vec;
1201 int j;
1202 rtx_code_label *new_label = block_label (new_bb);
1203
1204 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1205 return false;
1206 vec = table->get_labels ();
1207
1208 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1209 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1210 {
1211 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1212 --LABEL_NUSES (old_label);
1213 ++LABEL_NUSES (new_label);
1214 }
1215
1216 /* Handle casesi dispatch insns. */
1217 if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
1218 && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
1219 {
1220 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1221 new_label);
1222 --LABEL_NUSES (old_label);
1223 ++LABEL_NUSES (new_label);
1224 }
1225 }
1226 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1227 {
1228 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1229 rtx note;
1230
1231 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1232 return false;
1233 rtx_code_label *new_label = block_label (new_bb);
1234
1235 for (i = 0; i < n; ++i)
1236 {
1237 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1238 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1239 if (XEXP (old_ref, 0) == old_label)
1240 {
1241 ASM_OPERANDS_LABEL (tmp, i)
1242 = gen_rtx_LABEL_REF (Pmode, new_label);
1243 --LABEL_NUSES (old_label);
1244 ++LABEL_NUSES (new_label);
1245 }
1246 }
1247
1248 if (JUMP_LABEL (insn) == old_label)
1249 {
1250 JUMP_LABEL (insn) = new_label;
1251 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1252 if (note)
1253 remove_note (insn, note);
1254 }
1255 else
1256 {
1257 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1258 if (note)
1259 remove_note (insn, note);
1260 if (JUMP_LABEL (insn) != new_label
1261 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1262 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1263 }
1264 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1265 != NULL_RTX)
1266 XEXP (note, 0) = new_label;
1267 }
1268 else
1269 {
1270 /* ?? We may play the games with moving the named labels from
1271 one basic block to the other in case only one computed_jump is
1272 available. */
1273 if (computed_jump_p (insn)
1274 /* A return instruction can't be redirected. */
1275 || returnjump_p (insn))
1276 return false;
1277
1278 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1279 {
1280 /* If the insn doesn't go where we think, we're confused. */
1281 gcc_assert (JUMP_LABEL (insn) == old_label);
1282
1283 /* If the substitution doesn't succeed, die. This can happen
1284 if the back end emitted unrecognizable instructions or if
1285 target is exit block on some arches. Or for crossing
1286 jumps. */
1287 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1288 block_label (new_bb), 0))
1289 {
1290 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
1291 || CROSSING_JUMP_P (insn));
1292 return false;
1293 }
1294 }
1295 }
1296 return true;
1297 }
1298
1299
1300 /* Redirect the edge representing a branch of an (un)conditional jump or
1301 tablejump; return NULL on failure. */
1302 static edge
1303 redirect_branch_edge (edge e, basic_block target)
1304 {
1305 rtx_insn *old_label = BB_HEAD (e->dest);
1306 basic_block src = e->src;
1307 rtx_insn *insn = BB_END (src);
1308
1309 /* We can only redirect non-fallthru edges of jump insn. */
1310 if (e->flags & EDGE_FALLTHRU)
1311 return NULL;
1312 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1313 return NULL;
1314
1315 if (!currently_expanding_to_rtl)
1316 {
1317 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1318 return NULL;
1319 }
1320 else
1321 /* When expanding this BB might actually contain multiple
1322 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1323 Redirect all of those that match our label. */
1324 FOR_BB_INSNS (src, insn)
1325 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1326 old_label, target))
1327 return NULL;
1328
1329 if (dump_file)
1330 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1331 e->src->index, e->dest->index, target->index);
1332
1333 if (e->dest != target)
1334 e = redirect_edge_succ_nodup (e, target);
1335
1336 return e;
1337 }
1338
1339 /* Called when edge E has been redirected to a new destination,
1340 in order to update the region crossing flag on the edge and
1341 jump. */
1342
1343 static void
1344 fixup_partition_crossing (edge e)
1345 {
1346 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1347 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1348 return;
1349 /* If we redirected an existing edge, it may already be marked
1350 crossing, even though the new src is missing a reg crossing note.
1351 But make sure reg crossing note doesn't already exist before
1352 inserting. */
1353 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1354 {
1355 e->flags |= EDGE_CROSSING;
1356 if (JUMP_P (BB_END (e->src)))
1357 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1358 }
1359 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1360 {
1361 e->flags &= ~EDGE_CROSSING;
1362 /* Remove the section crossing note from jump at end of
1363 src if it exists, and if no other successors are
1364 still crossing. */
1365 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1366 {
1367 bool has_crossing_succ = false;
1368 edge e2;
1369 edge_iterator ei;
1370 FOR_EACH_EDGE (e2, ei, e->src->succs)
1371 {
1372 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1373 if (has_crossing_succ)
1374 break;
1375 }
1376 if (!has_crossing_succ)
1377 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1378 }
1379 }
1380 }
1381
1382 /* Called when block BB has been reassigned to the cold partition,
1383 because it is now dominated by another cold block,
1384 to ensure that the region crossing attributes are updated. */
1385
1386 static void
1387 fixup_new_cold_bb (basic_block bb)
1388 {
1389 edge e;
1390 edge_iterator ei;
1391
1392 /* This is called when a hot bb is found to now be dominated
1393 by a cold bb and therefore needs to become cold. Therefore,
1394 its preds will no longer be region crossing. Any non-dominating
1395 preds that were previously hot would also have become cold
1396 in the caller for the same region. Any preds that were previously
1397 region-crossing will be adjusted in fixup_partition_crossing. */
1398 FOR_EACH_EDGE (e, ei, bb->preds)
1399 {
1400 fixup_partition_crossing (e);
1401 }
1402
1403 /* Possibly need to make bb's successor edges region crossing,
1404 or remove stale region crossing. */
1405 FOR_EACH_EDGE (e, ei, bb->succs)
1406 {
1407 /* We can't have fall-through edges across partition boundaries.
1408 Note that force_nonfallthru will do any necessary partition
1409 boundary fixup by calling fixup_partition_crossing itself. */
1410 if ((e->flags & EDGE_FALLTHRU)
1411 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1412 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1413 force_nonfallthru (e);
1414 else
1415 fixup_partition_crossing (e);
1416 }
1417 }
1418
1419 /* Attempt to change code to redirect edge E to TARGET.  Don't do that at the
1420 expense of adding new instructions or reordering basic blocks.
1421
1422 The function can also be called with the edge destination equal to TARGET.
1423 Then it should try the simplifications and do nothing if none is possible.
1424
1425 Return the edge representing the branch if the transformation succeeded.
1426 Return NULL on failure.
1427 We still return NULL in case E already pointed to TARGET and we didn't
1428 manage to simplify the instruction stream. */
1429
1430 static edge
1431 rtl_redirect_edge_and_branch (edge e, basic_block target)
1432 {
1433 edge ret;
1434 basic_block src = e->src;
1435 basic_block dest = e->dest;
1436
1437 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1438 return NULL;
1439
1440 if (dest == target)
1441 return e;
1442
1443 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1444 {
1445 df_set_bb_dirty (src);
1446 fixup_partition_crossing (ret);
1447 return ret;
1448 }
1449
1450 ret = redirect_branch_edge (e, target);
1451 if (!ret)
1452 return NULL;
1453
1454 df_set_bb_dirty (src);
1455 fixup_partition_crossing (ret);
1456 return ret;
1457 }
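/* Illustrative sketch, not part of the original file: the usual caller
   pattern goes through the cfghooks wrappers.  When the cheap redirection
   above fails (returns NULL), the forceful variant may emit a new jump and
   basic block instead:

     if (!redirect_edge_and_branch (e, target))       // cheap attempt
       redirect_edge_and_branch_force (e, target);    // may create a BB

   The forceful variant is only appropriate where adding instructions is
   acceptable.  */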
1458
1459 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1460
1461 void
1462 emit_barrier_after_bb (basic_block bb)
1463 {
1464 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1465 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1466 || current_ir_type () == IR_RTL_CFGLAYOUT);
1467 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1468 {
1469 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1470
1471 if (BB_FOOTER (bb))
1472 {
1473 rtx_insn *footer_tail = BB_FOOTER (bb);
1474
1475 while (NEXT_INSN (footer_tail))
1476 footer_tail = NEXT_INSN (footer_tail);
1477 if (!BARRIER_P (footer_tail))
1478 {
1479 SET_NEXT_INSN (footer_tail) = insn;
1480 SET_PREV_INSN (insn) = footer_tail;
1481 }
1482 }
1483 else
1484 BB_FOOTER (bb) = insn;
1485 }
1486 }
1487
1488 /* Like force_nonfallthru below, but additionally performs redirection.
1489 Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
1490 when redirecting to the EXIT_BLOCK; it is either ret_rtx or
1491 simple_return_rtx, indicating which kind of returnjump to create.
1492 It should be NULL otherwise. */
1493
1494 basic_block
1495 force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1496 {
1497 basic_block jump_block, new_bb = NULL, src = e->src;
1498 rtx note;
1499 edge new_edge;
1500 int abnormal_edge_flags = 0;
1501 bool asm_goto_edge = false;
1502 int loc;
1503
1504 /* In case the last instruction is a conditional jump to the next
1505 instruction, first redirect the jump itself and then continue
1506 by creating a basic block afterwards to redirect the fallthru edge. */
1507 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1508 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1509 && any_condjump_p (BB_END (e->src))
1510 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1511 {
1512 rtx note;
1513 edge b = unchecked_make_edge (e->src, target, 0);
1514 bool redirected;
1515
1516 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1517 block_label (target), 0);
1518 gcc_assert (redirected);
1519
1520 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1521 if (note)
1522 {
1523 int prob = XINT (note, 0);
1524
1525 b->probability = profile_probability::from_reg_br_prob_note (prob);
1526 e->probability -= e->probability;
1527 }
1528 }
1529
1530 if (e->flags & EDGE_ABNORMAL)
1531 {
1532 /* Irritating special case - fallthru edge to the same block as abnormal
1533 edge.
1534 We can't redirect abnormal edge, but we still can split the fallthru
1535 one and create separate abnormal edge to original destination.
1536 This allows bb-reorder to make such edge non-fallthru. */
1537 gcc_assert (e->dest == target);
1538 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1539 e->flags &= EDGE_FALLTHRU;
1540 }
1541 else
1542 {
1543 gcc_assert (e->flags & EDGE_FALLTHRU);
1544 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1545 {
1546 /* We can't redirect the entry block. Create an empty block
1547 at the start of the function which we use to add the new
1548 jump. */
1549 edge tmp;
1550 edge_iterator ei;
1551 bool found = false;
1552
1553 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1554 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1555 bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1556
1557 /* Make sure new block ends up in correct hot/cold section. */
1558 BB_COPY_PARTITION (bb, e->dest);
1559
1560 /* Change the existing edge's source to be the new block, and add
1561 a new edge from the entry block to the new block. */
1562 e->src = bb;
1563 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1564 (tmp = ei_safe_edge (ei)); )
1565 {
1566 if (tmp == e)
1567 {
1568 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1569 found = true;
1570 break;
1571 }
1572 else
1573 ei_next (&ei);
1574 }
1575
1576 gcc_assert (found);
1577
1578 vec_safe_push (bb->succs, e);
1579 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1580 EDGE_FALLTHRU);
1581 }
1582 }
1583
1584 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1585 don't point to the target or fallthru label. */
1586 if (JUMP_P (BB_END (e->src))
1587 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1588 && (e->flags & EDGE_FALLTHRU)
1589 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1590 {
1591 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1592 bool adjust_jump_target = false;
1593
1594 for (i = 0; i < n; ++i)
1595 {
1596 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1597 {
1598 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1599 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1600 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1601 adjust_jump_target = true;
1602 }
1603 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1604 asm_goto_edge = true;
1605 }
1606 if (adjust_jump_target)
1607 {
1608 rtx_insn *insn = BB_END (e->src);
1609 rtx note;
1610 rtx_insn *old_label = BB_HEAD (e->dest);
1611 rtx_insn *new_label = BB_HEAD (target);
1612
1613 if (JUMP_LABEL (insn) == old_label)
1614 {
1615 JUMP_LABEL (insn) = new_label;
1616 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1617 if (note)
1618 remove_note (insn, note);
1619 }
1620 else
1621 {
1622 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1623 if (note)
1624 remove_note (insn, note);
1625 if (JUMP_LABEL (insn) != new_label
1626 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1627 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1628 }
1629 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1630 != NULL_RTX)
1631 XEXP (note, 0) = new_label;
1632 }
1633 }
1634
1635 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1636 {
1637 rtx_insn *new_head;
1638 profile_count count = e->count ();
1639 profile_probability probability = e->probability;
1640 /* Create the new structures. */
1641
1642 /* If the old block ended with a tablejump, skip its table
1643 by searching forward from there. Otherwise start searching
1644 forward from the last instruction of the old block. */
1645 rtx_jump_table_data *table;
1646 if (tablejump_p (BB_END (e->src), NULL, &table))
1647 new_head = table;
1648 else
1649 new_head = BB_END (e->src);
1650 new_head = NEXT_INSN (new_head);
1651
1652 jump_block = create_basic_block (new_head, NULL, e->src);
1653 jump_block->count = count;
1654
1655 /* Make sure new block ends up in correct hot/cold section. */
1656
1657 BB_COPY_PARTITION (jump_block, e->src);
1658
1659 /* Wire edge in. */
1660 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1661 new_edge->probability = probability;
1662
1663 /* Redirect old edge. */
1664 redirect_edge_pred (e, jump_block);
1665 e->probability = profile_probability::always ();
1666
1667 /* If e->src was previously region crossing, it no longer is
1668 and the reg crossing note should be removed. */
1669 fixup_partition_crossing (new_edge);
1670
1671 /* If asm goto has any label refs to target's label,
1672 add also edge from asm goto bb to target. */
1673 if (asm_goto_edge)
1674 {
1675 new_edge->probability = new_edge->probability.apply_scale (1, 2);
1676 jump_block->count = jump_block->count.apply_scale (1, 2);
1677 edge new_edge2 = make_edge (new_edge->src, target,
1678 e->flags & ~EDGE_FALLTHRU);
1679 new_edge2->probability = probability - new_edge->probability;
1680 }
1681
1682 new_bb = jump_block;
1683 }
1684 else
1685 jump_block = e->src;
1686
1687 loc = e->goto_locus;
1688 e->flags &= ~EDGE_FALLTHRU;
1689 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1690 {
1691 if (jump_label == ret_rtx)
1692 emit_jump_insn_after_setloc (targetm.gen_return (),
1693 BB_END (jump_block), loc);
1694 else
1695 {
1696 gcc_assert (jump_label == simple_return_rtx);
1697 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1698 BB_END (jump_block), loc);
1699 }
1700 set_return_jump_label (BB_END (jump_block));
1701 }
1702 else
1703 {
1704 rtx_code_label *label = block_label (target);
1705 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1706 BB_END (jump_block), loc);
1707 JUMP_LABEL (BB_END (jump_block)) = label;
1708 LABEL_NUSES (label)++;
1709 }
1710
1711 /* We might be in cfg layout mode, and if so, the following routine will
1712 insert the barrier correctly. */
1713 emit_barrier_after_bb (jump_block);
1714 redirect_edge_succ_nodup (e, target);
1715
1716 if (abnormal_edge_flags)
1717 make_edge (src, target, abnormal_edge_flags);
1718
1719 df_mark_solutions_dirty ();
1720 fixup_partition_crossing (e);
1721 return new_bb;
1722 }
1723
1724 /* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
1725 (and possibly create a new basic block) to make the edge non-fallthru.
1726 Return the newly created BB or NULL if none. */
1727
1728 static basic_block
1729 rtl_force_nonfallthru (edge e)
1730 {
1731 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1732 }
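/* Illustrative sketch, an assumption rather than part of the original file:
   the force_nonfallthru cfghook, which dispatches here, is the way callers
   break a fall-through edge that is no longer allowed to fall through,
   e.g. across a hot/cold partition boundary:

     edge e = find_fallthru_edge (bb->succs);
     if (e && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
       force_nonfallthru (e);   // emits an explicit jump, maybe a new BB

   This mirrors the handling in fixup_new_cold_bb above.  */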
1733
1734 /* Redirect the edge even at the expense of creating a new jump insn or
1735 basic block.  Return the new basic block if created, NULL otherwise.
1736 The conversion must be possible. */
1737
1738 static basic_block
1739 rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1740 {
1741 if (redirect_edge_and_branch (e, target)
1742 || e->dest == target)
1743 return NULL;
1744
1745 /* In case the edge redirection failed, try to force it to be non-fallthru
1746 and redirect newly created simplejump. */
1747 df_set_bb_dirty (e->src);
1748 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1749 }
1750
1751 /* The given edge should potentially be a fallthru edge. If that is in
1752 fact true, delete the jump and barriers that are in the way. */
1753
1754 static void
1755 rtl_tidy_fallthru_edge (edge e)
1756 {
1757 rtx_insn *q;
1758 basic_block b = e->src, c = b->next_bb;
1759
1760 /* ??? In a late-running flow pass, other folks may have deleted basic
1761 blocks by nopping out blocks, leaving multiple BARRIERs between here
1762 and the target label. They ought to be chastised and fixed.
1763
1764 We can also wind up with a sequence of undeletable labels between
1765 one block and the next.
1766
1767 So search through a sequence of barriers, labels, and notes for
1768 the head of block C and assert that we really do fall through. */
1769
1770 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1771 if (NONDEBUG_INSN_P (q))
1772 return;
1773
1774 /* Remove what will soon cease being the jump insn from the source block.
1775 If block B consisted only of this single jump, turn it into a deleted
1776 note. */
1777 q = BB_END (b);
1778 if (JUMP_P (q)
1779 && onlyjump_p (q)
1780 && (any_uncondjump_p (q)
1781 || single_succ_p (b)))
1782 {
1783 rtx_insn *label;
1784 rtx_jump_table_data *table;
1785
1786 if (tablejump_p (q, &label, &table))
1787 {
1788 /* The label is likely mentioned in some instruction before
1789 the tablejump and might not be DCEd, so turn it into
1790 a note instead and move before the tablejump that is going to
1791 be deleted. */
1792 const char *name = LABEL_NAME (label);
1793 PUT_CODE (label, NOTE);
1794 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1795 NOTE_DELETED_LABEL_NAME (label) = name;
1796 reorder_insns (label, label, PREV_INSN (q));
1797 delete_insn (table);
1798 }
1799
1800 /* If this was a conditional jump, we need to also delete
1801 the insn that set cc0. */
1802 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1803 q = PREV_INSN (q);
1804
1805 q = PREV_INSN (q);
1806 }
1807 /* Unconditional jumps with side-effects (i.e. which we can't just delete
1808 together with the barrier) should never have a fallthru edge. */
1809 else if (JUMP_P (q) && any_uncondjump_p (q))
1810 return;
1811
1812 /* Selectively unlink the sequence. */
1813 if (q != PREV_INSN (BB_HEAD (c)))
1814 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1815
1816 e->flags |= EDGE_FALLTHRU;
1817 }
1818 \f
1819 /* Should move basic block BB after basic block AFTER. NIY. */
1820
1821 static bool
1822 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1823 basic_block after ATTRIBUTE_UNUSED)
1824 {
1825 return false;
1826 }
1827
1828 /* Locate the last bb in the same partition as START_BB. */
1829
1830 static basic_block
1831 last_bb_in_partition (basic_block start_bb)
1832 {
1833 basic_block bb;
1834 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1835 {
1836 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1837 return bb;
1838 }
1839 /* Return bb before the exit block. */
1840 return bb->prev_bb;
1841 }
1842
1843 /* Split a (typically critical) edge. Return the new block.
1844 The edge must not be abnormal.
1845
1846 ??? The code generally expects to be called on critical edges.
1847 The case of a block ending in an unconditional jump to a
1848 block with multiple predecessors is not handled optimally. */
1849
1850 static basic_block
1851 rtl_split_edge (edge edge_in)
1852 {
1853 basic_block bb, new_bb;
1854 rtx_insn *before;
1855
1856 /* Abnormal edges cannot be split. */
1857 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1858
1859 /* We are going to place the new block in front of the edge destination.
1860 Avoid any existing fallthru predecessor.
1861 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1862 {
1863 edge e = find_fallthru_edge (edge_in->dest->preds);
1864
1865 if (e)
1866 force_nonfallthru (e);
1867 }
1868
1869 /* Create the basic block note. */
1870 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1871 before = BB_HEAD (edge_in->dest);
1872 else
1873 before = NULL;
1874
1875 /* If this is a fall through edge to the exit block, the blocks might
1876 not be adjacent, and the right place is after the source. */
1877 if ((edge_in->flags & EDGE_FALLTHRU)
1878 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1879 {
1880 before = NEXT_INSN (BB_END (edge_in->src));
1881 bb = create_basic_block (before, NULL, edge_in->src);
1882 BB_COPY_PARTITION (bb, edge_in->src);
1883 }
1884 else
1885 {
1886 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1887 {
1888 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1889 BB_COPY_PARTITION (bb, edge_in->dest);
1890 }
1891 else
1892 {
1893 basic_block after = edge_in->dest->prev_bb;
1894 /* If this is post-bb reordering, and the edge crosses a partition
1895 boundary, the new block needs to be inserted in the bb chain
1896 at the end of the src partition (since we put the new bb into
1897 that partition, see below). Otherwise we may end up creating
1898 an extra partition crossing in the chain, which is illegal.
1899 It can't go after the src, because src may have a fall-through
1900 to a different block. */
1901 if (crtl->bb_reorder_complete
1902 && (edge_in->flags & EDGE_CROSSING))
1903 {
1904 after = last_bb_in_partition (edge_in->src);
1905 before = get_last_bb_insn (after);
1906 /* The instruction following the last bb in partition should
1907 be a barrier, since it cannot end in a fall-through. */
1908 gcc_checking_assert (BARRIER_P (before));
1909 before = NEXT_INSN (before);
1910 }
1911 bb = create_basic_block (before, NULL, after);
1912 /* Put the split bb into the src partition, to avoid creating
1913 a situation where a cold bb dominates a hot bb, in the case
1914 where src is cold and dest is hot. The src will dominate
1915 the new bb (whereas it might not have dominated dest). */
1916 BB_COPY_PARTITION (bb, edge_in->src);
1917 }
1918 }
1919
1920 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1921
1922 /* Can't allow a region crossing edge to be fallthrough. */
1923 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1924 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1925 {
1926 new_bb = force_nonfallthru (single_succ_edge (bb));
1927 gcc_assert (!new_bb);
1928 }
1929
1930 /* For non-fallthru edges, we must adjust the predecessor's
1931 jump instruction to target our new block. */
1932 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1933 {
1934 edge redirected = redirect_edge_and_branch (edge_in, bb);
1935 gcc_assert (redirected);
1936 }
1937 else
1938 {
1939 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1940 {
1941 /* For asm goto even splitting of fallthru edge might
1942 need insn patching, as other labels might point to the
1943 old label. */
1944 rtx_insn *last = BB_END (edge_in->src);
1945 if (last
1946 && JUMP_P (last)
1947 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1948 && (extract_asm_operands (PATTERN (last))
1949 || JUMP_LABEL (last) == before)
1950 && patch_jump_insn (last, before, bb))
1951 df_set_bb_dirty (edge_in->src);
1952 }
1953 redirect_edge_succ (edge_in, bb);
1954 }
1955
1956 return bb;
1957 }
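/* Illustrative sketch, not part of the original source: rtl_split_edge is
   the RTL implementation behind the generic split_edge hook declared in
   cfghooks.h.  A pass that needs code to run only along edge E normally
   goes through the hook:

     basic_block landing = split_edge (e);
     ...emit the edge-specific code at the start of LANDING...

   The name LANDING is hypothetical; split_edge itself is real and returns
   the newly created block.  */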
1958
1959 /* Queue instructions for insertion on an edge between two basic blocks.
1960 The new instructions and basic blocks (if any) will not appear in the
1961 CFG until commit_edge_insertions is called. */
1962
1963 void
1964 insert_insn_on_edge (rtx pattern, edge e)
1965 {
1966 /* We cannot insert instructions on an abnormal critical edge.
1967 It will be easier to find the culprit if we die now. */
1968 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1969
1970 if (e->insns.r == NULL_RTX)
1971 start_sequence ();
1972 else
1973 push_to_sequence (e->insns.r);
1974
1975 emit_insn (pattern);
1976
1977 e->insns.r = get_insns ();
1978 end_sequence ();
1979 }
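/* Illustrative sketch, not part of the original source: a typical caller
   queues a pattern on the edge and relies on a later call to
   commit_edge_insertions to materialize it.  DST and SRC are hypothetical
   rtx operands of matching modes:

     insert_insn_on_edge (gen_rtx_SET (dst, src), e);
     ...
     commit_edge_insertions ();

   Until the commit, neither the insn stream nor the CFG changes.  */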
1980
1981 /* Update the CFG for the instructions queued on edge E. */
1982
1983 void
1984 commit_one_edge_insertion (edge e)
1985 {
1986 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1987 basic_block bb;
1988
1989 /* Pull the insns off the edge now since the edge might go away. */
1990 insns = e->insns.r;
1991 e->insns.r = NULL;
1992
1993 /* Figure out where to put these insns. If the destination has
1994 one predecessor, insert there. Except for the exit block. */
1995 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1996 {
1997 bb = e->dest;
1998
1999 /* Get the location correct wrt a code label, and "nice" wrt
2000 a basic block note, and before everything else. */
2001 tmp = BB_HEAD (bb);
2002 if (LABEL_P (tmp))
2003 tmp = NEXT_INSN (tmp);
2004 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2005 tmp = NEXT_INSN (tmp);
2006 if (tmp == BB_HEAD (bb))
2007 before = tmp;
2008 else if (tmp)
2009 after = PREV_INSN (tmp);
2010 else
2011 after = get_last_insn ();
2012 }
2013
2014 /* If the source has one successor and the edge is not abnormal,
2015 insert there. Except for the entry block.
2016 Don't do this if the predecessor ends in a jump other than an
2017 unconditional simple jump. E.g. for an asm goto that points all
2018 its labels at the fallthru basic block, we can't insert instructions
2019 before the asm goto, as the asm goto can have various side effects,
2020 and we can't emit instructions after the asm goto, as it must end
2021 the basic block. */
2022 else if ((e->flags & EDGE_ABNORMAL) == 0
2023 && single_succ_p (e->src)
2024 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2025 && (!JUMP_P (BB_END (e->src))
2026 || simplejump_p (BB_END (e->src))))
2027 {
2028 bb = e->src;
2029
2030 /* It is possible to have a non-simple jump here. Consider a target
2031 where some forms of unconditional jumps clobber a register. This
2032 happens on the fr30 for example.
2033
2034 We know this block has a single successor, so we can just emit
2035 the queued insns before the jump. */
2036 if (JUMP_P (BB_END (bb)))
2037 before = BB_END (bb);
2038 else
2039 {
2040 /* We'd better be fallthru, or we've lost track of what's what. */
2041 gcc_assert (e->flags & EDGE_FALLTHRU);
2042
2043 after = BB_END (bb);
2044 }
2045 }
2046
2047 /* Otherwise we must split the edge. */
2048 else
2049 {
2050 bb = split_edge (e);
2051
2052 /* If E crossed a partition boundary, we needed to make bb end in
2053 a region-crossing jump, even though it was originally fallthru. */
2054 if (JUMP_P (BB_END (bb)))
2055 before = BB_END (bb);
2056 else
2057 after = BB_END (bb);
2058 }
2059
2060 /* Now that we've found the spot, do the insertion. */
2061 if (before)
2062 {
2063 emit_insn_before_noloc (insns, before, bb);
2064 last = prev_nonnote_insn (before);
2065 }
2066 else
2067 last = emit_insn_after_noloc (insns, after, bb);
2068
2069 if (returnjump_p (last))
2070 {
2071 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2072 This is not currently a problem because this only happens
2073 for the (single) epilogue, which already has a fallthru edge
2074 to EXIT. */
2075
2076 e = single_succ_edge (bb);
2077 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2078 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2079
2080 e->flags &= ~EDGE_FALLTHRU;
2081 emit_barrier_after (last);
2082
2083 if (before)
2084 delete_insn (before);
2085 }
2086 else
2087 gcc_assert (!JUMP_P (last));
2088 }
2089
2090 /* Update the CFG for all queued instructions. */
2091
2092 void
2093 commit_edge_insertions (void)
2094 {
2095 basic_block bb;
2096
2097 /* Optimization passes that invoke this routine can cause hot blocks
2098 previously reached by both hot and cold blocks to become dominated only
2099 by cold blocks. This will cause the verification below to fail,
2100 and leave now-cold code in the hot section. In some cases this
2101 may only be visible after newly unreachable blocks are deleted,
2102 which will be done by fixup_partitions. */
2103 fixup_partitions ();
2104
2105 if (!currently_expanding_to_rtl)
2106 checking_verify_flow_info ();
2107
2108 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2109 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2110 {
2111 edge e;
2112 edge_iterator ei;
2113
2114 FOR_EACH_EDGE (e, ei, bb->succs)
2115 if (e->insns.r)
2116 {
2117 if (currently_expanding_to_rtl)
2118 rebuild_jump_labels_chain (e->insns.r);
2119 commit_one_edge_insertion (e);
2120 }
2121 }
2122 }
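/* Illustrative sketch, not part of the original source: passes usually
   batch all of their edge insertions and commit once, so that any blocks
   created by edge splitting appear together.  needs_fixup_code and
   gen_fixup_code are hypothetical helpers:

     bool inserted = false;
     basic_block bb;
     edge e;
     edge_iterator ei;
     FOR_EACH_BB_FN (bb, cfun)
       FOR_EACH_EDGE (e, ei, bb->succs)
         if (needs_fixup_code (e))
           {
             insert_insn_on_edge (gen_fixup_code (e), e);
             inserted = true;
           }
     if (inserted)
       commit_edge_insertions ();  */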
2123 \f
2124
2125 /* Print out RTL-specific basic block information (live information
2126 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2127 documented in dumpfile.h. */
2128
2129 static void
2130 rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2131 {
2132 char *s_indent;
2133
2134 s_indent = (char *) alloca ((size_t) indent + 1);
2135 memset (s_indent, ' ', (size_t) indent);
2136 s_indent[indent] = '\0';
2137
2138 if (df && (flags & TDF_DETAILS))
2139 {
2140 df_dump_top (bb, outf);
2141 putc ('\n', outf);
2142 }
2143
2144 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2145 {
2146 rtx_insn *last = BB_END (bb);
2147 if (last)
2148 last = NEXT_INSN (last);
2149 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2150 {
2151 if (flags & TDF_DETAILS)
2152 df_dump_insn_top (insn, outf);
2153 if (! (flags & TDF_SLIM))
2154 print_rtl_single (outf, insn);
2155 else
2156 dump_insn_slim (outf, insn);
2157 if (flags & TDF_DETAILS)
2158 df_dump_insn_bottom (insn, outf);
2159 }
2160 }
2161
2162 if (df && (flags & TDF_DETAILS))
2163 {
2164 df_dump_bottom (bb, outf);
2165 putc ('\n', outf);
2166 }
2167
2168 }
2169 \f
2170 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2171 for the start of each basic block. FLAGS are the TDF_* masks documented
2172 in dumpfile.h. */
2173
2174 void
2175 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2176 {
2177 const rtx_insn *tmp_rtx;
2178 if (rtx_first == 0)
2179 fprintf (outf, "(nil)\n");
2180 else
2181 {
2182 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2183 int max_uid = get_max_uid ();
2184 basic_block *start = XCNEWVEC (basic_block, max_uid);
2185 basic_block *end = XCNEWVEC (basic_block, max_uid);
2186 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2187 basic_block bb;
2188
2189 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2190 insns, but the CFG is not maintained so the basic block info
2191 is not reliable. Therefore it's omitted from the dumps. */
2192 if (! (cfun->curr_properties & PROP_cfg))
2193 flags &= ~TDF_BLOCKS;
2194
2195 if (df)
2196 df_dump_start (outf);
2197
2198 if (cfun->curr_properties & PROP_cfg)
2199 {
2200 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2201 {
2202 rtx_insn *x;
2203
2204 start[INSN_UID (BB_HEAD (bb))] = bb;
2205 end[INSN_UID (BB_END (bb))] = bb;
2206 if (flags & TDF_BLOCKS)
2207 {
2208 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2209 {
2210 enum bb_state state = IN_MULTIPLE_BB;
2211
2212 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2213 state = IN_ONE_BB;
2214 in_bb_p[INSN_UID (x)] = state;
2215
2216 if (x == BB_END (bb))
2217 break;
2218 }
2219 }
2220 }
2221 }
2222
2223 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2224 {
2225 if (flags & TDF_BLOCKS)
2226 {
2227 bb = start[INSN_UID (tmp_rtx)];
2228 if (bb != NULL)
2229 {
2230 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2231 if (df && (flags & TDF_DETAILS))
2232 df_dump_top (bb, outf);
2233 }
2234
2235 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2236 && !NOTE_P (tmp_rtx)
2237 && !BARRIER_P (tmp_rtx))
2238 fprintf (outf, ";; Insn is not within a basic block\n");
2239 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2240 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2241 }
2242
2243 if (flags & TDF_DETAILS)
2244 df_dump_insn_top (tmp_rtx, outf);
2245 if (! (flags & TDF_SLIM))
2246 print_rtl_single (outf, tmp_rtx);
2247 else
2248 dump_insn_slim (outf, tmp_rtx);
2249 if (flags & TDF_DETAILS)
2250 df_dump_insn_bottom (tmp_rtx, outf);
2251
2252 bb = end[INSN_UID (tmp_rtx)];
2253 if (bb != NULL)
2254 {
2255 if (flags & TDF_BLOCKS)
2256 {
2257 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2258 if (df && (flags & TDF_DETAILS))
2259 df_dump_bottom (bb, outf);
2260 putc ('\n', outf);
2261 }
2262 /* Emit a hint if the fallthrough target of the current basic block
2263 isn't the block placed right after it. */
2264 else if (EDGE_COUNT (bb->succs) > 0)
2265 {
2266 gcc_assert (BB_END (bb) == tmp_rtx);
2267 const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
2268 /* Bypass intervening deleted-insn notes and debug insns. */
2269 while (ninsn
2270 && !NONDEBUG_INSN_P (ninsn)
2271 && !start[INSN_UID (ninsn)])
2272 ninsn = NEXT_INSN (ninsn);
2273 edge e = find_fallthru_edge (bb->succs);
2274 if (e && ninsn)
2275 {
2276 basic_block dest = e->dest;
2277 if (start[INSN_UID (ninsn)] != dest)
2278 fprintf (outf, "%s ; pc falls through to BB %d\n",
2279 print_rtx_head, dest->index);
2280 }
2281 }
2282 }
2283 }
2284
2285 free (start);
2286 free (end);
2287 free (in_bb_p);
2288 }
2289 }
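/* Illustrative note, not part of the original source: a typical debugging
   call, matching the one used by rtl_verify_edges below, dumps the whole
   insn stream with block boundaries and dataflow details:

     print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);  */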
2290 \f
2291 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2292
2293 void
2294 update_br_prob_note (basic_block bb)
2295 {
2296 rtx note;
2297 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2298 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2299 {
2300 if (note)
2301 {
2302 rtx *note_link, this_rtx;
2303
2304 note_link = &REG_NOTES (BB_END (bb));
2305 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2306 if (this_rtx == note)
2307 {
2308 *note_link = XEXP (this_rtx, 1);
2309 break;
2310 }
2311 }
2312 return;
2313 }
2314 if (!note
2315 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2316 return;
2317 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2318 }
2319
2320 /* Get the last insn associated with block BB (that includes barriers and
2321 tablejumps after BB). */
2322 rtx_insn *
2323 get_last_bb_insn (basic_block bb)
2324 {
2325 rtx_jump_table_data *table;
2326 rtx_insn *tmp;
2327 rtx_insn *end = BB_END (bb);
2328
2329 /* Include any jump table following the basic block. */
2330 if (tablejump_p (end, NULL, &table))
2331 end = table;
2332
2333 /* Include any barriers that may follow the basic block. */
2334 tmp = next_nonnote_nondebug_insn_bb (end);
2335 while (tmp && BARRIER_P (tmp))
2336 {
2337 end = tmp;
2338 tmp = next_nonnote_nondebug_insn_bb (end);
2339 }
2340
2341 return end;
2342 }
2343
2344 /* Add all BBs reachable from entry via hot paths into the SET. */
2345
2346 void
2347 find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2348 {
2349 auto_vec<basic_block, 64> worklist;
2350
2351 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2352 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2353
2354 while (worklist.length () > 0)
2355 {
2356 basic_block bb = worklist.pop ();
2357 edge_iterator ei;
2358 edge e;
2359
2360 FOR_EACH_EDGE (e, ei, bb->succs)
2361 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2362 && !set->add (e->dest))
2363 worklist.safe_push (e->dest);
2364 }
2365 }
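/* Illustrative sketch, not part of the original source: callers collect
   the hot-reachable blocks into a hash_set and then test membership, as
   find_partition_fixes below does:

     hash_set<basic_block> hot_bbs;
     find_bbs_reachable_by_hot_paths (&hot_bbs);
     if (!hot_bbs.contains (bb))
       ...BB is reachable only through the cold partition...  */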
2366
2367 /* Sanity check partition hotness to ensure that basic blocks in
2368 the cold partition don't dominate basic blocks in the hot partition.
2369 If FLAG_ONLY is true, report violations as errors. Otherwise
2370 re-mark the dominated blocks as cold, since this is run after
2371 cfg optimizations that may make hot blocks previously reached
2372 by both hot and cold blocks now only reachable along cold paths. */
2373
2374 static vec<basic_block>
2375 find_partition_fixes (bool flag_only)
2376 {
2377 basic_block bb;
2378 vec<basic_block> bbs_in_cold_partition = vNULL;
2379 vec<basic_block> bbs_to_fix = vNULL;
2380 hash_set<basic_block> set;
2381
2382 /* Callers check this. */
2383 gcc_checking_assert (crtl->has_bb_partition);
2384
2385 find_bbs_reachable_by_hot_paths (&set);
2386
2387 FOR_EACH_BB_FN (bb, cfun)
2388 if (!set.contains (bb)
2389 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2390 {
2391 if (flag_only)
2392 error ("non-cold basic block %d reachable only "
2393 "by paths crossing the cold partition", bb->index);
2394 else
2395 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2396 bbs_to_fix.safe_push (bb);
2397 bbs_in_cold_partition.safe_push (bb);
2398 }
2399
2400 return bbs_to_fix;
2401 }
2402
2403 /* Perform cleanup on the hot/cold bb partitioning after optimization
2404 passes that modify the cfg. */
2405
2406 void
2407 fixup_partitions (void)
2408 {
2409 basic_block bb;
2410
2411 if (!crtl->has_bb_partition)
2412 return;
2413
2414 /* Delete any blocks that became unreachable and weren't
2415 already cleaned up, for example during edge forwarding
2416 and convert_jumps_to_returns. This will expose more
2417 opportunities for fixing the partition boundaries here.
2418 Also, the calculation of the dominance graph during verification
2419 will assert if there are unreachable nodes. */
2420 delete_unreachable_blocks ();
2421
2422 /* If there are partitions, do a sanity check on them: A basic block in
2423 a cold partition cannot dominate a basic block in a hot partition.
2424 Fix up any that now violate this requirement, as a result of edge
2425 forwarding and unreachable block deletion. */
2426 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2427
2428 /* Do the partition fixup after all necessary blocks have been converted to
2429 cold, so that we only update the region crossings the minimum number of
2430 places, which can require forcing edges to be non fallthru. */
2431 while (! bbs_to_fix.is_empty ())
2432 {
2433 bb = bbs_to_fix.pop ();
2434 fixup_new_cold_bb (bb);
2435 }
2436 }
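/* Illustrative note, not part of the original source: a pass that forwards
   or removes edges while hot/cold partitioning is in effect can simply run
   this afterwards; it returns immediately when there is no partitioning:

     ...redirect or delete edges on the RTL CFG...
     fixup_partitions ();
     checking_verify_flow_info ();  */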
2437
2438 /* Verify, in the basic block chain, that there is at most one switch
2439 between hot/cold partitions. This condition will not be true until
2440 after reorder_basic_blocks is called. */
2441
2442 static int
2443 verify_hot_cold_block_grouping (void)
2444 {
2445 basic_block bb;
2446 int err = 0;
2447 bool switched_sections = false;
2448 int current_partition = BB_UNPARTITIONED;
2449
2450 /* Even after bb reordering is complete, we go into cfglayout mode
2451 again (in compgoto). Ensure we don't call this before going back
2452 into linearized RTL when any layout fixes would have been committed. */
2453 if (!crtl->bb_reorder_complete
2454 || current_ir_type () != IR_RTL_CFGRTL)
2455 return err;
2456
2457 FOR_EACH_BB_FN (bb, cfun)
2458 {
2459 if (current_partition != BB_UNPARTITIONED
2460 && BB_PARTITION (bb) != current_partition)
2461 {
2462 if (switched_sections)
2463 {
2464 error ("multiple hot/cold transitions found (bb %i)",
2465 bb->index);
2466 err = 1;
2467 }
2468 else
2469 switched_sections = true;
2470
2471 if (!crtl->has_bb_partition)
2472 error ("partition found but function partition flag not set");
2473 }
2474 current_partition = BB_PARTITION (bb);
2475 }
2476
2477 return err;
2478 }
2479 \f
2480
2481 /* Perform several checks on the edges out of each block, such as
2482 the consistency of the branch probabilities, the correctness
2483 of hot/cold partition crossing edges, and the number of expected
2484 successor edges. Also verify that the dominance relationship
2485 between hot/cold blocks is sane. */
2486
2487 static int
2488 rtl_verify_edges (void)
2489 {
2490 int err = 0;
2491 basic_block bb;
2492
2493 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2494 {
2495 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2496 int n_eh = 0, n_abnormal = 0;
2497 edge e, fallthru = NULL;
2498 edge_iterator ei;
2499 rtx note;
2500 bool has_crossing_edge = false;
2501
2502 if (JUMP_P (BB_END (bb))
2503 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2504 && EDGE_COUNT (bb->succs) >= 2
2505 && any_condjump_p (BB_END (bb)))
2506 {
2507 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2508 {
2509 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2510 {
2511 error ("verify_flow_info: "
2512 "REG_BR_PROB is set but cfg probability is not");
2513 err = 1;
2514 }
2515 }
2516 else if (XINT (note, 0)
2517 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2518 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2519 {
2520 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2521 XINT (note, 0),
2522 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2523 err = 1;
2524 }
2525 }
2526
2527 FOR_EACH_EDGE (e, ei, bb->succs)
2528 {
2529 bool is_crossing;
2530
2531 if (e->flags & EDGE_FALLTHRU)
2532 n_fallthru++, fallthru = e;
2533
2534 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2535 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2536 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2537 has_crossing_edge |= is_crossing;
2538 if (e->flags & EDGE_CROSSING)
2539 {
2540 if (!is_crossing)
2541 {
2542 error ("EDGE_CROSSING incorrectly set across same section");
2543 err = 1;
2544 }
2545 if (e->flags & EDGE_FALLTHRU)
2546 {
2547 error ("fallthru edge crosses section boundary in bb %i",
2548 e->src->index);
2549 err = 1;
2550 }
2551 if (e->flags & EDGE_EH)
2552 {
2553 error ("EH edge crosses section boundary in bb %i",
2554 e->src->index);
2555 err = 1;
2556 }
2557 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2558 {
2559 error ("No region crossing jump at section boundary in bb %i",
2560 bb->index);
2561 err = 1;
2562 }
2563 }
2564 else if (is_crossing)
2565 {
2566 error ("EDGE_CROSSING missing across section boundary");
2567 err = 1;
2568 }
2569
2570 if ((e->flags & ~(EDGE_DFS_BACK
2571 | EDGE_CAN_FALLTHRU
2572 | EDGE_IRREDUCIBLE_LOOP
2573 | EDGE_LOOP_EXIT
2574 | EDGE_CROSSING
2575 | EDGE_PRESERVE)) == 0)
2576 n_branch++;
2577
2578 if (e->flags & EDGE_ABNORMAL_CALL)
2579 n_abnormal_call++;
2580
2581 if (e->flags & EDGE_SIBCALL)
2582 n_sibcall++;
2583
2584 if (e->flags & EDGE_EH)
2585 n_eh++;
2586
2587 if (e->flags & EDGE_ABNORMAL)
2588 n_abnormal++;
2589 }
2590
2591 if (!has_crossing_edge
2592 && JUMP_P (BB_END (bb))
2593 && CROSSING_JUMP_P (BB_END (bb)))
2594 {
2595 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2596 error ("Region crossing jump across same section in bb %i",
2597 bb->index);
2598 err = 1;
2599 }
2600
2601 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2602 {
2603 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2604 err = 1;
2605 }
2606 if (n_eh > 1)
2607 {
2608 error ("too many exception handling edges in bb %i", bb->index);
2609 err = 1;
2610 }
2611 if (n_branch
2612 && (!JUMP_P (BB_END (bb))
2613 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2614 || any_condjump_p (BB_END (bb))))))
2615 {
2616 error ("too many outgoing branch edges from bb %i", bb->index);
2617 err = 1;
2618 }
2619 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2620 {
2621 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2622 err = 1;
2623 }
2624 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2625 {
2626 error ("wrong number of branch edges after unconditional jump"
2627 " in bb %i", bb->index);
2628 err = 1;
2629 }
2630 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2631 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2632 {
2633 error ("wrong amount of branch edges after conditional jump"
2634 " in bb %i", bb->index);
2635 err = 1;
2636 }
2637 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2638 {
2639 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2640 err = 1;
2641 }
2642 if (n_sibcall && !CALL_P (BB_END (bb)))
2643 {
2644 error ("sibcall edges for non-call insn in bb %i", bb->index);
2645 err = 1;
2646 }
2647 if (n_abnormal > n_eh
2648 && !(CALL_P (BB_END (bb))
2649 && n_abnormal == n_abnormal_call + n_sibcall)
2650 && (!JUMP_P (BB_END (bb))
2651 || any_condjump_p (BB_END (bb))
2652 || any_uncondjump_p (BB_END (bb))))
2653 {
2654 error ("abnormal edges for no purpose in bb %i", bb->index);
2655 err = 1;
2656 }
2657
2658 int has_eh = -1;
2659 FOR_EACH_EDGE (e, ei, bb->preds)
2660 {
2661 if (has_eh == -1)
2662 has_eh = (e->flags & EDGE_EH);
2663 if ((e->flags & EDGE_EH) == has_eh)
2664 continue;
2665 error ("EH incoming edge mixed with non-EH incoming edges "
2666 "in bb %i", bb->index);
2667 err = 1;
2668 break;
2669 }
2670 }
2671
2672 /* If there are partitions, do a sanity check on them: A basic block in
2673 a cold partition cannot dominate a basic block in a hot partition. */
2674 if (crtl->has_bb_partition && !err
2675 && current_ir_type () == IR_RTL_CFGLAYOUT)
2676 {
2677 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2678 err = !bbs_to_fix.is_empty ();
2679 }
2680
2681 /* Clean up. */
2682 return err;
2683 }
2684
2685 /* Checks on the instructions within blocks. Currently checks that each
2686 block starts with a basic block note, and that basic block notes and
2687 control flow jumps are not found in the middle of the block. */
2688
2689 static int
2690 rtl_verify_bb_insns (void)
2691 {
2692 rtx_insn *x;
2693 int err = 0;
2694 basic_block bb;
2695
2696 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2697 {
2698 /* Now check the header of the basic block. It ought to contain an
2699 optional CODE_LABEL followed by NOTE_BASIC_BLOCK. */
2701 x = BB_HEAD (bb);
2702 if (LABEL_P (x))
2703 {
2704 if (BB_END (bb) == x)
2705 {
2706 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2707 bb->index);
2708 err = 1;
2709 }
2710
2711 x = NEXT_INSN (x);
2712 }
2713
2714 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2715 {
2716 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2717 bb->index);
2718 err = 1;
2719 }
2720
2721 if (BB_END (bb) == x)
2722 /* Do checks for empty blocks here. */
2723 ;
2724 else
2725 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2726 {
2727 if (NOTE_INSN_BASIC_BLOCK_P (x))
2728 {
2729 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2730 INSN_UID (x), bb->index);
2731 err = 1;
2732 }
2733
2734 if (x == BB_END (bb))
2735 break;
2736
2737 if (control_flow_insn_p (x))
2738 {
2739 error ("in basic block %d:", bb->index);
2740 fatal_insn ("flow control insn inside a basic block", x);
2741 }
2742 }
2743 }
2744
2745 /* Clean up. */
2746 return err;
2747 }
2748
2749 /* Verify that block pointers for instructions in basic blocks, headers and
2750 footers are set appropriately. */
2751
2752 static int
2753 rtl_verify_bb_pointers (void)
2754 {
2755 int err = 0;
2756 basic_block bb;
2757
2758 /* Check the general integrity of the basic blocks. */
2759 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2760 {
2761 rtx_insn *insn;
2762
2763 if (!(bb->flags & BB_RTL))
2764 {
2765 error ("BB_RTL flag not set for block %d", bb->index);
2766 err = 1;
2767 }
2768
2769 FOR_BB_INSNS (bb, insn)
2770 if (BLOCK_FOR_INSN (insn) != bb)
2771 {
2772 error ("insn %d basic block pointer is %d, should be %d",
2773 INSN_UID (insn),
2774 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2775 bb->index);
2776 err = 1;
2777 }
2778
2779 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2780 if (!BARRIER_P (insn)
2781 && BLOCK_FOR_INSN (insn) != NULL)
2782 {
2783 error ("insn %d in header of bb %d has non-NULL basic block",
2784 INSN_UID (insn), bb->index);
2785 err = 1;
2786 }
2787 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2788 if (!BARRIER_P (insn)
2789 && BLOCK_FOR_INSN (insn) != NULL)
2790 {
2791 error ("insn %d in footer of bb %d has non-NULL basic block",
2792 INSN_UID (insn), bb->index);
2793 err = 1;
2794 }
2795 }
2796
2797 /* Clean up. */
2798 return err;
2799 }
2800
2801 /* Verify the CFG and RTL consistency common for both underlying RTL and
2802 cfglayout RTL.
2803
2804 Currently it does following checks:
2805
2806 - overlapping of basic blocks
2807 - insns with wrong BLOCK_FOR_INSN pointers
2808 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2809 - tails of basic blocks (ensure that boundary is necessary)
2810 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2811 and NOTE_INSN_BASIC_BLOCK
2812 - verify that no fall_thru edge crosses hot/cold partition boundaries
2813 - verify that there are no pending RTL branch predictions
2814 - verify that hot blocks are not dominated by cold blocks
2815
2816 In the future it can be extended to check a lot of other stuff as well
2817 (reachability of basic blocks, life information, etc. etc.). */
2818
2819 static int
2820 rtl_verify_flow_info_1 (void)
2821 {
2822 int err = 0;
2823
2824 err |= rtl_verify_bb_pointers ();
2825
2826 err |= rtl_verify_bb_insns ();
2827
2828 err |= rtl_verify_edges ();
2829
2830 return err;
2831 }
2832
2833 /* Walk the instruction chain and verify that bb head/end pointers
2834 are correct, and that instructions are in exactly one bb and have
2835 correct block pointers. */
2836
2837 static int
2838 rtl_verify_bb_insn_chain (void)
2839 {
2840 basic_block bb;
2841 int err = 0;
2842 rtx_insn *x;
2843 rtx_insn *last_head = get_last_insn ();
2844 basic_block *bb_info;
2845 const int max_uid = get_max_uid ();
2846
2847 bb_info = XCNEWVEC (basic_block, max_uid);
2848
2849 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2850 {
2851 rtx_insn *head = BB_HEAD (bb);
2852 rtx_insn *end = BB_END (bb);
2853
2854 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2855 {
2856 /* Verify the end of the basic block is in the INSN chain. */
2857 if (x == end)
2858 break;
2859
2860 /* And that the code outside of basic blocks has NULL bb field. */
2861 if (!BARRIER_P (x)
2862 && BLOCK_FOR_INSN (x) != NULL)
2863 {
2864 error ("insn %d outside of basic blocks has non-NULL bb field",
2865 INSN_UID (x));
2866 err = 1;
2867 }
2868 }
2869
2870 if (!x)
2871 {
2872 error ("end insn %d for block %d not found in the insn stream",
2873 INSN_UID (end), bb->index);
2874 err = 1;
2875 }
2876
2877 /* Work backwards from the end to the head of the basic block
2878 to verify the head is in the RTL chain. */
2879 for (; x != NULL_RTX; x = PREV_INSN (x))
2880 {
2881 /* While walking over the insn chain, verify insns appear
2882 in only one basic block. */
2883 if (bb_info[INSN_UID (x)] != NULL)
2884 {
2885 error ("insn %d is in multiple basic blocks (%d and %d)",
2886 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2887 err = 1;
2888 }
2889
2890 bb_info[INSN_UID (x)] = bb;
2891
2892 if (x == head)
2893 break;
2894 }
2895 if (!x)
2896 {
2897 error ("head insn %d for block %d not found in the insn stream",
2898 INSN_UID (head), bb->index);
2899 err = 1;
2900 }
2901
2902 last_head = PREV_INSN (x);
2903 }
2904
2905 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2906 {
2907 /* Check that the code before the first basic block has NULL
2908 bb field. */
2909 if (!BARRIER_P (x)
2910 && BLOCK_FOR_INSN (x) != NULL)
2911 {
2912 error ("insn %d outside of basic blocks has non-NULL bb field",
2913 INSN_UID (x));
2914 err = 1;
2915 }
2916 }
2917 free (bb_info);
2918
2919 return err;
2920 }
2921
2922 /* Verify that fallthru edges point to adjacent blocks in layout order and
2923 that barriers exist after non-fallthru blocks. */
2924
2925 static int
2926 rtl_verify_fallthru (void)
2927 {
2928 basic_block bb;
2929 int err = 0;
2930
2931 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2932 {
2933 edge e;
2934
2935 e = find_fallthru_edge (bb->succs);
2936 if (!e)
2937 {
2938 rtx_insn *insn;
2939
2940 /* Ensure existence of barrier in BB with no fallthru edges. */
2941 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2942 {
2943 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2944 {
2945 error ("missing barrier after block %i", bb->index);
2946 err = 1;
2947 break;
2948 }
2949 if (BARRIER_P (insn))
2950 break;
2951 }
2952 }
2953 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2954 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2955 {
2956 rtx_insn *insn;
2957
2958 if (e->src->next_bb != e->dest)
2959 {
2960 error
2961 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2962 e->src->index, e->dest->index);
2963 err = 1;
2964 }
2965 else
2966 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2967 insn = NEXT_INSN (insn))
2968 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
2969 {
2970 error ("verify_flow_info: Incorrect fallthru %i->%i",
2971 e->src->index, e->dest->index);
2972 fatal_insn ("wrong insn in the fallthru edge", insn);
2973 err = 1;
2974 }
2975 }
2976 }
2977
2978 return err;
2979 }
2980
2981 /* Verify that blocks are laid out in consecutive order. While walking the
2982 instructions, verify that all expected instructions are inside the basic
2983 blocks, and that all returns are followed by barriers. */
2984
2985 static int
2986 rtl_verify_bb_layout (void)
2987 {
2988 basic_block bb;
2989 int err = 0;
2990 rtx_insn *x, *y;
2991 int num_bb_notes;
2992 rtx_insn * const rtx_first = get_insns ();
2993 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2994
2995 num_bb_notes = 0;
2996
2997 for (x = rtx_first; x; x = NEXT_INSN (x))
2998 {
2999 if (NOTE_INSN_BASIC_BLOCK_P (x))
3000 {
3001 bb = NOTE_BASIC_BLOCK (x);
3002
3003 num_bb_notes++;
3004 if (bb != last_bb_seen->next_bb)
3005 internal_error ("basic blocks not laid down consecutively");
3006
3007 curr_bb = last_bb_seen = bb;
3008 }
3009
3010 if (!curr_bb)
3011 {
3012 switch (GET_CODE (x))
3013 {
3014 case BARRIER:
3015 case NOTE:
3016 break;
3017
3018 case CODE_LABEL:
3019 /* An ADDR_VEC is placed outside any basic block. */
3020 if (NEXT_INSN (x)
3021 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3022 x = NEXT_INSN (x);
3023
3024 /* But in any case, non-deletable labels can appear anywhere. */
3025 break;
3026
3027 default:
3028 fatal_insn ("insn outside basic block", x);
3029 }
3030 }
3031
3032 if (JUMP_P (x)
3033 && returnjump_p (x) && ! condjump_p (x)
3034 && ! ((y = next_nonnote_nondebug_insn (x))
3035 && BARRIER_P (y)))
3036 fatal_insn ("return not followed by barrier", x);
3037
3038 if (curr_bb && x == BB_END (curr_bb))
3039 curr_bb = NULL;
3040 }
3041
3042 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3043 internal_error
3044 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3045 num_bb_notes, n_basic_blocks_for_fn (cfun));
3046
3047 return err;
3048 }
3049
3050 /* Verify the CFG and RTL consistency common for both underlying RTL and
3051 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3052
3053 Currently it does following checks:
3054 - all checks of rtl_verify_flow_info_1
3055 - test head/end pointers
3056 - check that blocks are laid out in consecutive order
3057 - check that all insns are in the basic blocks
3058 (except the switch handling code, barriers and notes)
3059 - check that all returns are followed by barriers
3060 - check that all fallthru edges point to adjacent blocks
3061 - verify that there is a single hot/cold partition boundary after bbro */
3062
3063 static int
3064 rtl_verify_flow_info (void)
3065 {
3066 int err = 0;
3067
3068 err |= rtl_verify_flow_info_1 ();
3069
3070 err |= rtl_verify_bb_insn_chain ();
3071
3072 err |= rtl_verify_fallthru ();
3073
3074 err |= rtl_verify_bb_layout ();
3075
3076 err |= verify_hot_cold_block_grouping ();
3077
3078 return err;
3079 }
3080 \f
3081 /* Assume that the preceding pass has possibly eliminated jump instructions
3082 or converted the unconditional jumps. Eliminate the edges from CFG.
3083 Return true if any edges are eliminated. */
3084
3085 bool
3086 purge_dead_edges (basic_block bb)
3087 {
3088 edge e;
3089 rtx_insn *insn = BB_END (bb);
3090 rtx note;
3091 bool purged = false;
3092 bool found;
3093 edge_iterator ei;
3094
3095 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3096 do
3097 insn = PREV_INSN (insn);
3098 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3099
3100 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3101 if (NONJUMP_INSN_P (insn)
3102 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3103 {
3104 rtx eqnote;
3105
3106 if (! may_trap_p (PATTERN (insn))
3107 || ((eqnote = find_reg_equal_equiv_note (insn))
3108 && ! may_trap_p (XEXP (eqnote, 0))))
3109 remove_note (insn, note);
3110 }
3111
3112 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3113 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3114 {
3115 bool remove = false;
3116
3117 /* There are three types of edges we need to handle correctly here: EH
3118 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3119 latter can appear when nonlocal gotos are used. */
3120 if (e->flags & EDGE_ABNORMAL_CALL)
3121 {
3122 if (!CALL_P (insn))
3123 remove = true;
3124 else if (can_nonlocal_goto (insn))
3125 ;
3126 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3127 ;
3128 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3129 ;
3130 else
3131 remove = true;
3132 }
3133 else if (e->flags & EDGE_EH)
3134 remove = !can_throw_internal (insn);
3135
3136 if (remove)
3137 {
3138 remove_edge (e);
3139 df_set_bb_dirty (bb);
3140 purged = true;
3141 }
3142 else
3143 ei_next (&ei);
3144 }
3145
3146 if (JUMP_P (insn))
3147 {
3148 rtx note;
3149 edge b,f;
3150 edge_iterator ei;
3151
3152 /* We care only about conditional jumps, return jumps and simplejumps. */
3153 if (!any_condjump_p (insn)
3154 && !returnjump_p (insn)
3155 && !simplejump_p (insn))
3156 return purged;
3157
3158 /* Branch probability/prediction notes are defined only for
3159 condjumps. We've possibly turned condjump into simplejump. */
3160 if (simplejump_p (insn))
3161 {
3162 note = find_reg_note (insn, REG_BR_PROB, NULL);
3163 if (note)
3164 remove_note (insn, note);
3165 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3166 remove_note (insn, note);
3167 }
3168
3169 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3170 {
3171 /* Avoid letting abnormal flags leak from computed jumps turned
3172 into simplejumps. */
3173
3174 e->flags &= ~EDGE_ABNORMAL;
3175
3176 /* See if this edge is one we should keep. */
3177 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3178 /* A conditional jump can fall through into the next
3179 block, so we should keep the edge. */
3180 {
3181 ei_next (&ei);
3182 continue;
3183 }
3184 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3185 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3186 /* If the destination block is the target of the jump,
3187 keep the edge. */
3188 {
3189 ei_next (&ei);
3190 continue;
3191 }
3192 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3193 && returnjump_p (insn))
3194 /* If the destination block is the exit block, and this
3195 instruction is a return, then keep the edge. */
3196 {
3197 ei_next (&ei);
3198 continue;
3199 }
3200 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3201 /* Keep the edges that correspond to exceptions thrown by
3202 this instruction and rematerialize the EDGE_ABNORMAL
3203 flag we just cleared above. */
3204 {
3205 e->flags |= EDGE_ABNORMAL;
3206 ei_next (&ei);
3207 continue;
3208 }
3209
3210 /* We do not need this edge. */
3211 df_set_bb_dirty (bb);
3212 purged = true;
3213 remove_edge (e);
3214 }
3215
3216 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3217 return purged;
3218
3219 if (dump_file)
3220 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3221
3222 if (!optimize)
3223 return purged;
3224
3225 /* Redistribute probabilities. */
3226 if (single_succ_p (bb))
3227 {
3228 single_succ_edge (bb)->probability = profile_probability::always ();
3229 }
3230 else
3231 {
3232 note = find_reg_note (insn, REG_BR_PROB, NULL);
3233 if (!note)
3234 return purged;
3235
3236 b = BRANCH_EDGE (bb);
3237 f = FALLTHRU_EDGE (bb);
3238 b->probability = profile_probability::from_reg_br_prob_note
3239 (XINT (note, 0));
3240 f->probability = b->probability.invert ();
3241 }
3242
3243 return purged;
3244 }
3245 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3246 {
3247 /* First, there should not be any EH or ABCALL edges resulting
3248 from non-local gotos and the like. If there were, we shouldn't
3249 have created the sibcall in the first place. Second, there
3250 should of course never have been a fallthru edge. */
3251 gcc_assert (single_succ_p (bb));
3252 gcc_assert (single_succ_edge (bb)->flags
3253 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3254
3255 return 0;
3256 }
3257
3258 /* If we don't see a jump insn, we don't know exactly why the block would
3259 have been broken at this point. Look for a simple, non-fallthru edge,
3260 as these are only created by conditional branches. If we find such an
3261 edge we know that there used to be a jump here and can then safely
3262 remove all non-fallthru edges. */
3263 found = false;
3264 FOR_EACH_EDGE (e, ei, bb->succs)
3265 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3266 {
3267 found = true;
3268 break;
3269 }
3270
3271 if (!found)
3272 return purged;
3273
3274 /* Remove all but the fake and fallthru edges. The fake edge may be
3275 the only successor for this block in the case of noreturn
3276 calls. */
3277 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3278 {
3279 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3280 {
3281 df_set_bb_dirty (bb);
3282 remove_edge (e);
3283 purged = true;
3284 }
3285 else
3286 ei_next (&ei);
3287 }
3288
3289 gcc_assert (single_succ_p (bb));
3290
3291 single_succ_edge (bb)->probability = profile_probability::always ();
3292
3293 if (dump_file)
3294 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3295 bb->index);
3296 return purged;
3297 }
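/* Illustrative sketch, not part of the original source: the usual caller
   has just simplified the control flow insn at the end of BB, e.g. turned
   a conditional jump into an unconditional one, and then lets this
   function remove the outgoing edges that no longer match the insn:

     ...simplify or delete the jump at BB_END (bb)...
     if (purge_dead_edges (bb))
       ...some edges were removed; CFG-dependent info may need updating...  */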
3298
3299 /* Search all basic blocks for potentially dead edges and purge them. Return
3300 true if some edge has been eliminated. */
3301
3302 bool
3303 purge_all_dead_edges (void)
3304 {
3305 int purged = false;
3306 basic_block bb;
3307
3308 FOR_EACH_BB_FN (bb, cfun)
3309 {
3310 bool purged_here = purge_dead_edges (bb);
3311
3312 purged |= purged_here;
3313 }
3314
3315 return purged;
3316 }
3317
3318 /* This is used by a few passes that emit some instructions after abnormal
3319 calls, moving the basic block's end, while they in fact do want to emit
3320 them on the fallthru edge. Look for abnormal call edges, search backward
3321 for the call in the block and insert the instructions on the edge instead.
3322
3323 Similarly, handle instructions throwing exceptions internally.
3324
3325 Return true when instructions have been found and inserted on edges. */
3326
3327 bool
3328 fixup_abnormal_edges (void)
3329 {
3330 bool inserted = false;
3331 basic_block bb;
3332
3333 FOR_EACH_BB_FN (bb, cfun)
3334 {
3335 edge e;
3336 edge_iterator ei;
3337
3338 /* Look for cases we are interested in - calls or instructions causing
3339 exceptions. */
3340 FOR_EACH_EDGE (e, ei, bb->succs)
3341 if ((e->flags & EDGE_ABNORMAL_CALL)
3342 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3343 == (EDGE_ABNORMAL | EDGE_EH)))
3344 break;
3345
3346 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3347 {
3348 rtx_insn *insn;
3349
3350 /* Get past the new insns generated. Allow notes, as the insns
3351 may already have been deleted. */
3352 insn = BB_END (bb);
3353 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3354 && !can_throw_internal (insn)
3355 && insn != BB_HEAD (bb))
3356 insn = PREV_INSN (insn);
3357
3358 if (CALL_P (insn) || can_throw_internal (insn))
3359 {
3360 rtx_insn *stop, *next;
3361
3362 e = find_fallthru_edge (bb->succs);
3363
3364 stop = NEXT_INSN (BB_END (bb));
3365 BB_END (bb) = insn;
3366
3367 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3368 {
3369 next = NEXT_INSN (insn);
3370 if (INSN_P (insn))
3371 {
3372 delete_insn (insn);
3373
3374 /* Sometimes there's still the return value USE.
3375 If it's placed after a trapping call (i.e. that
3376 call is the last insn anyway), we have no fallthru
3377 edge. Simply delete this use and don't try to insert
3378 on the non-existent edge.
3379 Similarly, sometimes a call that can throw is
3380 followed in the source with __builtin_unreachable (),
3381 meaning that there is UB if the call returns rather
3382 than throws. If there weren't any instructions
3383 following such calls before, supposedly even the ones
3384 we've deleted aren't significant and can be
3385 removed. */
3386 if (e)
3387 {
3388 /* We're not deleting it, we're moving it. */
3389 insn->set_undeleted ();
3390 SET_PREV_INSN (insn) = NULL_RTX;
3391 SET_NEXT_INSN (insn) = NULL_RTX;
3392
3393 insert_insn_on_edge (insn, e);
3394 inserted = true;
3395 }
3396 }
3397 else if (!BARRIER_P (insn))
3398 set_block_for_insn (insn, NULL);
3399 }
3400 }
3401
3402 /* It may be that we don't find any trapping insn. In this
3403 case we discovered quite late that the insn that had been
3404 marked as can_throw_internal in fact couldn't trap at all.
3405 So we should in fact delete the EH edges out of the block. */
3406 else
3407 purge_dead_edges (bb);
3408 }
3409 }
3410
3411 return inserted;
3412 }
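/* Illustrative note, not part of the original source: since the rescued
   instructions end up queued on edges, a caller of this function must
   flush them afterwards, along the lines of:

     if (fixup_abnormal_edges ())
       commit_edge_insertions ();  */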
3413 \f
3414 /* Cut the insns from FIRST to LAST out of the insns stream. */
3415
3416 rtx_insn *
3417 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3418 {
3419 rtx_insn *prevfirst = PREV_INSN (first);
3420 rtx_insn *nextlast = NEXT_INSN (last);
3421
3422 SET_PREV_INSN (first) = NULL;
3423 SET_NEXT_INSN (last) = NULL;
3424 if (prevfirst)
3425 SET_NEXT_INSN (prevfirst) = nextlast;
3426 if (nextlast)
3427 SET_PREV_INSN (nextlast) = prevfirst;
3428 else
3429 set_last_insn (prevfirst);
3430 if (!prevfirst)
3431 set_first_insn (nextlast);
3432 return first;
3433 }
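/* Illustrative sketch, not part of the original source: this is the
   primitive cfglayout mode uses to pull a run of insns out of the main
   stream, e.g. to stash the insns trailing a block as its footer, as
   record_effective_endpoints below does:

     BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);  */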
3434 \f
3435 /* Skip over inter-block insns occurring after BB which are typically
3436 associated with BB (e.g., barriers). If there are any such insns,
3437 we return the last one. Otherwise, we return the end of BB. */
3438
3439 static rtx_insn *
3440 skip_insns_after_block (basic_block bb)
3441 {
3442 rtx_insn *insn, *last_insn, *next_head, *prev;
3443
3444 next_head = NULL;
3445 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3446 next_head = BB_HEAD (bb->next_bb);
3447
3448 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3449 {
3450 if (insn == next_head)
3451 break;
3452
3453 switch (GET_CODE (insn))
3454 {
3455 case BARRIER:
3456 last_insn = insn;
3457 continue;
3458
3459 case NOTE:
3460 switch (NOTE_KIND (insn))
3461 {
3462 case NOTE_INSN_BLOCK_END:
3463 gcc_unreachable ();
3464 continue;
3465 default:
3466 continue;
3467 break;
3468 }
3469 break;
3470
3471 case CODE_LABEL:
3472 if (NEXT_INSN (insn)
3473 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3474 {
3475 insn = NEXT_INSN (insn);
3476 last_insn = insn;
3477 continue;
3478 }
3479 break;
3480
3481 default:
3482 break;
3483 }
3484
3485 break;
3486 }
3487
3488 /* It is possible to hit a contradictory sequence. For instance:
3489
3490 jump_insn
3491 NOTE_INSN_BLOCK_BEG
3492 barrier
3493
3494 where the barrier belongs to the jump_insn, but the note does not. This can be
3495 created by removing the basic block originally following
3496 NOTE_INSN_BLOCK_BEG. In such a case reorder the notes. */
3497
3498 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3499 {
3500 prev = PREV_INSN (insn);
3501 if (NOTE_P (insn))
3502 switch (NOTE_KIND (insn))
3503 {
3504 case NOTE_INSN_BLOCK_END:
3505 gcc_unreachable ();
3506 break;
3507 case NOTE_INSN_DELETED:
3508 case NOTE_INSN_DELETED_LABEL:
3509 case NOTE_INSN_DELETED_DEBUG_LABEL:
3510 continue;
3511 default:
3512 reorder_insns (insn, insn, last_insn);
3513 }
3514 }
3515
3516 return last_insn;
3517 }
3518
3519 /* Locate or create a label for a given basic block. */
3520
3521 static rtx_insn *
3522 label_for_bb (basic_block bb)
3523 {
3524 rtx_insn *label = BB_HEAD (bb);
3525
3526 if (!LABEL_P (label))
3527 {
3528 if (dump_file)
3529 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3530
3531 label = block_label (bb);
3532 }
3533
3534 return label;
3535 }
3536
3537 /* Locate the effective beginning and end of the insn chain for each
3538 block, as defined by skip_insns_after_block above. */
3539
3540 static void
3541 record_effective_endpoints (void)
3542 {
3543 rtx_insn *next_insn;
3544 basic_block bb;
3545 rtx_insn *insn;
3546
3547 for (insn = get_insns ();
3548 insn
3549 && NOTE_P (insn)
3550 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3551 insn = NEXT_INSN (insn))
3552 continue;
3553 /* No basic blocks at all? */
3554 gcc_assert (insn);
3555
3556 if (PREV_INSN (insn))
3557 cfg_layout_function_header =
3558 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3559 else
3560 cfg_layout_function_header = NULL;
3561
3562 next_insn = get_insns ();
3563 FOR_EACH_BB_FN (bb, cfun)
3564 {
3565 rtx_insn *end;
3566
3567 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3568 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3569 PREV_INSN (BB_HEAD (bb)));
3570 end = skip_insns_after_block (bb);
3571 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3572 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3573 next_insn = NEXT_INSN (BB_END (bb));
3574 }
3575
3576 cfg_layout_function_footer = next_insn;
3577 if (cfg_layout_function_footer)
3578 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3579 }
3580 \f
3581 namespace {
3582
3583 const pass_data pass_data_into_cfg_layout_mode =
3584 {
3585 RTL_PASS, /* type */
3586 "into_cfglayout", /* name */
3587 OPTGROUP_NONE, /* optinfo_flags */
3588 TV_CFG, /* tv_id */
3589 0, /* properties_required */
3590 PROP_cfglayout, /* properties_provided */
3591 0, /* properties_destroyed */
3592 0, /* todo_flags_start */
3593 0, /* todo_flags_finish */
3594 };
3595
3596 class pass_into_cfg_layout_mode : public rtl_opt_pass
3597 {
3598 public:
3599 pass_into_cfg_layout_mode (gcc::context *ctxt)
3600 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3601 {}
3602
3603 /* opt_pass methods: */
3604 virtual unsigned int execute (function *)
3605 {
3606 cfg_layout_initialize (0);
3607 return 0;
3608 }
3609
3610 }; // class pass_into_cfg_layout_mode
3611
3612 } // anon namespace
3613
3614 rtl_opt_pass *
3615 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3616 {
3617 return new pass_into_cfg_layout_mode (ctxt);
3618 }
3619
3620 namespace {
3621
3622 const pass_data pass_data_outof_cfg_layout_mode =
3623 {
3624 RTL_PASS, /* type */
3625 "outof_cfglayout", /* name */
3626 OPTGROUP_NONE, /* optinfo_flags */
3627 TV_CFG, /* tv_id */
3628 0, /* properties_required */
3629 0, /* properties_provided */
3630 PROP_cfglayout, /* properties_destroyed */
3631 0, /* todo_flags_start */
3632 0, /* todo_flags_finish */
3633 };
3634
3635 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3636 {
3637 public:
3638 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3639 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3640 {}
3641
3642 /* opt_pass methods: */
3643 virtual unsigned int execute (function *);
3644
3645 }; // class pass_outof_cfg_layout_mode
3646
3647 unsigned int
3648 pass_outof_cfg_layout_mode::execute (function *fun)
3649 {
3650 basic_block bb;
3651
3652 FOR_EACH_BB_FN (bb, fun)
3653 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3654 bb->aux = bb->next_bb;
3655
3656 cfg_layout_finalize ();
3657
3658 return 0;
3659 }
3660
3661 } // anon namespace
3662
3663 rtl_opt_pass *
3664 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3665 {
3666 return new pass_outof_cfg_layout_mode (ctxt);
3667 }
3668 \f
3669
3670 /* Link the basic blocks in the correct order, compacting the basic
3671 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3672 function also clears the basic block header and footer fields.
3673
3674 This function is usually called after a pass (e.g. tracer) finishes
3675 some transformations while in cfglayout mode. The required sequence
3676 of the basic blocks is in a linked list along the bb->aux field.
3677 This function re-links the basic block prev_bb and next_bb pointers
3678 accordingly, and it compacts and renumbers the blocks.
3679
3680 FIXME: This currently works only for RTL, but the only RTL-specific
3681 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3682 to GIMPLE a long time ago, but it doesn't relink the basic block
3683 chain. It could do that (to give better initial RTL) if this function
3684 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3685
3686 void
3687 relink_block_chain (bool stay_in_cfglayout_mode)
3688 {
3689 basic_block bb, prev_bb;
3690 int index;
3691
3692 /* Maybe dump the re-ordered sequence. */
3693 if (dump_file)
3694 {
3695 fprintf (dump_file, "Reordered sequence:\n");
3696 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3697 NUM_FIXED_BLOCKS;
3698 bb;
3699 bb = (basic_block) bb->aux, index++)
3700 {
3701 fprintf (dump_file, " %i ", index);
3702 if (get_bb_original (bb))
3703 fprintf (dump_file, "duplicate of %i\n",
3704 get_bb_original (bb)->index);
3705 else if (forwarder_block_p (bb)
3706 && !LABEL_P (BB_HEAD (bb)))
3707 fprintf (dump_file, "compensation\n");
3708 else
3709 fprintf (dump_file, "bb %i\n", bb->index);
3710 }
3711 }
3712
3713 /* Now reorder the blocks. */
3714 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3715 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3716 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3717 {
3718 bb->prev_bb = prev_bb;
3719 prev_bb->next_bb = bb;
3720 }
3721 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3722 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3723
3724 /* Then, clean up the aux fields. */
3725 FOR_ALL_BB_FN (bb, cfun)
3726 {
3727 bb->aux = NULL;
3728 if (!stay_in_cfglayout_mode)
3729 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3730 }
3731
3732 /* Maybe reset the original copy tables, they are not valid anymore
3733 when we renumber the basic blocks in compact_blocks. If we
3734 are going out of cfglayout mode, don't re-allocate the tables.
3735 if (original_copy_tables_initialized_p ())
3736 free_original_copy_tables ();
3737 if (stay_in_cfglayout_mode)
3738 initialize_original_copy_tables ();
3739
3740 /* Finally, put basic_block_info in the new order. */
3741 compact_blocks ();
3742 }
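/* Illustrative sketch, not part of the original source: a cfglayout-mode
   pass describes its desired block order by threading bb->aux and then
   lets this function (or cfg_layout_finalize) rewrite the prev_bb/next_bb
   links.  For instance, to keep the existing order while staying in
   cfglayout mode (passing true keeps the copy tables allocated):

     basic_block bb;
     FOR_EACH_BB_FN (bb, cfun)
       if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
         bb->aux = bb->next_bb;
     relink_block_chain (true);

   pass_outof_cfg_layout_mode above builds the same aux chain before
   calling cfg_layout_finalize.  */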
3743 \f
3744
3745 /* Given a reorder chain, rearrange the code to match. */
3746
3747 static void
3748 fixup_reorder_chain (void)
3749 {
3750 basic_block bb;
3751 rtx_insn *insn = NULL;
3752
3753 if (cfg_layout_function_header)
3754 {
3755 set_first_insn (cfg_layout_function_header);
3756 insn = cfg_layout_function_header;
3757 while (NEXT_INSN (insn))
3758 insn = NEXT_INSN (insn);
3759 }
3760
3761 /* First do the bulk reordering -- rechain the blocks without regard to
3762 the needed changes to jumps and labels. */
3763
3764 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3765 bb->aux)
3766 {
3767 if (BB_HEADER (bb))
3768 {
3769 if (insn)
3770 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3771 else
3772 set_first_insn (BB_HEADER (bb));
3773 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3774 insn = BB_HEADER (bb);
3775 while (NEXT_INSN (insn))
3776 insn = NEXT_INSN (insn);
3777 }
3778 if (insn)
3779 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3780 else
3781 set_first_insn (BB_HEAD (bb));
3782 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3783 insn = BB_END (bb);
3784 if (BB_FOOTER (bb))
3785 {
3786 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3787 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3788 while (NEXT_INSN (insn))
3789 insn = NEXT_INSN (insn);
3790 }
3791 }
3792
3793 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3794 if (cfg_layout_function_footer)
3795 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3796
3797 while (NEXT_INSN (insn))
3798 insn = NEXT_INSN (insn);
3799
3800 set_last_insn (insn);
3801 if (flag_checking)
3802 verify_insn_chain ();
3803
3804 /* Now add jumps and labels as needed to match the blocks' new
3805 outgoing edges. */
3806
3807 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3808 bb->aux)
3809 {
3810 edge e_fall, e_taken, e;
3811 rtx_insn *bb_end_insn;
3812 rtx ret_label = NULL_RTX;
3813 basic_block nb;
3814 edge_iterator ei;
3815
3816 if (EDGE_COUNT (bb->succs) == 0)
3817 continue;
3818
3819 /* Find the old fallthru edge, and another non-EH edge for
3820 a taken jump. */
3821 e_taken = e_fall = NULL;
3822
3823 FOR_EACH_EDGE (e, ei, bb->succs)
3824 if (e->flags & EDGE_FALLTHRU)
3825 e_fall = e;
3826 else if (! (e->flags & EDGE_EH))
3827 e_taken = e;
3828
3829 bb_end_insn = BB_END (bb);
3830 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3831 {
3832 ret_label = JUMP_LABEL (bb_end_jump);
3833 if (any_condjump_p (bb_end_jump))
3834 {
3835 /* This might happen if the conditional jump has side
3836 effects and could therefore not be optimized away.
3837 Make the basic block end with a barrier in order
3838 to prevent rtl_verify_flow_info from complaining. */
3839 if (!e_fall)
3840 {
3841 gcc_assert (!onlyjump_p (bb_end_jump)
3842 || returnjump_p (bb_end_jump)
3843 || (e_taken->flags & EDGE_CROSSING));
3844 emit_barrier_after (bb_end_jump);
3845 continue;
3846 }
3847
3848 /* If the old fallthru is still next, nothing to do. */
3849 if (bb->aux == e_fall->dest
3850 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3851 continue;
3852
3853 /* The degenerate case of a conditional jump jumping to the next
3854 instruction can happen for jumps with side effects. We need
3855 to construct a forwarder block, and this will be done just
3856 fine by force_nonfallthru below. */
3857 if (!e_taken)
3858 ;
3859
3860 /* There is another special case: if *neither* block is next,
3861 such as happens at the very end of a function, then we'll
3862 need to add a new unconditional jump. Choose the taken
3863 edge based on known or assumed probability. */
3864 else if (bb->aux != e_taken->dest)
3865 {
3866 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3867
3868 if (note
3869 && profile_probability::from_reg_br_prob_note
3870 (XINT (note, 0)) < profile_probability::even ()
3871 && invert_jump (bb_end_jump,
3872 (e_fall->dest
3873 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3874 ? NULL_RTX
3875 : label_for_bb (e_fall->dest)), 0))
3876 {
3877 e_fall->flags &= ~EDGE_FALLTHRU;
3878 gcc_checking_assert (could_fall_through
3879 (e_taken->src, e_taken->dest));
3880 e_taken->flags |= EDGE_FALLTHRU;
3881 update_br_prob_note (bb);
3882 e = e_fall, e_fall = e_taken, e_taken = e;
3883 }
3884 }
3885
3886 /* If the "jumping" edge is a crossing edge, and the fall
3887 through edge is non-crossing, leave things as they are. */
3888 else if ((e_taken->flags & EDGE_CROSSING)
3889 && !(e_fall->flags & EDGE_CROSSING))
3890 continue;
3891
3892 /* Otherwise we can try to invert the jump. This will
3893 basically never fail; however, keep up the pretense. */
3894 else if (invert_jump (bb_end_jump,
3895 (e_fall->dest
3896 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3897 ? NULL_RTX
3898 : label_for_bb (e_fall->dest)), 0))
3899 {
3900 e_fall->flags &= ~EDGE_FALLTHRU;
3901 gcc_checking_assert (could_fall_through
3902 (e_taken->src, e_taken->dest));
3903 e_taken->flags |= EDGE_FALLTHRU;
3904 update_br_prob_note (bb);
3905 if (LABEL_NUSES (ret_label) == 0
3906 && single_pred_p (e_taken->dest))
3907 delete_insn (as_a<rtx_insn *> (ret_label));
3908 continue;
3909 }
3910 }
3911 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3912 {
3913 /* If the old fallthru is still next, or if the
3914 asm goto doesn't have a fallthru (e.g. when followed by
3915 __builtin_unreachable ()), there is nothing to do. */
3916 if (! e_fall
3917 || bb->aux == e_fall->dest
3918 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3919 continue;
3920
3921 /* Otherwise we'll have to use the fallthru fixup below. */
3922 }
3923 else
3924 {
3925 /* Otherwise we have some return, switch or computed
3926 jump. In the 99% case, there should not have been a
3927 fallthru edge. */
3928 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3929 continue;
3930 }
3931 }
3932 else
3933 {
3934 /* No fallthru implies a noreturn function with EH edges, or
3935 something similarly bizarre. In any case, we don't need to
3936 do anything. */
3937 if (! e_fall)
3938 continue;
3939
3940 /* If the fallthru block is still next, nothing to do. */
3941 if (bb->aux == e_fall->dest)
3942 continue;
3943
3944 /* A fallthru to exit block. */
3945 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3946 continue;
3947 }
3948
3949 /* We got here if we need to add a new jump insn.
3950 Note force_nonfallthru can delete E_FALL and thus we have to
3951 save E_FALL->src prior to the call to force_nonfallthru. */
3952 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3953 if (nb)
3954 {
3955 nb->aux = bb->aux;
3956 bb->aux = nb;
3957 /* Don't process this new block. */
3958 bb = nb;
3959 }
3960 }
3961
3962 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3963
3964 /* Annoying special case - jump around dead jumptables left in the code. */
3965 FOR_EACH_BB_FN (bb, cfun)
3966 {
3967 edge e = find_fallthru_edge (bb->succs);
3968
3969 if (e && !can_fallthru (e->src, e->dest))
3970 force_nonfallthru (e);
3971 }
3972
3973 /* When not optimizing, ensure that each edge's goto_locus is reflected by
3974 some instruction with that locus in the RTL. */
3975 if (!optimize && !DECL_IGNORED_P (current_function_decl))
3976 FOR_EACH_BB_FN (bb, cfun)
3977 {
3978 edge e;
3979 edge_iterator ei;
3980
3981 FOR_EACH_EDGE (e, ei, bb->succs)
3982 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3983 && !(e->flags & EDGE_ABNORMAL))
3984 {
3985 edge e2;
3986 edge_iterator ei2;
3987 basic_block dest, nb;
3988 rtx_insn *end;
3989
3990 insn = BB_END (e->src);
3991 end = PREV_INSN (BB_HEAD (e->src));
3992 while (insn != end
3993 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3994 insn = PREV_INSN (insn);
3995 if (insn != end
3996 && INSN_LOCATION (insn) == e->goto_locus)
3997 continue;
3998 if (simplejump_p (BB_END (e->src))
3999 && !INSN_HAS_LOCATION (BB_END (e->src)))
4000 {
4001 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
4002 continue;
4003 }
4004 dest = e->dest;
4005 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4006 {
4007 /* Non-fallthru edges to the exit block cannot be split. */
4008 if (!(e->flags & EDGE_FALLTHRU))
4009 continue;
4010 }
4011 else
4012 {
4013 insn = BB_HEAD (dest);
4014 end = NEXT_INSN (BB_END (dest));
4015 while (insn != end && !NONDEBUG_INSN_P (insn))
4016 insn = NEXT_INSN (insn);
4017 if (insn != end && INSN_HAS_LOCATION (insn)
4018 && INSN_LOCATION (insn) == e->goto_locus)
4019 continue;
4020 }
4021 nb = split_edge (e);
4022 if (!INSN_P (BB_END (nb)))
4023 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
4024 nb);
4025 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
4026
4027 /* If there are other incoming edges to the destination block
4028 with the same goto locus, redirect them to the new block as
4029 well; this can prevent other such blocks from being created
4030 in subsequent iterations of the loop. */
4031 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
4032 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
4033 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
4034 && e->goto_locus == e2->goto_locus)
4035 redirect_edge_and_branch (e2, nb);
4036 else
4037 ei_next (&ei2);
4038 }
4039 }
4040 }
4041 \f
4042 /* Perform sanity checks on the insn chain.
4043 1. Check that next/prev pointers are consistent in both the forward and
4044 reverse direction.
4045 2. Count insns in chain, going both directions, and check if equal.
4046 3. Check that get_last_insn () returns the actual end of chain. */
4047
4048 DEBUG_FUNCTION void
4049 verify_insn_chain (void)
4050 {
4051 rtx_insn *x, *prevx, *nextx;
4052 int insn_cnt1, insn_cnt2;
4053
4054 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4055 x != 0;
4056 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4057 gcc_assert (PREV_INSN (x) == prevx);
4058
4059 gcc_assert (prevx == get_last_insn ());
4060
4061 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4062 x != 0;
4063 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4064 gcc_assert (NEXT_INSN (x) == nextx);
4065
4066 gcc_assert (insn_cnt1 == insn_cnt2);
4067 }
4068 \f
4069 /* If we have assembler epilogues, the block falling through to exit must
4070 be the last one in the reordered chain when we reach final. Ensure
4071 that this condition is met. */
4072 static void
4073 fixup_fallthru_exit_predecessor (void)
4074 {
4075 edge e;
4076 basic_block bb = NULL;
4077
4078 /* This transformation is not valid before reload, because we might
4079 separate a call from the instruction that copies the return
4080 value. */
4081 gcc_assert (reload_completed);
4082
4083 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4084 if (e)
4085 bb = e->src;
4086
4087 if (bb && bb->aux)
4088 {
4089 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4090
4091 /* If the very first block is the one with the fall-through exit
4092 edge, we have to split that block. */
4093 if (c == bb)
4094 {
4095 bb = split_block_after_labels (bb)->dest;
4096 bb->aux = c->aux;
4097 c->aux = bb;
4098 BB_FOOTER (bb) = BB_FOOTER (c);
4099 BB_FOOTER (c) = NULL;
4100 }
4101
4102 while (c->aux != bb)
4103 c = (basic_block) c->aux;
4104
4105 c->aux = bb->aux;
4106 while (c->aux)
4107 c = (basic_block) c->aux;
4108
4109 c->aux = bb;
4110 bb->aux = NULL;
4111 }
4112 }
4113
4114 /* In case the exit block has more than one fallthru predecessor, force it
4115 to have only one. */
4116
4117 static void
4118 force_one_exit_fallthru (void)
4119 {
4120 edge e, predecessor = NULL;
4121 bool more = false;
4122 edge_iterator ei;
4123 basic_block forwarder, bb;
4124
4125 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4126 if (e->flags & EDGE_FALLTHRU)
4127 {
4128 if (predecessor == NULL)
4129 predecessor = e;
4130 else
4131 {
4132 more = true;
4133 break;
4134 }
4135 }
4136
4137 if (!more)
4138 return;
4139
4140 /* Exit has several fallthru predecessors. Create a forwarder block for
4141 them. */
4142 forwarder = split_edge (predecessor);
4143 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4144 (e = ei_safe_edge (ei)); )
4145 {
4146 if (e->src == forwarder
4147 || !(e->flags & EDGE_FALLTHRU))
4148 ei_next (&ei);
4149 else
4150 redirect_edge_and_branch_force (e, forwarder);
4151 }
4152
4153 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4154 exit block. */
4155 FOR_EACH_BB_FN (bb, cfun)
4156 {
4157 if (bb->aux == NULL && bb != forwarder)
4158 {
4159 bb->aux = forwarder;
4160 break;
4161 }
4162 }
4163 }
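/* For illustration: if blocks B1 and B2 both fell through to EXIT, then after
   the function above only the new forwarder block keeps a fallthru edge to
   EXIT, and the other edges are redirected to it:

       B1 --\
             +--> FORWARDER --(fallthru)--> EXIT
       B2 --/
*/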
4164 \f
4165 /* Return true in case it is possible to duplicate the basic block BB. */
4166
4167 static bool
4168 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4169 {
4170 /* Do not attempt to duplicate tablejumps, as we need to unshare
4171 the dispatch table. This is difficult to do, as the instructions
4172 computing the jump destination may be hoisted outside the basic block. */
4173 if (tablejump_p (BB_END (bb), NULL, NULL))
4174 return false;
4175
4176 /* Do not duplicate blocks containing insns that can't be copied. */
4177 if (targetm.cannot_copy_insn_p)
4178 {
4179 rtx_insn *insn = BB_HEAD (bb);
4180 while (1)
4181 {
4182 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4183 return false;
4184 if (insn == BB_END (bb))
4185 break;
4186 insn = NEXT_INSN (insn);
4187 }
4188 }
4189
4190 return true;
4191 }
4192
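/* Duplicate the insn chain FROM .. TO, inclusive, by emitting copies at the
   end of the current insn stream, and return the first insn of the copy.
   The copied chain is left at the end of the stream; the cfglayout fixup
   code re-links it into place later. */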
4193 rtx_insn *
4194 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4195 {
4196 rtx_insn *insn, *next, *copy;
4197 rtx_note *last;
4198
4199 /* Avoid updating the boundaries of the previous basic block. The
4200 note will get removed from the insn stream during fixup. */
4201 last = emit_note (NOTE_INSN_DELETED);
4202
4203 /* Create the copy at the end of the insn chain. The chain will
4204 be reordered later. */
4205 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4206 {
4207 switch (GET_CODE (insn))
4208 {
4209 case DEBUG_INSN:
4210 /* Don't duplicate label debug insns. */
4211 if (DEBUG_BIND_INSN_P (insn)
4212 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4213 break;
4214 /* FALLTHRU */
4215 case INSN:
4216 case CALL_INSN:
4217 case JUMP_INSN:
4218 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4219 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4220 && ANY_RETURN_P (JUMP_LABEL (insn)))
4221 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4222 maybe_copy_prologue_epilogue_insn (insn, copy);
4223 break;
4224
4225 case JUMP_TABLE_DATA:
4226 /* Avoid copying dispatch tables. We never duplicate
4227 tablejumps, so this can only happen if the table got
4228 moved far away from the original jump.
4229 Avoid copying the following barrier as well, if any
4230 (and any debug insns in between). */
4231 for (next = NEXT_INSN (insn);
4232 next != NEXT_INSN (to);
4233 next = NEXT_INSN (next))
4234 if (!DEBUG_INSN_P (next))
4235 break;
4236 if (next != NEXT_INSN (to) && BARRIER_P (next))
4237 insn = next;
4238 break;
4239
4240 case CODE_LABEL:
4241 break;
4242
4243 case BARRIER:
4244 emit_barrier ();
4245 break;
4246
4247 case NOTE:
4248 switch (NOTE_KIND (insn))
4249 {
4250 /* In case the prologue is empty and the function contains a label
4251 in its first BB, we may want to copy the block. */
4252 case NOTE_INSN_PROLOGUE_END:
4253
4254 case NOTE_INSN_DELETED:
4255 case NOTE_INSN_DELETED_LABEL:
4256 case NOTE_INSN_DELETED_DEBUG_LABEL:
4257 /* No problem to strip these. */
4258 case NOTE_INSN_FUNCTION_BEG:
4259 /* There is always just a single entry to the function. */
4260 case NOTE_INSN_BASIC_BLOCK:
4261 /* We should only switch text sections once. */
4262 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4263 break;
4264
4265 case NOTE_INSN_EPILOGUE_BEG:
4266 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4267 emit_note_copy (as_a <rtx_note *> (insn));
4268 break;
4269
4270 default:
4271 /* All other notes should have already been eliminated. */
4272 gcc_unreachable ();
4273 }
4274 break;
4275 default:
4276 gcc_unreachable ();
4277 }
4278 }
4279 insn = NEXT_INSN (last);
4280 delete_insn (last);
4281 return insn;
4282 }
4283
4284 /* Create a duplicate of the basic block BB. */
4285
4286 static basic_block
4287 cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *)
4288 {
4289 rtx_insn *insn;
4290 basic_block new_bb;
4291
4292 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4293 new_bb = create_basic_block (insn,
4294 insn ? get_last_insn () : NULL,
4295 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4296
4297 BB_COPY_PARTITION (new_bb, bb);
4298 if (BB_HEADER (bb))
4299 {
4300 insn = BB_HEADER (bb);
4301 while (NEXT_INSN (insn))
4302 insn = NEXT_INSN (insn);
4303 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4304 if (insn)
4305 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4306 }
4307
4308 if (BB_FOOTER (bb))
4309 {
4310 insn = BB_FOOTER (bb);
4311 while (NEXT_INSN (insn))
4312 insn = NEXT_INSN (insn);
4313 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4314 if (insn)
4315 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4316 }
4317
4318 return new_bb;
4319 }
4320
4321 \f
4322 /* Main entry point to this module - initialize the data structures for
4323 CFG layout changes. The loop structures, if present, are kept up to date.
4324
4325 FLAGS is a set of additional flags to pass to cleanup_cfg (). */
4326
4327 void
4328 cfg_layout_initialize (int flags)
4329 {
4330 rtx_insn_list *x;
4331 basic_block bb;
4332
4333 /* Once bb partitioning is complete, cfg layout mode should not be
4334 re-entered. Entering cfg layout mode may require fixups. For
4335 example, edge forwarding performed when optimizing the cfg
4336 layout might require moving a block from the hot to the cold
4337 section, which would create an illegal partitioning unless some
4338 manual fixup was performed. */
4339 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4340
4341 initialize_original_copy_tables ();
4342
4343 cfg_layout_rtl_register_cfg_hooks ();
4344
4345 record_effective_endpoints ();
4346
4347 /* Make sure that the targets of non local gotos are marked. */
4348 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4349 {
4350 bb = BLOCK_FOR_INSN (x->insn ());
4351 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4352 }
4353
4354 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4355 }
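/* A minimal sketch of how an RTL pass typically drives cfglayout mode
   (hypothetical caller, shown for illustration only):

       cfg_layout_initialize (0);
       ... reorder or duplicate blocks, e.g. by chaining them through
           their bb->aux fields ...
       cfg_layout_finalize ();

   cfg_layout_finalize (below) relinks the insn stream to match the new
   block order and switches back to the plain RTL cfg hooks. */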
4356
4357 /* Splits superblocks. */
4358 void
4359 break_superblocks (void)
4360 {
4361 bool need = false;
4362 basic_block bb;
4363
4364 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4365 bitmap_clear (superblocks);
4366
4367 FOR_EACH_BB_FN (bb, cfun)
4368 if (bb->flags & BB_SUPERBLOCK)
4369 {
4370 bb->flags &= ~BB_SUPERBLOCK;
4371 bitmap_set_bit (superblocks, bb->index);
4372 need = true;
4373 }
4374
4375 if (need)
4376 {
4377 rebuild_jump_labels (get_insns ());
4378 find_many_sub_basic_blocks (superblocks);
4379 }
4380 }
4381
4382 /* Finalize the changes: reorder the insn list according to the sequence
4383 specified by the aux pointers, insert compensation code, rebuild the scope forest. */
4384
4385 void
4386 cfg_layout_finalize (void)
4387 {
4388 free_dominance_info (CDI_DOMINATORS);
4389 force_one_exit_fallthru ();
4390 rtl_register_cfg_hooks ();
4391 if (reload_completed && !targetm.have_epilogue ())
4392 fixup_fallthru_exit_predecessor ();
4393 fixup_reorder_chain ();
4394
4395 rebuild_jump_labels (get_insns ());
4396 delete_dead_jumptables ();
4397
4398 if (flag_checking)
4399 verify_insn_chain ();
4400 checking_verify_flow_info ();
4401 }
4402
4403
4404 /* Same as split_block but update cfg_layout structures. */
4405
4406 static basic_block
4407 cfg_layout_split_block (basic_block bb, void *insnp)
4408 {
4409 rtx insn = (rtx) insnp;
4410 basic_block new_bb = rtl_split_block (bb, insn);
4411
4412 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4413 BB_FOOTER (bb) = NULL;
4414
4415 return new_bb;
4416 }
4417
4418 /* Redirect edge E to DEST. */
4419 static edge
4420 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4421 {
4422 basic_block src = e->src;
4423 edge ret;
4424
4425 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4426 return NULL;
4427
4428 if (e->dest == dest)
4429 return e;
4430
4431 if (e->flags & EDGE_CROSSING
4432 && BB_PARTITION (e->src) == BB_PARTITION (dest)
4433 && simplejump_p (BB_END (src)))
4434 {
4435 if (dump_file)
4436 fprintf (dump_file,
4437 "Removing crossing jump while redirecting edge form %i to %i\n",
4438 e->src->index, dest->index);
4439 delete_insn (BB_END (src));
4440 remove_barriers_from_footer (src);
4441 e->flags |= EDGE_FALLTHRU;
4442 }
4443
4444 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4445 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4446 {
4447 df_set_bb_dirty (src);
4448 return ret;
4449 }
4450
4451 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4452 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4453 {
4454 if (dump_file)
4455 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4456 e->src->index, dest->index);
4457
4458 df_set_bb_dirty (e->src);
4459 redirect_edge_succ (e, dest);
4460 return e;
4461 }
4462
4463 /* redirect_edge_and_branch may decide to turn the branch into a fallthru
4464 edge in case the basic blocks appear to be in sequence. Avoid this
4465 transformation. */
4466
4467 if (e->flags & EDGE_FALLTHRU)
4468 {
4469 /* Redirect any branch edges unified with the fallthru one. */
4470 if (JUMP_P (BB_END (src))
4471 && label_is_jump_target_p (BB_HEAD (e->dest),
4472 BB_END (src)))
4473 {
4474 edge redirected;
4475
4476 if (dump_file)
4477 fprintf (dump_file, "Fallthru edge unified with branch "
4478 "%i->%i redirected to %i\n",
4479 e->src->index, e->dest->index, dest->index);
4480 e->flags &= ~EDGE_FALLTHRU;
4481 redirected = redirect_branch_edge (e, dest);
4482 gcc_assert (redirected);
4483 redirected->flags |= EDGE_FALLTHRU;
4484 df_set_bb_dirty (redirected->src);
4485 return redirected;
4486 }
4487 /* In case we are redirecting the fallthru edge to the branch edge
4488 of a conditional jump, remove the jump. */
4489 if (EDGE_COUNT (src->succs) == 2)
4490 {
4491 /* Find the edge that is different from E. */
4492 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4493
4494 if (s->dest == dest
4495 && any_condjump_p (BB_END (src))
4496 && onlyjump_p (BB_END (src)))
4497 delete_insn (BB_END (src));
4498 }
4499 if (dump_file)
4500 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4501 e->src->index, e->dest->index, dest->index);
4502 ret = redirect_edge_succ_nodup (e, dest);
4503 }
4504 else
4505 ret = redirect_branch_edge (e, dest);
4506
4507 if (!ret)
4508 return NULL;
4509
4510 fixup_partition_crossing (ret);
4511 /* We don't want simplejumps in the insn stream during cfglayout. */
4512 gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4513
4514 df_set_bb_dirty (src);
4515 return ret;
4516 }
4517
4518 /* Simple wrapper as we always can redirect fallthru edges. */
4519 static basic_block
4520 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4521 {
4522 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4523
4524 gcc_assert (redirected);
4525 return NULL;
4526 }
4527
4528 /* Same as delete_basic_block but update cfg_layout structures. */
4529
4530 static void
4531 cfg_layout_delete_block (basic_block bb)
4532 {
4533 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4534 rtx_insn **to;
4535
4536 if (BB_HEADER (bb))
4537 {
4538 next = BB_HEAD (bb);
4539 if (prev)
4540 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4541 else
4542 set_first_insn (BB_HEADER (bb));
4543 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4544 insn = BB_HEADER (bb);
4545 while (NEXT_INSN (insn))
4546 insn = NEXT_INSN (insn);
4547 SET_NEXT_INSN (insn) = next;
4548 SET_PREV_INSN (next) = insn;
4549 }
4550 next = NEXT_INSN (BB_END (bb));
4551 if (BB_FOOTER (bb))
4552 {
4553 insn = BB_FOOTER (bb);
4554 while (insn)
4555 {
4556 if (BARRIER_P (insn))
4557 {
4558 if (PREV_INSN (insn))
4559 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4560 else
4561 BB_FOOTER (bb) = NEXT_INSN (insn);
4562 if (NEXT_INSN (insn))
4563 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4564 }
4565 if (LABEL_P (insn))
4566 break;
4567 insn = NEXT_INSN (insn);
4568 }
4569 if (BB_FOOTER (bb))
4570 {
4571 insn = BB_END (bb);
4572 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4573 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4574 while (NEXT_INSN (insn))
4575 insn = NEXT_INSN (insn);
4576 SET_NEXT_INSN (insn) = next;
4577 if (next)
4578 SET_PREV_INSN (next) = insn;
4579 else
4580 set_last_insn (insn);
4581 }
4582 }
4583 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4584 to = &BB_HEADER (bb->next_bb);
4585 else
4586 to = &cfg_layout_function_footer;
4587
4588 rtl_delete_block (bb);
4589
4590 if (prev)
4591 prev = NEXT_INSN (prev);
4592 else
4593 prev = get_insns ();
4594 if (next)
4595 next = PREV_INSN (next);
4596 else
4597 next = get_last_insn ();
4598
4599 if (next && NEXT_INSN (next) != prev)
4600 {
4601 remaints = unlink_insn_chain (prev, next);
4602 insn = remaints;
4603 while (NEXT_INSN (insn))
4604 insn = NEXT_INSN (insn);
4605 SET_NEXT_INSN (insn) = *to;
4606 if (*to)
4607 SET_PREV_INSN (*to) = insn;
4608 *to = remaints;
4609 }
4610 }
4611
4612 /* Return true when blocks A and B can be safely merged. */
4613
4614 static bool
4615 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4616 {
4617 /* If we are partitioning hot/cold basic blocks, we don't want to
4618 mess up unconditional or indirect jumps that cross between hot
4619 and cold sections.
4620
4621 Basic block partitioning may result in some jumps that appear to
4622 be optimizable (or blocks that appear to be mergeable), but which really
4623 must be left untouched (they are required to make it safely across
4624 partition boundaries). See the comments at the top of
4625 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4626
4627 if (BB_PARTITION (a) != BB_PARTITION (b))
4628 return false;
4629
4630 /* Protect the loop latches. */
4631 if (current_loops && b->loop_father->latch == b)
4632 return false;
4633
4634 /* If we would end up moving B's instructions, make sure it doesn't fall
4635 through into the exit block, since we cannot recover from a fallthrough
4636 edge into the exit block occurring in the middle of a function. */
4637 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4638 {
4639 edge e = find_fallthru_edge (b->succs);
4640 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4641 return false;
4642 }
4643
4644 /* There must be exactly one edge in between the blocks. */
4645 return (single_succ_p (a)
4646 && single_succ (a) == b
4647 && single_pred_p (b) == 1
4648 && a != b
4649 /* Must be simple edge. */
4650 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4651 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4652 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4653 /* If the jump insn has side effects, we can't kill the edge.
4654 When not optimizing, try_redirect_by_replacing_jump will
4655 not allow us to redirect an edge by replacing a table jump. */
4656 && (!JUMP_P (BB_END (a))
4657 || ((!optimize || reload_completed)
4658 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4659 }
4660
4661 /* Merge block A and B. The blocks must be mergeable. */
4662
4663 static void
4664 cfg_layout_merge_blocks (basic_block a, basic_block b)
4665 {
4666 /* If B is a forwarder block whose outgoing edge has no location, we'll
4667 propagate the locus of the edge between A and B onto it. */
4668 const bool forward_edge_locus
4669 = (b->flags & BB_FORWARDER_BLOCK) != 0
4670 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
4671 rtx_insn *insn;
4672
4673 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4674
4675 if (dump_file)
4676 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4677 a->index);
4678
4679 /* If there was a CODE_LABEL beginning B, delete it. */
4680 if (LABEL_P (BB_HEAD (b)))
4681 {
4682 delete_insn (BB_HEAD (b));
4683 }
4684
4685 /* We should have a fallthru edge in A, or we can do a dummy redirection to
4686 get it cleaned up. */
4687 if (JUMP_P (BB_END (a)))
4688 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4689 gcc_assert (!JUMP_P (BB_END (a)));
4690
4691 /* If not optimizing, preserve the locus of the single edge between
4692 blocks A and B if necessary by emitting a nop. */
4693 if (!optimize
4694 && !forward_edge_locus
4695 && !DECL_IGNORED_P (current_function_decl))
4696 emit_nop_for_unique_locus_between (a, b);
4697
4698 /* Move things from b->footer after a->footer. */
4699 if (BB_FOOTER (b))
4700 {
4701 if (!BB_FOOTER (a))
4702 BB_FOOTER (a) = BB_FOOTER (b);
4703 else
4704 {
4705 rtx_insn *last = BB_FOOTER (a);
4706
4707 while (NEXT_INSN (last))
4708 last = NEXT_INSN (last);
4709 SET_NEXT_INSN (last) = BB_FOOTER (b);
4710 SET_PREV_INSN (BB_FOOTER (b)) = last;
4711 }
4712 BB_FOOTER (b) = NULL;
4713 }
4714
4715 /* Move things from b->header before a->footer.
4716 Note that this may include dead tablejump data, but we don't clean
4717 those up until we go out of cfglayout mode. */
4718 if (BB_HEADER (b))
4719 {
4720 if (! BB_FOOTER (a))
4721 BB_FOOTER (a) = BB_HEADER (b);
4722 else
4723 {
4724 rtx_insn *last = BB_HEADER (b);
4725
4726 while (NEXT_INSN (last))
4727 last = NEXT_INSN (last);
4728 SET_NEXT_INSN (last) = BB_FOOTER (a);
4729 SET_PREV_INSN (BB_FOOTER (a)) = last;
4730 BB_FOOTER (a) = BB_HEADER (b);
4731 }
4732 BB_HEADER (b) = NULL;
4733 }
4734
4735 /* In the case basic blocks are not adjacent, move them around. */
4736 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4737 {
4738 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4739
4740 emit_insn_after_noloc (insn, BB_END (a), a);
4741 }
4742 /* Otherwise just re-associate the instructions. */
4743 else
4744 {
4745 insn = BB_HEAD (b);
4746 BB_END (a) = BB_END (b);
4747 }
4748
4749 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4750 We need to call it explicitly. */
4751 update_bb_for_insn_chain (insn, BB_END (b), a);
4752
4753 /* Skip possible DELETED_LABEL insn. */
4754 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4755 insn = NEXT_INSN (insn);
4756 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4757 BB_HEAD (b) = BB_END (b) = NULL;
4758 delete_insn (insn);
4759
4760 df_bb_delete (b->index);
4761
4762 if (forward_edge_locus)
4763 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4764
4765 if (dump_file)
4766 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4767 }
4768
4769 /* Split edge E. */
4770
4771 static basic_block
4772 cfg_layout_split_edge (edge e)
4773 {
4774 basic_block new_bb =
4775 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4776 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4777 NULL_RTX, e->src);
4778
4779 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4780 BB_COPY_PARTITION (new_bb, e->src);
4781 else
4782 BB_COPY_PARTITION (new_bb, e->dest);
4783 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4784 redirect_edge_and_branch_force (e, new_bb);
4785
4786 return new_bb;
4787 }
4788
4789 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4790
4791 static void
4792 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4793 {
4794 }
4795
4796 /* Return true if BB contains only labels or non-executable
4797 instructions. */
4798
4799 static bool
4800 rtl_block_empty_p (basic_block bb)
4801 {
4802 rtx_insn *insn;
4803
4804 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4805 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4806 return true;
4807
4808 FOR_BB_INSNS (bb, insn)
4809 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4810 return false;
4811
4812 return true;
4813 }
4814
4815 /* Split a basic block if it ends with a conditional branch and if
4816 the other part of the block is not empty. */
4817
4818 static basic_block
4819 rtl_split_block_before_cond_jump (basic_block bb)
4820 {
4821 rtx_insn *insn;
4822 rtx_insn *split_point = NULL;
4823 rtx_insn *last = NULL;
4824 bool found_code = false;
4825
4826 FOR_BB_INSNS (bb, insn)
4827 {
4828 if (any_condjump_p (insn))
4829 split_point = last;
4830 else if (NONDEBUG_INSN_P (insn))
4831 found_code = true;
4832 last = insn;
4833 }
4834
4835 /* Split only if we found both a conditional jump and other real code. */
4836 if (found_code && split_point)
4837 return split_block (bb, split_point)->dest;
4838 else
4839 return NULL;
4840 }
4841
4842 /* Return 1 if BB ends with a call, possibly followed by some
4843 instructions that must stay with the call, 0 otherwise. */
4844
4845 static bool
4846 rtl_block_ends_with_call_p (basic_block bb)
4847 {
4848 rtx_insn *insn = BB_END (bb);
4849
4850 while (!CALL_P (insn)
4851 && insn != BB_HEAD (bb)
4852 && (keep_with_call_p (insn)
4853 || NOTE_P (insn)
4854 || DEBUG_INSN_P (insn)))
4855 insn = PREV_INSN (insn);
4856 return (CALL_P (insn));
4857 }
4858
4859 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4860
4861 static bool
4862 rtl_block_ends_with_condjump_p (const_basic_block bb)
4863 {
4864 return any_condjump_p (BB_END (bb));
4865 }
4866
4867 /* Return true if we need to add fake edge to exit.
4868 Helper function for rtl_flow_call_edges_add. */
4869
4870 static bool
4871 need_fake_edge_p (const rtx_insn *insn)
4872 {
4873 if (!INSN_P (insn))
4874 return false;
4875
4876 if ((CALL_P (insn)
4877 && !SIBLING_CALL_P (insn)
4878 && !find_reg_note (insn, REG_NORETURN, NULL)
4879 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4880 return true;
4881
4882 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4883 && MEM_VOLATILE_P (PATTERN (insn)))
4884 || (GET_CODE (PATTERN (insn)) == PARALLEL
4885 && asm_noperands (insn) != -1
4886 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4887 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4888 }
4889
4890 /* Add fake edges to the function exit for any non-constant and non-noreturn
4891 calls (and volatile inline assembly) in the bitmap of blocks specified by
4892 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks
4893 that were split.
4894
4895 The goal is to expose cases in which entering a basic block does not imply
4896 that all subsequent instructions must be executed. */
4897
4898 static int
4899 rtl_flow_call_edges_add (sbitmap blocks)
4900 {
4901 int i;
4902 int blocks_split = 0;
4903 int last_bb = last_basic_block_for_fn (cfun);
4904 bool check_last_block = false;
4905
4906 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4907 return 0;
4908
4909 if (! blocks)
4910 check_last_block = true;
4911 else
4912 check_last_block = bitmap_bit_p (blocks,
4913 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4914
4915 /* In the last basic block, before epilogue generation, there will be
4916 a fallthru edge to EXIT. Special care is required if the last insn
4917 of the last basic block is a call because make_edge folds duplicate
4918 edges, which would result in the fallthru edge also being marked
4919 fake, which would result in the fallthru edge being removed by
4920 remove_fake_edges, which would result in an invalid CFG.
4921
4922 Moreover, we can't elide the outgoing fake edge, since the block
4923 profiler needs to take this into account in order to solve the minimal
4924 spanning tree in the case that the call doesn't return.
4925
4926 Handle this by adding a dummy instruction in a new last basic block. */
4927 if (check_last_block)
4928 {
4929 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4930 rtx_insn *insn = BB_END (bb);
4931
4932 /* Back up past insns that must be kept in the same block as a call. */
4933 while (insn != BB_HEAD (bb)
4934 && keep_with_call_p (insn))
4935 insn = PREV_INSN (insn);
4936
4937 if (need_fake_edge_p (insn))
4938 {
4939 edge e;
4940
4941 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4942 if (e)
4943 {
4944 insert_insn_on_edge (gen_use (const0_rtx), e);
4945 commit_edge_insertions ();
4946 }
4947 }
4948 }
4949
4950 /* Now add fake edges to the function exit for any non-constant
4951 calls, since there is no way that we can determine whether they will
4952 return or not... */
4953
4954 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4955 {
4956 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4957 rtx_insn *insn;
4958 rtx_insn *prev_insn;
4959
4960 if (!bb)
4961 continue;
4962
4963 if (blocks && !bitmap_bit_p (blocks, i))
4964 continue;
4965
4966 for (insn = BB_END (bb); ; insn = prev_insn)
4967 {
4968 prev_insn = PREV_INSN (insn);
4969 if (need_fake_edge_p (insn))
4970 {
4971 edge e;
4972 rtx_insn *split_at_insn = insn;
4973
4974 /* Don't split the block between a call and an insn that should
4975 remain in the same block as the call. */
4976 if (CALL_P (insn))
4977 while (split_at_insn != BB_END (bb)
4978 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4979 split_at_insn = NEXT_INSN (split_at_insn);
4980
4981 /* The handling above of the final block before the epilogue
4982 should be enough to verify that there is no edge to the exit
4983 block in the CFG already. Calling make_edge in such a case would
4984 cause us to mark that edge as fake and remove it later. */
4985
4986 if (flag_checking && split_at_insn == BB_END (bb))
4987 {
4988 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4989 gcc_assert (e == NULL);
4990 }
4991
4992 /* Note that the following may create a new basic block
4993 and renumber the existing basic blocks. */
4994 if (split_at_insn != BB_END (bb))
4995 {
4996 e = split_block (bb, split_at_insn);
4997 if (e)
4998 blocks_split++;
4999 }
5000
5001 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
5002 ne->probability = profile_probability::guessed_never ();
5003 }
5004
5005 if (insn == BB_HEAD (bb))
5006 break;
5007 }
5008 }
5009
5010 if (blocks_split)
5011 verify_flow_info ();
5012
5013 return blocks_split;
5014 }
5015
5016 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
5017 the conditional branch target, SECOND_HEAD should be the fall-thru block.
5018 The fall-thru does not need to be handled here; the loop versioning code
5019 handles it. SECOND_HEAD is only passed because the corresponding tree-level
5020 hook needs it, and this hook must have the same type. */
5021 static void
5022 rtl_lv_add_condition_to_bb (basic_block first_head ,
5023 basic_block second_head ATTRIBUTE_UNUSED,
5024 basic_block cond_bb, void *comp_rtx)
5025 {
5026 rtx_code_label *label;
5027 rtx_insn *seq, *jump;
5028 rtx op0 = XEXP ((rtx)comp_rtx, 0);
5029 rtx op1 = XEXP ((rtx)comp_rtx, 1);
5030 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
5031 machine_mode mode;
5032
5033
5034 label = block_label (first_head);
5035 mode = GET_MODE (op0);
5036 if (mode == VOIDmode)
5037 mode = GET_MODE (op1);
5038
5039 start_sequence ();
5040 op0 = force_operand (op0, NULL_RTX);
5041 op1 = force_operand (op1, NULL_RTX);
5042 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
5043 profile_probability::uninitialized ());
5044 jump = get_last_insn ();
5045 JUMP_LABEL (jump) = label;
5046 LABEL_NUSES (label)++;
5047 seq = get_insns ();
5048 end_sequence ();
5049
5050 /* Add the new cond, in the new head. */
5051 emit_insn_after (seq, BB_END (cond_bb));
5052 }
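/* For illustration, assuming COMP_RTX is (lt (reg:SI 100) (const_int 0)),
   the sequence emitted after COND_BB's last insn ends in roughly

       (jump_insn (set (pc) (if_then_else (lt (reg:SI 100) (const_int 0))
                                          (label_ref <FIRST_HEAD's label>)
                                          (pc))))

   i.e. a conditional jump to FIRST_HEAD with the fall-thru going to
   SECOND_HEAD; the exact insns depend on the target's compare-and-branch
   expansion. */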
5053
5054
5055 /* Given a block B with a conditional branch at its end, store the
5056 branch edge and the fall-thru edge in BRANCH_EDGE and
5057 FALLTHRU_EDGE respectively. */
5058 static void
5059 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
5060 edge *fallthru_edge)
5061 {
5062 edge e = EDGE_SUCC (b, 0);
5063
5064 if (e->flags & EDGE_FALLTHRU)
5065 {
5066 *fallthru_edge = e;
5067 *branch_edge = EDGE_SUCC (b, 1);
5068 }
5069 else
5070 {
5071 *branch_edge = e;
5072 *fallthru_edge = EDGE_SUCC (b, 1);
5073 }
5074 }
5075
5076 void
5077 init_rtl_bb_info (basic_block bb)
5078 {
5079 gcc_assert (!bb->il.x.rtl);
5080 bb->il.x.head_ = NULL;
5081 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
5082 }
5083
5084 /* Returns true if it is possible to remove edge E by redirecting
5085 it to the destination of the other edge from E->src. */
5086
5087 static bool
5088 rtl_can_remove_branch_p (const_edge e)
5089 {
5090 const_basic_block src = e->src;
5091 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5092 const rtx_insn *insn = BB_END (src);
5093 rtx set;
5094
5095 /* The conditions are taken from try_redirect_by_replacing_jump. */
5096 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5097 return false;
5098
5099 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5100 return false;
5101
5102 if (BB_PARTITION (src) != BB_PARTITION (target))
5103 return false;
5104
5105 if (!onlyjump_p (insn)
5106 || tablejump_p (insn, NULL, NULL))
5107 return false;
5108
5109 set = single_set (insn);
5110 if (!set || side_effects_p (set))
5111 return false;
5112
5113 return true;
5114 }
5115
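/* Duplicate BB via the cfglayout duplication machinery and clear the new
   block's aux field before returning it. */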
5116 static basic_block
5117 rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
5118 {
5119 bb = cfg_layout_duplicate_bb (bb, id);
5120 bb->aux = NULL;
5121 return bb;
5122 }
5123
5124 /* Do book-keeping of basic block BB for the profile consistency checker.
5125 Store the counts in RECORD. */
5126 static void
5127 rtl_account_profile_record (basic_block bb, struct profile_record *record)
5128 {
5129 rtx_insn *insn;
5130 FOR_BB_INSNS (bb, insn)
5131 if (INSN_P (insn))
5132 {
5133 record->size += insn_cost (insn, false);
5134 if (bb->count.initialized_p ())
5135 record->time
5136 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5137 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5138 record->time
5139 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5140 }
5141 }
5142
5143 /* Implementation of CFG manipulation for linearized RTL. */
5144 struct cfg_hooks rtl_cfg_hooks = {
5145 "rtl",
5146 rtl_verify_flow_info,
5147 rtl_dump_bb,
5148 rtl_dump_bb_for_graph,
5149 rtl_create_basic_block,
5150 rtl_redirect_edge_and_branch,
5151 rtl_redirect_edge_and_branch_force,
5152 rtl_can_remove_branch_p,
5153 rtl_delete_block,
5154 rtl_split_block,
5155 rtl_move_block_after,
5156 rtl_can_merge_blocks, /* can_merge_blocks_p */
5157 rtl_merge_blocks,
5158 rtl_predict_edge,
5159 rtl_predicted_by_p,
5160 cfg_layout_can_duplicate_bb_p,
5161 rtl_duplicate_bb,
5162 rtl_split_edge,
5163 rtl_make_forwarder_block,
5164 rtl_tidy_fallthru_edge,
5165 rtl_force_nonfallthru,
5166 rtl_block_ends_with_call_p,
5167 rtl_block_ends_with_condjump_p,
5168 rtl_flow_call_edges_add,
5169 NULL, /* execute_on_growing_pred */
5170 NULL, /* execute_on_shrinking_pred */
5171 NULL, /* duplicate loop for trees */
5172 NULL, /* lv_add_condition_to_bb */
5173 NULL, /* lv_adjust_loop_header_phi*/
5174 NULL, /* extract_cond_bb_edges */
5175 NULL, /* flush_pending_stmts */
5176 rtl_block_empty_p, /* block_empty_p */
5177 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5178 rtl_account_profile_record,
5179 };
5180
5181 /* Implementation of CFG manipulation for cfg layout RTL, where
5182 basic blocks connected via fallthru edges do not have to be adjacent.
5183 This representation will hopefully become the default one in a future
5184 version of the compiler. */
5185
5186 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5187 "cfglayout mode",
5188 rtl_verify_flow_info_1,
5189 rtl_dump_bb,
5190 rtl_dump_bb_for_graph,
5191 cfg_layout_create_basic_block,
5192 cfg_layout_redirect_edge_and_branch,
5193 cfg_layout_redirect_edge_and_branch_force,
5194 rtl_can_remove_branch_p,
5195 cfg_layout_delete_block,
5196 cfg_layout_split_block,
5197 rtl_move_block_after,
5198 cfg_layout_can_merge_blocks_p,
5199 cfg_layout_merge_blocks,
5200 rtl_predict_edge,
5201 rtl_predicted_by_p,
5202 cfg_layout_can_duplicate_bb_p,
5203 cfg_layout_duplicate_bb,
5204 cfg_layout_split_edge,
5205 rtl_make_forwarder_block,
5206 NULL, /* tidy_fallthru_edge */
5207 rtl_force_nonfallthru,
5208 rtl_block_ends_with_call_p,
5209 rtl_block_ends_with_condjump_p,
5210 rtl_flow_call_edges_add,
5211 NULL, /* execute_on_growing_pred */
5212 NULL, /* execute_on_shrinking_pred */
5213 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5214 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5215 NULL, /* lv_adjust_loop_header_phi*/
5216 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5217 NULL, /* flush_pending_stmts */
5218 rtl_block_empty_p, /* block_empty_p */
5219 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5220 rtl_account_profile_record,
5221 };
5222
5223 #include "gt-cfgrtl.h"
5224
5225 #if __GNUC__ >= 10
5226 # pragma GCC diagnostic pop
5227 #endif