]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/cfgrtl.c
Update copyright years.
[thirdparty/gcc.git] / gcc / cfgrtl.c
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and analyze it
21 that are aware of the RTL intermediate language.
22
23 Available functionality:
24 - Basic CFG/RTL manipulation API documented in cfghooks.h
25 - CFG-aware instruction chain manipulation
26 delete_insn, delete_insn_chain
27 - Edge splitting and committing to edges
28 insert_insn_on_edge, commit_edge_insertions
29 - CFG updating after insn simplification
30 purge_dead_edges, purge_all_dead_edges
31 - CFG fixing after coarse manipulation
32 fixup_abnormal_edges
33
34 Functions not supposed for generic use:
35 - Infrastructure to determine quickly basic block for insn
36 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
37 - Edge redirection with updating and optimizing of insn chain
38 block_label, tidy_fallthru_edge, force_nonfallthru */
39 \f
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "memmodel.h"
51 #include "emit-rtl.h"
52 #include "cfgrtl.h"
53 #include "cfganal.h"
54 #include "cfgbuild.h"
55 #include "cfgcleanup.h"
56 #include "bb-reorder.h"
57 #include "rtl-error.h"
58 #include "insn-attr.h"
59 #include "dojump.h"
60 #include "expr.h"
61 #include "cfgloop.h"
62 #include "tree-pass.h"
63 #include "print-rtl.h"
64 #include "rtl-iter.h"
65 #include "gimplify.h"
66
67 /* Disable warnings about missing quoting in GCC diagnostics. */
68 #if __GNUC__ >= 10
69 # pragma GCC diagnostic push
70 # pragma GCC diagnostic ignored "-Wformat-diag"
71 #endif
72
73 /* Holds the interesting leading and trailing notes for the function.
74 Only applicable if the CFG is in cfglayout mode. */
75 static GTY(()) rtx_insn *cfg_layout_function_footer;
76 static GTY(()) rtx_insn *cfg_layout_function_header;
77
78 static rtx_insn *skip_insns_after_block (basic_block);
79 static void record_effective_endpoints (void);
80 static void fixup_reorder_chain (void);
81
82 void verify_insn_chain (void);
83 static void fixup_fallthru_exit_predecessor (void);
84 static int can_delete_note_p (const rtx_note *);
85 static int can_delete_label_p (const rtx_code_label *);
86 static basic_block rtl_split_edge (edge);
87 static bool rtl_move_block_after (basic_block, basic_block);
88 static int rtl_verify_flow_info (void);
89 static basic_block cfg_layout_split_block (basic_block, void *);
90 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
91 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
92 static void cfg_layout_delete_block (basic_block);
93 static void rtl_delete_block (basic_block);
94 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
95 static edge rtl_redirect_edge_and_branch (edge, basic_block);
96 static basic_block rtl_split_block (basic_block, void *);
97 static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
98 static int rtl_verify_flow_info_1 (void);
99 static void rtl_make_forwarder_block (edge);
100 static bool rtl_bb_info_initialized_p (basic_block bb);
101 \f
102 /* Return true if NOTE is not one of the ones that must be kept paired,
103 so that we may simply delete it. */
104
105 static int
106 can_delete_note_p (const rtx_note *note)
107 {
108 switch (NOTE_KIND (note))
109 {
110 case NOTE_INSN_DELETED:
111 case NOTE_INSN_BASIC_BLOCK:
112 case NOTE_INSN_EPILOGUE_BEG:
113 return true;
114
115 default:
116 return false;
117 }
118 }
119
120 /* True if a given label can be deleted. */
121
122 static int
123 can_delete_label_p (const rtx_code_label *label)
124 {
125 return (!LABEL_PRESERVE_P (label)
126 /* User declared labels must be preserved. */
127 && LABEL_NAME (label) == 0
128 && !vec_safe_contains<rtx_insn *> (forced_labels,
129 const_cast<rtx_code_label *> (label)));
130 }
131
132 /* Delete INSN by patching it out. */
133
134 void
135 delete_insn (rtx_insn *insn)
136 {
137 rtx note;
138 bool really_delete = true;
139
140 if (LABEL_P (insn))
141 {
142 /* Some labels can't be directly removed from the INSN chain, as they
143 might be references via variables, constant pool etc.
144 Convert them to the special NOTE_INSN_DELETED_LABEL note. */
145 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
146 {
147 const char *name = LABEL_NAME (insn);
148 basic_block bb = BLOCK_FOR_INSN (insn);
149 rtx_insn *bb_note = NEXT_INSN (insn);
150
151 really_delete = false;
152 PUT_CODE (insn, NOTE);
153 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
154 NOTE_DELETED_LABEL_NAME (insn) = name;
155
156 /* If the note following the label starts a basic block, and the
157 label is a member of the same basic block, interchange the two. */
158 if (bb_note != NULL_RTX
159 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
160 && bb != NULL
161 && bb == BLOCK_FOR_INSN (bb_note))
162 {
163 reorder_insns_nobb (insn, insn, bb_note);
164 BB_HEAD (bb) = bb_note;
165 if (BB_END (bb) == bb_note)
166 BB_END (bb) = insn;
167 }
168 }
169
170 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
171 }
172
173 if (really_delete)
174 {
175 /* If this insn has already been deleted, something is very wrong. */
176 gcc_assert (!insn->deleted ());
177 if (INSN_P (insn))
178 df_insn_delete (insn);
179 remove_insn (insn);
180 insn->set_deleted ();
181 }
182
183 /* If deleting a jump, decrement the use count of the label. Deleting
184 the label itself should happen in the normal course of block merging. */
185 if (JUMP_P (insn))
186 {
187 if (JUMP_LABEL (insn)
188 && LABEL_P (JUMP_LABEL (insn)))
189 LABEL_NUSES (JUMP_LABEL (insn))--;
190
191 /* If there are more targets, remove them too. */
192 while ((note
193 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
194 && LABEL_P (XEXP (note, 0)))
195 {
196 LABEL_NUSES (XEXP (note, 0))--;
197 remove_note (insn, note);
198 }
199 }
200
201 /* Also if deleting any insn that references a label as an operand. */
202 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
203 && LABEL_P (XEXP (note, 0)))
204 {
205 LABEL_NUSES (XEXP (note, 0))--;
206 remove_note (insn, note);
207 }
208
209 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
210 {
211 rtvec vec = table->get_labels ();
212 int len = GET_NUM_ELEM (vec);
213 int i;
214
215 for (i = 0; i < len; i++)
216 {
217 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
218
219 /* When deleting code in bulk (e.g. removing many unreachable
220 blocks) we can delete a label that's a target of the vector
221 before deleting the vector itself. */
222 if (!NOTE_P (label))
223 LABEL_NUSES (label)--;
224 }
225 }
226 }
227
228 /* Like delete_insn but also purge dead edges from BB.
229 Return true if any edges are eliminated. */
230
231 bool
232 delete_insn_and_edges (rtx_insn *insn)
233 {
234 bool purge = false;
235
236 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
237 {
238 basic_block bb = BLOCK_FOR_INSN (insn);
239 if (BB_END (bb) == insn)
240 purge = true;
241 else if (DEBUG_INSN_P (BB_END (bb)))
242 for (rtx_insn *dinsn = NEXT_INSN (insn);
243 DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
244 if (BB_END (bb) == dinsn)
245 {
246 purge = true;
247 break;
248 }
249 }
250 delete_insn (insn);
251 if (purge)
252 return purge_dead_edges (BLOCK_FOR_INSN (insn));
253 return false;
254 }
255
256 /* Unlink a chain of insns between START and FINISH, leaving notes
257 that must be paired. If CLEAR_BB is true, we set bb field for
258 insns that cannot be removed to NULL. */
259
260 void
261 delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
262 {
263 /* Unchain the insns one by one. It would be quicker to delete all of these
264 with a single unchaining, rather than one at a time, but we need to keep
265 the NOTE's. */
266 rtx_insn *current = finish;
267 while (1)
268 {
269 rtx_insn *prev = PREV_INSN (current);
270 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
271 ;
272 else
273 delete_insn (current);
274
275 if (clear_bb && !current->deleted ())
276 set_block_for_insn (current, NULL);
277
278 if (current == start)
279 break;
280 current = prev;
281 }
282 }
283 \f
284 /* Create a new basic block consisting of the instructions between HEAD and END
285 inclusive. This function is designed to allow fast BB construction - reuses
286 the note and basic block struct in BB_NOTE, if any and do not grow
287 BASIC_BLOCK chain and should be used directly only by CFG construction code.
288 END can be NULL in to create new empty basic block before HEAD. Both END
289 and HEAD can be NULL to create basic block at the end of INSN chain.
290 AFTER is the basic block we should be put after. */
291
292 basic_block
293 create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
294 basic_block after)
295 {
296 basic_block bb;
297
298 if (bb_note
299 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
300 && bb->aux == NULL)
301 {
302 /* If we found an existing note, thread it back onto the chain. */
303
304 rtx_insn *after;
305
306 if (LABEL_P (head))
307 after = head;
308 else
309 {
310 after = PREV_INSN (head);
311 head = bb_note;
312 }
313
314 if (after != bb_note && NEXT_INSN (after) != bb_note)
315 reorder_insns_nobb (bb_note, bb_note, after);
316 }
317 else
318 {
319 /* Otherwise we must create a note and a basic block structure. */
320
321 bb = alloc_block ();
322
323 init_rtl_bb_info (bb);
324 if (!head && !end)
325 head = end = bb_note
326 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
327 else if (LABEL_P (head) && end)
328 {
329 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
330 if (head == end)
331 end = bb_note;
332 }
333 else
334 {
335 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
336 head = bb_note;
337 if (!end)
338 end = head;
339 }
340
341 NOTE_BASIC_BLOCK (bb_note) = bb;
342 }
343
344 /* Always include the bb note in the block. */
345 if (NEXT_INSN (end) == bb_note)
346 end = bb_note;
347
348 BB_HEAD (bb) = head;
349 BB_END (bb) = end;
350 bb->index = last_basic_block_for_fn (cfun)++;
351 bb->flags = BB_NEW | BB_RTL;
352 link_block (bb, after);
353 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
354 df_bb_refs_record (bb->index, false);
355 update_bb_for_insn (bb);
356 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
357
358 /* Tag the block so that we know it has been used when considering
359 other basic block notes. */
360 bb->aux = bb;
361
362 return bb;
363 }
364
365 /* Create new basic block consisting of instructions in between HEAD and END
366 and place it to the BB chain after block AFTER. END can be NULL to
367 create a new empty basic block before HEAD. Both END and HEAD can be
368 NULL to create basic block at the end of INSN chain. */
369
370 static basic_block
371 rtl_create_basic_block (void *headp, void *endp, basic_block after)
372 {
373 rtx_insn *head = (rtx_insn *) headp;
374 rtx_insn *end = (rtx_insn *) endp;
375 basic_block bb;
376
377 /* Grow the basic block array if needed. */
378 if ((size_t) last_basic_block_for_fn (cfun)
379 >= basic_block_info_for_fn (cfun)->length ())
380 vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
381 last_basic_block_for_fn (cfun) + 1);
382
383 n_basic_blocks_for_fn (cfun)++;
384
385 bb = create_basic_block_structure (head, end, NULL, after);
386 bb->aux = NULL;
387 return bb;
388 }
389
390 static basic_block
391 cfg_layout_create_basic_block (void *head, void *end, basic_block after)
392 {
393 basic_block newbb = rtl_create_basic_block (head, end, after);
394
395 return newbb;
396 }
397 \f
398 /* Delete the insns in a (non-live) block. We physically delete every
399 non-deleted-note insn, and update the flow graph appropriately.
400
401 Return nonzero if we deleted an exception handler. */
402
403 /* ??? Preserving all such notes strikes me as wrong. It would be nice
404 to post-process the stream to remove empty blocks, loops, ranges, etc. */
405
406 static void
407 rtl_delete_block (basic_block b)
408 {
409 rtx_insn *insn, *end;
410
411 /* If the head of this block is a CODE_LABEL, then it might be the
412 label for an exception handler which can't be reached. We need
413 to remove the label from the exception_handler_label list. */
414 insn = BB_HEAD (b);
415
416 end = get_last_bb_insn (b);
417
418 /* Selectively delete the entire chain. */
419 BB_HEAD (b) = NULL;
420 delete_insn_chain (insn, end, true);
421
422
423 if (dump_file)
424 fprintf (dump_file, "deleting block %d\n", b->index);
425 df_bb_delete (b->index);
426 }
427 \f
428 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
429
430 void
431 compute_bb_for_insn (void)
432 {
433 basic_block bb;
434
435 FOR_EACH_BB_FN (bb, cfun)
436 {
437 rtx_insn *end = BB_END (bb);
438 rtx_insn *insn;
439
440 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
441 {
442 BLOCK_FOR_INSN (insn) = bb;
443 if (insn == end)
444 break;
445 }
446 }
447 }
448
449 /* Release the basic_block_for_insn array. */
450
451 unsigned int
452 free_bb_for_insn (void)
453 {
454 rtx_insn *insn;
455 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
456 if (!BARRIER_P (insn))
457 BLOCK_FOR_INSN (insn) = NULL;
458 return 0;
459 }
460
461 namespace {
462
463 const pass_data pass_data_free_cfg =
464 {
465 RTL_PASS, /* type */
466 "*free_cfg", /* name */
467 OPTGROUP_NONE, /* optinfo_flags */
468 TV_NONE, /* tv_id */
469 0, /* properties_required */
470 0, /* properties_provided */
471 PROP_cfg, /* properties_destroyed */
472 0, /* todo_flags_start */
473 0, /* todo_flags_finish */
474 };
475
476 class pass_free_cfg : public rtl_opt_pass
477 {
478 public:
479 pass_free_cfg (gcc::context *ctxt)
480 : rtl_opt_pass (pass_data_free_cfg, ctxt)
481 {}
482
483 /* opt_pass methods: */
484 virtual unsigned int execute (function *);
485
486 }; // class pass_free_cfg
487
488 unsigned int
489 pass_free_cfg::execute (function *)
490 {
491 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
492 valid at that point so it would be too late to call df_analyze. */
493 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
494 {
495 df_note_add_problem ();
496 df_analyze ();
497 }
498
499 if (crtl->has_bb_partition)
500 insert_section_boundary_note ();
501
502 free_bb_for_insn ();
503 return 0;
504 }
505
506 } // anon namespace
507
508 rtl_opt_pass *
509 make_pass_free_cfg (gcc::context *ctxt)
510 {
511 return new pass_free_cfg (ctxt);
512 }
513
514 /* Return RTX to emit after when we want to emit code on the entry of function. */
515 rtx_insn *
516 entry_of_function (void)
517 {
518 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
519 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
520 }
521
522 /* Emit INSN at the entry point of the function, ensuring that it is only
523 executed once per function. */
524 void
525 emit_insn_at_entry (rtx insn)
526 {
527 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
528 edge e = ei_safe_edge (ei);
529 gcc_assert (e->flags & EDGE_FALLTHRU);
530
531 insert_insn_on_edge (insn, e);
532 commit_edge_insertions ();
533 }
534
535 /* Update BLOCK_FOR_INSN of insns between BEGIN and END
536 (or BARRIER if found) and notify df of the bb change.
537 The insn chain range is inclusive
538 (i.e. both BEGIN and END will be updated. */
539
540 static void
541 update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
542 {
543 rtx_insn *insn;
544
545 end = NEXT_INSN (end);
546 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
547 if (!BARRIER_P (insn))
548 df_insn_change_bb (insn, bb);
549 }
550
551 /* Update BLOCK_FOR_INSN of insns in BB to BB,
552 and notify df of the change. */
553
554 void
555 update_bb_for_insn (basic_block bb)
556 {
557 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
558 }
559
560 \f
561 /* Like active_insn_p, except keep the return value use or clobber around
562 even after reload. */
563
564 static bool
565 flow_active_insn_p (const rtx_insn *insn)
566 {
567 if (active_insn_p (insn))
568 return true;
569
570 /* A clobber of the function return value exists for buggy
571 programs that fail to return a value. Its effect is to
572 keep the return value from being live across the entire
573 function. If we allow it to be skipped, we introduce the
574 possibility for register lifetime confusion.
575 Similarly, keep a USE of the function return value, otherwise
576 the USE is dropped and we could fail to thread jump if USE
577 appears on some paths and not on others, see PR90257. */
578 if ((GET_CODE (PATTERN (insn)) == CLOBBER
579 || GET_CODE (PATTERN (insn)) == USE)
580 && REG_P (XEXP (PATTERN (insn), 0))
581 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
582 return true;
583
584 return false;
585 }
586
587 /* Return true if the block has no effect and only forwards control flow to
588 its single destination. */
589
590 bool
591 contains_no_active_insn_p (const_basic_block bb)
592 {
593 rtx_insn *insn;
594
595 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
596 || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
597 || !single_succ_p (bb)
598 || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
599 return false;
600
601 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
602 if (INSN_P (insn) && flow_active_insn_p (insn))
603 return false;
604
605 return (!INSN_P (insn)
606 || (JUMP_P (insn) && simplejump_p (insn))
607 || !flow_active_insn_p (insn));
608 }
609
610 /* Likewise, but protect loop latches, headers and preheaders. */
611 /* FIXME: Make this a cfg hook. */
612
613 bool
614 forwarder_block_p (const_basic_block bb)
615 {
616 if (!contains_no_active_insn_p (bb))
617 return false;
618
619 /* Protect loop latches, headers and preheaders. */
620 if (current_loops)
621 {
622 basic_block dest;
623 if (bb->loop_father->header == bb)
624 return false;
625 dest = EDGE_SUCC (bb, 0)->dest;
626 if (dest->loop_father->header == dest)
627 return false;
628 }
629
630 return true;
631 }
632
633 /* Return nonzero if we can reach target from src by falling through. */
634 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
635
636 bool
637 can_fallthru (basic_block src, basic_block target)
638 {
639 rtx_insn *insn = BB_END (src);
640 rtx_insn *insn2;
641 edge e;
642 edge_iterator ei;
643
644 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
645 return true;
646 if (src->next_bb != target)
647 return false;
648
649 /* ??? Later we may add code to move jump tables offline. */
650 if (tablejump_p (insn, NULL, NULL))
651 return false;
652
653 FOR_EACH_EDGE (e, ei, src->succs)
654 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
655 && e->flags & EDGE_FALLTHRU)
656 return false;
657
658 insn2 = BB_HEAD (target);
659 if (!active_insn_p (insn2))
660 insn2 = next_active_insn (insn2);
661
662 return next_active_insn (insn) == insn2;
663 }
664
665 /* Return nonzero if we could reach target from src by falling through,
666 if the target was made adjacent. If we already have a fall-through
667 edge to the exit block, we can't do that. */
668 static bool
669 could_fall_through (basic_block src, basic_block target)
670 {
671 edge e;
672 edge_iterator ei;
673
674 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
675 return true;
676 FOR_EACH_EDGE (e, ei, src->succs)
677 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
678 && e->flags & EDGE_FALLTHRU)
679 return 0;
680 return true;
681 }
682 \f
683 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
684 rtx_note *
685 bb_note (basic_block bb)
686 {
687 rtx_insn *note;
688
689 note = BB_HEAD (bb);
690 if (LABEL_P (note))
691 note = NEXT_INSN (note);
692
693 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
694 return as_a <rtx_note *> (note);
695 }
696
697 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
698 note associated with the BLOCK. */
699
700 static rtx_insn *
701 first_insn_after_basic_block_note (basic_block block)
702 {
703 rtx_insn *insn;
704
705 /* Get the first instruction in the block. */
706 insn = BB_HEAD (block);
707
708 if (insn == NULL_RTX)
709 return NULL;
710 if (LABEL_P (insn))
711 insn = NEXT_INSN (insn);
712 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
713
714 return NEXT_INSN (insn);
715 }
716
717 /* Creates a new basic block just after basic block BB by splitting
718 everything after specified instruction INSNP. */
719
720 static basic_block
721 rtl_split_block (basic_block bb, void *insnp)
722 {
723 basic_block new_bb;
724 rtx_insn *insn = (rtx_insn *) insnp;
725 edge e;
726 edge_iterator ei;
727
728 if (!insn)
729 {
730 insn = first_insn_after_basic_block_note (bb);
731
732 if (insn)
733 {
734 rtx_insn *next = insn;
735
736 insn = PREV_INSN (insn);
737
738 /* If the block contains only debug insns, insn would have
739 been NULL in a non-debug compilation, and then we'd end
740 up emitting a DELETED note. For -fcompare-debug
741 stability, emit the note too. */
742 if (insn != BB_END (bb)
743 && DEBUG_INSN_P (next)
744 && DEBUG_INSN_P (BB_END (bb)))
745 {
746 while (next != BB_END (bb) && DEBUG_INSN_P (next))
747 next = NEXT_INSN (next);
748
749 if (next == BB_END (bb))
750 emit_note_after (NOTE_INSN_DELETED, next);
751 }
752 }
753 else
754 insn = get_last_insn ();
755 }
756
757 /* We probably should check type of the insn so that we do not create
758 inconsistent cfg. It is checked in verify_flow_info anyway, so do not
759 bother. */
760 if (insn == BB_END (bb))
761 emit_note_after (NOTE_INSN_DELETED, insn);
762
763 /* Create the new basic block. */
764 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
765 BB_COPY_PARTITION (new_bb, bb);
766 BB_END (bb) = insn;
767
768 /* Redirect the outgoing edges. */
769 new_bb->succs = bb->succs;
770 bb->succs = NULL;
771 FOR_EACH_EDGE (e, ei, new_bb->succs)
772 e->src = new_bb;
773
774 /* The new block starts off being dirty. */
775 df_set_bb_dirty (bb);
776 return new_bb;
777 }
778
779 /* Return true if the single edge between blocks A and B is the only place
780 in RTL which holds some unique locus. */
781
782 static bool
783 unique_locus_on_edge_between_p (basic_block a, basic_block b)
784 {
785 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
786 rtx_insn *insn, *end;
787
788 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
789 return false;
790
791 /* First scan block A backward. */
792 insn = BB_END (a);
793 end = PREV_INSN (BB_HEAD (a));
794 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
795 insn = PREV_INSN (insn);
796
797 if (insn != end && INSN_LOCATION (insn) == goto_locus)
798 return false;
799
800 /* Then scan block B forward. */
801 insn = BB_HEAD (b);
802 if (insn)
803 {
804 end = NEXT_INSN (BB_END (b));
805 while (insn != end && !NONDEBUG_INSN_P (insn))
806 insn = NEXT_INSN (insn);
807
808 if (insn != end && INSN_HAS_LOCATION (insn)
809 && INSN_LOCATION (insn) == goto_locus)
810 return false;
811 }
812
813 return true;
814 }
815
816 /* If the single edge between blocks A and B is the only place in RTL which
817 holds some unique locus, emit a nop with that locus between the blocks. */
818
819 static void
820 emit_nop_for_unique_locus_between (basic_block a, basic_block b)
821 {
822 if (!unique_locus_on_edge_between_p (a, b))
823 return;
824
825 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
826 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
827 }
828
829 /* Blocks A and B are to be merged into a single block A. The insns
830 are already contiguous. */
831
832 static void
833 rtl_merge_blocks (basic_block a, basic_block b)
834 {
835 /* If B is a forwarder block whose outgoing edge has no location, we'll
836 propagate the locus of the edge between A and B onto it. */
837 const bool forward_edge_locus
838 = (b->flags & BB_FORWARDER_BLOCK) != 0
839 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
840 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
841 rtx_insn *del_first = NULL, *del_last = NULL;
842 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
843 int b_empty = 0;
844
845 if (dump_file)
846 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
847 a->index);
848
849 while (DEBUG_INSN_P (b_end))
850 b_end = PREV_INSN (b_debug_start = b_end);
851
852 /* If there was a CODE_LABEL beginning B, delete it. */
853 if (LABEL_P (b_head))
854 {
855 /* Detect basic blocks with nothing but a label. This can happen
856 in particular at the end of a function. */
857 if (b_head == b_end)
858 b_empty = 1;
859
860 del_first = del_last = b_head;
861 b_head = NEXT_INSN (b_head);
862 }
863
864 /* Delete the basic block note and handle blocks containing just that
865 note. */
866 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
867 {
868 if (b_head == b_end)
869 b_empty = 1;
870 if (! del_last)
871 del_first = b_head;
872
873 del_last = b_head;
874 b_head = NEXT_INSN (b_head);
875 }
876
877 /* If there was a jump out of A, delete it. */
878 if (JUMP_P (a_end))
879 {
880 rtx_insn *prev;
881
882 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
883 if (!NOTE_P (prev)
884 || NOTE_INSN_BASIC_BLOCK_P (prev)
885 || prev == BB_HEAD (a))
886 break;
887
888 del_first = a_end;
889
890 /* If this was a conditional jump, we need to also delete
891 the insn that set cc0. */
892 if (HAVE_cc0 && only_sets_cc0_p (prev))
893 {
894 rtx_insn *tmp = prev;
895
896 prev = prev_nonnote_insn (prev);
897 if (!prev)
898 prev = BB_HEAD (a);
899 del_first = tmp;
900 }
901
902 a_end = PREV_INSN (del_first);
903 }
904 else if (BARRIER_P (NEXT_INSN (a_end)))
905 del_first = NEXT_INSN (a_end);
906
907 /* Delete everything marked above as well as crap that might be
908 hanging out between the two blocks. */
909 BB_END (a) = a_end;
910 BB_HEAD (b) = b_empty ? NULL : b_head;
911 delete_insn_chain (del_first, del_last, true);
912
913 /* If not optimizing, preserve the locus of the single edge between
914 blocks A and B if necessary by emitting a nop. */
915 if (!optimize
916 && !forward_edge_locus
917 && !DECL_IGNORED_P (current_function_decl))
918 {
919 emit_nop_for_unique_locus_between (a, b);
920 a_end = BB_END (a);
921 }
922
923 /* Reassociate the insns of B with A. */
924 if (!b_empty)
925 {
926 update_bb_for_insn_chain (a_end, b_debug_end, a);
927
928 BB_END (a) = b_debug_end;
929 BB_HEAD (b) = NULL;
930 }
931 else if (b_end != b_debug_end)
932 {
933 /* Move any deleted labels and other notes between the end of A
934 and the debug insns that make up B after the debug insns,
935 bringing the debug insns into A while keeping the notes after
936 the end of A. */
937 if (NEXT_INSN (a_end) != b_debug_start)
938 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
939 b_debug_end);
940 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
941 BB_END (a) = b_debug_end;
942 }
943
944 df_bb_delete (b->index);
945
946 if (forward_edge_locus)
947 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
948
949 if (dump_file)
950 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
951 }
952
953
954 /* Return true when block A and B can be merged. */
955
956 static bool
957 rtl_can_merge_blocks (basic_block a, basic_block b)
958 {
959 /* If we are partitioning hot/cold basic blocks, we don't want to
960 mess up unconditional or indirect jumps that cross between hot
961 and cold sections.
962
963 Basic block partitioning may result in some jumps that appear to
964 be optimizable (or blocks that appear to be mergeable), but which really
965 must be left untouched (they are required to make it safely across
966 partition boundaries). See the comments at the top of
967 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
968
969 if (BB_PARTITION (a) != BB_PARTITION (b))
970 return false;
971
972 /* Protect the loop latches. */
973 if (current_loops && b->loop_father->latch == b)
974 return false;
975
976 /* There must be exactly one edge in between the blocks. */
977 return (single_succ_p (a)
978 && single_succ (a) == b
979 && single_pred_p (b)
980 && a != b
981 /* Must be simple edge. */
982 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
983 && a->next_bb == b
984 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
985 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
986 /* If the jump insn has side effects,
987 we can't kill the edge. */
988 && (!JUMP_P (BB_END (a))
989 || (reload_completed
990 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
991 }
992 \f
993 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
994 exist. */
995
996 rtx_code_label *
997 block_label (basic_block block)
998 {
999 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
1000 return NULL;
1001
1002 if (!LABEL_P (BB_HEAD (block)))
1003 {
1004 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
1005 }
1006
1007 return as_a <rtx_code_label *> (BB_HEAD (block));
1008 }
1009
1010 /* Remove all barriers from BB_FOOTER of a BB. */
1011
1012 static void
1013 remove_barriers_from_footer (basic_block bb)
1014 {
1015 rtx_insn *insn = BB_FOOTER (bb);
1016
1017 /* Remove barriers but keep jumptables. */
1018 while (insn)
1019 {
1020 if (BARRIER_P (insn))
1021 {
1022 if (PREV_INSN (insn))
1023 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1024 else
1025 BB_FOOTER (bb) = NEXT_INSN (insn);
1026 if (NEXT_INSN (insn))
1027 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1028 }
1029 if (LABEL_P (insn))
1030 return;
1031 insn = NEXT_INSN (insn);
1032 }
1033 }
1034
1035 /* Attempt to perform edge redirection by replacing possibly complex jump
1036 instruction by unconditional jump or removing jump completely. This can
1037 apply only if all edges now point to the same block. The parameters and
1038 return values are equivalent to redirect_edge_and_branch. */
1039
1040 edge
1041 try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
1042 {
1043 basic_block src = e->src;
1044 rtx_insn *insn = BB_END (src), *kill_from;
1045 rtx set;
1046 int fallthru = 0;
1047
1048 /* If we are partitioning hot/cold basic blocks, we don't want to
1049 mess up unconditional or indirect jumps that cross between hot
1050 and cold sections.
1051
1052 Basic block partitioning may result in some jumps that appear to
1053 be optimizable (or blocks that appear to be mergeable), but which really
1054 must be left untouched (they are required to make it safely across
1055 partition boundaries). See the comments at the top of
1056 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1057
1058 if (BB_PARTITION (src) != BB_PARTITION (target))
1059 return NULL;
1060
1061 /* We can replace or remove a complex jump only when we have exactly
1062 two edges. Also, if we have exactly one outgoing edge, we can
1063 redirect that. */
1064 if (EDGE_COUNT (src->succs) >= 3
1065 /* Verify that all targets will be TARGET. Specifically, the
1066 edge that is not E must also go to TARGET. */
1067 || (EDGE_COUNT (src->succs) == 2
1068 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1069 return NULL;
1070
1071 if (!onlyjump_p (insn))
1072 return NULL;
1073 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1074 return NULL;
1075
1076 /* Avoid removing branch with side effects. */
1077 set = single_set (insn);
1078 if (!set || side_effects_p (set))
1079 return NULL;
1080
1081 /* In case we zap a conditional jump, we'll need to kill
1082 the cc0 setter too. */
1083 kill_from = insn;
1084 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1085 && only_sets_cc0_p (PREV_INSN (insn)))
1086 kill_from = PREV_INSN (insn);
1087
1088 /* See if we can create the fallthru edge. */
1089 if (in_cfglayout || can_fallthru (src, target))
1090 {
1091 if (dump_file)
1092 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1093 fallthru = 1;
1094
1095 /* Selectively unlink whole insn chain. */
1096 if (in_cfglayout)
1097 {
1098 delete_insn_chain (kill_from, BB_END (src), false);
1099 remove_barriers_from_footer (src);
1100 }
1101 else
1102 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1103 false);
1104 }
1105
1106 /* If this already is simplejump, redirect it. */
1107 else if (simplejump_p (insn))
1108 {
1109 if (e->dest == target)
1110 return NULL;
1111 if (dump_file)
1112 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1113 INSN_UID (insn), e->dest->index, target->index);
1114 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1115 block_label (target), 0))
1116 {
1117 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1118 return NULL;
1119 }
1120 }
1121
1122 /* Cannot do anything for target exit block. */
1123 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1124 return NULL;
1125
1126 /* Or replace possibly complicated jump insn by simple jump insn. */
1127 else
1128 {
1129 rtx_code_label *target_label = block_label (target);
1130 rtx_insn *barrier;
1131 rtx_insn *label;
1132 rtx_jump_table_data *table;
1133
1134 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1135 JUMP_LABEL (BB_END (src)) = target_label;
1136 LABEL_NUSES (target_label)++;
1137 if (dump_file)
1138 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1139 INSN_UID (insn), INSN_UID (BB_END (src)));
1140
1141
1142 delete_insn_chain (kill_from, insn, false);
1143
1144 /* Recognize a tablejump that we are converting to a
1145 simple jump and remove its associated CODE_LABEL
1146 and ADDR_VEC or ADDR_DIFF_VEC. */
1147 if (tablejump_p (insn, &label, &table))
1148 delete_insn_chain (label, table, false);
1149
1150 barrier = next_nonnote_nondebug_insn (BB_END (src));
1151 if (!barrier || !BARRIER_P (barrier))
1152 emit_barrier_after (BB_END (src));
1153 else
1154 {
1155 if (barrier != NEXT_INSN (BB_END (src)))
1156 {
1157 /* Move the jump before barrier so that the notes
1158 which originally were or were created before jump table are
1159 inside the basic block. */
1160 rtx_insn *new_insn = BB_END (src);
1161
1162 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1163 PREV_INSN (barrier), src);
1164
1165 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1166 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1167
1168 SET_NEXT_INSN (new_insn) = barrier;
1169 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1170
1171 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1172 SET_PREV_INSN (barrier) = new_insn;
1173 }
1174 }
1175 }
1176
1177 /* Keep only one edge out and set proper flags. */
1178 if (!single_succ_p (src))
1179 remove_edge (e);
1180 gcc_assert (single_succ_p (src));
1181
1182 e = single_succ_edge (src);
1183 if (fallthru)
1184 e->flags = EDGE_FALLTHRU;
1185 else
1186 e->flags = 0;
1187
1188 e->probability = profile_probability::always ();
1189
1190 if (e->dest != target)
1191 redirect_edge_succ (e, target);
1192 return e;
1193 }
1194
1195 /* Subroutine of redirect_branch_edge that tries to patch the jump
1196 instruction INSN so that it reaches block NEW. Do this
1197 only when it originally reached block OLD. Return true if this
1198 worked or the original target wasn't OLD, return false if redirection
1199 doesn't work. */
1200
1201 static bool
1202 patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1203 {
1204 rtx_jump_table_data *table;
1205 rtx tmp;
1206 /* Recognize a tablejump and adjust all matching cases. */
1207 if (tablejump_p (insn, NULL, &table))
1208 {
1209 rtvec vec;
1210 int j;
1211 rtx_code_label *new_label = block_label (new_bb);
1212
1213 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1214 return false;
1215 vec = table->get_labels ();
1216
1217 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1218 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1219 {
1220 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1221 --LABEL_NUSES (old_label);
1222 ++LABEL_NUSES (new_label);
1223 }
1224
1225 /* Handle casesi dispatch insns. */
1226 if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
1227 && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
1228 {
1229 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1230 new_label);
1231 --LABEL_NUSES (old_label);
1232 ++LABEL_NUSES (new_label);
1233 }
1234 }
1235 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1236 {
1237 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1238 rtx note;
1239
1240 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1241 return false;
1242 rtx_code_label *new_label = block_label (new_bb);
1243
1244 for (i = 0; i < n; ++i)
1245 {
1246 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1247 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1248 if (XEXP (old_ref, 0) == old_label)
1249 {
1250 ASM_OPERANDS_LABEL (tmp, i)
1251 = gen_rtx_LABEL_REF (Pmode, new_label);
1252 --LABEL_NUSES (old_label);
1253 ++LABEL_NUSES (new_label);
1254 }
1255 }
1256
1257 if (JUMP_LABEL (insn) == old_label)
1258 {
1259 JUMP_LABEL (insn) = new_label;
1260 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1261 if (note)
1262 remove_note (insn, note);
1263 }
1264 else
1265 {
1266 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1267 if (note)
1268 remove_note (insn, note);
1269 if (JUMP_LABEL (insn) != new_label
1270 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1271 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1272 }
1273 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1274 != NULL_RTX)
1275 XEXP (note, 0) = new_label;
1276 }
1277 else
1278 {
1279 /* ?? We may play the games with moving the named labels from
1280 one basic block to the other in case only one computed_jump is
1281 available. */
1282 if (computed_jump_p (insn)
1283 /* A return instruction can't be redirected. */
1284 || returnjump_p (insn))
1285 return false;
1286
1287 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1288 {
1289 /* If the insn doesn't go where we think, we're confused. */
1290 gcc_assert (JUMP_LABEL (insn) == old_label);
1291
1292 /* If the substitution doesn't succeed, die. This can happen
1293 if the back end emitted unrecognizable instructions or if
1294 target is exit block on some arches. Or for crossing
1295 jumps. */
1296 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1297 block_label (new_bb), 0))
1298 {
1299 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
1300 || CROSSING_JUMP_P (insn));
1301 return false;
1302 }
1303 }
1304 }
1305 return true;
1306 }
1307
1308
1309 /* Redirect edge representing branch of (un)conditional jump or tablejump,
1310 NULL on failure */
1311 static edge
1312 redirect_branch_edge (edge e, basic_block target)
1313 {
1314 rtx_insn *old_label = BB_HEAD (e->dest);
1315 basic_block src = e->src;
1316 rtx_insn *insn = BB_END (src);
1317
1318 /* We can only redirect non-fallthru edges of jump insn. */
1319 if (e->flags & EDGE_FALLTHRU)
1320 return NULL;
1321 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1322 return NULL;
1323
1324 if (!currently_expanding_to_rtl)
1325 {
1326 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1327 return NULL;
1328 }
1329 else
1330 /* When expanding this BB might actually contain multiple
1331 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1332 Redirect all of those that match our label. */
1333 FOR_BB_INSNS (src, insn)
1334 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1335 old_label, target))
1336 return NULL;
1337
1338 if (dump_file)
1339 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1340 e->src->index, e->dest->index, target->index);
1341
1342 if (e->dest != target)
1343 e = redirect_edge_succ_nodup (e, target);
1344
1345 return e;
1346 }
1347
1348 /* Called when edge E has been redirected to a new destination,
1349 in order to update the region crossing flag on the edge and
1350 jump. */
1351
1352 static void
1353 fixup_partition_crossing (edge e)
1354 {
1355 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1356 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1357 return;
1358 /* If we redirected an existing edge, it may already be marked
1359 crossing, even though the new src is missing a reg crossing note.
1360 But make sure reg crossing note doesn't already exist before
1361 inserting. */
1362 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1363 {
1364 e->flags |= EDGE_CROSSING;
1365 if (JUMP_P (BB_END (e->src)))
1366 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1367 }
1368 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1369 {
1370 e->flags &= ~EDGE_CROSSING;
1371 /* Remove the section crossing note from jump at end of
1372 src if it exists, and if no other successors are
1373 still crossing. */
1374 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1375 {
1376 bool has_crossing_succ = false;
1377 edge e2;
1378 edge_iterator ei;
1379 FOR_EACH_EDGE (e2, ei, e->src->succs)
1380 {
1381 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1382 if (has_crossing_succ)
1383 break;
1384 }
1385 if (!has_crossing_succ)
1386 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1387 }
1388 }
1389 }
1390
1391 /* Called when block BB has been reassigned to the cold partition,
1392 because it is now dominated by another cold block,
1393 to ensure that the region crossing attributes are updated. */
1394
1395 static void
1396 fixup_new_cold_bb (basic_block bb)
1397 {
1398 edge e;
1399 edge_iterator ei;
1400
1401 /* This is called when a hot bb is found to now be dominated
1402 by a cold bb and therefore needs to become cold. Therefore,
1403 its preds will no longer be region crossing. Any non-dominating
1404 preds that were previously hot would also have become cold
1405 in the caller for the same region. Any preds that were previously
1406 region-crossing will be adjusted in fixup_partition_crossing. */
1407 FOR_EACH_EDGE (e, ei, bb->preds)
1408 {
1409 fixup_partition_crossing (e);
1410 }
1411
1412 /* Possibly need to make bb's successor edges region crossing,
1413 or remove stale region crossing. */
1414 FOR_EACH_EDGE (e, ei, bb->succs)
1415 {
1416 /* We can't have fall-through edges across partition boundaries.
1417 Note that force_nonfallthru will do any necessary partition
1418 boundary fixup by calling fixup_partition_crossing itself. */
1419 if ((e->flags & EDGE_FALLTHRU)
1420 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1421 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1422 force_nonfallthru (e);
1423 else
1424 fixup_partition_crossing (e);
1425 }
1426 }
1427
1428 /* Attempt to change code to redirect edge E to TARGET. Don't do that on
1429 expense of adding new instructions or reordering basic blocks.
1430
1431 Function can be also called with edge destination equivalent to the TARGET.
1432 Then it should try the simplifications and do nothing if none is possible.
1433
1434 Return edge representing the branch if transformation succeeded. Return NULL
1435 on failure.
1436 We still return NULL in case E already destinated TARGET and we didn't
1437 managed to simplify instruction stream. */
1438
1439 static edge
1440 rtl_redirect_edge_and_branch (edge e, basic_block target)
1441 {
1442 edge ret;
1443 basic_block src = e->src;
1444 basic_block dest = e->dest;
1445
1446 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1447 return NULL;
1448
1449 if (dest == target)
1450 return e;
1451
1452 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1453 {
1454 df_set_bb_dirty (src);
1455 fixup_partition_crossing (ret);
1456 return ret;
1457 }
1458
1459 ret = redirect_branch_edge (e, target);
1460 if (!ret)
1461 return NULL;
1462
1463 df_set_bb_dirty (src);
1464 fixup_partition_crossing (ret);
1465 return ret;
1466 }
1467
1468 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1469
1470 void
1471 emit_barrier_after_bb (basic_block bb)
1472 {
1473 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1474 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1475 || current_ir_type () == IR_RTL_CFGLAYOUT);
1476 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1477 {
1478 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1479
1480 if (BB_FOOTER (bb))
1481 {
1482 rtx_insn *footer_tail = BB_FOOTER (bb);
1483
1484 while (NEXT_INSN (footer_tail))
1485 footer_tail = NEXT_INSN (footer_tail);
1486 if (!BARRIER_P (footer_tail))
1487 {
1488 SET_NEXT_INSN (footer_tail) = insn;
1489 SET_PREV_INSN (insn) = footer_tail;
1490 }
1491 }
1492 else
1493 BB_FOOTER (bb) = insn;
1494 }
1495 }
1496
1497 /* Like force_nonfallthru below, but additionally performs redirection
1498 Used by redirect_edge_and_branch_force. JUMP_LABEL is used only
1499 when redirecting to the EXIT_BLOCK, it is either ret_rtx or
1500 simple_return_rtx, indicating which kind of returnjump to create.
1501 It should be NULL otherwise. */
1502
1503 basic_block
1504 force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1505 {
1506 basic_block jump_block, new_bb = NULL, src = e->src;
1507 rtx note;
1508 edge new_edge;
1509 int abnormal_edge_flags = 0;
1510 bool asm_goto_edge = false;
1511 int loc;
1512
1513 /* In the case the last instruction is conditional jump to the next
1514 instruction, first redirect the jump itself and then continue
1515 by creating a basic block afterwards to redirect fallthru edge. */
1516 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1517 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1518 && any_condjump_p (BB_END (e->src))
1519 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1520 {
1521 rtx note;
1522 edge b = unchecked_make_edge (e->src, target, 0);
1523 bool redirected;
1524
1525 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1526 block_label (target), 0);
1527 gcc_assert (redirected);
1528
1529 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1530 if (note)
1531 {
1532 int prob = XINT (note, 0);
1533
1534 b->probability = profile_probability::from_reg_br_prob_note (prob);
1535 e->probability -= e->probability;
1536 }
1537 }
1538
1539 if (e->flags & EDGE_ABNORMAL)
1540 {
1541 /* Irritating special case - fallthru edge to the same block as abnormal
1542 edge.
1543 We can't redirect abnormal edge, but we still can split the fallthru
1544 one and create separate abnormal edge to original destination.
1545 This allows bb-reorder to make such edge non-fallthru. */
1546 gcc_assert (e->dest == target);
1547 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1548 e->flags &= EDGE_FALLTHRU;
1549 }
1550 else
1551 {
1552 gcc_assert (e->flags & EDGE_FALLTHRU);
1553 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1554 {
1555 /* We can't redirect the entry block. Create an empty block
1556 at the start of the function which we use to add the new
1557 jump. */
1558 edge tmp;
1559 edge_iterator ei;
1560 bool found = false;
1561
1562 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1563 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1564 bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1565
1566 /* Make sure new block ends up in correct hot/cold section. */
1567 BB_COPY_PARTITION (bb, e->dest);
1568
1569 /* Change the existing edge's source to be the new block, and add
1570 a new edge from the entry block to the new block. */
1571 e->src = bb;
1572 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1573 (tmp = ei_safe_edge (ei)); )
1574 {
1575 if (tmp == e)
1576 {
1577 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1578 found = true;
1579 break;
1580 }
1581 else
1582 ei_next (&ei);
1583 }
1584
1585 gcc_assert (found);
1586
1587 vec_safe_push (bb->succs, e);
1588 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1589 EDGE_FALLTHRU);
1590 }
1591 }
1592
1593 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1594 don't point to the target or fallthru label. */
1595 if (JUMP_P (BB_END (e->src))
1596 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1597 && (e->flags & EDGE_FALLTHRU)
1598 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1599 {
1600 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1601 bool adjust_jump_target = false;
1602
1603 for (i = 0; i < n; ++i)
1604 {
1605 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1606 {
1607 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1608 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1609 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1610 adjust_jump_target = true;
1611 }
1612 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1613 asm_goto_edge = true;
1614 }
1615 if (adjust_jump_target)
1616 {
1617 rtx_insn *insn = BB_END (e->src);
1618 rtx note;
1619 rtx_insn *old_label = BB_HEAD (e->dest);
1620 rtx_insn *new_label = BB_HEAD (target);
1621
1622 if (JUMP_LABEL (insn) == old_label)
1623 {
1624 JUMP_LABEL (insn) = new_label;
1625 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1626 if (note)
1627 remove_note (insn, note);
1628 }
1629 else
1630 {
1631 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1632 if (note)
1633 remove_note (insn, note);
1634 if (JUMP_LABEL (insn) != new_label
1635 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1636 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1637 }
1638 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1639 != NULL_RTX)
1640 XEXP (note, 0) = new_label;
1641 }
1642 }
1643
1644 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1645 {
1646 rtx_insn *new_head;
1647 profile_count count = e->count ();
1648 profile_probability probability = e->probability;
1649 /* Create the new structures. */
1650
1651 /* If the old block ended with a tablejump, skip its table
1652 by searching forward from there. Otherwise start searching
1653 forward from the last instruction of the old block. */
1654 rtx_jump_table_data *table;
1655 if (tablejump_p (BB_END (e->src), NULL, &table))
1656 new_head = table;
1657 else
1658 new_head = BB_END (e->src);
1659 new_head = NEXT_INSN (new_head);
1660
1661 jump_block = create_basic_block (new_head, NULL, e->src);
1662 jump_block->count = count;
1663
1664 /* Make sure new block ends up in correct hot/cold section. */
1665
1666 BB_COPY_PARTITION (jump_block, e->src);
1667
1668 /* Wire edge in. */
1669 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1670 new_edge->probability = probability;
1671
1672 /* Redirect old edge. */
1673 redirect_edge_pred (e, jump_block);
1674 e->probability = profile_probability::always ();
1675
1676 /* If e->src was previously region crossing, it no longer is
1677 and the reg crossing note should be removed. */
1678 fixup_partition_crossing (new_edge);
1679
1680 /* If asm goto has any label refs to target's label,
1681 add also edge from asm goto bb to target. */
1682 if (asm_goto_edge)
1683 {
1684 new_edge->probability = new_edge->probability.apply_scale (1, 2);
1685 jump_block->count = jump_block->count.apply_scale (1, 2);
1686 edge new_edge2 = make_edge (new_edge->src, target,
1687 e->flags & ~EDGE_FALLTHRU);
1688 new_edge2->probability = probability - new_edge->probability;
1689 }
1690
1691 new_bb = jump_block;
1692 }
1693 else
1694 jump_block = e->src;
1695
1696 loc = e->goto_locus;
1697 e->flags &= ~EDGE_FALLTHRU;
1698 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1699 {
1700 if (jump_label == ret_rtx)
1701 emit_jump_insn_after_setloc (targetm.gen_return (),
1702 BB_END (jump_block), loc);
1703 else
1704 {
1705 gcc_assert (jump_label == simple_return_rtx);
1706 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1707 BB_END (jump_block), loc);
1708 }
1709 set_return_jump_label (BB_END (jump_block));
1710 }
1711 else
1712 {
1713 rtx_code_label *label = block_label (target);
1714 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1715 BB_END (jump_block), loc);
1716 JUMP_LABEL (BB_END (jump_block)) = label;
1717 LABEL_NUSES (label)++;
1718 }
1719
1720 /* We might be in cfg layout mode, and if so, the following routine will
1721 insert the barrier correctly. */
1722 emit_barrier_after_bb (jump_block);
1723 redirect_edge_succ_nodup (e, target);
1724
1725 if (abnormal_edge_flags)
1726 make_edge (src, target, abnormal_edge_flags);
1727
1728 df_mark_solutions_dirty ();
1729 fixup_partition_crossing (e);
1730 return new_bb;
1731 }
1732
1733 /* Edge E is assumed to be fallthru edge. Emit needed jump instruction
1734 (and possibly create new basic block) to make edge non-fallthru.
1735 Return newly created BB or NULL if none. */
1736
1737 static basic_block
1738 rtl_force_nonfallthru (edge e)
1739 {
1740 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1741 }
1742
1743 /* Redirect edge even at the expense of creating new jump insn or
1744 basic block. Return new basic block if created, NULL otherwise.
1745 Conversion must be possible. */
1746
1747 static basic_block
1748 rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1749 {
1750 if (redirect_edge_and_branch (e, target)
1751 || e->dest == target)
1752 return NULL;
1753
1754 /* In case the edge redirection failed, try to force it to be non-fallthru
1755 and redirect newly created simplejump. */
1756 df_set_bb_dirty (e->src);
1757 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1758 }
1759
1760 /* The given edge should potentially be a fallthru edge. If that is in
1761 fact true, delete the jump and barriers that are in the way. */
1762
1763 static void
1764 rtl_tidy_fallthru_edge (edge e)
1765 {
1766 rtx_insn *q;
1767 basic_block b = e->src, c = b->next_bb;
1768
1769 /* ??? In a late-running flow pass, other folks may have deleted basic
1770 blocks by nopping out blocks, leaving multiple BARRIERs between here
1771 and the target label. They ought to be chastised and fixed.
1772
1773 We can also wind up with a sequence of undeletable labels between
1774 one block and the next.
1775
1776 So search through a sequence of barriers, labels, and notes for
1777 the head of block C and assert that we really do fall through. */
1778
1779 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1780 if (NONDEBUG_INSN_P (q))
1781 return;
1782
1783 /* Remove what will soon cease being the jump insn from the source block.
1784 If block B consisted only of this single jump, turn it into a deleted
1785 note. */
1786 q = BB_END (b);
1787 if (JUMP_P (q)
1788 && onlyjump_p (q)
1789 && (any_uncondjump_p (q)
1790 || single_succ_p (b)))
1791 {
1792 rtx_insn *label;
1793 rtx_jump_table_data *table;
1794
1795 if (tablejump_p (q, &label, &table))
1796 {
1797 /* The label is likely mentioned in some instruction before
1798 the tablejump and might not be DCEd, so turn it into
1799 a note instead and move before the tablejump that is going to
1800 be deleted. */
1801 const char *name = LABEL_NAME (label);
1802 PUT_CODE (label, NOTE);
1803 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1804 NOTE_DELETED_LABEL_NAME (label) = name;
1805 reorder_insns (label, label, PREV_INSN (q));
1806 delete_insn (table);
1807 }
1808
1809 /* If this was a conditional jump, we need to also delete
1810 the insn that set cc0. */
1811 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1812 q = PREV_INSN (q);
1813
1814 q = PREV_INSN (q);
1815 }
1816 /* Unconditional jumps with side-effects (i.e. which we can't just delete
1817 together with the barrier) should never have a fallthru edge. */
1818 else if (JUMP_P (q) && any_uncondjump_p (q))
1819 return;
1820
1821 /* Selectively unlink the sequence. */
1822 if (q != PREV_INSN (BB_HEAD (c)))
1823 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1824
1825 e->flags |= EDGE_FALLTHRU;
1826 }
1827 \f
1828 /* Should move basic block BB after basic block AFTER. NIY. */
1829
1830 static bool
1831 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1832 basic_block after ATTRIBUTE_UNUSED)
1833 {
1834 return false;
1835 }
1836
1837 /* Locate the last bb in the same partition as START_BB. */
1838
1839 static basic_block
1840 last_bb_in_partition (basic_block start_bb)
1841 {
1842 basic_block bb;
1843 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1844 {
1845 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1846 return bb;
1847 }
1848 /* Return bb before the exit block. */
1849 return bb->prev_bb;
1850 }
1851
1852 /* Split a (typically critical) edge. Return the new block.
1853 The edge must not be abnormal.
1854
1855 ??? The code generally expects to be called on critical edges.
1856 The case of a block ending in an unconditional jump to a
1857 block with multiple predecessors is not handled optimally. */
1858
1859 static basic_block
1860 rtl_split_edge (edge edge_in)
1861 {
1862 basic_block bb, new_bb;
1863 rtx_insn *before;
1864
1865 /* Abnormal edges cannot be split. */
1866 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1867
1868 /* We are going to place the new block in front of edge destination.
1869 Avoid existence of fallthru predecessors. */
1870 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1871 {
1872 edge e = find_fallthru_edge (edge_in->dest->preds);
1873
1874 if (e)
1875 force_nonfallthru (e);
1876 }
1877
1878 /* Create the basic block note. */
1879 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1880 before = BB_HEAD (edge_in->dest);
1881 else
1882 before = NULL;
1883
1884 /* If this is a fall through edge to the exit block, the blocks might be
1885 not adjacent, and the right place is after the source. */
1886 if ((edge_in->flags & EDGE_FALLTHRU)
1887 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1888 {
1889 before = NEXT_INSN (BB_END (edge_in->src));
1890 bb = create_basic_block (before, NULL, edge_in->src);
1891 BB_COPY_PARTITION (bb, edge_in->src);
1892 }
1893 else
1894 {
1895 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1896 {
1897 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1898 BB_COPY_PARTITION (bb, edge_in->dest);
1899 }
1900 else
1901 {
1902 basic_block after = edge_in->dest->prev_bb;
1903 /* If this is post-bb reordering, and the edge crosses a partition
1904 boundary, the new block needs to be inserted in the bb chain
1905 at the end of the src partition (since we put the new bb into
1906 that partition, see below). Otherwise we may end up creating
1907 an extra partition crossing in the chain, which is illegal.
1908 It can't go after the src, because src may have a fall-through
1909 to a different block. */
1910 if (crtl->bb_reorder_complete
1911 && (edge_in->flags & EDGE_CROSSING))
1912 {
1913 after = last_bb_in_partition (edge_in->src);
1914 before = get_last_bb_insn (after);
1915 /* The instruction following the last bb in partition should
1916 be a barrier, since it cannot end in a fall-through. */
1917 gcc_checking_assert (BARRIER_P (before));
1918 before = NEXT_INSN (before);
1919 }
1920 bb = create_basic_block (before, NULL, after);
1921 /* Put the split bb into the src partition, to avoid creating
1922 a situation where a cold bb dominates a hot bb, in the case
1923 where src is cold and dest is hot. The src will dominate
1924 the new bb (whereas it might not have dominated dest). */
1925 BB_COPY_PARTITION (bb, edge_in->src);
1926 }
1927 }
1928
1929 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1930
1931 /* Can't allow a region crossing edge to be fallthrough. */
1932 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1933 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1934 {
1935 new_bb = force_nonfallthru (single_succ_edge (bb));
1936 gcc_assert (!new_bb);
1937 }
1938
1939 /* For non-fallthru edges, we must adjust the predecessor's
1940 jump instruction to target our new block. */
1941 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1942 {
1943 edge redirected = redirect_edge_and_branch (edge_in, bb);
1944 gcc_assert (redirected);
1945 }
1946 else
1947 {
1948 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1949 {
1950 /* For asm goto even splitting of fallthru edge might
1951 need insn patching, as other labels might point to the
1952 old label. */
1953 rtx_insn *last = BB_END (edge_in->src);
1954 if (last
1955 && JUMP_P (last)
1956 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1957 && (extract_asm_operands (PATTERN (last))
1958 || JUMP_LABEL (last) == before)
1959 && patch_jump_insn (last, before, bb))
1960 df_set_bb_dirty (edge_in->src);
1961 }
1962 redirect_edge_succ (edge_in, bb);
1963 }
1964
1965 return bb;
1966 }
1967
1968 /* Queue instructions for insertion on an edge between two basic blocks.
1969 The new instructions and basic blocks (if any) will not appear in the
1970 CFG until commit_edge_insertions is called. */
1971
1972 void
1973 insert_insn_on_edge (rtx pattern, edge e)
1974 {
1975 /* We cannot insert instructions on an abnormal critical edge.
1976 It will be easier to find the culprit if we die now. */
1977 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1978
1979 if (e->insns.r == NULL_RTX)
1980 start_sequence ();
1981 else
1982 push_to_sequence (e->insns.r);
1983
1984 emit_insn (pattern);
1985
1986 e->insns.r = get_insns ();
1987 end_sequence ();
1988 }
1989
1990 /* Update the CFG for the instructions queued on edge E. */
1991
1992 void
1993 commit_one_edge_insertion (edge e)
1994 {
1995 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1996 basic_block bb;
1997
1998 /* Pull the insns off the edge now since the edge might go away. */
1999 insns = e->insns.r;
2000 e->insns.r = NULL;
2001
2002 /* Figure out where to put these insns. If the destination has
2003 one predecessor, insert there. Except for the exit block. */
2004 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2005 {
2006 bb = e->dest;
2007
2008 /* Get the location correct wrt a code label, and "nice" wrt
2009 a basic block note, and before everything else. */
2010 tmp = BB_HEAD (bb);
2011 if (LABEL_P (tmp))
2012 tmp = NEXT_INSN (tmp);
2013 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2014 tmp = NEXT_INSN (tmp);
2015 if (tmp == BB_HEAD (bb))
2016 before = tmp;
2017 else if (tmp)
2018 after = PREV_INSN (tmp);
2019 else
2020 after = get_last_insn ();
2021 }
2022
2023 /* If the source has one successor and the edge is not abnormal,
2024 insert there. Except for the entry block.
2025 Don't do this if the predecessor ends in a jump other than
2026 unconditional simple jump. E.g. for asm goto that points all
2027 its labels at the fallthru basic block, we can't insert instructions
2028 before the asm goto, as the asm goto can have various of side effects,
2029 and can't emit instructions after the asm goto, as it must end
2030 the basic block. */
2031 else if ((e->flags & EDGE_ABNORMAL) == 0
2032 && single_succ_p (e->src)
2033 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2034 && (!JUMP_P (BB_END (e->src))
2035 || simplejump_p (BB_END (e->src))))
2036 {
2037 bb = e->src;
2038
2039 /* It is possible to have a non-simple jump here. Consider a target
2040 where some forms of unconditional jumps clobber a register. This
2041 happens on the fr30 for example.
2042
2043 We know this block has a single successor, so we can just emit
2044 the queued insns before the jump. */
2045 if (JUMP_P (BB_END (bb)))
2046 before = BB_END (bb);
2047 else
2048 {
2049 /* We'd better be fallthru, or we've lost track of what's what. */
2050 gcc_assert (e->flags & EDGE_FALLTHRU);
2051
2052 after = BB_END (bb);
2053 }
2054 }
2055
2056 /* Otherwise we must split the edge. */
2057 else
2058 {
2059 bb = split_edge (e);
2060
2061 /* If E crossed a partition boundary, we needed to make bb end in
2062 a region-crossing jump, even though it was originally fallthru. */
2063 if (JUMP_P (BB_END (bb)))
2064 before = BB_END (bb);
2065 else
2066 after = BB_END (bb);
2067 }
2068
2069 /* Now that we've found the spot, do the insertion. */
2070 if (before)
2071 {
2072 emit_insn_before_noloc (insns, before, bb);
2073 last = prev_nonnote_insn (before);
2074 }
2075 else
2076 last = emit_insn_after_noloc (insns, after, bb);
2077
2078 if (returnjump_p (last))
2079 {
2080 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2081 This is not currently a problem because this only happens
2082 for the (single) epilogue, which already has a fallthru edge
2083 to EXIT. */
2084
2085 e = single_succ_edge (bb);
2086 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2087 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2088
2089 e->flags &= ~EDGE_FALLTHRU;
2090 emit_barrier_after (last);
2091
2092 if (before)
2093 delete_insn (before);
2094 }
2095 else
2096 gcc_assert (!JUMP_P (last));
2097 }
2098
2099 /* Update the CFG for all queued instructions. */
2100
2101 void
2102 commit_edge_insertions (void)
2103 {
2104 basic_block bb;
2105
2106 /* Optimization passes that invoke this routine can cause hot blocks
2107 previously reached by both hot and cold blocks to become dominated only
2108 by cold blocks. This will cause the verification below to fail,
2109 and lead to now cold code in the hot section. In some cases this
2110 may only be visible after newly unreachable blocks are deleted,
2111 which will be done by fixup_partitions. */
2112 fixup_partitions ();
2113
2114 if (!currently_expanding_to_rtl)
2115 checking_verify_flow_info ();
2116
2117 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2118 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2119 {
2120 edge e;
2121 edge_iterator ei;
2122
2123 FOR_EACH_EDGE (e, ei, bb->succs)
2124 if (e->insns.r)
2125 {
2126 if (currently_expanding_to_rtl)
2127 rebuild_jump_labels_chain (e->insns.r);
2128 commit_one_edge_insertion (e);
2129 }
2130 }
2131 }
2132 \f
2133
2134 /* Print out RTL-specific basic block information (live information
2135 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2136 documented in dumpfile.h. */
2137
2138 static void
2139 rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2140 {
2141 char *s_indent;
2142
2143 s_indent = (char *) alloca ((size_t) indent + 1);
2144 memset (s_indent, ' ', (size_t) indent);
2145 s_indent[indent] = '\0';
2146
2147 if (df && (flags & TDF_DETAILS))
2148 {
2149 df_dump_top (bb, outf);
2150 putc ('\n', outf);
2151 }
2152
2153 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK
2154 && rtl_bb_info_initialized_p (bb))
2155 {
2156 rtx_insn *last = BB_END (bb);
2157 if (last)
2158 last = NEXT_INSN (last);
2159 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2160 {
2161 if (flags & TDF_DETAILS)
2162 df_dump_insn_top (insn, outf);
2163 if (! (flags & TDF_SLIM))
2164 print_rtl_single (outf, insn);
2165 else
2166 dump_insn_slim (outf, insn);
2167 if (flags & TDF_DETAILS)
2168 df_dump_insn_bottom (insn, outf);
2169 }
2170 }
2171
2172 if (df && (flags & TDF_DETAILS))
2173 {
2174 df_dump_bottom (bb, outf);
2175 putc ('\n', outf);
2176 }
2177
2178 }
2179 \f
2180 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2181 for the start of each basic block. FLAGS are the TDF_* masks documented
2182 in dumpfile.h. */
2183
2184 void
2185 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2186 {
2187 const rtx_insn *tmp_rtx;
2188 if (rtx_first == 0)
2189 fprintf (outf, "(nil)\n");
2190 else
2191 {
2192 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2193 int max_uid = get_max_uid ();
2194 basic_block *start = XCNEWVEC (basic_block, max_uid);
2195 basic_block *end = XCNEWVEC (basic_block, max_uid);
2196 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2197 basic_block bb;
2198
2199 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2200 insns, but the CFG is not maintained so the basic block info
2201 is not reliable. Therefore it's omitted from the dumps. */
2202 if (! (cfun->curr_properties & PROP_cfg))
2203 flags &= ~TDF_BLOCKS;
2204
2205 if (df)
2206 df_dump_start (outf);
2207
2208 if (cfun->curr_properties & PROP_cfg)
2209 {
2210 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2211 {
2212 rtx_insn *x;
2213
2214 start[INSN_UID (BB_HEAD (bb))] = bb;
2215 end[INSN_UID (BB_END (bb))] = bb;
2216 if (flags & TDF_BLOCKS)
2217 {
2218 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2219 {
2220 enum bb_state state = IN_MULTIPLE_BB;
2221
2222 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2223 state = IN_ONE_BB;
2224 in_bb_p[INSN_UID (x)] = state;
2225
2226 if (x == BB_END (bb))
2227 break;
2228 }
2229 }
2230 }
2231 }
2232
2233 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2234 {
2235 if (flags & TDF_BLOCKS)
2236 {
2237 bb = start[INSN_UID (tmp_rtx)];
2238 if (bb != NULL)
2239 {
2240 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2241 if (df && (flags & TDF_DETAILS))
2242 df_dump_top (bb, outf);
2243 }
2244
2245 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2246 && !NOTE_P (tmp_rtx)
2247 && !BARRIER_P (tmp_rtx))
2248 fprintf (outf, ";; Insn is not within a basic block\n");
2249 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2250 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2251 }
2252
2253 if (flags & TDF_DETAILS)
2254 df_dump_insn_top (tmp_rtx, outf);
2255 if (! (flags & TDF_SLIM))
2256 print_rtl_single (outf, tmp_rtx);
2257 else
2258 dump_insn_slim (outf, tmp_rtx);
2259 if (flags & TDF_DETAILS)
2260 df_dump_insn_bottom (tmp_rtx, outf);
2261
2262 bb = end[INSN_UID (tmp_rtx)];
2263 if (bb != NULL)
2264 {
2265 if (flags & TDF_BLOCKS)
2266 {
2267 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2268 if (df && (flags & TDF_DETAILS))
2269 df_dump_bottom (bb, outf);
2270 putc ('\n', outf);
2271 }
2272 /* Emit a hint if the fallthrough target of current basic block
2273 isn't the one placed right next. */
2274 else if (EDGE_COUNT (bb->succs) > 0)
2275 {
2276 gcc_assert (BB_END (bb) == tmp_rtx);
2277 const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
2278 /* Bypass intervening deleted-insn notes and debug insns. */
2279 while (ninsn
2280 && !NONDEBUG_INSN_P (ninsn)
2281 && !start[INSN_UID (ninsn)])
2282 ninsn = NEXT_INSN (ninsn);
2283 edge e = find_fallthru_edge (bb->succs);
2284 if (e && ninsn)
2285 {
2286 basic_block dest = e->dest;
2287 if (start[INSN_UID (ninsn)] != dest)
2288 fprintf (outf, "%s ; pc falls through to BB %d\n",
2289 print_rtx_head, dest->index);
2290 }
2291 }
2292 }
2293 }
2294
2295 free (start);
2296 free (end);
2297 free (in_bb_p);
2298 }
2299 }
2300 \f
2301 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2302
2303 void
2304 update_br_prob_note (basic_block bb)
2305 {
2306 rtx note;
2307 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2308 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2309 {
2310 if (note)
2311 {
2312 rtx *note_link, this_rtx;
2313
2314 note_link = &REG_NOTES (BB_END (bb));
2315 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2316 if (this_rtx == note)
2317 {
2318 *note_link = XEXP (this_rtx, 1);
2319 break;
2320 }
2321 }
2322 return;
2323 }
2324 if (!note
2325 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2326 return;
2327 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2328 }
2329
2330 /* Get the last insn associated with block BB (that includes barriers and
2331 tablejumps after BB). */
2332 rtx_insn *
2333 get_last_bb_insn (basic_block bb)
2334 {
2335 rtx_jump_table_data *table;
2336 rtx_insn *tmp;
2337 rtx_insn *end = BB_END (bb);
2338
2339 /* Include any jump table following the basic block. */
2340 if (tablejump_p (end, NULL, &table))
2341 end = table;
2342
2343 /* Include any barriers that may follow the basic block. */
2344 tmp = next_nonnote_nondebug_insn_bb (end);
2345 while (tmp && BARRIER_P (tmp))
2346 {
2347 end = tmp;
2348 tmp = next_nonnote_nondebug_insn_bb (end);
2349 }
2350
2351 return end;
2352 }
2353
2354 /* Add all BBs reachable from entry via hot paths into the SET. */
2355
2356 void
2357 find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2358 {
2359 auto_vec<basic_block, 64> worklist;
2360
2361 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2362 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2363
2364 while (worklist.length () > 0)
2365 {
2366 basic_block bb = worklist.pop ();
2367 edge_iterator ei;
2368 edge e;
2369
2370 FOR_EACH_EDGE (e, ei, bb->succs)
2371 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2372 && !set->add (e->dest))
2373 worklist.safe_push (e->dest);
2374 }
2375 }
2376
2377 /* Sanity check partition hotness to ensure that basic blocks in
2378   the cold partition don't dominate basic blocks in the hot partition.
2379 If FLAG_ONLY is true, report violations as errors. Otherwise
2380 re-mark the dominated blocks as cold, since this is run after
2381 cfg optimizations that may make hot blocks previously reached
2382 by both hot and cold blocks now only reachable along cold paths. */
2383
2384 static vec<basic_block>
2385 find_partition_fixes (bool flag_only)
2386 {
2387 basic_block bb;
2388 vec<basic_block> bbs_to_fix = vNULL;
2389 hash_set<basic_block> set;
2390
2391 /* Callers check this. */
2392 gcc_checking_assert (crtl->has_bb_partition);
2393
2394 find_bbs_reachable_by_hot_paths (&set);
2395
2396 FOR_EACH_BB_FN (bb, cfun)
2397 if (!set.contains (bb)
2398 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2399 {
2400 if (flag_only)
2401 error ("non-cold basic block %d reachable only "
2402 "by paths crossing the cold partition", bb->index);
2403 else
2404 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2405 bbs_to_fix.safe_push (bb);
2406 }
2407
2408 return bbs_to_fix;
2409 }
2410
2411 /* Perform cleanup on the hot/cold bb partitioning after optimization
2412 passes that modify the cfg. */
2413
2414 void
2415 fixup_partitions (void)
2416 {
2417 basic_block bb;
2418
2419 if (!crtl->has_bb_partition)
2420 return;
2421
2422 /* Delete any blocks that became unreachable and weren't
2423 already cleaned up, for example during edge forwarding
2424 and convert_jumps_to_returns. This will expose more
2425 opportunities for fixing the partition boundaries here.
2426 Also, the calculation of the dominance graph during verification
2427 will assert if there are unreachable nodes. */
2428 delete_unreachable_blocks ();
2429
2430 /* If there are partitions, do a sanity check on them: A basic block in
2431   a cold partition cannot dominate a basic block in a hot partition.
2432 Fixup any that now violate this requirement, as a result of edge
2433 forwarding and unreachable block deletion.  */
2434 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2435
2436 /* Do the partition fixup after all necessary blocks have been converted to
2437 cold, so that we only update the region crossings the minimum number of
2438 places, which can require forcing edges to be non fallthru. */
2439 while (! bbs_to_fix.is_empty ())
2440 {
2441 bb = bbs_to_fix.pop ();
2442 fixup_new_cold_bb (bb);
2443 }
2444 }
2445
2446 /* Verify, in the basic block chain, that there is at most one switch
2447 between hot/cold partitions. This condition will not be true until
2448 after reorder_basic_blocks is called. */
2449
2450 static int
2451 verify_hot_cold_block_grouping (void)
2452 {
2453 basic_block bb;
2454 int err = 0;
2455 bool switched_sections = false;
2456 int current_partition = BB_UNPARTITIONED;
2457
2458 /* Even after bb reordering is complete, we go into cfglayout mode
2459 again (in compgoto). Ensure we don't call this before going back
2460 into linearized RTL when any layout fixes would have been committed. */
2461 if (!crtl->bb_reorder_complete
2462 || current_ir_type () != IR_RTL_CFGRTL)
2463 return err;
2464
2465 FOR_EACH_BB_FN (bb, cfun)
2466 {
2467 if (current_partition != BB_UNPARTITIONED
2468 && BB_PARTITION (bb) != current_partition)
2469 {
2470 if (switched_sections)
2471 {
2472 error ("multiple hot/cold transitions found (bb %i)",
2473 bb->index);
2474 err = 1;
2475 }
2476 else
2477 switched_sections = true;
2478
2479 if (!crtl->has_bb_partition)
2480 error ("partition found but function partition flag not set");
2481 }
2482 current_partition = BB_PARTITION (bb);
2483 }
2484
2485 return err;
2486 }
2487 \f
2488
2489 /* Perform several checks on the edges out of each block, such as
2490 the consistency of the branch probabilities, the correctness
2491 of hot/cold partition crossing edges, and the number of expected
2492 successor edges. Also verify that the dominance relationship
2493 between hot/cold blocks is sane. */
2494
2495 static int
2496 rtl_verify_edges (void)
2497 {
2498 int err = 0;
2499 basic_block bb;
2500
2501 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2502 {
2503 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2504 int n_eh = 0, n_abnormal = 0;
2505 edge e, fallthru = NULL;
2506 edge_iterator ei;
2507 rtx note;
2508 bool has_crossing_edge = false;
2509
2510 if (JUMP_P (BB_END (bb))
2511 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2512 && EDGE_COUNT (bb->succs) >= 2
2513 && any_condjump_p (BB_END (bb)))
2514 {
2515 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2516 {
2517 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2518 {
2519 error ("verify_flow_info: "
2520 "REG_BR_PROB is set but cfg probability is not");
2521 err = 1;
2522 }
2523 }
2524 else if (XINT (note, 0)
2525 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2526 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2527 {
2528 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2529 XINT (note, 0),
2530 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2531 err = 1;
2532 }
2533 }
2534
2535 FOR_EACH_EDGE (e, ei, bb->succs)
2536 {
2537 bool is_crossing;
2538
2539 if (e->flags & EDGE_FALLTHRU)
2540 n_fallthru++, fallthru = e;
2541
2542 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2543 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2544 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2545 has_crossing_edge |= is_crossing;
2546 if (e->flags & EDGE_CROSSING)
2547 {
2548 if (!is_crossing)
2549 {
2550 error ("EDGE_CROSSING incorrectly set across same section");
2551 err = 1;
2552 }
2553 if (e->flags & EDGE_FALLTHRU)
2554 {
2555 error ("fallthru edge crosses section boundary in bb %i",
2556 e->src->index);
2557 err = 1;
2558 }
2559 if (e->flags & EDGE_EH)
2560 {
2561 error ("EH edge crosses section boundary in bb %i",
2562 e->src->index);
2563 err = 1;
2564 }
2565 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2566 {
2567 error ("No region crossing jump at section boundary in bb %i",
2568 bb->index);
2569 err = 1;
2570 }
2571 }
2572 else if (is_crossing)
2573 {
2574 error ("EDGE_CROSSING missing across section boundary");
2575 err = 1;
2576 }
2577
2578 if ((e->flags & ~(EDGE_DFS_BACK
2579 | EDGE_CAN_FALLTHRU
2580 | EDGE_IRREDUCIBLE_LOOP
2581 | EDGE_LOOP_EXIT
2582 | EDGE_CROSSING
2583 | EDGE_PRESERVE)) == 0)
2584 n_branch++;
2585
2586 if (e->flags & EDGE_ABNORMAL_CALL)
2587 n_abnormal_call++;
2588
2589 if (e->flags & EDGE_SIBCALL)
2590 n_sibcall++;
2591
2592 if (e->flags & EDGE_EH)
2593 n_eh++;
2594
2595 if (e->flags & EDGE_ABNORMAL)
2596 n_abnormal++;
2597 }
2598
2599 if (!has_crossing_edge
2600 && JUMP_P (BB_END (bb))
2601 && CROSSING_JUMP_P (BB_END (bb)))
2602 {
2603 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2604 error ("Region crossing jump across same section in bb %i",
2605 bb->index);
2606 err = 1;
2607 }
2608
2609 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2610 {
2611 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2612 err = 1;
2613 }
2614 if (n_eh > 1)
2615 {
2616 error ("too many exception handling edges in bb %i", bb->index);
2617 err = 1;
2618 }
2619 if (n_branch
2620 && (!JUMP_P (BB_END (bb))
2621 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2622 || any_condjump_p (BB_END (bb))))))
2623 {
2624 error ("too many outgoing branch edges from bb %i", bb->index);
2625 err = 1;
2626 }
2627 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2628 {
2629 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2630 err = 1;
2631 }
2632 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2633 {
2634 error ("wrong number of branch edges after unconditional jump"
2635 " in bb %i", bb->index);
2636 err = 1;
2637 }
2638 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2639 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2640 {
2641 error ("wrong amount of branch edges after conditional jump"
2642 " in bb %i", bb->index);
2643 err = 1;
2644 }
2645 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2646 {
2647 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2648 err = 1;
2649 }
2650 if (n_sibcall && !CALL_P (BB_END (bb)))
2651 {
2652 error ("sibcall edges for non-call insn in bb %i", bb->index);
2653 err = 1;
2654 }
2655 if (n_abnormal > n_eh
2656 && !(CALL_P (BB_END (bb))
2657 && n_abnormal == n_abnormal_call + n_sibcall)
2658 && (!JUMP_P (BB_END (bb))
2659 || any_condjump_p (BB_END (bb))
2660 || any_uncondjump_p (BB_END (bb))))
2661 {
2662 error ("abnormal edges for no purpose in bb %i", bb->index);
2663 err = 1;
2664 }
2665
2666 int has_eh = -1;
2667 FOR_EACH_EDGE (e, ei, bb->preds)
2668 {
2669 if (has_eh == -1)
2670 has_eh = (e->flags & EDGE_EH);
2671 if ((e->flags & EDGE_EH) == has_eh)
2672 continue;
2673 error ("EH incoming edge mixed with non-EH incoming edges "
2674 "in bb %i", bb->index);
2675 err = 1;
2676 break;
2677 }
2678 }
2679
2680 /* If there are partitions, do a sanity check on them: A basic block in
2681   a cold partition cannot dominate a basic block in a hot partition.  */
2682 if (crtl->has_bb_partition && !err
2683 && current_ir_type () == IR_RTL_CFGLAYOUT)
2684 {
2685 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2686 err = !bbs_to_fix.is_empty ();
2687 }
2688
2689 /* Clean up. */
2690 return err;
2691 }
2692
2693 /* Checks on the instructions within blocks. Currently checks that each
2694 block starts with a basic block note, and that basic block notes and
2695 control flow jumps are not found in the middle of the block. */
2696
2697 static int
2698 rtl_verify_bb_insns (void)
2699 {
2700 rtx_insn *x;
2701 int err = 0;
2702 basic_block bb;
2703
2704 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2705 {
2706 /* Now check the header of basic
2707 block. It ought to contain optional CODE_LABEL followed
2708 by NOTE_BASIC_BLOCK. */
2709 x = BB_HEAD (bb);
2710 if (LABEL_P (x))
2711 {
2712 if (BB_END (bb) == x)
2713 {
2714 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2715 bb->index);
2716 err = 1;
2717 }
2718
2719 x = NEXT_INSN (x);
2720 }
2721
2722 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2723 {
2724 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2725 bb->index);
2726 err = 1;
2727 }
2728
2729 if (BB_END (bb) == x)
2730 /* Do checks for empty blocks here. */
2731 ;
2732 else
2733 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2734 {
2735 if (NOTE_INSN_BASIC_BLOCK_P (x))
2736 {
2737 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2738 INSN_UID (x), bb->index);
2739 err = 1;
2740 }
2741
2742 if (x == BB_END (bb))
2743 break;
2744
2745 if (control_flow_insn_p (x))
2746 {
2747 error ("in basic block %d:", bb->index);
2748 fatal_insn ("flow control insn inside a basic block", x);
2749 }
2750 }
2751 }
2752
2753 /* Clean up. */
2754 return err;
2755 }
2756
2757 /* Verify that block pointers for instructions in basic blocks, headers and
2758 footers are set appropriately. */
2759
2760 static int
2761 rtl_verify_bb_pointers (void)
2762 {
2763 int err = 0;
2764 basic_block bb;
2765
2766 /* Check the general integrity of the basic blocks. */
2767 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2768 {
2769 rtx_insn *insn;
2770
2771 if (!(bb->flags & BB_RTL))
2772 {
2773 error ("BB_RTL flag not set for block %d", bb->index);
2774 err = 1;
2775 }
2776
2777 FOR_BB_INSNS (bb, insn)
2778 if (BLOCK_FOR_INSN (insn) != bb)
2779 {
2780 error ("insn %d basic block pointer is %d, should be %d",
2781 INSN_UID (insn),
2782 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2783 bb->index);
2784 err = 1;
2785 }
2786
2787 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2788 if (!BARRIER_P (insn)
2789 && BLOCK_FOR_INSN (insn) != NULL)
2790 {
2791 error ("insn %d in header of bb %d has non-NULL basic block",
2792 INSN_UID (insn), bb->index);
2793 err = 1;
2794 }
2795 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2796 if (!BARRIER_P (insn)
2797 && BLOCK_FOR_INSN (insn) != NULL)
2798 {
2799 error ("insn %d in footer of bb %d has non-NULL basic block",
2800 INSN_UID (insn), bb->index);
2801 err = 1;
2802 }
2803 }
2804
2805 /* Clean up. */
2806 return err;
2807 }
2808
2809 /* Verify the CFG and RTL consistency common for both underlying RTL and
2810 cfglayout RTL.
2811
2812 Currently it does following checks:
2813
2814 - overlapping of basic blocks
2815 - insns with wrong BLOCK_FOR_INSN pointers
2816 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2817 - tails of basic blocks (ensure that boundary is necessary)
2818 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2819 and NOTE_INSN_BASIC_BLOCK
2820 - verify that no fall_thru edge crosses hot/cold partition boundaries
2821 - verify that there are no pending RTL branch predictions
2822 - verify that hot blocks are not dominated by cold blocks
2823
2824 In future it can be extended check a lot of other stuff as well
2825 (reachability of basic blocks, life information, etc. etc.). */
2826
2827 static int
2828 rtl_verify_flow_info_1 (void)
2829 {
2830 int err = 0;
2831
2832 err |= rtl_verify_bb_pointers ();
2833
2834 err |= rtl_verify_bb_insns ();
2835
2836 err |= rtl_verify_edges ();
2837
2838 return err;
2839 }
2840
2841 /* Walk the instruction chain and verify that bb head/end pointers
2842 are correct, and that instructions are in exactly one bb and have
2843 correct block pointers. */
2844
2845 static int
2846 rtl_verify_bb_insn_chain (void)
2847 {
2848 basic_block bb;
2849 int err = 0;
2850 rtx_insn *x;
2851 rtx_insn *last_head = get_last_insn ();
2852 basic_block *bb_info;
2853 const int max_uid = get_max_uid ();
2854
2855 bb_info = XCNEWVEC (basic_block, max_uid);
2856
2857 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2858 {
2859 rtx_insn *head = BB_HEAD (bb);
2860 rtx_insn *end = BB_END (bb);
2861
2862 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2863 {
2864 /* Verify the end of the basic block is in the INSN chain. */
2865 if (x == end)
2866 break;
2867
2868 /* And that the code outside of basic blocks has NULL bb field. */
2869 if (!BARRIER_P (x)
2870 && BLOCK_FOR_INSN (x) != NULL)
2871 {
2872 error ("insn %d outside of basic blocks has non-NULL bb field",
2873 INSN_UID (x));
2874 err = 1;
2875 }
2876 }
2877
2878 if (!x)
2879 {
2880 error ("end insn %d for block %d not found in the insn stream",
2881 INSN_UID (end), bb->index);
2882 err = 1;
2883 }
2884
2885 /* Work backwards from the end to the head of the basic block
2886 to verify the head is in the RTL chain. */
2887 for (; x != NULL_RTX; x = PREV_INSN (x))
2888 {
2889 /* While walking over the insn chain, verify insns appear
2890 in only one basic block. */
2891 if (bb_info[INSN_UID (x)] != NULL)
2892 {
2893 error ("insn %d is in multiple basic blocks (%d and %d)",
2894 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2895 err = 1;
2896 }
2897
2898 bb_info[INSN_UID (x)] = bb;
2899
2900 if (x == head)
2901 break;
2902 }
2903 if (!x)
2904 {
2905 error ("head insn %d for block %d not found in the insn stream",
2906 INSN_UID (head), bb->index);
2907 err = 1;
2908 }
2909
2910 last_head = PREV_INSN (x);
2911 }
2912
2913 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2914 {
2915 /* Check that the code before the first basic block has NULL
2916 bb field. */
2917 if (!BARRIER_P (x)
2918 && BLOCK_FOR_INSN (x) != NULL)
2919 {
2920 error ("insn %d outside of basic blocks has non-NULL bb field",
2921 INSN_UID (x));
2922 err = 1;
2923 }
2924 }
2925 free (bb_info);
2926
2927 return err;
2928 }
2929
2930 /* Verify that fallthru edges point to adjacent blocks in layout order and
2931 that barriers exist after non-fallthru blocks. */
2932
2933 static int
2934 rtl_verify_fallthru (void)
2935 {
2936 basic_block bb;
2937 int err = 0;
2938
2939 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2940 {
2941 edge e;
2942
2943 e = find_fallthru_edge (bb->succs);
2944 if (!e)
2945 {
2946 rtx_insn *insn;
2947
2948 /* Ensure existence of barrier in BB with no fallthru edges. */
2949 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2950 {
2951 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2952 {
2953 error ("missing barrier after block %i", bb->index);
2954 err = 1;
2955 break;
2956 }
2957 if (BARRIER_P (insn))
2958 break;
2959 }
2960 }
2961 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2962 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2963 {
2964 rtx_insn *insn;
2965
2966 if (e->src->next_bb != e->dest)
2967 {
2968 error
2969 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2970 e->src->index, e->dest->index);
2971 err = 1;
2972 }
2973 else
2974 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2975 insn = NEXT_INSN (insn))
2976 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
2977 {
2978 error ("verify_flow_info: Incorrect fallthru %i->%i",
2979 e->src->index, e->dest->index);
2980 fatal_insn ("wrong insn in the fallthru edge", insn);
2981 err = 1;
2982 }
2983 }
2984 }
2985
2986 return err;
2987 }
2988
2989 /* Verify that blocks are laid out in consecutive order. While walking the
2990 instructions, verify that all expected instructions are inside the basic
2991 blocks, and that all returns are followed by barriers. */
2992
2993 static int
2994 rtl_verify_bb_layout (void)
2995 {
2996 basic_block bb;
2997 int err = 0;
2998 rtx_insn *x, *y;
2999 int num_bb_notes;
3000 rtx_insn * const rtx_first = get_insns ();
3001 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
3002
3003 num_bb_notes = 0;
3004
3005 for (x = rtx_first; x; x = NEXT_INSN (x))
3006 {
3007 if (NOTE_INSN_BASIC_BLOCK_P (x))
3008 {
3009 bb = NOTE_BASIC_BLOCK (x);
3010
3011 num_bb_notes++;
3012 if (bb != last_bb_seen->next_bb)
3013 internal_error ("basic blocks not laid down consecutively");
3014
3015 curr_bb = last_bb_seen = bb;
3016 }
3017
3018 if (!curr_bb)
3019 {
3020 switch (GET_CODE (x))
3021 {
3022 case BARRIER:
3023 case NOTE:
3024 break;
3025
3026 case CODE_LABEL:
3027 /* An ADDR_VEC is placed outside any basic block. */
3028 if (NEXT_INSN (x)
3029 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3030 x = NEXT_INSN (x);
3031
3032 /* But in any case, non-deletable labels can appear anywhere. */
3033 break;
3034
3035 default:
3036 fatal_insn ("insn outside basic block", x);
3037 }
3038 }
3039
3040 if (JUMP_P (x)
3041 && returnjump_p (x) && ! condjump_p (x)
3042 && ! ((y = next_nonnote_nondebug_insn (x))
3043 && BARRIER_P (y)))
3044 fatal_insn ("return not followed by barrier", x);
3045
3046 if (curr_bb && x == BB_END (curr_bb))
3047 curr_bb = NULL;
3048 }
3049
3050 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3051 internal_error
3052 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3053 num_bb_notes, n_basic_blocks_for_fn (cfun));
3054
3055 return err;
3056 }
3057
3058 /* Verify the CFG and RTL consistency common for both underlying RTL and
3059 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3060
3061 Currently it does following checks:
3062 - all checks of rtl_verify_flow_info_1
3063 - test head/end pointers
3064 - check that blocks are laid out in consecutive order
3065 - check that all insns are in the basic blocks
3066 (except the switch handling code, barriers and notes)
3067 - check that all returns are followed by barriers
3068 - check that all fallthru edge points to the adjacent blocks
3069 - verify that there is a single hot/cold partition boundary after bbro */
3070
3071 static int
3072 rtl_verify_flow_info (void)
3073 {
3074 int err = 0;
3075
3076 err |= rtl_verify_flow_info_1 ();
3077
3078 err |= rtl_verify_bb_insn_chain ();
3079
3080 err |= rtl_verify_fallthru ();
3081
3082 err |= rtl_verify_bb_layout ();
3083
3084 err |= verify_hot_cold_block_grouping ();
3085
3086 return err;
3087 }
3088 \f
3089 /* Assume that the preceding pass has possibly eliminated jump instructions
3090 or converted the unconditional jumps. Eliminate the edges from CFG.
3091 Return true if any edges are eliminated. */
3092
3093 bool
3094 purge_dead_edges (basic_block bb)
3095 {
3096 edge e;
3097 rtx_insn *insn = BB_END (bb);
3098 rtx note;
3099 bool purged = false;
3100 bool found;
3101 edge_iterator ei;
3102
3103 if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3104 do
3105 insn = PREV_INSN (insn);
3106 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3107
3108 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3109 if (NONJUMP_INSN_P (insn)
3110 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3111 {
3112 rtx eqnote;
3113
3114 if (! may_trap_p (PATTERN (insn))
3115 || ((eqnote = find_reg_equal_equiv_note (insn))
3116 && ! may_trap_p (XEXP (eqnote, 0))))
3117 remove_note (insn, note);
3118 }
3119
3120 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3121 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3122 {
3123 bool remove = false;
3124
3125 /* There are three types of edges we need to handle correctly here: EH
3126 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3127 latter can appear when nonlocal gotos are used. */
3128 if (e->flags & EDGE_ABNORMAL_CALL)
3129 {
3130 if (!CALL_P (insn))
3131 remove = true;
3132 else if (can_nonlocal_goto (insn))
3133 ;
3134 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3135 ;
3136 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3137 ;
3138 else
3139 remove = true;
3140 }
3141 else if (e->flags & EDGE_EH)
3142 remove = !can_throw_internal (insn);
3143
3144 if (remove)
3145 {
3146 remove_edge (e);
3147 df_set_bb_dirty (bb);
3148 purged = true;
3149 }
3150 else
3151 ei_next (&ei);
3152 }
3153
3154 if (JUMP_P (insn))
3155 {
3156 rtx note;
3157 edge b,f;
3158 edge_iterator ei;
3159
3160 /* We do care only about conditional jumps and simplejumps. */
3161 if (!any_condjump_p (insn)
3162 && !returnjump_p (insn)
3163 && !simplejump_p (insn))
3164 return purged;
3165
3166 /* Branch probability/prediction notes are defined only for
3167 condjumps. We've possibly turned condjump into simplejump. */
3168 if (simplejump_p (insn))
3169 {
3170 note = find_reg_note (insn, REG_BR_PROB, NULL);
3171 if (note)
3172 remove_note (insn, note);
3173 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3174 remove_note (insn, note);
3175 }
3176
3177 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3178 {
3179 /* Avoid abnormal flags to leak from computed jumps turned
3180 into simplejumps. */
3181
3182 e->flags &= ~EDGE_ABNORMAL;
3183
3184 /* See if this edge is one we should keep. */
3185 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3186 /* A conditional jump can fall through into the next
3187 block, so we should keep the edge. */
3188 {
3189 ei_next (&ei);
3190 continue;
3191 }
3192 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3193 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3194 /* If the destination block is the target of the jump,
3195 keep the edge. */
3196 {
3197 ei_next (&ei);
3198 continue;
3199 }
3200 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3201 && returnjump_p (insn))
3202 /* If the destination block is the exit block, and this
3203 instruction is a return, then keep the edge. */
3204 {
3205 ei_next (&ei);
3206 continue;
3207 }
3208 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3209 /* Keep the edges that correspond to exceptions thrown by
3210 this instruction and rematerialize the EDGE_ABNORMAL
3211 flag we just cleared above. */
3212 {
3213 e->flags |= EDGE_ABNORMAL;
3214 ei_next (&ei);
3215 continue;
3216 }
3217
3218 /* We do not need this edge. */
3219 df_set_bb_dirty (bb);
3220 purged = true;
3221 remove_edge (e);
3222 }
3223
3224 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3225 return purged;
3226
3227 if (dump_file)
3228 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3229
3230 if (!optimize)
3231 return purged;
3232
3233 /* Redistribute probabilities. */
3234 if (single_succ_p (bb))
3235 {
3236 single_succ_edge (bb)->probability = profile_probability::always ();
3237 }
3238 else
3239 {
3240 note = find_reg_note (insn, REG_BR_PROB, NULL);
3241 if (!note)
3242 return purged;
3243
3244 b = BRANCH_EDGE (bb);
3245 f = FALLTHRU_EDGE (bb);
3246 b->probability = profile_probability::from_reg_br_prob_note
3247 (XINT (note, 0));
3248 f->probability = b->probability.invert ();
3249 }
3250
3251 return purged;
3252 }
3253 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3254 {
3255 /* First, there should not be any EH or ABCALL edges resulting
3256 from non-local gotos and the like. If there were, we shouldn't
3257 have created the sibcall in the first place. Second, there
3258 should of course never have been a fallthru edge. */
3259 gcc_assert (single_succ_p (bb));
3260 gcc_assert (single_succ_edge (bb)->flags
3261 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3262
3263 return 0;
3264 }
3265
3266 /* If we don't see a jump insn, we don't know exactly why the block would
3267 have been broken at this point. Look for a simple, non-fallthru edge,
3268 as these are only created by conditional branches. If we find such an
3269 edge we know that there used to be a jump here and can then safely
3270 remove all non-fallthru edges. */
3271 found = false;
3272 FOR_EACH_EDGE (e, ei, bb->succs)
3273 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3274 {
3275 found = true;
3276 break;
3277 }
3278
3279 if (!found)
3280 return purged;
3281
3282 /* Remove all but the fake and fallthru edges. The fake edge may be
3283 the only successor for this block in the case of noreturn
3284 calls. */
3285 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3286 {
3287 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3288 {
3289 df_set_bb_dirty (bb);
3290 remove_edge (e);
3291 purged = true;
3292 }
3293 else
3294 ei_next (&ei);
3295 }
3296
3297 gcc_assert (single_succ_p (bb));
3298
3299 single_succ_edge (bb)->probability = profile_probability::always ();
3300
3301 if (dump_file)
3302 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3303 bb->index);
3304 return purged;
3305 }
3306
3307 /* Search all basic blocks for potentially dead edges and purge them. Return
3308 true if some edge has been eliminated. */
3309
3310 bool
3311 purge_all_dead_edges (void)
3312 {
3313 int purged = false;
3314 basic_block bb;
3315
3316 FOR_EACH_BB_FN (bb, cfun)
3317 {
3318 bool purged_here = purge_dead_edges (bb);
3319
3320 purged |= purged_here;
3321 }
3322
3323 return purged;
3324 }
3325
3326 /* This is used by a few passes that emit some instructions after abnormal
3327 calls, moving the basic block's end, while they in fact do want to emit
3328 them on the fallthru edge. Look for abnormal call edges, find backward
3329 the call in the block and insert the instructions on the edge instead.
3330
3331 Similarly, handle instructions throwing exceptions internally.
3332
3333 Return true when instructions have been found and inserted on edges. */
3334
3335 bool
3336 fixup_abnormal_edges (void)
3337 {
3338 bool inserted = false;
3339 basic_block bb;
3340
3341 FOR_EACH_BB_FN (bb, cfun)
3342 {
3343 edge e;
3344 edge_iterator ei;
3345
3346 /* Look for cases we are interested in - calls or instructions causing
3347 exceptions. */
3348 FOR_EACH_EDGE (e, ei, bb->succs)
3349 if ((e->flags & EDGE_ABNORMAL_CALL)
3350 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3351 == (EDGE_ABNORMAL | EDGE_EH)))
3352 break;
3353
3354 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3355 {
3356 rtx_insn *insn;
3357
3358 /* Get past the new insns generated. Allow notes, as the insns
3359 may be already deleted. */
3360 insn = BB_END (bb);
3361 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3362 && !can_throw_internal (insn)
3363 && insn != BB_HEAD (bb))
3364 insn = PREV_INSN (insn);
3365
3366 if (CALL_P (insn) || can_throw_internal (insn))
3367 {
3368 rtx_insn *stop, *next;
3369
3370 e = find_fallthru_edge (bb->succs);
3371
3372 stop = NEXT_INSN (BB_END (bb));
3373 BB_END (bb) = insn;
3374
3375 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3376 {
3377 next = NEXT_INSN (insn);
3378 if (INSN_P (insn))
3379 {
3380 delete_insn (insn);
3381
3382 /* Sometimes there's still the return value USE.
3383 If it's placed after a trapping call (i.e. that
3384 call is the last insn anyway), we have no fallthru
3385 edge. Simply delete this use and don't try to insert
3386 on the non-existent edge.
3387 Similarly, sometimes a call that can throw is
3388 followed in the source with __builtin_unreachable (),
3389 meaning that there is UB if the call returns rather
3390 than throws. If there weren't any instructions
3391 following such calls before, supposedly even the ones
3392 we've deleted aren't significant and can be
3393 removed. */
3394 if (e)
3395 {
3396 /* We're not deleting it, we're moving it. */
3397 insn->set_undeleted ();
3398 SET_PREV_INSN (insn) = NULL_RTX;
3399 SET_NEXT_INSN (insn) = NULL_RTX;
3400
3401 insert_insn_on_edge (insn, e);
3402 inserted = true;
3403 }
3404 }
3405 else if (!BARRIER_P (insn))
3406 set_block_for_insn (insn, NULL);
3407 }
3408 }
3409
3410 /* It may be that we don't find any trapping insn. In this
3411 case we discovered quite late that the insn that had been
3412 marked as can_throw_internal in fact couldn't trap at all.
3413 So we should in fact delete the EH edges out of the block. */
3414 else
3415 purge_dead_edges (bb);
3416 }
3417 }
3418
3419 return inserted;
3420 }
3421 \f
3422 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
3423 Note that the INSN should be deleted *after* removing dead edges, so
3424 that the kept edge is the fallthrough edge for a (set (pc) (pc))
3425 but not for a (set (pc) (label_ref FOO)). */
3426
3427 void
3428 update_cfg_for_uncondjump (rtx_insn *insn)
3429 {
3430 basic_block bb = BLOCK_FOR_INSN (insn);
3431 gcc_assert (BB_END (bb) == insn);
3432
3433 purge_dead_edges (bb);
3434
3435 if (current_ir_type () != IR_RTL_CFGLAYOUT)
3436 {
3437 if (!find_fallthru_edge (bb->succs))
3438 {
3439 auto barrier = next_nonnote_nondebug_insn (insn);
3440 if (!barrier || !BARRIER_P (barrier))
3441 emit_barrier_after (insn);
3442 }
3443 return;
3444 }
3445
3446 delete_insn (insn);
3447 if (EDGE_COUNT (bb->succs) == 1)
3448 {
3449 rtx_insn *insn;
3450
3451 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
3452
3453 /* Remove barriers from the footer if there are any. */
3454 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
3455 if (BARRIER_P (insn))
3456 {
3457 if (PREV_INSN (insn))
3458 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
3459 else
3460 BB_FOOTER (bb) = NEXT_INSN (insn);
3461 if (NEXT_INSN (insn))
3462 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
3463 }
3464 else if (LABEL_P (insn))
3465 break;
3466 }
3467 }
3468 \f
3469 /* Cut the insns from FIRST to LAST out of the insns stream. */
3470
3471 rtx_insn *
3472 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3473 {
3474 rtx_insn *prevfirst = PREV_INSN (first);
3475 rtx_insn *nextlast = NEXT_INSN (last);
3476
3477 SET_PREV_INSN (first) = NULL;
3478 SET_NEXT_INSN (last) = NULL;
3479 if (prevfirst)
3480 SET_NEXT_INSN (prevfirst) = nextlast;
3481 if (nextlast)
3482 SET_PREV_INSN (nextlast) = prevfirst;
3483 else
3484 set_last_insn (prevfirst);
3485 if (!prevfirst)
3486 set_first_insn (nextlast);
3487 return first;
3488 }
3489 \f
3490 /* Skip over inter-block insns occurring after BB which are typically
3491 associated with BB (e.g., barriers). If there are any such insns,
3492 we return the last one. Otherwise, we return the end of BB. */
3493
3494 static rtx_insn *
3495 skip_insns_after_block (basic_block bb)
3496 {
3497 rtx_insn *insn, *last_insn, *next_head, *prev;
3498
3499 next_head = NULL;
3500 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3501 next_head = BB_HEAD (bb->next_bb);
3502
3503 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3504 {
3505 if (insn == next_head)
3506 break;
3507
3508 switch (GET_CODE (insn))
3509 {
3510 case BARRIER:
3511 last_insn = insn;
3512 continue;
3513
3514 case NOTE:
3515 switch (NOTE_KIND (insn))
3516 {
3517 case NOTE_INSN_BLOCK_END:
3518 gcc_unreachable ();
3519 continue;
3520 default:
3521 continue;
3522 break;
3523 }
3524 break;
3525
3526 case CODE_LABEL:
3527 if (NEXT_INSN (insn)
3528 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3529 {
3530 insn = NEXT_INSN (insn);
3531 last_insn = insn;
3532 continue;
3533 }
3534 break;
3535
3536 default:
3537 break;
3538 }
3539
3540 break;
3541 }
3542
3543 /* It is possible to hit contradictory sequence. For instance:
3544
3545 jump_insn
3546 NOTE_INSN_BLOCK_BEG
3547 barrier
3548
3549 Where barrier belongs to jump_insn, but the note does not. This can be
3550 created by removing the basic block originally following
3551 NOTE_INSN_BLOCK_BEG. In such case reorder the notes. */
3552
3553 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3554 {
3555 prev = PREV_INSN (insn);
3556 if (NOTE_P (insn))
3557 switch (NOTE_KIND (insn))
3558 {
3559 case NOTE_INSN_BLOCK_END:
3560 gcc_unreachable ();
3561 break;
3562 case NOTE_INSN_DELETED:
3563 case NOTE_INSN_DELETED_LABEL:
3564 case NOTE_INSN_DELETED_DEBUG_LABEL:
3565 continue;
3566 default:
3567 reorder_insns (insn, insn, last_insn);
3568 }
3569 }
3570
3571 return last_insn;
3572 }
3573
3574 /* Locate or create a label for a given basic block. */
3575
3576 static rtx_insn *
3577 label_for_bb (basic_block bb)
3578 {
3579 rtx_insn *label = BB_HEAD (bb);
3580
3581 if (!LABEL_P (label))
3582 {
3583 if (dump_file)
3584 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3585
3586 label = block_label (bb);
3587 }
3588
3589 return label;
3590 }
3591
3592 /* Locate the effective beginning and end of the insn chain for each
3593 block, as defined by skip_insns_after_block above. */
3594
3595 static void
3596 record_effective_endpoints (void)
3597 {
3598 rtx_insn *next_insn;
3599 basic_block bb;
3600 rtx_insn *insn;
3601
3602 for (insn = get_insns ();
3603 insn
3604 && NOTE_P (insn)
3605 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3606 insn = NEXT_INSN (insn))
3607 continue;
3608 /* No basic blocks at all? */
3609 gcc_assert (insn);
3610
3611 if (PREV_INSN (insn))
3612 cfg_layout_function_header =
3613 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3614 else
3615 cfg_layout_function_header = NULL;
3616
3617 next_insn = get_insns ();
3618 FOR_EACH_BB_FN (bb, cfun)
3619 {
3620 rtx_insn *end;
3621
3622 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3623 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3624 PREV_INSN (BB_HEAD (bb)));
3625 end = skip_insns_after_block (bb);
3626 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3627 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3628 next_insn = NEXT_INSN (BB_END (bb));
3629 }
3630
3631 cfg_layout_function_footer = next_insn;
3632 if (cfg_layout_function_footer)
3633 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3634 }
3635 \f
3636 namespace {
3637
3638 const pass_data pass_data_into_cfg_layout_mode =
3639 {
3640 RTL_PASS, /* type */
3641 "into_cfglayout", /* name */
3642 OPTGROUP_NONE, /* optinfo_flags */
3643 TV_CFG, /* tv_id */
3644 0, /* properties_required */
3645 PROP_cfglayout, /* properties_provided */
3646 0, /* properties_destroyed */
3647 0, /* todo_flags_start */
3648 0, /* todo_flags_finish */
3649 };
3650
3651 class pass_into_cfg_layout_mode : public rtl_opt_pass
3652 {
3653 public:
3654 pass_into_cfg_layout_mode (gcc::context *ctxt)
3655 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3656 {}
3657
3658 /* opt_pass methods: */
3659 virtual unsigned int execute (function *)
3660 {
3661 cfg_layout_initialize (0);
3662 return 0;
3663 }
3664
3665 }; // class pass_into_cfg_layout_mode
3666
3667 } // anon namespace
3668
3669 rtl_opt_pass *
3670 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3671 {
3672 return new pass_into_cfg_layout_mode (ctxt);
3673 }
3674
3675 namespace {
3676
3677 const pass_data pass_data_outof_cfg_layout_mode =
3678 {
3679 RTL_PASS, /* type */
3680 "outof_cfglayout", /* name */
3681 OPTGROUP_NONE, /* optinfo_flags */
3682 TV_CFG, /* tv_id */
3683 0, /* properties_required */
3684 0, /* properties_provided */
3685 PROP_cfglayout, /* properties_destroyed */
3686 0, /* todo_flags_start */
3687 0, /* todo_flags_finish */
3688 };
3689
3690 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3691 {
3692 public:
3693 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3694 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3695 {}
3696
3697 /* opt_pass methods: */
3698 virtual unsigned int execute (function *);
3699
3700 }; // class pass_outof_cfg_layout_mode
3701
3702 unsigned int
3703 pass_outof_cfg_layout_mode::execute (function *fun)
3704 {
3705 basic_block bb;
3706
3707 FOR_EACH_BB_FN (bb, fun)
3708 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3709 bb->aux = bb->next_bb;
3710
3711 cfg_layout_finalize ();
3712
3713 return 0;
3714 }
3715
3716 } // anon namespace
3717
3718 rtl_opt_pass *
3719 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3720 {
3721 return new pass_outof_cfg_layout_mode (ctxt);
3722 }
3723 \f
3724
3725 /* Link the basic blocks in the correct order, compacting the basic
3726 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3727 function also clears the basic block header and footer fields.
3728
3729 This function is usually called after a pass (e.g. tracer) finishes
3730 some transformations while in cfglayout mode. The required sequence
3731 of the basic blocks is in a linked list along the bb->aux field.
3732 This functions re-links the basic block prev_bb and next_bb pointers
3733 accordingly, and it compacts and renumbers the blocks.
3734
3735 FIXME: This currently works only for RTL, but the only RTL-specific
3736 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3737 to GIMPLE a long time ago, but it doesn't relink the basic block
3738 chain. It could do that (to give better initial RTL) if this function
3739 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3740
3741 void
3742 relink_block_chain (bool stay_in_cfglayout_mode)
3743 {
3744 basic_block bb, prev_bb;
3745 int index;
3746
3747 /* Maybe dump the re-ordered sequence. */
3748 if (dump_file)
3749 {
3750 fprintf (dump_file, "Reordered sequence:\n");
3751 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3752 NUM_FIXED_BLOCKS;
3753 bb;
3754 bb = (basic_block) bb->aux, index++)
3755 {
3756 fprintf (dump_file, " %i ", index);
3757 if (get_bb_original (bb))
3758 fprintf (dump_file, "duplicate of %i\n",
3759 get_bb_original (bb)->index);
3760 else if (forwarder_block_p (bb)
3761 && !LABEL_P (BB_HEAD (bb)))
3762 fprintf (dump_file, "compensation\n");
3763 else
3764 fprintf (dump_file, "bb %i\n", bb->index);
3765 }
3766 }
3767
3768 /* Now reorder the blocks. */
3769 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3770 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3771 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3772 {
3773 bb->prev_bb = prev_bb;
3774 prev_bb->next_bb = bb;
3775 }
3776 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3777 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3778
3779 /* Then, clean up the aux fields. */
3780 FOR_ALL_BB_FN (bb, cfun)
3781 {
3782 bb->aux = NULL;
3783 if (!stay_in_cfglayout_mode)
3784 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3785 }
3786
3787 /* Maybe reset the original copy tables, they are not valid anymore
3788 when we renumber the basic blocks in compact_blocks. If we are
3789 are going out of cfglayout mode, don't re-allocate the tables. */
3790 if (original_copy_tables_initialized_p ())
3791 free_original_copy_tables ();
3792 if (stay_in_cfglayout_mode)
3793 initialize_original_copy_tables ();
3794
3795 /* Finally, put basic_block_info in the new order. */
3796 compact_blocks ();
3797 }
3798 \f
3799
3800 /* Given a reorder chain, rearrange the code to match. */
3801
3802 static void
3803 fixup_reorder_chain (void)
3804 {
3805 basic_block bb;
3806 rtx_insn *insn = NULL;
3807
3808 if (cfg_layout_function_header)
3809 {
3810 set_first_insn (cfg_layout_function_header);
3811 insn = cfg_layout_function_header;
3812 while (NEXT_INSN (insn))
3813 insn = NEXT_INSN (insn);
3814 }
3815
3816 /* First do the bulk reordering -- rechain the blocks without regard to
3817 the needed changes to jumps and labels. */
3818
3819 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3820 bb->aux)
3821 {
3822 if (BB_HEADER (bb))
3823 {
3824 if (insn)
3825 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3826 else
3827 set_first_insn (BB_HEADER (bb));
3828 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3829 insn = BB_HEADER (bb);
3830 while (NEXT_INSN (insn))
3831 insn = NEXT_INSN (insn);
3832 }
3833 if (insn)
3834 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3835 else
3836 set_first_insn (BB_HEAD (bb));
3837 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3838 insn = BB_END (bb);
3839 if (BB_FOOTER (bb))
3840 {
3841 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3842 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3843 while (NEXT_INSN (insn))
3844 insn = NEXT_INSN (insn);
3845 }
3846 }
3847
3848 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3849 if (cfg_layout_function_footer)
3850 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3851
3852 while (NEXT_INSN (insn))
3853 insn = NEXT_INSN (insn);
3854
3855 set_last_insn (insn);
3856 if (flag_checking)
3857 verify_insn_chain ();
3858
3859 /* Now add jumps and labels as needed to match the blocks new
3860 outgoing edges. */
3861
3862 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3863 bb->aux)
3864 {
3865 edge e_fall, e_taken, e;
3866 rtx_insn *bb_end_insn;
3867 rtx ret_label = NULL_RTX;
3868 basic_block nb;
3869 edge_iterator ei;
3870
3871 if (EDGE_COUNT (bb->succs) == 0)
3872 continue;
3873
3874 /* Find the old fallthru edge, and another non-EH edge for
3875 a taken jump. */
3876 e_taken = e_fall = NULL;
3877
3878 FOR_EACH_EDGE (e, ei, bb->succs)
3879 if (e->flags & EDGE_FALLTHRU)
3880 e_fall = e;
3881 else if (! (e->flags & EDGE_EH))
3882 e_taken = e;
3883
3884 bb_end_insn = BB_END (bb);
3885 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3886 {
3887 ret_label = JUMP_LABEL (bb_end_jump);
3888 if (any_condjump_p (bb_end_jump))
3889 {
3890 /* This might happen if the conditional jump has side
3891 effects and could therefore not be optimized away.
3892 Make the basic block to end with a barrier in order
3893 to prevent rtl_verify_flow_info from complaining. */
3894 if (!e_fall)
3895 {
3896 gcc_assert (!onlyjump_p (bb_end_jump)
3897 || returnjump_p (bb_end_jump)
3898 || (e_taken->flags & EDGE_CROSSING));
3899 emit_barrier_after (bb_end_jump);
3900 continue;
3901 }
3902
3903 /* If the old fallthru is still next, nothing to do. */
3904 if (bb->aux == e_fall->dest
3905 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3906 continue;
3907
3908 /* The degenerated case of conditional jump jumping to the next
3909 instruction can happen for jumps with side effects. We need
3910 to construct a forwarder block and this will be done just
3911 fine by force_nonfallthru below. */
3912 if (!e_taken)
3913 ;
3914
3915 /* There is another special case: if *neither* block is next,
3916 such as happens at the very end of a function, then we'll
3917 need to add a new unconditional jump. Choose the taken
3918 edge based on known or assumed probability. */
3919 else if (bb->aux != e_taken->dest)
3920 {
3921 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3922
3923 if (note
3924 && profile_probability::from_reg_br_prob_note
3925 (XINT (note, 0)) < profile_probability::even ()
3926 && invert_jump (bb_end_jump,
3927 (e_fall->dest
3928 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3929 ? NULL_RTX
3930 : label_for_bb (e_fall->dest)), 0))
3931 {
3932 e_fall->flags &= ~EDGE_FALLTHRU;
3933 gcc_checking_assert (could_fall_through
3934 (e_taken->src, e_taken->dest));
3935 e_taken->flags |= EDGE_FALLTHRU;
3936 update_br_prob_note (bb);
3937 e = e_fall, e_fall = e_taken, e_taken = e;
3938 }
3939 }
3940
3941 /* If the "jumping" edge is a crossing edge, and the fall
3942 through edge is non-crossing, leave things as they are. */
3943 else if ((e_taken->flags & EDGE_CROSSING)
3944 && !(e_fall->flags & EDGE_CROSSING))
3945 continue;
3946
3947 /* Otherwise we can try to invert the jump. This will
3948 basically never fail, however, keep up the pretense. */
3949 else if (invert_jump (bb_end_jump,
3950 (e_fall->dest
3951 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3952 ? NULL_RTX
3953 : label_for_bb (e_fall->dest)), 0))
3954 {
3955 e_fall->flags &= ~EDGE_FALLTHRU;
3956 gcc_checking_assert (could_fall_through
3957 (e_taken->src, e_taken->dest));
3958 e_taken->flags |= EDGE_FALLTHRU;
3959 update_br_prob_note (bb);
3960 if (LABEL_NUSES (ret_label) == 0
3961 && single_pred_p (e_taken->dest))
3962 delete_insn (as_a<rtx_insn *> (ret_label));
3963 continue;
3964 }
3965 }
3966 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3967 {
3968 /* If the old fallthru is still next or if
3969 asm goto doesn't have a fallthru (e.g. when followed by
3970 __builtin_unreachable ()), nothing to do. */
3971 if (! e_fall
3972 || bb->aux == e_fall->dest
3973 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3974 continue;
3975
3976 /* Otherwise we'll have to use the fallthru fixup below. */
3977 }
3978 else
3979 {
3980 /* Otherwise we have some return, switch or computed
3981 jump. In the 99% case, there should not have been a
3982 fallthru edge. */
3983 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3984 continue;
3985 }
3986 }
3987 else
3988 {
3989 /* No fallthru implies a noreturn function with EH edges, or
3990 something similarly bizarre. In any case, we don't need to
3991 do anything. */
3992 if (! e_fall)
3993 continue;
3994
3995 /* If the fallthru block is still next, nothing to do. */
3996 if (bb->aux == e_fall->dest)
3997 continue;
3998
3999 /* A fallthru to exit block. */
4000 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4001 continue;
4002 }
4003
4004 /* We got here if we need to add a new jump insn.
4005 Note force_nonfallthru can delete E_FALL and thus we have to
4006 save E_FALL->src prior to the call to force_nonfallthru. */
4007 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
4008 if (nb)
4009 {
4010 nb->aux = bb->aux;
4011 bb->aux = nb;
4012 /* Don't process this new block. */
4013 bb = nb;
4014 }
4015 }
4016
4017 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
4018
4019 /* Annoying special case - jump around dead jumptables left in the code. */
4020 FOR_EACH_BB_FN (bb, cfun)
4021 {
4022 edge e = find_fallthru_edge (bb->succs);
4023
4024 if (e && !can_fallthru (e->src, e->dest))
4025 force_nonfallthru (e);
4026 }
4027
4028 /* Ensure goto_locus from edges has some instructions with that locus in RTL
4029 when not optimizing. */
4030 if (!optimize && !DECL_IGNORED_P (current_function_decl))
4031 FOR_EACH_BB_FN (bb, cfun)
4032 {
4033 edge e;
4034 edge_iterator ei;
4035
4036 FOR_EACH_EDGE (e, ei, bb->succs)
4037 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
4038 && !(e->flags & EDGE_ABNORMAL))
4039 {
4040 edge e2;
4041 edge_iterator ei2;
4042 basic_block dest, nb;
4043 rtx_insn *end;
4044
4045 insn = BB_END (e->src);
4046 end = PREV_INSN (BB_HEAD (e->src));
4047 while (insn != end
4048 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
4049 insn = PREV_INSN (insn);
4050 if (insn != end
4051 && INSN_LOCATION (insn) == e->goto_locus)
4052 continue;
4053 if (simplejump_p (BB_END (e->src))
4054 && !INSN_HAS_LOCATION (BB_END (e->src)))
4055 {
4056 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
4057 continue;
4058 }
4059 dest = e->dest;
4060 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4061 {
4062 /* Non-fallthru edges to the exit block cannot be split. */
4063 if (!(e->flags & EDGE_FALLTHRU))
4064 continue;
4065 }
4066 else
4067 {
4068 insn = BB_HEAD (dest);
4069 end = NEXT_INSN (BB_END (dest));
4070 while (insn != end && !NONDEBUG_INSN_P (insn))
4071 insn = NEXT_INSN (insn);
4072 if (insn != end && INSN_HAS_LOCATION (insn)
4073 && INSN_LOCATION (insn) == e->goto_locus)
4074 continue;
4075 }
4076 nb = split_edge (e);
4077 if (!INSN_P (BB_END (nb)))
4078 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
4079 nb);
4080 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
4081
4082 /* If there are other incoming edges to the destination block
4083 with the same goto locus, redirect them to the new block as
4084 well, this can prevent other such blocks from being created
4085 in subsequent iterations of the loop. */
4086 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
4087 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
4088 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
4089 && e->goto_locus == e2->goto_locus)
4090 redirect_edge_and_branch (e2, nb);
4091 else
4092 ei_next (&ei2);
4093 }
4094 }
4095 }
4096 \f
4097 /* Perform sanity checks on the insn chain.
4098 1. Check that next/prev pointers are consistent in both the forward and
4099 reverse direction.
4100 2. Count insns in chain, going both directions, and check if equal.
4101 3. Check that get_last_insn () returns the actual end of chain. */
4102
4103 DEBUG_FUNCTION void
4104 verify_insn_chain (void)
4105 {
4106 rtx_insn *x, *prevx, *nextx;
4107 int insn_cnt1, insn_cnt2;
4108
4109 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4110 x != 0;
4111 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4112 gcc_assert (PREV_INSN (x) == prevx);
4113
4114 gcc_assert (prevx == get_last_insn ());
4115
4116 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4117 x != 0;
4118 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4119 gcc_assert (NEXT_INSN (x) == nextx);
4120
4121 gcc_assert (insn_cnt1 == insn_cnt2);
4122 }
4123 \f
4124 /* If we have assembler epilogues, the block falling through to exit must
4125 be the last one in the reordered chain when we reach final. Ensure
4126 that this condition is met. */
4127 static void
4128 fixup_fallthru_exit_predecessor (void)
4129 {
4130 edge e;
4131 basic_block bb = NULL;
4132
4133 /* This transformation is not valid before reload, because we might
4134 separate a call from the instruction that copies the return
4135 value. */
4136 gcc_assert (reload_completed);
4137
4138 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4139 if (e)
4140 bb = e->src;
4141
4142 if (bb && bb->aux)
4143 {
4144 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4145
4146 /* If the very first block is the one with the fall-through exit
4147 edge, we have to split that block. */
4148 if (c == bb)
4149 {
4150 bb = split_block_after_labels (bb)->dest;
4151 bb->aux = c->aux;
4152 c->aux = bb;
4153 BB_FOOTER (bb) = BB_FOOTER (c);
4154 BB_FOOTER (c) = NULL;
4155 }
4156
4157 while (c->aux != bb)
4158 c = (basic_block) c->aux;
4159
4160 c->aux = bb->aux;
4161 while (c->aux)
4162 c = (basic_block) c->aux;
4163
4164 c->aux = bb;
4165 bb->aux = NULL;
4166 }
4167 }
4168
4169 /* In case there are more than one fallthru predecessors of exit, force that
4170 there is only one. */
4171
4172 static void
4173 force_one_exit_fallthru (void)
4174 {
4175 edge e, predecessor = NULL;
4176 bool more = false;
4177 edge_iterator ei;
4178 basic_block forwarder, bb;
4179
4180 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4181 if (e->flags & EDGE_FALLTHRU)
4182 {
4183 if (predecessor == NULL)
4184 predecessor = e;
4185 else
4186 {
4187 more = true;
4188 break;
4189 }
4190 }
4191
4192 if (!more)
4193 return;
4194
4195 /* Exit has several fallthru predecessors. Create a forwarder block for
4196 them. */
4197 forwarder = split_edge (predecessor);
4198 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4199 (e = ei_safe_edge (ei)); )
4200 {
4201 if (e->src == forwarder
4202 || !(e->flags & EDGE_FALLTHRU))
4203 ei_next (&ei);
4204 else
4205 redirect_edge_and_branch_force (e, forwarder);
4206 }
4207
4208 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4209 exit block. */
4210 FOR_EACH_BB_FN (bb, cfun)
4211 {
4212 if (bb->aux == NULL && bb != forwarder)
4213 {
4214 bb->aux = forwarder;
4215 break;
4216 }
4217 }
4218 }
4219 \f
4220 /* Return true in case it is possible to duplicate the basic block BB. */
4221
4222 static bool
4223 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4224 {
4225 /* Do not attempt to duplicate tablejumps, as we need to unshare
4226 the dispatch table. This is difficult to do, as the instructions
4227 computing jump destination may be hoisted outside the basic block. */
4228 if (tablejump_p (BB_END (bb), NULL, NULL))
4229 return false;
4230
4231 /* Do not duplicate blocks containing insns that can't be copied. */
4232 if (targetm.cannot_copy_insn_p)
4233 {
4234 rtx_insn *insn = BB_HEAD (bb);
4235 while (1)
4236 {
4237 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4238 return false;
4239 if (insn == BB_END (bb))
4240 break;
4241 insn = NEXT_INSN (insn);
4242 }
4243 }
4244
4245 return true;
4246 }
4247
4248 rtx_insn *
4249 duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
4250 class loop *loop, copy_bb_data *id)
4251 {
4252 rtx_insn *insn, *next, *copy;
4253 rtx_note *last;
4254
4255 /* Avoid updating of boundaries of previous basic block. The
4256 note will get removed from insn stream in fixup. */
4257 last = emit_note (NOTE_INSN_DELETED);
4258
4259 /* Create copy at the end of INSN chain. The chain will
4260 be reordered later. */
4261 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4262 {
4263 switch (GET_CODE (insn))
4264 {
4265 case DEBUG_INSN:
4266 /* Don't duplicate label debug insns. */
4267 if (DEBUG_BIND_INSN_P (insn)
4268 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4269 break;
4270 /* FALLTHRU */
4271 case INSN:
4272 case CALL_INSN:
4273 case JUMP_INSN:
4274 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4275 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4276 && ANY_RETURN_P (JUMP_LABEL (insn)))
4277 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4278 maybe_copy_prologue_epilogue_insn (insn, copy);
4279 /* If requested remap dependence info of cliques brought in
4280 via inlining. */
4281 if (id)
4282 {
4283 subrtx_iterator::array_type array;
4284 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
4285 if (MEM_P (*iter) && MEM_EXPR (*iter))
4286 {
4287 tree op = MEM_EXPR (*iter);
4288 if (TREE_CODE (op) == WITH_SIZE_EXPR)
4289 op = TREE_OPERAND (op, 0);
4290 while (handled_component_p (op))
4291 op = TREE_OPERAND (op, 0);
4292 if ((TREE_CODE (op) == MEM_REF
4293 || TREE_CODE (op) == TARGET_MEM_REF)
4294 && MR_DEPENDENCE_CLIQUE (op) > 1
4295 && (!loop
4296 || (MR_DEPENDENCE_CLIQUE (op)
4297 != loop->owned_clique)))
4298 {
4299 if (!id->dependence_map)
4300 id->dependence_map = new hash_map<dependence_hash,
4301 unsigned short>;
4302 bool existed;
4303 unsigned short &newc = id->dependence_map->get_or_insert
4304 (MR_DEPENDENCE_CLIQUE (op), &existed);
4305 if (!existed)
4306 {
4307 gcc_assert
4308 (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
4309 newc = ++cfun->last_clique;
4310 }
4311 /* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
4312 since MEM_EXPR is shared so make a copy and
4313 walk to the subtree again. */
4314 tree new_expr = unshare_expr (MEM_EXPR (*iter));
4315 if (TREE_CODE (new_expr) == WITH_SIZE_EXPR)
4316 new_expr = TREE_OPERAND (new_expr, 0);
4317 while (handled_component_p (new_expr))
4318 new_expr = TREE_OPERAND (new_expr, 0);
4319 MR_DEPENDENCE_CLIQUE (new_expr) = newc;
4320 set_mem_expr (const_cast <rtx> (*iter), new_expr);
4321 }
4322 }
4323 }
4324 break;
4325
4326 case JUMP_TABLE_DATA:
4327 /* Avoid copying of dispatch tables. We never duplicate
4328 tablejumps, so this can hit only in case the table got
4329 moved far from original jump.
4330 Avoid copying following barrier as well if any
4331 (and debug insns in between). */
4332 for (next = NEXT_INSN (insn);
4333 next != NEXT_INSN (to);
4334 next = NEXT_INSN (next))
4335 if (!DEBUG_INSN_P (next))
4336 break;
4337 if (next != NEXT_INSN (to) && BARRIER_P (next))
4338 insn = next;
4339 break;
4340
4341 case CODE_LABEL:
4342 break;
4343
4344 case BARRIER:
4345 emit_barrier ();
4346 break;
4347
4348 case NOTE:
4349 switch (NOTE_KIND (insn))
4350 {
4351 /* In case prologue is empty and function contain label
4352 in first BB, we may want to copy the block. */
4353 case NOTE_INSN_PROLOGUE_END:
4354
4355 case NOTE_INSN_DELETED:
4356 case NOTE_INSN_DELETED_LABEL:
4357 case NOTE_INSN_DELETED_DEBUG_LABEL:
4358 /* No problem to strip these. */
4359 case NOTE_INSN_FUNCTION_BEG:
4360 /* There is always just single entry to function. */
4361 case NOTE_INSN_BASIC_BLOCK:
4362 /* We should only switch text sections once. */
4363 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4364 break;
4365
4366 case NOTE_INSN_EPILOGUE_BEG:
4367 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4368 emit_note_copy (as_a <rtx_note *> (insn));
4369 break;
4370
4371 default:
4372 /* All other notes should have already been eliminated. */
4373 gcc_unreachable ();
4374 }
4375 break;
4376 default:
4377 gcc_unreachable ();
4378 }
4379 }
4380 insn = NEXT_INSN (last);
4381 delete_insn (last);
4382 return insn;
4383 }
4384
4385 /* Create a duplicate of the basic block BB. */
4386
4387 static basic_block
4388 cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *id)
4389 {
4390 rtx_insn *insn;
4391 basic_block new_bb;
4392
4393 class loop *loop = (id && current_loops) ? bb->loop_father : NULL;
4394
4395 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb), loop, id);
4396 new_bb = create_basic_block (insn,
4397 insn ? get_last_insn () : NULL,
4398 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4399
4400 BB_COPY_PARTITION (new_bb, bb);
4401 if (BB_HEADER (bb))
4402 {
4403 insn = BB_HEADER (bb);
4404 while (NEXT_INSN (insn))
4405 insn = NEXT_INSN (insn);
4406 insn = duplicate_insn_chain (BB_HEADER (bb), insn, loop, id);
4407 if (insn)
4408 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4409 }
4410
4411 if (BB_FOOTER (bb))
4412 {
4413 insn = BB_FOOTER (bb);
4414 while (NEXT_INSN (insn))
4415 insn = NEXT_INSN (insn);
4416 insn = duplicate_insn_chain (BB_FOOTER (bb), insn, loop, id);
4417 if (insn)
4418 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4419 }
4420
4421 return new_bb;
4422 }
4423
4424 \f
4425 /* Main entry point to this module - initialize the datastructures for
4426 CFG layout changes. It keeps LOOPS up-to-date if not null.
4427
4428 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4429
4430 void
4431 cfg_layout_initialize (int flags)
4432 {
4433 rtx_insn_list *x;
4434 basic_block bb;
4435
4436 /* Once bb partitioning is complete, cfg layout mode should not be
4437 re-entered. Entering cfg layout mode may require fixups. As an
4438 example, if edge forwarding performed when optimizing the cfg
4439 layout required moving a block from the hot to the cold
4440 section. This would create an illegal partitioning unless some
4441 manual fixup was performed. */
4442 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4443
4444 initialize_original_copy_tables ();
4445
4446 cfg_layout_rtl_register_cfg_hooks ();
4447
4448 record_effective_endpoints ();
4449
4450 /* Make sure that the targets of non local gotos are marked. */
4451 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4452 {
4453 bb = BLOCK_FOR_INSN (x->insn ());
4454 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4455 }
4456
4457 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4458 }
4459
4460 /* Splits superblocks. */
4461 void
4462 break_superblocks (void)
4463 {
4464 bool need = false;
4465 basic_block bb;
4466
4467 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4468 bitmap_clear (superblocks);
4469
4470 FOR_EACH_BB_FN (bb, cfun)
4471 if (bb->flags & BB_SUPERBLOCK)
4472 {
4473 bb->flags &= ~BB_SUPERBLOCK;
4474 bitmap_set_bit (superblocks, bb->index);
4475 need = true;
4476 }
4477
4478 if (need)
4479 {
4480 rebuild_jump_labels (get_insns ());
4481 find_many_sub_basic_blocks (superblocks);
4482 }
4483 }
4484
4485 /* Finalize the changes: reorder insn list according to the sequence specified
4486 by aux pointers, enter compensation code, rebuild scope forest. */
4487
4488 void
4489 cfg_layout_finalize (void)
4490 {
4491 free_dominance_info (CDI_DOMINATORS);
4492 force_one_exit_fallthru ();
4493 rtl_register_cfg_hooks ();
4494 if (reload_completed && !targetm.have_epilogue ())
4495 fixup_fallthru_exit_predecessor ();
4496 fixup_reorder_chain ();
4497
4498 rebuild_jump_labels (get_insns ());
4499 delete_dead_jumptables ();
4500
4501 if (flag_checking)
4502 verify_insn_chain ();
4503 checking_verify_flow_info ();
4504 }
4505
4506
4507 /* Same as split_block but update cfg_layout structures. */
4508
4509 static basic_block
4510 cfg_layout_split_block (basic_block bb, void *insnp)
4511 {
4512 rtx insn = (rtx) insnp;
4513 basic_block new_bb = rtl_split_block (bb, insn);
4514
4515 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4516 BB_FOOTER (bb) = NULL;
4517
4518 return new_bb;
4519 }
4520
4521 /* Redirect Edge to DEST. */
4522 static edge
4523 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4524 {
4525 basic_block src = e->src;
4526 edge ret;
4527
4528 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4529 return NULL;
4530
4531 if (e->dest == dest)
4532 return e;
4533
4534 if (e->flags & EDGE_CROSSING
4535 && BB_PARTITION (e->src) == BB_PARTITION (dest)
4536 && simplejump_p (BB_END (src)))
4537 {
4538 if (dump_file)
4539 fprintf (dump_file,
4540 "Removing crossing jump while redirecting edge form %i to %i\n",
4541 e->src->index, dest->index);
4542 delete_insn (BB_END (src));
4543 remove_barriers_from_footer (src);
4544 e->flags |= EDGE_FALLTHRU;
4545 }
4546
4547 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4548 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4549 {
4550 df_set_bb_dirty (src);
4551 return ret;
4552 }
4553
4554 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4555 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4556 {
4557 if (dump_file)
4558 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4559 e->src->index, dest->index);
4560
4561 df_set_bb_dirty (e->src);
4562 redirect_edge_succ (e, dest);
4563 return e;
4564 }
4565
4566 /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
4567 in the case the basic block appears to be in sequence. Avoid this
4568 transformation. */
4569
4570 if (e->flags & EDGE_FALLTHRU)
4571 {
4572 /* Redirect any branch edges unified with the fallthru one. */
4573 if (JUMP_P (BB_END (src))
4574 && label_is_jump_target_p (BB_HEAD (e->dest),
4575 BB_END (src)))
4576 {
4577 edge redirected;
4578
4579 if (dump_file)
4580 fprintf (dump_file, "Fallthru edge unified with branch "
4581 "%i->%i redirected to %i\n",
4582 e->src->index, e->dest->index, dest->index);
4583 e->flags &= ~EDGE_FALLTHRU;
4584 redirected = redirect_branch_edge (e, dest);
4585 gcc_assert (redirected);
4586 redirected->flags |= EDGE_FALLTHRU;
4587 df_set_bb_dirty (redirected->src);
4588 return redirected;
4589 }
4590 /* In case we are redirecting fallthru edge to the branch edge
4591 of conditional jump, remove it. */
4592 if (EDGE_COUNT (src->succs) == 2)
4593 {
4594 /* Find the edge that is different from E. */
4595 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4596
4597 if (s->dest == dest
4598 && any_condjump_p (BB_END (src))
4599 && onlyjump_p (BB_END (src)))
4600 delete_insn (BB_END (src));
4601 }
4602 if (dump_file)
4603 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4604 e->src->index, e->dest->index, dest->index);
4605 ret = redirect_edge_succ_nodup (e, dest);
4606 }
4607 else
4608 ret = redirect_branch_edge (e, dest);
4609
4610 if (!ret)
4611 return NULL;
4612
4613 fixup_partition_crossing (ret);
4614 /* We don't want simplejumps in the insn stream during cfglayout. */
4615 gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4616
4617 df_set_bb_dirty (src);
4618 return ret;
4619 }
4620
4621 /* Simple wrapper as we always can redirect fallthru edges. */
4622 static basic_block
4623 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4624 {
4625 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4626
4627 gcc_assert (redirected);
4628 return NULL;
4629 }
4630
4631 /* Same as delete_basic_block but update cfg_layout structures. */
4632
4633 static void
4634 cfg_layout_delete_block (basic_block bb)
4635 {
4636 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4637 rtx_insn **to;
4638
4639 if (BB_HEADER (bb))
4640 {
4641 next = BB_HEAD (bb);
4642 if (prev)
4643 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4644 else
4645 set_first_insn (BB_HEADER (bb));
4646 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4647 insn = BB_HEADER (bb);
4648 while (NEXT_INSN (insn))
4649 insn = NEXT_INSN (insn);
4650 SET_NEXT_INSN (insn) = next;
4651 SET_PREV_INSN (next) = insn;
4652 }
4653 next = NEXT_INSN (BB_END (bb));
4654 if (BB_FOOTER (bb))
4655 {
4656 insn = BB_FOOTER (bb);
4657 while (insn)
4658 {
4659 if (BARRIER_P (insn))
4660 {
4661 if (PREV_INSN (insn))
4662 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4663 else
4664 BB_FOOTER (bb) = NEXT_INSN (insn);
4665 if (NEXT_INSN (insn))
4666 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4667 }
4668 if (LABEL_P (insn))
4669 break;
4670 insn = NEXT_INSN (insn);
4671 }
4672 if (BB_FOOTER (bb))
4673 {
4674 insn = BB_END (bb);
4675 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4676 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4677 while (NEXT_INSN (insn))
4678 insn = NEXT_INSN (insn);
4679 SET_NEXT_INSN (insn) = next;
4680 if (next)
4681 SET_PREV_INSN (next) = insn;
4682 else
4683 set_last_insn (insn);
4684 }
4685 }
4686 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4687 to = &BB_HEADER (bb->next_bb);
4688 else
4689 to = &cfg_layout_function_footer;
4690
4691 rtl_delete_block (bb);
4692
4693 if (prev)
4694 prev = NEXT_INSN (prev);
4695 else
4696 prev = get_insns ();
4697 if (next)
4698 next = PREV_INSN (next);
4699 else
4700 next = get_last_insn ();
4701
4702 if (next && NEXT_INSN (next) != prev)
4703 {
4704 remaints = unlink_insn_chain (prev, next);
4705 insn = remaints;
4706 while (NEXT_INSN (insn))
4707 insn = NEXT_INSN (insn);
4708 SET_NEXT_INSN (insn) = *to;
4709 if (*to)
4710 SET_PREV_INSN (*to) = insn;
4711 *to = remaints;
4712 }
4713 }
4714
4715 /* Return true when blocks A and B can be safely merged. */
4716
4717 static bool
4718 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4719 {
4720 /* If we are partitioning hot/cold basic blocks, we don't want to
4721 mess up unconditional or indirect jumps that cross between hot
4722 and cold sections.
4723
4724 Basic block partitioning may result in some jumps that appear to
4725 be optimizable (or blocks that appear to be mergeable), but which really
4726 must be left untouched (they are required to make it safely across
4727 partition boundaries). See the comments at the top of
4728 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4729
4730 if (BB_PARTITION (a) != BB_PARTITION (b))
4731 return false;
4732
4733 /* Protect the loop latches. */
4734 if (current_loops && b->loop_father->latch == b)
4735 return false;
4736
4737 /* If we would end up moving B's instructions, make sure it doesn't fall
4738 through into the exit block, since we cannot recover from a fallthrough
4739 edge into the exit block occurring in the middle of a function. */
4740 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4741 {
4742 edge e = find_fallthru_edge (b->succs);
4743 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4744 return false;
4745 }
4746
4747 /* There must be exactly one edge in between the blocks. */
4748 return (single_succ_p (a)
4749 && single_succ (a) == b
4750 && single_pred_p (b) == 1
4751 && a != b
4752 /* Must be simple edge. */
4753 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4754 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4755 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4756 /* If the jump insn has side effects, we can't kill the edge.
4757 When not optimizing, try_redirect_by_replacing_jump will
4758 not allow us to redirect an edge by replacing a table jump. */
4759 && (!JUMP_P (BB_END (a))
4760 || ((!optimize || reload_completed)
4761 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4762 }
4763
4764 /* Merge block A and B. The blocks must be mergeable. */
4765
4766 static void
4767 cfg_layout_merge_blocks (basic_block a, basic_block b)
4768 {
4769 /* If B is a forwarder block whose outgoing edge has no location, we'll
4770 propagate the locus of the edge between A and B onto it. */
4771 const bool forward_edge_locus
4772 = (b->flags & BB_FORWARDER_BLOCK) != 0
4773 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
4774 rtx_insn *insn;
4775
4776 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4777
4778 if (dump_file)
4779 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4780 a->index);
4781
4782 /* If there was a CODE_LABEL beginning B, delete it. */
4783 if (LABEL_P (BB_HEAD (b)))
4784 {
4785 delete_insn (BB_HEAD (b));
4786 }
4787
4788 /* We should have fallthru edge in a, or we can do dummy redirection to get
4789 it cleaned up. */
4790 if (JUMP_P (BB_END (a)))
4791 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4792 gcc_assert (!JUMP_P (BB_END (a)));
4793
4794 /* If not optimizing, preserve the locus of the single edge between
4795 blocks A and B if necessary by emitting a nop. */
4796 if (!optimize
4797 && !forward_edge_locus
4798 && !DECL_IGNORED_P (current_function_decl))
4799 emit_nop_for_unique_locus_between (a, b);
4800
4801 /* Move things from b->footer after a->footer. */
4802 if (BB_FOOTER (b))
4803 {
4804 if (!BB_FOOTER (a))
4805 BB_FOOTER (a) = BB_FOOTER (b);
4806 else
4807 {
4808 rtx_insn *last = BB_FOOTER (a);
4809
4810 while (NEXT_INSN (last))
4811 last = NEXT_INSN (last);
4812 SET_NEXT_INSN (last) = BB_FOOTER (b);
4813 SET_PREV_INSN (BB_FOOTER (b)) = last;
4814 }
4815 BB_FOOTER (b) = NULL;
4816 }
4817
4818 /* Move things from b->header before a->footer.
4819 Note that this may include dead tablejump data, but we don't clean
4820 those up until we go out of cfglayout mode. */
4821 if (BB_HEADER (b))
4822 {
4823 if (! BB_FOOTER (a))
4824 BB_FOOTER (a) = BB_HEADER (b);
4825 else
4826 {
4827 rtx_insn *last = BB_HEADER (b);
4828
4829 while (NEXT_INSN (last))
4830 last = NEXT_INSN (last);
4831 SET_NEXT_INSN (last) = BB_FOOTER (a);
4832 SET_PREV_INSN (BB_FOOTER (a)) = last;
4833 BB_FOOTER (a) = BB_HEADER (b);
4834 }
4835 BB_HEADER (b) = NULL;
4836 }
4837
4838 /* In the case basic blocks are not adjacent, move them around. */
4839 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4840 {
4841 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4842
4843 emit_insn_after_noloc (insn, BB_END (a), a);
4844 }
4845 /* Otherwise just re-associate the instructions. */
4846 else
4847 {
4848 insn = BB_HEAD (b);
4849 BB_END (a) = BB_END (b);
4850 }
4851
4852 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4853 We need to explicitly call. */
4854 update_bb_for_insn_chain (insn, BB_END (b), a);
4855
4856 /* Skip possible DELETED_LABEL insn. */
4857 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4858 insn = NEXT_INSN (insn);
4859 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4860 BB_HEAD (b) = BB_END (b) = NULL;
4861 delete_insn (insn);
4862
4863 df_bb_delete (b->index);
4864
4865 if (forward_edge_locus)
4866 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4867
4868 if (dump_file)
4869 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4870 }
4871
4872 /* Split edge E. */
4873
4874 static basic_block
4875 cfg_layout_split_edge (edge e)
4876 {
4877 basic_block new_bb =
4878 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4879 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4880 NULL_RTX, e->src);
4881
4882 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4883 BB_COPY_PARTITION (new_bb, e->src);
4884 else
4885 BB_COPY_PARTITION (new_bb, e->dest);
4886 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4887 redirect_edge_and_branch_force (e, new_bb);
4888
4889 return new_bb;
4890 }
4891
4892 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4893
4894 static void
4895 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4896 {
4897 }
4898
4899 /* Return true if BB contains only labels or non-executable
4900 instructions. */
4901
4902 static bool
4903 rtl_block_empty_p (basic_block bb)
4904 {
4905 rtx_insn *insn;
4906
4907 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4908 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4909 return true;
4910
4911 FOR_BB_INSNS (bb, insn)
4912 if (NONDEBUG_INSN_P (insn)
4913 && (!any_uncondjump_p (insn) || !onlyjump_p (insn)))
4914 return false;
4915
4916 return true;
4917 }
4918
4919 /* Split a basic block if it ends with a conditional branch and if
4920 the other part of the block is not empty. */
4921
4922 static basic_block
4923 rtl_split_block_before_cond_jump (basic_block bb)
4924 {
4925 rtx_insn *insn;
4926 rtx_insn *split_point = NULL;
4927 rtx_insn *last = NULL;
4928 bool found_code = false;
4929
4930 FOR_BB_INSNS (bb, insn)
4931 {
4932 if (any_condjump_p (insn))
4933 split_point = last;
4934 else if (NONDEBUG_INSN_P (insn))
4935 found_code = true;
4936 last = insn;
4937 }
4938
4939 /* Did not find everything. */
4940 if (found_code && split_point)
4941 return split_block (bb, split_point)->dest;
4942 else
4943 return NULL;
4944 }
4945
4946 /* Return 1 if BB ends with a call, possibly followed by some
4947 instructions that must stay with the call, 0 otherwise. */
4948
4949 static bool
4950 rtl_block_ends_with_call_p (basic_block bb)
4951 {
4952 rtx_insn *insn = BB_END (bb);
4953
4954 while (!CALL_P (insn)
4955 && insn != BB_HEAD (bb)
4956 && (keep_with_call_p (insn)
4957 || NOTE_P (insn)
4958 || DEBUG_INSN_P (insn)))
4959 insn = PREV_INSN (insn);
4960 return (CALL_P (insn));
4961 }
4962
4963 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4964
4965 static bool
4966 rtl_block_ends_with_condjump_p (const_basic_block bb)
4967 {
4968 return any_condjump_p (BB_END (bb));
4969 }
4970
4971 /* Return true if we need to add fake edge to exit.
4972 Helper function for rtl_flow_call_edges_add. */
4973
4974 static bool
4975 need_fake_edge_p (const rtx_insn *insn)
4976 {
4977 if (!INSN_P (insn))
4978 return false;
4979
4980 if ((CALL_P (insn)
4981 && !SIBLING_CALL_P (insn)
4982 && !find_reg_note (insn, REG_NORETURN, NULL)
4983 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4984 return true;
4985
4986 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4987 && MEM_VOLATILE_P (PATTERN (insn)))
4988 || (GET_CODE (PATTERN (insn)) == PARALLEL
4989 && asm_noperands (insn) != -1
4990 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4991 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4992 }
4993
4994 /* Add fake edges to the function exit for any non constant and non noreturn
4995 calls, volatile inline assembly in the bitmap of blocks specified by
4996 BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks
4997 that were split.
4998
4999 The goal is to expose cases in which entering a basic block does not imply
5000 that all subsequent instructions must be executed. */
5001
5002 static int
5003 rtl_flow_call_edges_add (sbitmap blocks)
5004 {
5005 int i;
5006 int blocks_split = 0;
5007 int last_bb = last_basic_block_for_fn (cfun);
5008 bool check_last_block = false;
5009
5010 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
5011 return 0;
5012
5013 if (! blocks)
5014 check_last_block = true;
5015 else
5016 check_last_block = bitmap_bit_p (blocks,
5017 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
5018
5019 /* In the last basic block, before epilogue generation, there will be
5020 a fallthru edge to EXIT. Special care is required if the last insn
5021 of the last basic block is a call because make_edge folds duplicate
5022 edges, which would result in the fallthru edge also being marked
5023 fake, which would result in the fallthru edge being removed by
5024 remove_fake_edges, which would result in an invalid CFG.
5025
5026 Moreover, we can't elide the outgoing fake edge, since the block
5027 profiler needs to take this into account in order to solve the minimal
5028 spanning tree in the case that the call doesn't return.
5029
5030 Handle this by adding a dummy instruction in a new last basic block. */
5031 if (check_last_block)
5032 {
5033 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
5034 rtx_insn *insn = BB_END (bb);
5035
5036 /* Back up past insns that must be kept in the same block as a call. */
5037 while (insn != BB_HEAD (bb)
5038 && keep_with_call_p (insn))
5039 insn = PREV_INSN (insn);
5040
5041 if (need_fake_edge_p (insn))
5042 {
5043 edge e;
5044
5045 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
5046 if (e)
5047 {
5048 insert_insn_on_edge (gen_use (const0_rtx), e);
5049 commit_edge_insertions ();
5050 }
5051 }
5052 }
5053
5054 /* Now add fake edges to the function exit for any non constant
5055 calls since there is no way that we can determine if they will
5056 return or not... */
5057
5058 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
5059 {
5060 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
5061 rtx_insn *insn;
5062 rtx_insn *prev_insn;
5063
5064 if (!bb)
5065 continue;
5066
5067 if (blocks && !bitmap_bit_p (blocks, i))
5068 continue;
5069
5070 for (insn = BB_END (bb); ; insn = prev_insn)
5071 {
5072 prev_insn = PREV_INSN (insn);
5073 if (need_fake_edge_p (insn))
5074 {
5075 edge e;
5076 rtx_insn *split_at_insn = insn;
5077
5078 /* Don't split the block between a call and an insn that should
5079 remain in the same block as the call. */
5080 if (CALL_P (insn))
5081 while (split_at_insn != BB_END (bb)
5082 && keep_with_call_p (NEXT_INSN (split_at_insn)))
5083 split_at_insn = NEXT_INSN (split_at_insn);
5084
5085 /* The handling above of the final block before the epilogue
5086 should be enough to verify that there is no edge to the exit
5087 block in CFG already. Calling make_edge in such case would
5088 cause us to mark that edge as fake and remove it later. */
5089
5090 if (flag_checking && split_at_insn == BB_END (bb))
5091 {
5092 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
5093 gcc_assert (e == NULL);
5094 }
5095
5096 /* Note that the following may create a new basic block
5097 and renumber the existing basic blocks. */
5098 if (split_at_insn != BB_END (bb))
5099 {
5100 e = split_block (bb, split_at_insn);
5101 if (e)
5102 blocks_split++;
5103 }
5104
5105 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
5106 ne->probability = profile_probability::guessed_never ();
5107 }
5108
5109 if (insn == BB_HEAD (bb))
5110 break;
5111 }
5112 }
5113
5114 if (blocks_split)
5115 verify_flow_info ();
5116
5117 return blocks_split;
5118 }
5119
5120 /* Add COMP_RTX as a condition at end of COND_BB. FIRST_HEAD is
5121 the conditional branch target, SECOND_HEAD should be the fall-thru
5122 there is no need to handle this here the loop versioning code handles
5123 this. the reason for SECON_HEAD is that it is needed for condition
5124 in trees, and this should be of the same type since it is a hook. */
5125 static void
5126 rtl_lv_add_condition_to_bb (basic_block first_head ,
5127 basic_block second_head ATTRIBUTE_UNUSED,
5128 basic_block cond_bb, void *comp_rtx)
5129 {
5130 rtx_code_label *label;
5131 rtx_insn *seq, *jump;
5132 rtx op0 = XEXP ((rtx)comp_rtx, 0);
5133 rtx op1 = XEXP ((rtx)comp_rtx, 1);
5134 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
5135 machine_mode mode;
5136
5137
5138 label = block_label (first_head);
5139 mode = GET_MODE (op0);
5140 if (mode == VOIDmode)
5141 mode = GET_MODE (op1);
5142
5143 start_sequence ();
5144 op0 = force_operand (op0, NULL_RTX);
5145 op1 = force_operand (op1, NULL_RTX);
5146 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
5147 profile_probability::uninitialized ());
5148 jump = get_last_insn ();
5149 JUMP_LABEL (jump) = label;
5150 LABEL_NUSES (label)++;
5151 seq = get_insns ();
5152 end_sequence ();
5153
5154 /* Add the new cond, in the new head. */
5155 emit_insn_after (seq, BB_END (cond_bb));
5156 }
5157
5158
5159 /* Given a block B with unconditional branch at its end, get the
5160 store the return the branch edge and the fall-thru edge in
5161 BRANCH_EDGE and FALLTHRU_EDGE respectively. */
5162 static void
5163 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
5164 edge *fallthru_edge)
5165 {
5166 edge e = EDGE_SUCC (b, 0);
5167
5168 if (e->flags & EDGE_FALLTHRU)
5169 {
5170 *fallthru_edge = e;
5171 *branch_edge = EDGE_SUCC (b, 1);
5172 }
5173 else
5174 {
5175 *branch_edge = e;
5176 *fallthru_edge = EDGE_SUCC (b, 1);
5177 }
5178 }
5179
5180 void
5181 init_rtl_bb_info (basic_block bb)
5182 {
5183 gcc_assert (!bb->il.x.rtl);
5184 bb->il.x.head_ = NULL;
5185 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
5186 }
5187
5188 static bool
5189 rtl_bb_info_initialized_p (basic_block bb)
5190 {
5191 return bb->il.x.rtl;
5192 }
5193
5194 /* Returns true if it is possible to remove edge E by redirecting
5195 it to the destination of the other edge from E->src. */
5196
5197 static bool
5198 rtl_can_remove_branch_p (const_edge e)
5199 {
5200 const_basic_block src = e->src;
5201 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5202 const rtx_insn *insn = BB_END (src);
5203 rtx set;
5204
5205 /* The conditions are taken from try_redirect_by_replacing_jump. */
5206 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5207 return false;
5208
5209 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5210 return false;
5211
5212 if (BB_PARTITION (src) != BB_PARTITION (target))
5213 return false;
5214
5215 if (!onlyjump_p (insn)
5216 || tablejump_p (insn, NULL, NULL))
5217 return false;
5218
5219 set = single_set (insn);
5220 if (!set || side_effects_p (set))
5221 return false;
5222
5223 return true;
5224 }
5225
5226 static basic_block
5227 rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
5228 {
5229 bb = cfg_layout_duplicate_bb (bb, id);
5230 bb->aux = NULL;
5231 return bb;
5232 }
5233
5234 /* Do book-keeping of basic block BB for the profile consistency checker.
5235 Store the counting in RECORD. */
5236 static void
5237 rtl_account_profile_record (basic_block bb, struct profile_record *record)
5238 {
5239 rtx_insn *insn;
5240 FOR_BB_INSNS (bb, insn)
5241 if (INSN_P (insn))
5242 {
5243 record->size += insn_cost (insn, false);
5244 if (bb->count.initialized_p ())
5245 record->time
5246 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5247 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5248 record->time
5249 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5250 }
5251 }
5252
5253 /* Implementation of CFG manipulation for linearized RTL. */
5254 struct cfg_hooks rtl_cfg_hooks = {
5255 "rtl",
5256 rtl_verify_flow_info,
5257 rtl_dump_bb,
5258 rtl_dump_bb_for_graph,
5259 rtl_create_basic_block,
5260 rtl_redirect_edge_and_branch,
5261 rtl_redirect_edge_and_branch_force,
5262 rtl_can_remove_branch_p,
5263 rtl_delete_block,
5264 rtl_split_block,
5265 rtl_move_block_after,
5266 rtl_can_merge_blocks, /* can_merge_blocks_p */
5267 rtl_merge_blocks,
5268 rtl_predict_edge,
5269 rtl_predicted_by_p,
5270 cfg_layout_can_duplicate_bb_p,
5271 rtl_duplicate_bb,
5272 rtl_split_edge,
5273 rtl_make_forwarder_block,
5274 rtl_tidy_fallthru_edge,
5275 rtl_force_nonfallthru,
5276 rtl_block_ends_with_call_p,
5277 rtl_block_ends_with_condjump_p,
5278 rtl_flow_call_edges_add,
5279 NULL, /* execute_on_growing_pred */
5280 NULL, /* execute_on_shrinking_pred */
5281 NULL, /* duplicate loop for trees */
5282 NULL, /* lv_add_condition_to_bb */
5283 NULL, /* lv_adjust_loop_header_phi*/
5284 NULL, /* extract_cond_bb_edges */
5285 NULL, /* flush_pending_stmts */
5286 rtl_block_empty_p, /* block_empty_p */
5287 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5288 rtl_account_profile_record,
5289 };
5290
5291 /* Implementation of CFG manipulation for cfg layout RTL, where
5292 basic block connected via fallthru edges does not have to be adjacent.
5293 This representation will hopefully become the default one in future
5294 version of the compiler. */
5295
5296 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5297 "cfglayout mode",
5298 rtl_verify_flow_info_1,
5299 rtl_dump_bb,
5300 rtl_dump_bb_for_graph,
5301 cfg_layout_create_basic_block,
5302 cfg_layout_redirect_edge_and_branch,
5303 cfg_layout_redirect_edge_and_branch_force,
5304 rtl_can_remove_branch_p,
5305 cfg_layout_delete_block,
5306 cfg_layout_split_block,
5307 rtl_move_block_after,
5308 cfg_layout_can_merge_blocks_p,
5309 cfg_layout_merge_blocks,
5310 rtl_predict_edge,
5311 rtl_predicted_by_p,
5312 cfg_layout_can_duplicate_bb_p,
5313 cfg_layout_duplicate_bb,
5314 cfg_layout_split_edge,
5315 rtl_make_forwarder_block,
5316 NULL, /* tidy_fallthru_edge */
5317 rtl_force_nonfallthru,
5318 rtl_block_ends_with_call_p,
5319 rtl_block_ends_with_condjump_p,
5320 rtl_flow_call_edges_add,
5321 NULL, /* execute_on_growing_pred */
5322 NULL, /* execute_on_shrinking_pred */
5323 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5324 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5325 NULL, /* lv_adjust_loop_header_phi*/
5326 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5327 NULL, /* flush_pending_stmts */
5328 rtl_block_empty_p, /* block_empty_p */
5329 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5330 rtl_account_profile_record,
5331 };
5332
5333 #include "gt-cfgrtl.h"
5334
5335 #if __GNUC__ >= 10
5336 # pragma GCC diagnostic pop
5337 #endif