/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated.)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
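
/* An illustrative sketch of edge forwarding (the labels A, F and C below
   are invented for this example, not taken from real RTL): a forwarder
   block F containing only a jump to C is bypassed by redirecting each
   edge entering F,

	A: if (cond) goto F;		A: if (cond) goto C;
	   ...			=>	   ...
	F: goto C;			F: goto C;

   after which F, having become unreachable, is removed.  */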

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "cfglayout.h"
#include "expr.h"

/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if BB is a forwarder block, to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 1,
  BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static bool tail_recursion_label_p (rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);

/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
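
/* As a sketch of the transformation (the label names below are invented
   for this example), a conditional jump around an unconditional jump

	if (cond) goto L2;		if (! cond) goto L1;
	goto L1;		=>	L2: ...
	L2: ...

   becomes an inverted conditional branch targeting the destination of
   the unconditional jump, and the block holding that jump is deleted.  */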

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;
  rtx insn, next;
  rtx end;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (jump_block->partition != jump_dest_block->partition
          || cbranch_jump_edge->crossing_edge))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  end = BB_END (jump_block);
  /* Deleting a block may produce an unreachable-code warning even when we
     are not deleting anything live.  Suppress it by moving all the line
     number notes out of the block.  */
  for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
       insn = next)
    {
      next = NEXT_INSN (insn);
      if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
        {
          if (insn == BB_END (jump_block))
            {
              BB_END (jump_block) = PREV_INSN (insn);
              if (insn == end)
                break;
            }
          reorder_insns_nobb (insn, insn, end);
          end = insn;
        }
    }
  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */
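
/* For example (the register numbers here are invented for this sketch):
   processing (set (reg 10) (reg 11)) marks register 10 in NONEQUAL unless
   cselib can prove the set to be a no-op, while (clobber (reg 10)) clears
   the mark, since the clobbered value is dead and need not match.  */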

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know
         the value is dead, so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
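
/* A sketch of the idea (the comparison below is invented for this
   example): if E->SRC ends with a branch conditional on
   (eq (reg 10) (const_int 0)), B has no side effects, and B ends with a
   branch on the same comparison, then the outcome of B's branch is
   already determined when B is entered via E, so the edge B will take
   can be returned without executing B at all.  */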

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  int i;
  regset nonequal;
  bool failed = false;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
    return NULL;
  if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short-circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
        return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC ();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register whose value is not known to be
     equal to that in the former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

 failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge e, next, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding the
         fallthru edge to the same place as the branch edge of a
         conditional branch, turning the conditional branch into an
         unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      target = first = e->dest;
      counter = 0;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;

          if (FORWARDER_BLOCK_P (target)
              && target->succ->dest != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              if (target == target->succ->dest)
                counter = n_basic_blocks;
              new_target = target->succ->dest;
            }

          /* Thread over only one edge at a time to simplify updating
             of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = xmalloc (sizeof (*threaded_edges)
                                              * n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  if (nthreaded_edges >= n_basic_blocks)
                    abort ();
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          /* Avoid killing loop pre-headers, as that is where the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of the block and the CODE_LABEL of the loop; for
             non-forwarders it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize)
            {
              rtx insn = (target->succ->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

              if (GET_CODE (insn) != NOTE)
                insn = NEXT_INSN (insn);

              for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (GET_CODE (insn) == NOTE)
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && GET_CODE (insn) == NOTE
                  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          /* Don't force if target is the exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

          do
            {
              edge t;

              first->count -= edge_count;
              if (first->count < 0)
                first->count = 0;
              first->frequency -= edge_frequency;
              if (first->frequency < 0)
                first->frequency = 0;
              if (first->succ->succ_next)
                {
                  edge e;
                  int prob;
                  if (n >= nthreaded_edges)
                    abort ();
                  t = threaded_edges[n++];
                  if (t->src != first)
                    abort ();
                  if (first->frequency)
                    prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
                  else
                    prob = 0;
                  if (prob > t->probability)
                    prob = t->probability;
                  t->probability -= prob;
                  prob = REG_BR_PROB_BASE - prob;
                  if (prob <= 0)
                    {
                      first->succ->probability = REG_BR_PROB_BASE;
                      first->succ->succ_next->probability = 0;
                    }
                  else
                    for (e = first->succ; e; e = e->succ_next)
                      e->probability = ((e->probability * REG_BR_PROB_BASE)
                                        / (double) prob);
                  update_br_prob_note (first);
                }
              else
                {
                  /* It is possible that as the result of threading we've
                     removed the edge, as it was threaded into the fallthru
                     edge.  Avoid getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges[n]->src)
                    n++;
                  t = first->succ;
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
        }
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}

/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (rtx label)
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (a->partition != b->partition
          || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
    abort ();

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
          || a->partition != b->partition))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly moves
   a basic block away or introduces another optimization possibility,
   return the basic block just before B so that cleanup_cfg doesn't need
   to iterate.

   It may be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the relative
   ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (BB_HEAD (c)) == CODE_LABEL
      && tail_recursion_label_p (BB_HEAD (c)))
    return NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
          || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
          || b->partition != c->partition))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}

/* Remove the memory attributes of MEM expressions X and Y
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
          set_mem_size (y, MEM_SIZE (x));

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* The two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}


/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (GET_CODE (i1) == CALL_INSN
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);
          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }
    }

  return false;
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
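
/* For instance, if BB1 ends with the insns I3; I4; I5 and BB2 ends with
   J2; J3; J4, where I3/J2, I4/J3 and I5/J4 match according to
   insns_match_p, then 3 is returned with *F1 = I3 and *F2 = J2.  (The
   insn names are placeholders for this sketch.)  */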

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
                      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
        i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
        i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      if (!insns_match_p (mode, i1, i2))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
        last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (bb1->succ && !bb1->succ->succ_next
      && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
    return (bb2->succ && !bb2->succ->succ_next
            && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
          || !bb2->succ->succ_next
          || bb2->succ->succ_next->succ_next
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = f1->dest->succ;

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && !optimize_size
          && maybe_hot_bb_p (bb1)
          && maybe_hot_bb_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

#ifndef CASE_DROPS_THROUGH
  /* Check whether there are tablejumps at the ends of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
        && tablejump_p (BB_END (bb2), &label2, &table2)
        && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
        /* The labels should never be the same rtx.  If they really are
           the same, the jump tables are the same too.  So disable
           crossjumping of blocks BB1 and BB2 because when deleting the
           common insns at the end of BB1 by delete_basic_block () the
           jump table would be deleted too.  */
        /* If LABEL2 is referenced in BB1->END do not do anything
           because we would lose information when replacing
           LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
        if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
          {
            /* Set IDENTICAL to true when the tables are identical.  */
            bool identical = false;
            rtx p1, p2;

            p1 = PATTERN (table1);
            p2 = PATTERN (table2);
            if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
              {
                identical = true;
              }
            else if (GET_CODE (p1) == ADDR_DIFF_VEC
                     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
              {
                int i;

                identical = true;
                for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                    identical = false;
              }

            if (identical)
              {
                replace_label_data rr;
                bool match;

                /* Temporarily replace references to LABEL1 with LABEL2
                   in BB1->END so that we can compare the instructions.  */
                rr.r1 = label1;
                rr.r2 = label2;
                rr.update_label_nuses = false;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
                if (dump_file && match)
                  fprintf (dump_file,
                           "Tablejumps in bb %i and %i match.\n",
                           bb1->index, bb2->index);

                /* Set the original label in BB1->END because when deleting
                   a block whose end is a tablejump, the tablejump referenced
                   from the instruction is deleted too.  */
                rr.r1 = label2;
                rr.r2 = label1;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                return match;
              }
          }
        return false;
      }
  }
#endif

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2
      || nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? fallthru1->dest->succ->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? fallthru2->dest->succ->dest : fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of the edges as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
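
/* For example, if E1->SRC and E2->SRC both end with the same three insns
   before reaching the common destination, E2->SRC is split before those
   insns (unless they already start the block), the matched tail of
   E1->SRC is split off and deleted, and E1->SRC is redirected to the
   split point, so the sequence is emitted only once.  (A sketch of the
   common case; the profile updating details follow in the code.)  */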

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    e1 = src1->pred, src1 = e1->src;

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    e2 = src2->pred, src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

#ifndef CASE_DROPS_THROUGH
  /* Here we know that the insns at the end of SRC1 which are common with
     SRC2 will be deleted.
     If we have tablejumps at the ends of SRC1 and SRC2, they have already
     been compared for equivalence in outgoing_edges_match (), so replace
     the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
        && tablejump_p (BB_END (src2), &label2, &table2)
        && label1 != label2)
      {
        replace_label_data rr;
        rtx insn;

        /* Replace references to LABEL1 with LABEL2.  */
        rr.r1 = label1;
        rr.r2 = label2;
        rr.update_label_nuses = true;
        for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
          {
            /* Do not replace the label in SRC1->END because when deleting
               a block whose end is a tablejump, the tablejump referenced
               from the instruction is deleted too.  */
            if (insn != BB_END (src1))
              for_each_rtx (&insn, replace_label, &rr);
          }
      }
  }
#endif

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = d2->succ->dest;
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          s->dest->succ->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          s2->dest->succ->count -= s2->count;
          if (s2->dest->succ->count < 0)
            s2->dest->succ->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);

  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = redirect_from->succ->dest;

  redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;
  int n = 0, max;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (bb->pred->src->partition != bb->pred->pred_next->src->partition
          || bb->pred->crossing_edge))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
  for (e = bb->pred; e; e = e->pred_next, n++)
    {
      if (e->flags & EDGE_FALLTHRU)
        fallthru = e;
      if (n > max)
        return false;
    }

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;
          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(fallthru->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              nexte = bb->pred;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need to check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check only when the edge in question is the
         first successor of its source block A.  */
      if (e->src->succ != e)
        continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
        {
          nexte2 = e2->pred_next;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              nexte = bb->pred;
              break;
            }
        }
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

1802 static bool
1803 try_optimize_cfg (int mode)
1804 {
1805 bool changed_overall = false;
1806 bool changed;
1807 int iterations = 0;
1808 basic_block bb, b, next;
1809
1810 if (mode & CLEANUP_CROSSJUMP)
1811 add_noreturn_fake_exit_edges ();
1812
1813 FOR_EACH_BB (bb)
1814 update_forwarder_flag (bb);
1815
1816 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1817 clear_bb_flags ();
1818
1819 if (! targetm.cannot_modify_jumps_p ())
1820 {
1821 first_pass = true;
1822 /* Attempt to merge blocks as made possible by edge removal. If
1823 a block has only one successor, and the successor has only
1824 one predecessor, they may be combined. */
1825 do
1826 {
1827 changed = false;
1828 iterations++;
1829
1830 if (dump_file)
1831 fprintf (dump_file,
1832 "\n\ntry_optimize_cfg iteration %i\n\n",
1833 iterations);
1834
1835 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1836 {
1837 basic_block c;
1838 edge s;
1839 bool changed_here = false;
1840
1841 /* Delete trivially dead basic blocks. */
1842 while (b->pred == NULL)
1843 {
1844 c = b->prev_bb;
1845 if (dump_file)
1846 fprintf (dump_file, "Deleting block %i.\n",
1847 b->index);
1848
1849 delete_basic_block (b);
1850 if (!(mode & CLEANUP_CFGLAYOUT))
1851 changed = true;
1852 b = c;
1853 }
1854
1855 /* Remove code labels no longer used. Don't do this
1856 before CALL_PLACEHOLDER is removed, as some branches
1857 may be hidden within it. */
1858 if (b->pred->pred_next == NULL
1859 && (b->pred->flags & EDGE_FALLTHRU)
1860 && !(b->pred->flags & EDGE_COMPLEX)
1861 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1862 && (!(mode & CLEANUP_PRE_SIBCALL)
1863 || !tail_recursion_label_p (BB_HEAD (b)))
1864 /* If the previous block ends with a branch to this
1865 block, we can't delete the label. Normally this
1866 is a condjump that is yet to be simplified, but
1867 if CASE_DROPS_THRU, this can be a tablejump with
1868 some element going to the same place as the
1869 default (fallthru). */
1870 && (b->pred->src == ENTRY_BLOCK_PTR
1871 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1872 || ! label_is_jump_target_p (BB_HEAD (b),
1873 BB_END (b->pred->src))))
1874 {
1875 rtx label = BB_HEAD (b);
1876
1877 delete_insn_chain (label, label);
1878 /* If the label is undeletable, move it after the
1879 BASIC_BLOCK note. */
1880 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1881 {
1882 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1883
1884 reorder_insns_nobb (label, label, bb_note);
1885 BB_HEAD (b) = bb_note;
1886 }
1887 if (dump_file)
1888 fprintf (dump_file, "Deleted label in block %i.\n",
1889 b->index);
1890 }
1891
1892 /* If we fall through an empty block, we can remove it. */
1893 if (!(mode & CLEANUP_CFGLAYOUT)
1894 && b->pred->pred_next == NULL
1895 && (b->pred->flags & EDGE_FALLTHRU)
1896 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1897 && FORWARDER_BLOCK_P (b)
1898 /* Note that FORWARDER_BLOCK_P being true ensures
1899 that this block has a single successor. */
1900 && (b->succ->flags & EDGE_FALLTHRU)
1901 && n_basic_blocks > 1)
1902 {
1903 if (dump_file)
1904 fprintf (dump_file,
1905 "Deleting fallthru block %i.\n",
1906 b->index);
1907
1908 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1909 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1910 delete_basic_block (b);
1911 changed = true;
1912 b = c;
1913 }
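/* Editorial sketch of the transformation above (added comment, not in the
   original source): with A falling through to the empty, label-less
   forwarder B, which in turn falls through to C, the edge A->B is
   redirected to C and B is deleted, turning A -> B -> C into A -> C. */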
1914
1915 if ((s = b->succ) != NULL
1916 && s->succ_next == NULL
1917 && !(s->flags & EDGE_COMPLEX)
1918 && (c = s->dest) != EXIT_BLOCK_PTR
1919 && c->pred->pred_next == NULL
1920 && b != c)
1921 {
1922 /* When not in cfg_layout mode, use code aware of INSN
1923 reordering. This code may create new basic blocks, so it
1924 does not fit the merge_blocks interface; it is kept here in
1925 the hope that it will become unnecessary once more of the
1926 compiler is converted to use cfg_layout mode. */
1927
1928 if ((mode & CLEANUP_CFGLAYOUT)
1929 && can_merge_blocks_p (b, c))
1930 {
1931 merge_blocks (b, c);
1932 update_forwarder_flag (b);
1933 changed_here = true;
1934 }
1935 else if (!(mode & CLEANUP_CFGLAYOUT)
1936 /* If the jump insn has side effects,
1937 we can't kill the edge. */
1938 && (GET_CODE (BB_END (b)) != JUMP_INSN
1939 || (reload_completed
1940 ? simplejump_p (BB_END (b))
1941 : onlyjump_p (BB_END (b))))
1942 && (next = merge_blocks_move (s, b, c, mode)))
1943 {
1944 b = next;
1945 changed_here = true;
1946 }
1947 }
1948
1949 /* Simplify branch over branch. */
1950 if ((mode & CLEANUP_EXPENSIVE)
1951 && !(mode & CLEANUP_CFGLAYOUT)
1952 && try_simplify_condjump (b))
1953 changed_here = true;
1954
1955 /* If B has a single outgoing edge, but uses a
1956 non-trivial jump instruction without side-effects, we
1957 can either delete the jump entirely, or replace it
1958 with a simple unconditional jump. */
1959 if (b->succ
1960 && ! b->succ->succ_next
1961 && b->succ->dest != EXIT_BLOCK_PTR
1962 && onlyjump_p (BB_END (b))
1963 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1964 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1965 (mode & CLEANUP_CFGLAYOUT) != 0))
1966 {
1967 update_forwarder_flag (b);
1968 changed_here = true;
1969 }
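/* E.g. (editorial example, not in the original source): a conditional
   jump whose taken and fallthru arms were forwarded to the same
   destination now has a single successor; try_redirect_by_replacing_jump
   above turns it into a simple unconditional jump, or removes it entirely
   when the destination is the fallthru block. */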
1970
1971 /* Simplify branch to branch. */
1972 if (try_forward_edges (mode, b))
1973 changed_here = true;
1974
1975 /* Look for shared code between blocks. */
1976 if ((mode & CLEANUP_CROSSJUMP)
1977 && try_crossjump_bb (mode, b))
1978 changed_here = true;
1979
1980 /* Don't get confused by the index shift caused by
1981 deleting blocks. */
1982 if (!changed_here)
1983 b = b->next_bb;
1984 else
1985 changed = true;
1986 }
1987
1988 if ((mode & CLEANUP_CROSSJUMP)
1989 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1990 changed = true;
1991
1992 #ifdef ENABLE_CHECKING
1993 if (changed)
1994 verify_flow_info ();
1995 #endif
1996
1997 changed_overall |= changed;
1998 first_pass = false;
1999 }
2000 while (changed);
2001 }
2002
2003 if (mode & CLEANUP_CROSSJUMP)
2004 remove_fake_edges ();
2005
2006 clear_aux_for_blocks ();
2007
2008 return changed_overall;
2009 }
2010 \f
2011 /* Delete all unreachable basic blocks. */
2012
2013 bool
2014 delete_unreachable_blocks (void)
2015 {
2016 bool changed = false;
2017 basic_block b, next_bb;
2018
2019 find_unreachable_blocks ();
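/* Editorial note (added, not in the original source): as I understand it,
   find_unreachable_blocks walks the CFG from the entry block and sets
   BB_REACHABLE on every block it visits; the loop below deletes whatever
   was left unmarked. */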
2020
2021 /* Delete all unreachable basic blocks. */
2022
2023 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2024 {
2025 next_bb = b->next_bb;
2026
2027 if (!(b->flags & BB_REACHABLE))
2028 {
2029 delete_basic_block (b);
2030 changed = true;
2031 }
2032 }
2033
2034 if (changed)
2035 tidy_fallthru_edges ();
2036 return changed;
2037 }
2038
2039 /* Merge sequential blocks if possible. */
2040
2041 bool
2042 merge_seq_blocks (void)
2043 {
2044 basic_block bb;
2045 bool changed = false;
2046
2047 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2048 {
2049 if (bb->succ
2050 && !bb->succ->succ_next
2051 && can_merge_blocks_p (bb, bb->succ->dest))
2052 {
2053 /* Merge the blocks and retry. */
2054 merge_blocks (bb, bb->succ->dest);
2055 changed = true;
2056 continue;
2057 }
2058
2059 bb = bb->next_bb;
2060 }
2061
2062 return changed;
2063 }
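/* Editorial note (added comment, not in the original source): because the
   loop above retries from the same block after each merge ("continue"
   without advancing BB), a straight-line chain B1 -> B2 -> B3 collapses
   into a single block in one pass. */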
2064 \f
2065 /* Tidy the CFG: delete unreachable blocks and run the cleanups above. */
2066
2067 bool
2068 cleanup_cfg (int mode)
2069 {
2070 bool changed = false;
2071
2072 timevar_push (TV_CLEANUP_CFG);
2073 if (delete_unreachable_blocks ())
2074 {
2075 changed = true;
2076 /* We've possibly created trivially dead code. Clean it up
2077 now to expose more opportunities for try_optimize_cfg. */
2078 if (!(mode & (CLEANUP_NO_INSN_DEL
2079 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
2080 && !reload_completed)
2081 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2082 }
2083
2084 compact_blocks ();
2085
2086 while (try_optimize_cfg (mode))
2087 {
2088 delete_unreachable_blocks (); changed = true;
2089 if (mode & CLEANUP_UPDATE_LIFE)
2090 {
2091 /* Cleaning up the CFG introduces more opportunities for dead
2092 code removal, which in turn may introduce more opportunities
2093 for cleaning up the CFG. */
2094 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2095 PROP_DEATH_NOTES
2096 | PROP_SCAN_DEAD_CODE
2097 | PROP_KILL_DEAD_CODE
2098 | ((mode & CLEANUP_LOG_LINKS)
2099 ? PROP_LOG_LINKS : 0)))
2100 break;
2101 }
2102 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
2103 && (mode & CLEANUP_EXPENSIVE)
2104 && !reload_completed)
2105 {
2106 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2107 break;
2108 }
2109 else
2110 break;
2111 delete_dead_jumptables ();
2112 }
2113
2114 /* Kill the data we won't maintain. */
2115 free_EXPR_LIST_list (&label_value_list);
2116 timevar_pop (TV_CLEANUP_CFG);
2117
2118 return changed;
2119 }
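/* Editorial usage sketch (added, not in the original source): a
   hypothetical driver showing how a caller might invoke cleanup_cfg.
   The flag combination is illustrative only; real call sites live
   elsewhere in the compiler. Guarded with #if 0 so nothing here is
   compiled. */
#if 0
static void
example_cleanup_pass (void)
{
  /* Run the expensive cleanups plus crossjumping; if anything
     changed, data derived from the old CFG must be recomputed. */
  if (cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP))
    {
      /* ... recompute derived info here ... */
    }
}
#endif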