]>
Commit | Line | Data |
---|---|---|
65f34de5 | 1 | /* Control flow optimization code for GNU compiler. |
711789cc | 2 | Copyright (C) 1987-2013 Free Software Foundation, Inc. |
65f34de5 | 3 | |
4 | This file is part of GCC. | |
5 | ||
6 | GCC is free software; you can redistribute it and/or modify it under | |
7 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 8 | Software Foundation; either version 3, or (at your option) any later |
65f34de5 | 9 | version. |
10 | ||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
65f34de5 | 19 | |
365db11e | 20 | /* This file contains optimizer of the control flow. The main entry point is |
65f34de5 | 21 | cleanup_cfg. Following optimizations are performed: |
22 | ||
23 | - Unreachable blocks removal | |
3927afe0 | 24 | - Edge forwarding (edge to the forwarder block is forwarded to its |
4a82352a | 25 | successor. Simplification of the branch instruction is performed by |
65f34de5 | 26 | underlying infrastructure so branch can be converted to simplejump or |
35a3065a | 27 | eliminated). |
65f34de5 | 28 | - Cross jumping (tail merging) |
29 | - Conditional jump-around-simplejump simplification | |
30 | - Basic block merging. */ | |
31 | ||
32 | #include "config.h" | |
33 | #include "system.h" | |
805e22b2 | 34 | #include "coretypes.h" |
35 | #include "tm.h" | |
65f34de5 | 36 | #include "rtl.h" |
41a8aa41 | 37 | #include "tree.h" |
65f34de5 | 38 | #include "hard-reg-set.h" |
42fe97ed | 39 | #include "regs.h" |
65f34de5 | 40 | #include "insn-config.h" |
41 | #include "flags.h" | |
42 | #include "recog.h" | |
0b205f4c | 43 | #include "diagnostic-core.h" |
8cd78fca | 44 | #include "cselib.h" |
2a5b4716 | 45 | #include "params.h" |
20eee3f6 | 46 | #include "tm_p.h" |
e27e52e0 | 47 | #include "target.h" |
23a070f3 | 48 | #include "function.h" /* For inline functions in emit-rtl.h they need crtl. */ |
4c74e6d9 | 49 | #include "emit-rtl.h" |
77fce4cd | 50 | #include "tree-pass.h" |
51 | #include "cfgloop.h" | |
52 | #include "expr.h" | |
3072d30e | 53 | #include "df.h" |
1ae2ffa7 | 54 | #include "dce.h" |
4ff06051 | 55 | #include "dbgcnt.h" |
65f34de5 | 56 | |
/* Test whether BB has already been marked as a forwarder block (a block
   containing only an unconditional jump to its single successor).  */
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running first pass of try_optimize_cfg loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

/* Cross-jumping (tail-merging) helpers.  */
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx, rtx);

/* Block-merging and edge-forwarding helpers.  */
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
33dbe4d1 | 85 | \f |
86 | /* Set flags for newly created block. */ | |
87 | ||
88 | static void | |
4c9e08a4 | 89 | notice_new_block (basic_block bb) |
33dbe4d1 | 90 | { |
91 | if (!bb) | |
92 | return; | |
5cc577b6 | 93 | |
33dbe4d1 | 94 | if (forwarder_block_p (bb)) |
4fe5a223 | 95 | bb->flags |= BB_FORWARDER_BLOCK; |
33dbe4d1 | 96 | } |
97 | ||
98 | /* Recompute forwarder flag after block has been modified. */ | |
99 | ||
100 | static void | |
4c9e08a4 | 101 | update_forwarder_flag (basic_block bb) |
33dbe4d1 | 102 | { |
103 | if (forwarder_block_p (bb)) | |
4fe5a223 | 104 | bb->flags |= BB_FORWARDER_BLOCK; |
33dbe4d1 | 105 | else |
4fe5a223 | 106 | bb->flags &= ~BB_FORWARDER_BLOCK; |
33dbe4d1 | 107 | } |
65f34de5 | 108 | \f |
/* Simplify a conditional jump around an unconditional jump.
   CBRANCH_BLOCK ends in a conditional branch whose fallthru successor is
   a forwarder block holding only an unconditional jump; invert the
   condition so the forwarder block becomes dead and can be deleted.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  If this fails we can make no
     further progress, so bail out before touching the CFG.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
190 | \f | |
/* Attempt to prove that operation is NOOP using CSElib or mark the effect
   on register.  Used by jump threading.  EXP is a single side effect (a
   SET or CLOBBER); NONEQUAL accumulates the registers whose values may
   differ from the values in the predecessor block.  Return true when EXP
   has an effect we cannot model (a SET to a non-register destination),
   false otherwise.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it don't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  /* A hard register may span several register numbers; clear the
	     whole range it covers in this mode.  */
	  if (HARD_REGISTER_NUM_P (regno))
	    bitmap_clear_range (nonequal, regno,
				hard_regno_nregs[regno][GET_MODE (dest)]);
	  else
	    bitmap_clear_bit (nonequal, regno);
	}
      return false;

    case SET:
      /* A set whose destination already holds the source value (per
	 cselib) is a no-op and changes nothing.  */
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      /* Sets of the PC are the jump itself and are handled elsewhere.  */
      if (dest == pc_rtx)
	return false;
      /* A store to memory or other non-register destination cannot be
	 tracked; report failure.  */
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      if (HARD_REGISTER_NUM_P (regno))
	bitmap_set_range (nonequal, regno,
			  hard_regno_nregs[regno][GET_MODE (dest)]);
      else
	bitmap_set_bit (nonequal, regno);
      return false;

    default:
      return false;
    }
}
4ccdad8e | 236 | |
845bebef | 237 | /* Return nonzero if X is a register set in regset DATA. |
4ccdad8e | 238 | Called via for_each_rtx. */ |
239 | static int | |
4c9e08a4 | 240 | mentions_nonequal_regs (rtx *x, void *data) |
4ccdad8e | 241 | { |
242 | regset nonequal = (regset) data; | |
243 | if (REG_P (*x)) | |
244 | { | |
245 | int regno; | |
246 | ||
247 | regno = REGNO (*x); | |
248 | if (REGNO_REG_SET_P (nonequal, regno)) | |
249 | return 1; | |
250 | if (regno < FIRST_PSEUDO_REGISTER) | |
251 | { | |
67d6c12b | 252 | int n = hard_regno_nregs[regno][GET_MODE (*x)]; |
4ccdad8e | 253 | while (--n > 0) |
254 | if (REGNO_REG_SET_P (nonequal, regno + n)) | |
255 | return 1; | |
256 | } | |
257 | } | |
258 | return 0; | |
259 | } | |
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if exist, NULL otherwise.

   Both E->src and B must end in conditional jumps; we use cselib to prove
   that B's condition is implied by E->src's, so the jump in B can be
   bypassed ("threaded").  Blocks proven unthreadable are marked with
   BB_NONTHREADABLE_BLOCK so repeated queries exit early.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  /* If E is the branch-not-taken (fallthru) side of its jump, the
     condition must be considered in reversed form.  */
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were same basic block.
     Our goal is to prove that whole block is an NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* Any register left marked non-equal defeats the proof.  */
  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  /* Pick the edge B always takes: the branch edge when the (possibly
     reversed) dominating comparison selects the taken side, else the
     fallthru edge.  */
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
401 | \f | |
/* Attempt to forward edges leaving basic block B.
   Return true if successful.

   MODE is the set of CLEANUP_* flags in effect.  Each outgoing edge is
   walked through chains of forwarder blocks (and, when CLEANUP_THREADING
   is set, through jumps provable by thread_jump) to its final target,
   then redirected there directly.  Profile counts along the bypassed
   chain are updated to remove the redirected edge's contribution.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter, goto_locus;
      bool threaded = false;
      int nthreaded_edges = 0;
      /* Threading is only worthwhile on the first pass or when the block
	 has been modified since the last df analysis.  */
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward fallthru
	 edge to the same place as the branch edge of conditional branch
	 and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      /* COUNTER bounds the chain walk; reaching n_basic_blocks means we
	 looped.  */
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return changed;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  int new_locus = single_succ_edge (target)->goto_locus;
		  int locus = goto_locus;

		  if (new_locus != UNKNOWN_LOCATION
		      && locus != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      rtx last;

		      if (new_locus != UNKNOWN_LOCATION)
			locus = new_locus;

		      last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);

		      /* Also compare against the locus of the last real
			 insn inside the forwarder block itself.  */
		      new_locus = last && INSN_P (last)
				  ? INSN_LOCATION (last) : 0;

		      if (new_locus != UNKNOWN_LOCATION
			  && locus != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (new_locus != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Allow to thread only over one edge at time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = apply_probability (b->frequency, edge_probability);

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  /* Clamp counts/frequencies at zero so profile data
		     never goes negative.  */
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
651 | \f | |
65f34de5 | 652 | |
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  /* A's jump to B is followed by a barrier that becomes redundant once
     the blocks are adjacent; it must be there, and we remove it.  */
  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
696 | ||
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
749 | ||
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves basic block away or introduces another optimization
   possibility, return basic block just before B so cleanup_cfg don't
   need to iterate.

   It may be good idea to return basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      /* Save the indices now; merge_blocks will free C.  */
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      /* Continue cleanup from the block before B, unless B is now the
	 first real block.  */
      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;

      /* Compute the continuation block before we move anything; skip C
	 since it is about to be relocated.  */
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  /* Cannot redirect the entry block's fallthru.  */
	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  /* Turn B's incoming fallthru into an explicit jump so B can
	     be moved away from its current position.  */
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}
859 | \f | |
1d133110 | 860 | |
/* Removes the memory attributes of MEM expression
   if they are not equal.  Walks X and Y in lockstep, recursing into
   matching sub-rtxes, and conservatively merges the attributes of any
   corresponding MEMs: differing attributes are cleared (or widened, for
   size/volatility) so that either insn can stand in for the other.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      /* If either side has no attributes, the merged result has none.  */
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  HOST_WIDE_INT mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      /* Alias set 0 is the conservative "may alias anything".  */
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      /* Different underlying decls/exprs: drop both the expr
		 and the offset, which is only meaningful relative to it.  */
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }
	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
		   || (MEM_OFFSET_KNOWN_P (x)
		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
	    {
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }

	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
	    {
	      /* Keep the larger size: safe for either access.  */
	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
	      set_mem_size (x, mem_size);
	      set_mem_size (y, mem_size);
	    }
	  else
	    {
	      clear_mem_size (x);
	      clear_mem_size (y);
	    }

	  /* Alignment: only the weaker guarantee survives.  */
	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }
  if (code == MEM)
    {
      /* Flag bits live outside MEM_ATTRS; merge them conservatively
	 (drop readonly/notrap guarantees, keep volatility).  */
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
	{
	  MEM_READONLY_P (x) = 0;
	  MEM_READONLY_P (y) = 0;
	}
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
	{
	  MEM_NOTRAP_P (x) = 0;
	  MEM_NOTRAP_P (y) = 0;
	}
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
	{
	  MEM_VOLATILE_P (x) = 1;
	  MEM_VOLATILE_P (y) = 1;
	}
    }

  /* Recurse over all operands of this rtx.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
972 | ||
973 | ||
764aef11 | 974 | /* Checks if patterns P1 and P2 are equivalent, apart from the possibly |
975 | different single sets S1 and S2. */ | |
1d133110 | 976 | |
977 | static bool | |
764aef11 | 978 | equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2) |
979 | { | |
980 | int i; | |
981 | rtx e1, e2; | |
982 | ||
983 | if (p1 == s1 && p2 == s2) | |
984 | return true; | |
985 | ||
986 | if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL) | |
987 | return false; | |
988 | ||
989 | if (XVECLEN (p1, 0) != XVECLEN (p2, 0)) | |
990 | return false; | |
991 | ||
992 | for (i = 0; i < XVECLEN (p1, 0); i++) | |
993 | { | |
994 | e1 = XVECEXP (p1, 0, i); | |
995 | e2 = XVECEXP (p2, 0, i); | |
996 | if (e1 == s1 && e2 == s2) | |
997 | continue; | |
998 | if (reload_completed | |
999 | ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2)) | |
1000 | continue; | |
1001 | ||
1002 | return false; | |
1003 | } | |
1004 | ||
1005 | return true; | |
1006 | } | |
1007 | ||
/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx i1, rtx i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
	? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  The note value must be a CONST_INT so the
     equivalence does not depend on register contents at the replacement
     point.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);
  if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
      || !CONST_INT_P (XEXP (note1, 0)))
    return dir_none;

  /* Everything in the patterns outside the two sets must match too.  */
  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}
1062 | ||
1063 | /* Merges directions A and B. */ | |
1064 | ||
1065 | static enum replace_direction | |
1066 | merge_dir (enum replace_direction a, enum replace_direction b) | |
1067 | { | |
1068 | /* Implements the following table: | |
1069 | |bo fw bw no | |
1070 | ---+----------- | |
1071 | bo |bo fw bw no | |
1072 | fw |-- fw no no | |
1073 | bw |-- -- bw no | |
1074 | no |-- -- -- no. */ | |
1075 | ||
1076 | if (a == b) | |
1077 | return a; | |
1078 | ||
1079 | if (a == dir_both) | |
1080 | return b; | |
1081 | if (b == dir_both) | |
1082 | return a; | |
1083 | ||
1084 | return dir_none; | |
1085 | } | |
1086 | ||
/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
	return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
	 have differing incoming stack levels.  */
      if (!frame_pointer_needed
	  && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
	return dir_none;
    }
  else if (p1 || p2)
    /* Only one insn has a REG_ARGS_SIZE note: stack levels differ.  */
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
	return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
	return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
	return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
	 otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
	{
	  rtx call = get_call_rtx_from (i1);
	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
	    {
	      rtx symbol = XEXP (XEXP (call, 0), 0);
	      if (SYMBOL_REF_DECL (symbol)
		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
		{
		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
		       == BUILT_IN_NORMAL)
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 >= BUILT_IN_ASAN_REPORT_LOAD1
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 <= BUILT_IN_ASAN_REPORT_STORE16)
		    return dir_none;
		}
	    }
	}
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      /* Collect the set of stack registers dying in each insn.  */
      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return dir_none;
    }
#endif

  /* Patterns that compare equal can replace each other in either
     direction.  */
  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  /* Otherwise fall back to the register-note based equivalence check.  */
  return can_replace_by (i1, i2);
}
1219 | \f | |
84c471f5 | 1220 | /* When comparing insns I1 and I2 in flow_find_cross_jump or |
1221 | flow_find_head_matching_sequence, ensure the notes match. */ | |
1222 | ||
1223 | static void | |
1224 | merge_notes (rtx i1, rtx i2) | |
1225 | { | |
1226 | /* If the merged insns have different REG_EQUAL notes, then | |
1227 | remove them. */ | |
1228 | rtx equiv1 = find_reg_equal_equiv_note (i1); | |
1229 | rtx equiv2 = find_reg_equal_equiv_note (i2); | |
1230 | ||
1231 | if (equiv1 && !equiv2) | |
1232 | remove_note (i1, equiv1); | |
1233 | else if (!equiv1 && equiv2) | |
1234 | remove_note (i2, equiv2); | |
1235 | else if (equiv1 && equiv2 | |
1236 | && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))) | |
1237 | { | |
1238 | remove_note (i1, equiv1); | |
1239 | remove_note (i2, equiv2); | |
1240 | } | |
1241 | } | |
1242 | ||
/* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
		       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      /* Still inside the block: just step backwards.  */
      if (*i1 != BB_HEAD (*bb1))
	{
	  *i1 = PREV_INSN (*i1);
	  continue;
	}

      /* At the block head; only continue if the caller allows crossing
	 into a fallthru predecessor.  */
      if (!follow_fallthru)
	return;

      /* Only follow a unique, non-entry fallthru predecessor that has
	 this block as its sole successor.  */
      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  || !single_succ_p (fallthru->src))
	return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}
1279 | ||
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
		      enum replace_direction *dir_p)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;
  rtx p1;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  /* Walk both blocks backwards in lockstep, extending the matched
     sequence while each pair of insns is compatible.  */
  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

	 This removes 4 duplicate insns.
	 [bb A] insn1            [bb C] insn1
		insn2                   insn2
	 [bb B] insn3                   insn3
		insn4                   insn4
		jump_insn               jump_insn

	 We could also replace all jumps to A by jumps to C, but that leaves B
	 alive, and removes only 2 duplicate insns.  In a subsequent crossjump
	 step, all jumps to B would be replaced with jumps to the middle of C,
	 achieving the same result with more effort.
	 So we allow only the first possibility, which means that we don't allow
	 fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
	dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
	dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      /* Narrow the allowed direction by what this insn pair permits;
	 stop once no direction remains (or, without DIR_P, once the
	 pair is not fully equivalent).  */
      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  /* Remember the previous match so the cc0 fixup below can
	     back up by one insn.  */
	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  afterlast_dir = last_dir;
	  last_dir = dir;
	  p1 = PATTERN (i1);
	  if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
	    ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}
1421 | ||
/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
				  rtx *f2, int stop_after)
{
  rtx i1, i2, last1, last2, beforelast1, beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  /* Count outgoing EH edges of each block for the sanity check below.  */
  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;

  /* Walk both blocks forward in lockstep, extending the matched
     sequence while each pair of insns is fully equivalent.  */
  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      /* Reached the end of either block without a real insn: done.  */
      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      /* Head matching only accepts exact equivalence, never one-way
	 replacement.  */
      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  /* Remember the previous match so the cc0 fixup below can
	     back up by one insn.  */
	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;
#endif

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
1523 | ||
1d133110 | 1524 | /* Return true iff outgoing edges of BB1 and BB2 match, together with |
1525 | the branch instruction. This means that if we commonize the control | |
1526 | flow before end of the basic block, the semantic remains unchanged. | |
65f34de5 | 1527 | |
1528 | We may assume that there exists one edge with a common destination. */ | |
1529 | ||
1530 | static bool | |
1d133110 | 1531 | outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) |
65f34de5 | 1532 | { |
ba38e12b | 1533 | int nehedges1 = 0, nehedges2 = 0; |
1534 | edge fallthru1 = 0, fallthru2 = 0; | |
1535 | edge e1, e2; | |
cd665a06 | 1536 | edge_iterator ei; |
ba38e12b | 1537 | |
efee62d1 | 1538 | /* If we performed shrink-wrapping, edges to the exit block can |
1f021f97 | 1539 | only be distinguished for JUMP_INSNs. The two paths may differ in |
1540 | whether they went through the prologue. Sibcalls are fine, we know | |
1541 | that we either didn't need or inserted an epilogue before them. */ | |
1542 | if (crtl->shrink_wrapped | |
34154e27 | 1543 | && single_succ_p (bb1) |
1544 | && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun) | |
1f021f97 | 1545 | && !JUMP_P (BB_END (bb1)) |
1546 | && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1)))) | |
1547 | return false; | |
1548 | ||
337b10d0 | 1549 | /* If BB1 has only one successor, we may be looking at either an |
1550 | unconditional jump, or a fake edge to exit. */ | |
ea091dfd | 1551 | if (single_succ_p (bb1) |
1552 | && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
6d7dc5b9 | 1553 | && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1)))) |
ea091dfd | 1554 | return (single_succ_p (bb2) |
1555 | && (single_succ_edge (bb2)->flags | |
1556 | & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
6d7dc5b9 | 1557 | && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2)))); |
65f34de5 | 1558 | |
1559 | /* Match conditional jumps - this may get tricky when fallthru and branch | |
1560 | edges are crossed. */ | |
cd665a06 | 1561 | if (EDGE_COUNT (bb1->succs) == 2 |
5496dbfc | 1562 | && any_condjump_p (BB_END (bb1)) |
1563 | && onlyjump_p (BB_END (bb1))) | |
65f34de5 | 1564 | { |
1d133110 | 1565 | edge b1, f1, b2, f2; |
1566 | bool reverse, match; | |
1567 | rtx set1, set2, cond1, cond2; | |
1568 | enum rtx_code code1, code2; | |
1569 | ||
cd665a06 | 1570 | if (EDGE_COUNT (bb2->succs) != 2 |
5496dbfc | 1571 | || !any_condjump_p (BB_END (bb2)) |
1572 | || !onlyjump_p (BB_END (bb2))) | |
26fb1781 | 1573 | return false; |
1d133110 | 1574 | |
1575 | b1 = BRANCH_EDGE (bb1); | |
1576 | b2 = BRANCH_EDGE (bb2); | |
1577 | f1 = FALLTHRU_EDGE (bb1); | |
1578 | f2 = FALLTHRU_EDGE (bb2); | |
1579 | ||
1580 | /* Get around possible forwarders on fallthru edges. Other cases | |
a0c938f0 | 1581 | should be optimized out already. */ |
1d133110 | 1582 | if (FORWARDER_BLOCK_P (f1->dest)) |
1583 | f1 = single_succ_edge (f1->dest); | |
1584 | ||
1585 | if (FORWARDER_BLOCK_P (f2->dest)) | |
1586 | f2 = single_succ_edge (f2->dest); | |
1587 | ||
1588 | /* To simplify use of this function, return false if there are | |
1589 | unneeded forwarder blocks. These will get eliminated later | |
1590 | during cleanup_cfg. */ | |
1591 | if (FORWARDER_BLOCK_P (f1->dest) | |
1592 | || FORWARDER_BLOCK_P (f2->dest) | |
1593 | || FORWARDER_BLOCK_P (b1->dest) | |
1594 | || FORWARDER_BLOCK_P (b2->dest)) | |
1595 | return false; | |
1596 | ||
1597 | if (f1->dest == f2->dest && b1->dest == b2->dest) | |
1598 | reverse = false; | |
1599 | else if (f1->dest == b2->dest && b1->dest == f2->dest) | |
1600 | reverse = true; | |
1601 | else | |
1602 | return false; | |
1603 | ||
1604 | set1 = pc_set (BB_END (bb1)); | |
1605 | set2 = pc_set (BB_END (bb2)); | |
1606 | if ((XEXP (SET_SRC (set1), 1) == pc_rtx) | |
1607 | != (XEXP (SET_SRC (set2), 1) == pc_rtx)) | |
1608 | reverse = !reverse; | |
1609 | ||
1610 | cond1 = XEXP (SET_SRC (set1), 0); | |
1611 | cond2 = XEXP (SET_SRC (set2), 0); | |
1612 | code1 = GET_CODE (cond1); | |
1613 | if (reverse) | |
1614 | code2 = reversed_comparison_code (cond2, BB_END (bb2)); | |
1615 | else | |
1616 | code2 = GET_CODE (cond2); | |
1617 | ||
1618 | if (code2 == UNKNOWN) | |
1619 | return false; | |
1620 | ||
1621 | /* Verify codes and operands match. */ | |
1622 | match = ((code1 == code2 | |
1623 | && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
1624 | && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
1625 | || (code1 == swap_condition (code2) | |
1626 | && rtx_renumbered_equal_p (XEXP (cond1, 1), | |
1627 | XEXP (cond2, 0)) | |
1628 | && rtx_renumbered_equal_p (XEXP (cond1, 0), | |
1629 | XEXP (cond2, 1)))); | |
1630 | ||
1631 | /* If we return true, we will join the blocks. Which means that | |
1632 | we will only have one branch prediction bit to work with. Thus | |
1633 | we require the existing branches to have probabilities that are | |
1634 | roughly similar. */ | |
1635 | if (match | |
0bfd8d5c | 1636 | && optimize_bb_for_speed_p (bb1) |
1637 | && optimize_bb_for_speed_p (bb2)) | |
1d133110 | 1638 | { |
1639 | int prob2; | |
1640 | ||
1641 | if (b1->dest == b2->dest) | |
1642 | prob2 = b2->probability; | |
1643 | else | |
1644 | /* Do not use f2 probability as f2 may be forwarded. */ | |
1645 | prob2 = REG_BR_PROB_BASE - b2->probability; | |
1646 | ||
1647 | /* Fail if the difference in probabilities is greater than 50%. | |
1648 | This rules out two well-predicted branches with opposite | |
1649 | outcomes. */ | |
1650 | if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2) | |
1651 | { | |
1652 | if (dump_file) | |
1653 | fprintf (dump_file, | |
1654 | "Outcomes of branch in bb %i and %i differ too much (%i %i)\n", | |
1655 | bb1->index, bb2->index, b1->probability, prob2); | |
1656 | ||
1657 | return false; | |
1658 | } | |
1659 | } | |
1660 | ||
1661 | if (dump_file && match) | |
1662 | fprintf (dump_file, "Conditionals in bb %i and %i match.\n", | |
1663 | bb1->index, bb2->index); | |
1664 | ||
1665 | return match; | |
65f34de5 | 1666 | } |
1667 | ||
edc2a478 | 1668 | /* Generic case - we are seeing a computed jump, table jump or trapping |
ba38e12b | 1669 | instruction. */ |
1670 | ||
f2756aab | 1671 | /* Check whether there are tablejumps in the end of BB1 and BB2. |
1672 | Return true if they are identical. */ | |
1673 | { | |
1674 | rtx label1, label2; | |
1675 | rtx table1, table2; | |
1676 | ||
5496dbfc | 1677 | if (tablejump_p (BB_END (bb1), &label1, &table1) |
1678 | && tablejump_p (BB_END (bb2), &label2, &table2) | |
f2756aab | 1679 | && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2))) |
1680 | { | |
1681 | /* The labels should never be the same rtx. If they really are same | |
1682 | the jump tables are same too. So disable crossjumping of blocks BB1 | |
1683 | and BB2 because when deleting the common insns in the end of BB1 | |
4ee9c684 | 1684 | by delete_basic_block () the jump table would be deleted too. */ |
cda612f5 | 1685 | /* If LABEL2 is referenced in BB1->END do not do anything |
f2756aab | 1686 | because we would loose information when replacing |
1687 | LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ | |
5496dbfc | 1688 | if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1))) |
f2756aab | 1689 | { |
1690 | /* Set IDENTICAL to true when the tables are identical. */ | |
1691 | bool identical = false; | |
1692 | rtx p1, p2; | |
1693 | ||
1694 | p1 = PATTERN (table1); | |
1695 | p2 = PATTERN (table2); | |
1696 | if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2)) | |
1697 | { | |
1698 | identical = true; | |
1699 | } | |
1700 | else if (GET_CODE (p1) == ADDR_DIFF_VEC | |
1701 | && (XVECLEN (p1, 1) == XVECLEN (p2, 1)) | |
1702 | && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2)) | |
1703 | && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3))) | |
1704 | { | |
1705 | int i; | |
1706 | ||
1707 | identical = true; | |
1708 | for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--) | |
1709 | if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i))) | |
1710 | identical = false; | |
1711 | } | |
1712 | ||
1d133110 | 1713 | if (identical) |
f2756aab | 1714 | { |
1d133110 | 1715 | replace_label_data rr; |
f2756aab | 1716 | bool match; |
1717 | ||
1d133110 | 1718 | /* Temporarily replace references to LABEL1 with LABEL2 |
f2756aab | 1719 | in BB1->END so that we could compare the instructions. */ |
1d133110 | 1720 | rr.r1 = label1; |
1721 | rr.r2 = label2; | |
1722 | rr.update_label_nuses = false; | |
1723 | for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
f2756aab | 1724 | |
764aef11 | 1725 | match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) |
1726 | == dir_both); | |
450d042a | 1727 | if (dump_file && match) |
1728 | fprintf (dump_file, | |
f2756aab | 1729 | "Tablejumps in bb %i and %i match.\n", |
1730 | bb1->index, bb2->index); | |
1731 | ||
1d133110 | 1732 | /* Set the original label in BB1->END because when deleting |
1733 | a block whose end is a tablejump, the tablejump referenced | |
1734 | from the instruction is deleted too. */ | |
1735 | rr.r1 = label2; | |
1736 | rr.r2 = label1; | |
1737 | for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
1738 | ||
f2756aab | 1739 | return match; |
1740 | } | |
1741 | } | |
1742 | return false; | |
1743 | } | |
1744 | } | |
f2756aab | 1745 | |
a7920b67 | 1746 | /* Find the last non-debug non-note instruction in each bb, except |
1747 | stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p | |
1748 | handles that case specially. old_insns_match_p does not handle | |
1749 | other types of instruction notes. */ | |
f73960eb | 1750 | rtx last1 = BB_END (bb1); |
1751 | rtx last2 = BB_END (bb2); | |
a7920b67 | 1752 | while (!NOTE_INSN_BASIC_BLOCK_P (last1) && |
1753 | (DEBUG_INSN_P (last1) || NOTE_P (last1))) | |
1754 | last1 = PREV_INSN (last1); | |
1755 | while (!NOTE_INSN_BASIC_BLOCK_P (last2) && | |
1756 | (DEBUG_INSN_P (last2) || NOTE_P (last2))) | |
1757 | last2 = PREV_INSN (last2); | |
1758 | gcc_assert (last1 && last2); | |
1759 | ||
ba38e12b | 1760 | /* First ensure that the instructions match. There may be many outgoing |
f2756aab | 1761 | edges so this test is generally cheaper. */ |
f73960eb | 1762 | if (old_insns_match_p (mode, last1, last2) != dir_both) |
ba38e12b | 1763 | return false; |
1764 | ||
1765 | /* Search the outgoing edges, ensure that the counts do match, find possible | |
1766 | fallthru and exception handling edges since these needs more | |
1767 | validation. */ | |
cd665a06 | 1768 | if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) |
1769 | return false; | |
1770 | ||
f73960eb | 1771 | bool nonfakeedges = false; |
cd665a06 | 1772 | FOR_EACH_EDGE (e1, ei, bb1->succs) |
ba38e12b | 1773 | { |
cd665a06 | 1774 | e2 = EDGE_SUCC (bb2, ei.index); |
a0c938f0 | 1775 | |
f73960eb | 1776 | if ((e1->flags & EDGE_FAKE) == 0) |
1777 | nonfakeedges = true; | |
1778 | ||
ba38e12b | 1779 | if (e1->flags & EDGE_EH) |
1780 | nehedges1++; | |
5cc577b6 | 1781 | |
ba38e12b | 1782 | if (e2->flags & EDGE_EH) |
1783 | nehedges2++; | |
5cc577b6 | 1784 | |
ba38e12b | 1785 | if (e1->flags & EDGE_FALLTHRU) |
1786 | fallthru1 = e1; | |
1787 | if (e2->flags & EDGE_FALLTHRU) | |
1788 | fallthru2 = e2; | |
1789 | } | |
5cc577b6 | 1790 | |
ba38e12b | 1791 | /* If number of edges of various types does not match, fail. */ |
cd665a06 | 1792 | if (nehedges1 != nehedges2 |
5cc577b6 | 1793 | || (fallthru1 != 0) != (fallthru2 != 0)) |
ba38e12b | 1794 | return false; |
1795 | ||
f73960eb | 1796 | /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors |
1797 | and the last real insn doesn't have REG_ARGS_SIZE note, don't | |
1798 | attempt to optimize, as the two basic blocks might have different | |
1799 | REG_ARGS_SIZE depths. For noreturn calls and unconditional | |
1800 | traps there should be REG_ARG_SIZE notes, they could be missing | |
1801 | for __builtin_unreachable () uses though. */ | |
1802 | if (!nonfakeedges | |
1803 | && !ACCUMULATE_OUTGOING_ARGS | |
1804 | && (!INSN_P (last1) | |
1805 | || !find_reg_note (last1, REG_ARGS_SIZE, NULL))) | |
1806 | return false; | |
1807 | ||
ba38e12b | 1808 | /* fallthru edges must be forwarded to the same destination. */ |
1809 | if (fallthru1) | |
1810 | { | |
1811 | basic_block d1 = (forwarder_block_p (fallthru1->dest) | |
ea091dfd | 1812 | ? single_succ (fallthru1->dest): fallthru1->dest); |
ba38e12b | 1813 | basic_block d2 = (forwarder_block_p (fallthru2->dest) |
ea091dfd | 1814 | ? single_succ (fallthru2->dest): fallthru2->dest); |
5cc577b6 | 1815 | |
ba38e12b | 1816 | if (d1 != d2) |
1817 | return false; | |
1818 | } | |
5cc577b6 | 1819 | |
79f00c53 | 1820 | /* Ensure the same EH region. */ |
1821 | { | |
5496dbfc | 1822 | rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0); |
1823 | rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0); | |
5cc577b6 | 1824 | |
79f00c53 | 1825 | if (!n1 && n2) |
1826 | return false; | |
1827 | ||
1828 | if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) | |
1829 | return false; | |
1830 | } | |
5cc577b6 | 1831 | |
89140b26 | 1832 | /* The same checks as in try_crossjump_to_edge. It is required for RTL |
1833 | version of sequence abstraction. */ | |
1834 | FOR_EACH_EDGE (e1, ei, bb2->succs) | |
1835 | { | |
1836 | edge e2; | |
1837 | edge_iterator ei; | |
1838 | basic_block d1 = e1->dest; | |
1839 | ||
1840 | if (FORWARDER_BLOCK_P (d1)) | |
1841 | d1 = EDGE_SUCC (d1, 0)->dest; | |
1842 | ||
1843 | FOR_EACH_EDGE (e2, ei, bb1->succs) | |
1844 | { | |
1845 | basic_block d2 = e2->dest; | |
1846 | if (FORWARDER_BLOCK_P (d2)) | |
1847 | d2 = EDGE_SUCC (d2, 0)->dest; | |
1848 | if (d1 == d2) | |
1849 | break; | |
1850 | } | |
1851 | ||
1852 | if (!e2) | |
1853 | return false; | |
1854 | } | |
1855 | ||
ba38e12b | 1856 | return true; |
65f34de5 | 1857 | } |
1858 | ||
89140b26 | 1859 | /* Returns true if BB basic block has a preserve label. */ |
1860 | ||
1861 | static bool | |
1862 | block_has_preserve_label (basic_block bb) | |
1863 | { | |
1864 | return (bb | |
1865 | && block_label (bb) | |
1866 | && LABEL_PRESERVE_P (block_label (bb))); | |
1867 | } | |
1868 | ||
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.

   MODE is the set of CLEANUP_* flags controlling this cleanup pass.
   Returns true if the CFG was changed.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2,
		       enum replace_direction dir)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (crtl->has_bb_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
      == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  NEWPOS1/NEWPOS2 receive the first insn of
     the matched tail in each block; DIR may be narrowed to the direction
     actually found.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  /* Remember the original blocks: the matched sequence may start in a
     predecessor (found through the forwarder search above), in which case
     SRC1/SRC2 are re-pointed at the blocks actually containing it.  */
  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);
  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  /* Normalize dir_backward to dir_forward by swapping the two sides, so
     the code below uniformly deletes the tail of SRC1 and redirects into
     SRC2.  */
  if (dir == dir_backward)
    {
#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
      SWAP (basic_block, osrc1, osrc2);
      SWAP (basic_block, src1, src2);
      SWAP (edge, e1, e2);
      SWAP (rtx, newpos1, newpos2);
#undef SWAP
    }

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (osrc1), &label1, &table1)
	&& tablejump_p (BB_END (osrc2), &label2, &table2)
	&& label1 != label2)
      {
	replace_label_data rr;
	rtx insn;

	/* Replace references to LABEL1 with LABEL2.  */
	rr.r1 = label1;
	rr.r2 = label2;
	rr.update_label_nuses = true;
	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	  {
	    /* Do not replace the label in SRC1->END because when deleting
	       a block whose end is a tablejump, the tablejump referenced
	       from the instruction is deleted too.  */
	    if (insn != BB_END (osrc1))
	      for_each_rtx (&insn, replace_label, &rr);
	  }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header (label, debug insns and a
	     note), so the split point lands on the first real insn.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  /* Outgoing-edge bookkeeping happens on OSRC2 when the match extended
     into a predecessor of the original SRC2.  */
  if (osrc2 == src2)
    redirect_edges_to = redirect_to;
  else
    redirect_edges_to = osrc2;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      /* Find the SRC1 successor edge S2 reaching the same (possibly
	 forwarded) destination; outgoing_edges_match already verified
	 such a correspondence exists -- TODO confirm.  */
      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  /* Clamp at zero: profile data may be inconsistent after
	     earlier transformations.  */
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      /* Merge the branch probabilities, weighting by block frequency
	 when available.  */
      if (!redirect_edges_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_edges_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_edges_to->frequency + src1->frequency));
    }

  /* Adjust count and frequency for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  tmp = redirect_to;
  do
    {
      tmp->count += src1->count;
      tmp->frequency += src1->frequency;
      if (tmp->frequency > BB_FREQ_MAX)
	tmp->frequency = BB_FREQ_MAX;
      if (tmp == redirect_edges_to)
	break;
      /* Walk the fallthru chain from REDIRECT_TO towards
	 REDIRECT_EDGES_TO.  */
      tmp = find_fallthru_edge (tmp->succs)->dest;
    }
  while (true);
  update_br_prob_note (redirect_edges_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  /* Split SRC1 just before the matched tail; the half containing the
     now-redundant tail (TO_REMOVE) is rewired to REDIRECT_TO and
     deleted.  */
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
2140 | ||
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.

   MODE is the set of CLEANUP_* flags for this pass.  The actual matching
   and redirection work is delegated to try_crossjump_to_edge; this
   function selects candidate predecessor-edge pairs and limits the
   amount of work done.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;

  /* Nothing to do if there is not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  /* Give up on blocks with too many predecessors: the pairwise scan
     below is quadratic in the edge count.  */
  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  /* Note: after a successful crossjump IX is reset to 0 to rescan all
     predecessors, since the edge list may have changed.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
    {
      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (fallthru->src->flags & BB_MODIFIED)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
	    {
	      changed = true;
	      ix = 0;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
	     direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      ix = 0;
	      break;
	    }
	}
    }

  /* Record globally that a crossjump happened; used by the caller to
     decide whether further cleanup iterations are worthwhile -- TODO
     confirm against cleanup_cfg.  */
  if (changed)
    crossjumps_occured = true;

  return changed;
}
2274 | ||
bc6adae4 | 2275 | /* Search the successors of BB for common insn sequences. When found, |
2276 | share code between them by moving it across the basic block | |
2277 | boundary. Return true if any changes made. */ | |
2278 | ||
2279 | static bool | |
2280 | try_head_merge_bb (basic_block bb) | |
2281 | { | |
2282 | basic_block final_dest_bb = NULL; | |
2283 | int max_match = INT_MAX; | |
2284 | edge e0; | |
2285 | rtx *headptr, *currptr, *nextptr; | |
2286 | bool changed, moveall; | |
2287 | unsigned ix; | |
2288 | rtx e0_last_head, cond, move_before; | |
2289 | unsigned nedges = EDGE_COUNT (bb->succs); | |
2290 | rtx jump = BB_END (bb); | |
2291 | regset live, live_union; | |
2292 | ||
2293 | /* Nothing to do if there is not at least two outgoing edges. */ | |
2294 | if (nedges < 2) | |
2295 | return false; | |
2296 | ||
2297 | /* Don't crossjump if this block ends in a computed jump, | |
2298 | unless we are optimizing for size. */ | |
2299 | if (optimize_bb_for_size_p (bb) | |
34154e27 | 2300 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun) |
bc6adae4 | 2301 | && computed_jump_p (BB_END (bb))) |
2302 | return false; | |
2303 | ||
2304 | cond = get_condition (jump, &move_before, true, false); | |
2305 | if (cond == NULL_RTX) | |
3cc98df4 | 2306 | { |
2307 | #ifdef HAVE_cc0 | |
2308 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2309 | move_before = prev_nonnote_nondebug_insn (jump); | |
2310 | else | |
2311 | #endif | |
2312 | move_before = jump; | |
2313 | } | |
bc6adae4 | 2314 | |
2315 | for (ix = 0; ix < nedges; ix++) | |
34154e27 | 2316 | if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
bc6adae4 | 2317 | return false; |
2318 | ||
2319 | for (ix = 0; ix < nedges; ix++) | |
2320 | { | |
2321 | edge e = EDGE_SUCC (bb, ix); | |
2322 | basic_block other_bb = e->dest; | |
2323 | ||
2324 | if (df_get_bb_dirty (other_bb)) | |
2325 | { | |
2326 | block_was_dirty = true; | |
2327 | return false; | |
2328 | } | |
2329 | ||
2330 | if (e->flags & EDGE_ABNORMAL) | |
2331 | return false; | |
2332 | ||
2333 | /* Normally, all destination blocks must only be reachable from this | |
2334 | block, i.e. they must have one incoming edge. | |
2335 | ||
2336 | There is one special case we can handle, that of multiple consecutive | |
2337 | jumps where the first jumps to one of the targets of the second jump. | |
2338 | This happens frequently in switch statements for default labels. | |
2339 | The structure is as follows: | |
2340 | FINAL_DEST_BB | |
2341 | .... | |
2342 | if (cond) jump A; | |
2343 | fall through | |
2344 | BB | |
2345 | jump with targets A, B, C, D... | |
2346 | A | |
2347 | has two incoming edges, from FINAL_DEST_BB and BB | |
2348 | ||
2349 | In this case, we can try to move the insns through BB and into | |
2350 | FINAL_DEST_BB. */ | |
2351 | if (EDGE_COUNT (other_bb->preds) != 1) | |
2352 | { | |
2353 | edge incoming_edge, incoming_bb_other_edge; | |
2354 | edge_iterator ei; | |
2355 | ||
2356 | if (final_dest_bb != NULL | |
2357 | || EDGE_COUNT (other_bb->preds) != 2) | |
2358 | return false; | |
2359 | ||
2360 | /* We must be able to move the insns across the whole block. */ | |
2361 | move_before = BB_HEAD (bb); | |
2362 | while (!NONDEBUG_INSN_P (move_before)) | |
2363 | move_before = NEXT_INSN (move_before); | |
2364 | ||
2365 | if (EDGE_COUNT (bb->preds) != 1) | |
2366 | return false; | |
2367 | incoming_edge = EDGE_PRED (bb, 0); | |
2368 | final_dest_bb = incoming_edge->src; | |
2369 | if (EDGE_COUNT (final_dest_bb->succs) != 2) | |
2370 | return false; | |
2371 | FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs) | |
2372 | if (incoming_bb_other_edge != incoming_edge) | |
2373 | break; | |
2374 | if (incoming_bb_other_edge->dest != other_bb) | |
2375 | return false; | |
2376 | } | |
2377 | } | |
2378 | ||
2379 | e0 = EDGE_SUCC (bb, 0); | |
2380 | e0_last_head = NULL_RTX; | |
2381 | changed = false; | |
2382 | ||
2383 | for (ix = 1; ix < nedges; ix++) | |
2384 | { | |
2385 | edge e = EDGE_SUCC (bb, ix); | |
2386 | rtx e0_last, e_last; | |
2387 | int nmatch; | |
2388 | ||
2389 | nmatch = flow_find_head_matching_sequence (e0->dest, e->dest, | |
2390 | &e0_last, &e_last, 0); | |
2391 | if (nmatch == 0) | |
2392 | return false; | |
2393 | ||
2394 | if (nmatch < max_match) | |
2395 | { | |
2396 | max_match = nmatch; | |
2397 | e0_last_head = e0_last; | |
2398 | } | |
2399 | } | |
2400 | ||
2401 | /* If we matched an entire block, we probably have to avoid moving the | |
2402 | last insn. */ | |
2403 | if (max_match > 0 | |
2404 | && e0_last_head == BB_END (e0->dest) | |
2405 | && (find_reg_note (e0_last_head, REG_EH_REGION, 0) | |
2406 | || control_flow_insn_p (e0_last_head))) | |
2407 | { | |
2408 | max_match--; | |
2409 | if (max_match == 0) | |
2410 | return false; | |
2411 | do | |
2412 | e0_last_head = prev_real_insn (e0_last_head); | |
2413 | while (DEBUG_INSN_P (e0_last_head)); | |
2414 | } | |
2415 | ||
2416 | if (max_match == 0) | |
2417 | return false; | |
2418 | ||
2419 | /* We must find a union of the live registers at each of the end points. */ | |
2420 | live = BITMAP_ALLOC (NULL); | |
2421 | live_union = BITMAP_ALLOC (NULL); | |
2422 | ||
2423 | currptr = XNEWVEC (rtx, nedges); | |
2424 | headptr = XNEWVEC (rtx, nedges); | |
2425 | nextptr = XNEWVEC (rtx, nedges); | |
2426 | ||
2427 | for (ix = 0; ix < nedges; ix++) | |
2428 | { | |
2429 | int j; | |
2430 | basic_block merge_bb = EDGE_SUCC (bb, ix)->dest; | |
2431 | rtx head = BB_HEAD (merge_bb); | |
2432 | ||
2433 | while (!NONDEBUG_INSN_P (head)) | |
2434 | head = NEXT_INSN (head); | |
2435 | headptr[ix] = head; | |
2436 | currptr[ix] = head; | |
2437 | ||
2438 | /* Compute the end point and live information */ | |
2439 | for (j = 1; j < max_match; j++) | |
2440 | do | |
2441 | head = NEXT_INSN (head); | |
2442 | while (!NONDEBUG_INSN_P (head)); | |
2443 | simulate_backwards_to_point (merge_bb, live, head); | |
2444 | IOR_REG_SET (live_union, live); | |
2445 | } | |
2446 | ||
2447 | /* If we're moving across two blocks, verify the validity of the | |
2448 | first move, then adjust the target and let the loop below deal | |
2449 | with the final move. */ | |
2450 | if (final_dest_bb != NULL) | |
2451 | { | |
2452 | rtx move_upto; | |
2453 | ||
2454 | moveall = can_move_insns_across (currptr[0], e0_last_head, move_before, | |
2455 | jump, e0->dest, live_union, | |
2456 | NULL, &move_upto); | |
2457 | if (!moveall) | |
2458 | { | |
2459 | if (move_upto == NULL_RTX) | |
2460 | goto out; | |
2461 | ||
2462 | while (e0_last_head != move_upto) | |
2463 | { | |
2464 | df_simulate_one_insn_backwards (e0->dest, e0_last_head, | |
2465 | live_union); | |
2466 | e0_last_head = PREV_INSN (e0_last_head); | |
2467 | } | |
2468 | } | |
2469 | if (e0_last_head == NULL_RTX) | |
2470 | goto out; | |
2471 | ||
2472 | jump = BB_END (final_dest_bb); | |
2473 | cond = get_condition (jump, &move_before, true, false); | |
2474 | if (cond == NULL_RTX) | |
3cc98df4 | 2475 | { |
2476 | #ifdef HAVE_cc0 | |
2477 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2478 | move_before = prev_nonnote_nondebug_insn (jump); | |
2479 | else | |
2480 | #endif | |
2481 | move_before = jump; | |
2482 | } | |
bc6adae4 | 2483 | } |
2484 | ||
2485 | do | |
2486 | { | |
2487 | rtx move_upto; | |
2488 | moveall = can_move_insns_across (currptr[0], e0_last_head, | |
2489 | move_before, jump, e0->dest, live_union, | |
2490 | NULL, &move_upto); | |
2491 | if (!moveall && move_upto == NULL_RTX) | |
2492 | { | |
2493 | if (jump == move_before) | |
2494 | break; | |
2495 | ||
2496 | /* Try again, using a different insertion point. */ | |
2497 | move_before = jump; | |
2498 | ||
2499 | #ifdef HAVE_cc0 | |
2500 | /* Don't try moving before a cc0 user, as that may invalidate | |
2501 | the cc0. */ | |
2502 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2503 | break; | |
2504 | #endif | |
2505 | ||
2506 | continue; | |
2507 | } | |
2508 | ||
2509 | if (final_dest_bb && !moveall) | |
2510 | /* We haven't checked whether a partial move would be OK for the first | |
2511 | move, so we have to fail this case. */ | |
2512 | break; | |
2513 | ||
2514 | changed = true; | |
2515 | for (;;) | |
2516 | { | |
2517 | if (currptr[0] == move_upto) | |
2518 | break; | |
2519 | for (ix = 0; ix < nedges; ix++) | |
2520 | { | |
2521 | rtx curr = currptr[ix]; | |
2522 | do | |
2523 | curr = NEXT_INSN (curr); | |
2524 | while (!NONDEBUG_INSN_P (curr)); | |
2525 | currptr[ix] = curr; | |
2526 | } | |
2527 | } | |
2528 | ||
2529 | /* If we can't currently move all of the identical insns, remember | |
2530 | each insn after the range that we'll merge. */ | |
2531 | if (!moveall) | |
2532 | for (ix = 0; ix < nedges; ix++) | |
2533 | { | |
2534 | rtx curr = currptr[ix]; | |
2535 | do | |
2536 | curr = NEXT_INSN (curr); | |
2537 | while (!NONDEBUG_INSN_P (curr)); | |
2538 | nextptr[ix] = curr; | |
2539 | } | |
2540 | ||
2541 | reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before)); | |
2542 | df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest); | |
2543 | if (final_dest_bb != NULL) | |
2544 | df_set_bb_dirty (final_dest_bb); | |
2545 | df_set_bb_dirty (bb); | |
2546 | for (ix = 1; ix < nedges; ix++) | |
2547 | { | |
2548 | df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest); | |
2549 | delete_insn_chain (headptr[ix], currptr[ix], false); | |
2550 | } | |
2551 | if (!moveall) | |
2552 | { | |
2553 | if (jump == move_before) | |
2554 | break; | |
2555 | ||
2556 | /* For the unmerged insns, try a different insertion point. */ | |
2557 | move_before = jump; | |
2558 | ||
2559 | #ifdef HAVE_cc0 | |
2560 | /* Don't try moving before a cc0 user, as that may invalidate | |
2561 | the cc0. */ | |
2562 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2563 | break; | |
2564 | #endif | |
2565 | ||
2566 | for (ix = 0; ix < nedges; ix++) | |
2567 | currptr[ix] = headptr[ix] = nextptr[ix]; | |
2568 | } | |
2569 | } | |
2570 | while (!moveall); | |
2571 | ||
2572 | out: | |
2573 | free (currptr); | |
2574 | free (headptr); | |
2575 | free (nextptr); | |
2576 | ||
2577 | crossjumps_occured |= changed; | |
2578 | ||
2579 | return changed; | |
2580 | } | |
2581 | ||
17d1c688 | 2582 | /* Return true if BB contains just bb note, or bb note followed |
2583 | by only DEBUG_INSNs. */ | |
2584 | ||
2585 | static bool | |
2586 | trivially_empty_bb_p (basic_block bb) | |
2587 | { | |
2588 | rtx insn = BB_END (bb); | |
2589 | ||
2590 | while (1) | |
2591 | { | |
2592 | if (insn == BB_HEAD (bb)) | |
2593 | return true; | |
2594 | if (!DEBUG_INSN_P (insn)) | |
2595 | return false; | |
2596 | insn = PREV_INSN (insn); | |
2597 | } | |
2598 | } | |
2599 | ||
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.

   MODE is a bitmask of CLEANUP_* flags controlling which transformations
   are attempted.  The main loop repeats until a full pass over the
   function makes no change, so each iteration can expose opportunities
   for the next (e.g. merging two blocks may make a jump redundant).  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occured = false;

  /* Precompute the BB_FORWARDER_BLOCK flag for every block; the
     transformations below update it incrementally as they go.  */
  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  /* Walk every real block; B is advanced manually at the
	     bottom of the loop (or reset via `continue') because the
	     transformations may delete or renumber blocks.  */
	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
		      != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      /* Before deleting an empty block that still has
			 predecessors, make sure any barrier attached to
			 it is transferred to its fallthru predecessors,
			 so the insn stream stays well-formed.  */
		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  /* Move B's footer to the first such
				     predecessor; later ones get a fresh
				     barrier since the footer is gone.  */
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove the exit block.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}

	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;

	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
	    {
	      /* Edge forwarding in particular can cause hot blocks previously
		 reached by both hot and cold blocks to become dominated only
		 by cold blocks. This will cause the verification below to fail,
		 and lead to now cold code in the hot section. This is not easy
		 to detect and fix during edge forwarding, and in some cases
		 is only visible after newly unreachable blocks are deleted,
		 which will be done in fixup_partitions.  */
	      fixup_partitions ();

#ifdef ENABLE_CHECKING
	      verify_flow_info ();
#endif
	    }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  /* Clear the per-block scratch flags used above before returning.  */
  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
2872 | \f | |
/* Delete all unreachable basic blocks.  Returns true if any block was
   removed.  Reachability is computed first via find_unreachable_blocks,
   which marks live blocks with BB_REACHABLE.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  /* Cache the predecessor now; B may be deleted below.  */
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      /* Re-seed the outer walk: the dominated set may
			 include the block PREV_BB currently points at.  */
		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      /* No dominator info (or not GIMPLE): a plain backward walk,
	 deleting each unreachable block as it is encountered.  */
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  /* Deleting blocks may have produced adjacent fallthru pairs that can
     be simplified.  */
  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
3072d30e | 2946 | |
2947 | /* Delete any jump tables never referenced. We can't delete them at the | |
c38b28e7 | 2948 | time of removing tablejump insn as they are referenced by the preceding |
2949 | insns computing the destination, so we delay deleting and garbagecollect | |
2950 | them once life information is computed. */ | |
3072d30e | 2951 | void |
2952 | delete_dead_jumptables (void) | |
2953 | { | |
2954 | basic_block bb; | |
2955 | ||
c38b28e7 | 2956 | /* A dead jump table does not belong to any basic block. Scan insns |
2957 | between two adjacent basic blocks. */ | |
fc00614f | 2958 | FOR_EACH_BB_FN (bb, cfun) |
3072d30e | 2959 | { |
c38b28e7 | 2960 | rtx insn, next; |
2961 | ||
2962 | for (insn = NEXT_INSN (BB_END (bb)); | |
2963 | insn && !NOTE_INSN_BASIC_BLOCK_P (insn); | |
2964 | insn = next) | |
0b3f0f44 | 2965 | { |
c38b28e7 | 2966 | next = NEXT_INSN (insn); |
2967 | if (LABEL_P (insn) | |
2968 | && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn) | |
2969 | && JUMP_TABLE_DATA_P (next)) | |
2970 | { | |
2971 | rtx label = insn, jump = next; | |
2972 | ||
2973 | if (dump_file) | |
2974 | fprintf (dump_file, "Dead jumptable %i removed\n", | |
2975 | INSN_UID (insn)); | |
2976 | ||
2977 | next = NEXT_INSN (next); | |
2978 | delete_insn (jump); | |
2979 | delete_insn (label); | |
2980 | } | |
3072d30e | 2981 | } |
2982 | } | |
2983 | } | |
2984 | ||
65f34de5 | 2985 | \f |
2986 | /* Tidy the CFG by deleting unreachable code and whatnot. */ | |
2987 | ||
2988 | bool | |
4c9e08a4 | 2989 | cleanup_cfg (int mode) |
65f34de5 | 2990 | { |
65f34de5 | 2991 | bool changed = false; |
2992 | ||
30641a08 | 2993 | /* Set the cfglayout mode flag here. We could update all the callers |
2994 | but that is just inconvenient, especially given that we eventually | |
2995 | want to have cfglayout mode as the default. */ | |
2996 | if (current_ir_type () == IR_RTL_CFGLAYOUT) | |
2997 | mode |= CLEANUP_CFGLAYOUT; | |
2998 | ||
65f34de5 | 2999 | timevar_push (TV_CLEANUP_CFG); |
fb20d6fa | 3000 | if (delete_unreachable_blocks ()) |
3001 | { | |
3002 | changed = true; | |
3003 | /* We've possibly created trivially dead code. Cleanup it right | |
d716ce75 | 3004 | now to introduce more opportunities for try_optimize_cfg. */ |
3072d30e | 3005 | if (!(mode & (CLEANUP_NO_INSN_DEL)) |
fb20d6fa | 3006 | && !reload_completed) |
d4473c84 | 3007 | delete_trivially_dead_insns (get_insns (), max_reg_num ()); |
fb20d6fa | 3008 | } |
3c0a32c9 | 3009 | |
3010 | compact_blocks (); | |
3011 | ||
1ae2ffa7 | 3012 | /* To tail-merge blocks ending in the same noreturn function (e.g. |
3013 | a call to abort) we have to insert fake edges to exit. Do this | |
3014 | here once. The fake edges do not interfere with any other CFG | |
3015 | cleanups. */ | |
3016 | if (mode & CLEANUP_CROSSJUMP) | |
3017 | add_noreturn_fake_exit_edges (); | |
3018 | ||
4ff06051 | 3019 | if (!dbg_cnt (cfg_cleanup)) |
3020 | return changed; | |
3021 | ||
fb20d6fa | 3022 | while (try_optimize_cfg (mode)) |
3023 | { | |
3024 | delete_unreachable_blocks (), changed = true; | |
1ae2ffa7 | 3025 | if (!(mode & CLEANUP_NO_INSN_DEL)) |
fb20d6fa | 3026 | { |
1ae2ffa7 | 3027 | /* Try to remove some trivially dead insns when doing an expensive |
3028 | cleanup. But delete_trivially_dead_insns doesn't work after | |
3029 | reload (it only handles pseudos) and run_fast_dce is too costly | |
3030 | to run in every iteration. | |
3031 | ||
3032 | For effective cross jumping, we really want to run a fast DCE to | |
3033 | clean up any dead conditions, or they get in the way of performing | |
3034 | useful tail merges. | |
3035 | ||
3036 | Other transformations in cleanup_cfg are not so sensitive to dead | |
3037 | code, so delete_trivially_dead_insns or even doing nothing at all | |
3038 | is good enough. */ | |
3039 | if ((mode & CLEANUP_EXPENSIVE) && !reload_completed | |
3040 | && !delete_trivially_dead_insns (get_insns (), max_reg_num ())) | |
fb20d6fa | 3041 | break; |
bc6adae4 | 3042 | if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured) |
bc27bb3b | 3043 | run_fast_dce (); |
fb20d6fa | 3044 | } |
3045 | else | |
3046 | break; | |
fb20d6fa | 3047 | } |
65f34de5 | 3048 | |
1ae2ffa7 | 3049 | if (mode & CLEANUP_CROSSJUMP) |
3050 | remove_fake_exit_edges (); | |
3051 | ||
c38b28e7 | 3052 | /* Don't call delete_dead_jumptables in cfglayout mode, because |
3053 | that function assumes that jump tables are in the insns stream. | |
3054 | But we also don't _have_ to delete dead jumptables in cfglayout | |
3055 | mode because we shouldn't even be looking at things that are | |
3056 | not in a basic block. Dead jumptables are cleaned up when | |
3057 | going out of cfglayout mode. */ | |
3058 | if (!(mode & CLEANUP_CFGLAYOUT)) | |
3072d30e | 3059 | delete_dead_jumptables (); |
3060 | ||
79f958cb | 3061 | /* ??? We probably do this way too often. */ |
3062 | if (current_loops | |
3063 | && (changed | |
3064 | || (mode & CLEANUP_CFG_CHANGED))) | |
3065 | { | |
79f958cb | 3066 | timevar_push (TV_REPAIR_LOOPS); |
3067 | /* The above doesn't preserve dominance info if available. */ | |
3068 | gcc_assert (!dom_info_available_p (CDI_DOMINATORS)); | |
3069 | calculate_dominance_info (CDI_DOMINATORS); | |
022d4ccc | 3070 | fix_loop_structure (NULL); |
79f958cb | 3071 | free_dominance_info (CDI_DOMINATORS); |
3072 | timevar_pop (TV_REPAIR_LOOPS); | |
3073 | } | |
3074 | ||
65f34de5 | 3075 | timevar_pop (TV_CLEANUP_CFG); |
3076 | ||
65f34de5 | 3077 | return changed; |
3078 | } | |
77fce4cd | 3079 | \f |
2a1990e9 | 3080 | static unsigned int |
76cdbc6d | 3081 | execute_jump (void) |
77fce4cd | 3082 | { |
77fce4cd | 3083 | delete_trivially_dead_insns (get_insns (), max_reg_num ()); |
77fce4cd | 3084 | if (dump_file) |
562d71e8 | 3085 | dump_flow_info (dump_file, dump_flags); |
da0f98a7 | 3086 | cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) |
a0c938f0 | 3087 | | (flag_thread_jumps ? CLEANUP_THREADING : 0)); |
2a1990e9 | 3088 | return 0; |
77fce4cd | 3089 | } |
3090 | ||
/* Pass-manager registration for the "jump" pass; the actual work is
   done by execute_jump above.  */

namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_rtl_sharing, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return execute_jump (); }

}; // class pass_jump

} // anon namespace

/* Factory used by the pass manager to instantiate the pass.  */

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
76cdbc6d | 3127 | \f |
3128 | static unsigned int | |
3129 | execute_jump2 (void) | |
3130 | { | |
3131 | cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0); | |
3132 | return 0; | |
3133 | } | |
77fce4cd | 3134 | |
/* Pass-manager registration for the "jump2" pass; the actual work is
   done by execute_jump2 above.  */

namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_rtl_sharing, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return execute_jump2 (); }

}; // class pass_jump2

} // anon namespace

/* Factory used by the pass manager to instantiate the pass.  */

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}