/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
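
/* A schematic sketch of the jump-around-simplejump case (the labels and
   condition here are hypothetical, for illustration only; see
   try_simplify_condjump below):

	if (cond) goto L1;		     if (!cond) goto L2;
	goto L2;		 becomes     L1: ...
     L1: ...

   The conditional branch is inverted to target L2 and the forwarder
   block holding the unconditional jump is deleted.  */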

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "cselib.h"
#include "params.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "dce.h"
#include "dbgcnt.h"
#include "rtl-iter.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occurred;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
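
/* For reference, a forwarder block is a block containing at most a label
   and an unconditional jump, e.g. (schematically, hypothetical labels):

     L1: goto L2;

   Such blocks get the BB_FORWARDER_BLOCK flag so that edges into them
   can later be redirected straight to their single successor.  */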
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
                    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
\f
/* Attempt to prove that an operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
        bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}
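
/* A few schematic examples (the register numbers are hypothetical):
   - (set (reg 100) (reg 100)) where cselib proves both sides equal:
     nothing is marked and false ("no failure") is returned.
   - (set (reg 100) (reg 101)): REG 100 is recorded in NONEQUAL, since
     its value may now differ between the two blocks.
   - (set (mem ...) (reg 101)): true is returned; a store to memory
     cannot be tracked here, so threading must give up.  */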

/* Return true if X contains a register in NONEQUAL.  */
static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
        {
          unsigned int end_regno = END_REGNO (x);
          for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
            if (REGNO_REG_SET_P (nonequal, regno))
              return true;
        }
    }
  return false;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or, for example, interval comparisons that
     dominate even when the operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued via the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have liveness information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that does not hold an equal value
     in the former block.  */
  if (mentions_nonequal_regs (cond2, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
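
/* A hypothetical sketch of the proof performed above: suppose E is the
   taken edge of "if (x == 0) goto B" in E->src, and block B has no side
   effects and ends in "if (x >= 0) goto C".  EQ dominates GE on the same
   operands, so B's branch is known to be taken and the caller can
   redirect E straight to C, bypassing B entirely.  */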
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we may succeed in forwarding the
         fallthru edge to the same place as the branch edge of a conditional
         branch, turning the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && JUMP_P (BB_END (first))
          && CROSSING_JUMP_P (BB_END (first)))
        return changed;

      while (counter < n_basic_blocks_for_fn (cfun))
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= (target->flags & BB_MODIFIED) != 0;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks_for_fn (cfun);
              else if (!optimize)
                {
                  /* When not optimizing, ensure that edges or forwarder
                     blocks with different locus are not optimized out.  */
                  location_t new_locus = single_succ_edge (target)->goto_locus;
                  location_t locus = goto_locus;

                  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
                      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
                      && new_locus != locus)
                    new_target = NULL;
                  else
                    {
                      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
                        locus = new_locus;

                      rtx_insn *last = BB_END (target);
                      if (DEBUG_INSN_P (last))
                        last = prev_nondebug_insn (last);
                      if (last && INSN_P (last))
                        new_locus = INSN_LOCATION (last);
                      else
                        new_locus = UNKNOWN_LOCATION;

                      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
                          && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
                          && new_locus != locus)
                        new_target = NULL;
                      else
                        {
                          if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
                            locus = new_locus;

                          goto_locus = locus;
                        }
                    }
                }
            }

          /* Allow threading only over one edge at a time, to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge,
                                              n_basic_blocks_for_fn (cfun));
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks_for_fn (cfun);
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges
                              < (n_basic_blocks_for_fn (cfun)
                                 - NUM_FIXED_BLOCKS));
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks_for_fn (cfun))
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          profile_count edge_count = e->count ();
          int n = 0;

          e->goto_locus = goto_locus;

          /* Don't force if target is the exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  /* It is possible that as the result of
                     threading we've removed the edge, as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
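
/* A schematic example (the block names are hypothetical): if B's branch
   edge reaches forwarder F1, which jumps to forwarder F2, which jumps to
   D, the loop above walks the chain F1 -> F2 -> D, redirects B's edge
   straight to D, and subtracts the traversed execution count from F1 and
   F2 so that the profile stays consistent.  */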
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx_insn *label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
        return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}
\f

/* Remove the memory attributes from MEM expressions X and Y
   wherever they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              clear_mem_offset (x);
              clear_mem_offset (y);
            }
          else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
                   || (MEM_OFFSET_KNOWN_P (x)
                       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
            {
              clear_mem_offset (x);
              clear_mem_offset (y);
            }

          if (!MEM_SIZE_KNOWN_P (x))
            clear_mem_size (y);
          else if (!MEM_SIZE_KNOWN_P (y))
            clear_mem_size (x);
          else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
            set_mem_size (x, MEM_SIZE (y));
          else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
            set_mem_size (y, MEM_SIZE (x));
          else
            {
              /* The sizes aren't ordered, so we can't merge them.  */
              clear_mem_size (x);
              clear_mem_size (y);
            }

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
        {
          MEM_READONLY_P (x) = 0;
          MEM_READONLY_P (y) = 0;
        }
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
        {
          MEM_NOTRAP_P (x) = 0;
          MEM_NOTRAP_P (y) = 0;
        }
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
        {
          MEM_VOLATILE_P (x) = 1;
          MEM_VOLATILE_P (y) = 1;
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
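
/* An example with hypothetical attributes: if X has MEM_ALIGN 64 and a
   known 8-byte size while Y has MEM_ALIGN 32 and no known size, both end
   up with MEM_ALIGN 32 and no known size; the merged attributes are the
   conservative ones that are valid for either access.  */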


/* Checks if patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}


/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   for NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Else return zero.  */
static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
{
  if (note1
      && note2
      && CONST_INT_P (XEXP (note1, 0))
      && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
    return 1;

  if (!note1
      && !note2
      && CONST_INT_P (src1)
      && CONST_INT_P (src2)
      && rtx_equal_p (src1, src2))
    return 1;

  if (note1
      && CONST_INT_P (src2)
      && rtx_equal_p (XEXP (note1, 0), src2))
    return 1;

  if (note2
      && CONST_INT_P (src1)
      && rtx_equal_p (XEXP (note2, 0), src1))
    return 1;

  return 0;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);

  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);

  if (!values_equal_p (note1, note2, src1, src2))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}
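
/* A hypothetical example: I1 is (set (reg 100) (const_int 7)) and I2 is
   (set (reg 100) (reg 200)) carrying a REG_EQUAL note of (const_int 7).
   Both sets store 7 into reg 100, yet only I2 can be replaced by I1
   (dir_backward): reg 200 is not guaranteed to be live, or to hold 7,
   at I1's location.  */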

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Array of flags indexed by reg note kind, true if the given
   reg note is CFA related.  */
static const bool reg_note_cfa_p[] = {
#undef REG_CFA_NOTE
#define DEF_REG_NOTE(NAME) false,
#define REG_CFA_NOTE(NAME) true,
#include "reg-notes.def"
#undef REG_CFA_NOTE
#undef DEF_REG_NOTE
  false
};

/* Return true if I1 and I2 have identical CFA notes (the same order
   and equivalent content).  */

static bool
insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
{
  rtx n1, n2;
  for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
       n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
    {
      /* Skip over reg notes not related to CFI information.  */
      while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
        n1 = XEXP (n1, 1);
      while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
        n2 = XEXP (n2, 1);
      if (n1 == NULL_RTX && n2 == NULL_RTX)
        return true;
      if (n1 == NULL_RTX || n2 == NULL_RTX)
        return false;
      if (XEXP (n1, 0) == XEXP (n2, 0))
        ;
      else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
        return false;
      else if (!(reload_completed
                 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
                 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
        return false;
    }
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
        return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  /* Do not allow cross-jumping between frame related insns and other
     insns.  */
  if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
        return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
        return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
        return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
         otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
        {
          rtx call = get_call_rtx_from (i1);
          if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
            {
              rtx symbol = XEXP (XEXP (call, 0), 0);
              if (SYMBOL_REF_DECL (symbol)
                  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
                {
                  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
                       == BUILT_IN_NORMAL)
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         >= BUILT_IN_ASAN_REPORT_LOAD1
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         <= BUILT_IN_ASAN_STOREN)
                    return dir_none;
                }
            }
        }
    }

  /* If both i1 and i2 are frame related, verify all the CFA notes
     in the same order and with the same content.  */
  if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
    return dir_none;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
        return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}
\f
/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

/* Walks from I1 in BB1 backward to the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

c2fc5456 | 1347 | /* Look through the insns at the end of BB1 and BB2 and find the longest |
472c95f5 TV |
1348 | sequence that are either equivalent, or allow forward or backward |
1349 | replacement. Store the first insns for that sequence in *F1 and *F2 and | |
1350 | return the sequence length. | |
1351 | ||
1352 | DIR_P indicates the allowed replacement direction on function entry, and | |
1353 | the actual replacement direction on function exit. If NULL, only equivalent | |
1354 | sequences are allowed. | |
c2fc5456 R |
1355 | |
1356 | To simplify callers of this function, if the blocks match exactly, | |
1357 | store the head of the blocks in *F1 and *F2. */ | |
1358 | ||
31ce8a53 | 1359 | int |
da5477a9 DM |
1360 | flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1, |
1361 | rtx_insn **f2, enum replace_direction *dir_p) | |
c2fc5456 | 1362 | { |
da5477a9 | 1363 | rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2; |
c2fc5456 | 1364 | int ninsns = 0; |
472c95f5 | 1365 | enum replace_direction dir, last_dir, afterlast_dir; |
823918ae | 1366 | bool follow_fallthru, did_fallthru; |
472c95f5 TV |
1367 | |
1368 | if (dir_p) | |
1369 | dir = *dir_p; | |
1370 | else | |
1371 | dir = dir_both; | |
1372 | afterlast_dir = dir; | |
1373 | last_dir = afterlast_dir; | |
c2fc5456 R |
1374 | |
1375 | /* Skip simple jumps at the end of the blocks. Complex jumps still | |
1376 | need to be compared for equivalence, which we'll do below. */ | |
1377 | ||
1378 | i1 = BB_END (bb1); | |
da5477a9 | 1379 | last1 = afterlast1 = last2 = afterlast2 = NULL; |
c2fc5456 R |
1380 | if (onlyjump_p (i1) |
1381 | || (returnjump_p (i1) && !side_effects_p (PATTERN (i1)))) | |
1382 | { | |
1383 | last1 = i1; | |
1384 | i1 = PREV_INSN (i1); | |
1385 | } | |
1386 | ||
1387 | i2 = BB_END (bb2); | |
1388 | if (onlyjump_p (i2) | |
1389 | || (returnjump_p (i2) && !side_effects_p (PATTERN (i2)))) | |
1390 | { | |
1391 | last2 = i2; | |
a0cbe71e JJ |
1392 | /* Count everything except for unconditional jump as insn. |
1393 | Don't count any jumps if dir_p is NULL. */ | |
1394 | if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p) | |
c2fc5456 R |
1395 | ninsns++; |
1396 | i2 = PREV_INSN (i2); | |
1397 | } | |
1398 | ||
1399 | while (true) | |
1400 | { | |
823918ae TV |
1401 | /* In the following example, we can replace all jumps to C by jumps to A. |
1402 | ||
1403 | This removes 4 duplicate insns. | |
1404 | [bb A] insn1 [bb C] insn1 | |
1405 | insn2 insn2 | |
1406 | [bb B] insn3 insn3 | |
1407 | insn4 insn4 | |
1408 | jump_insn jump_insn | |
1409 | ||
1410 | We could also replace all jumps to A by jumps to C, but that leaves B | |
1411 | alive, and removes only 2 duplicate insns. In a subsequent crossjump | |
1412 | step, all jumps to B would be replaced with jumps to the middle of C, | |
1413 | achieving the same result with more effort. | |
1414 | So we allow only the first possibility, which means that we don't allow | |
1415 | fallthru in the block that's being replaced. */ | |
1416 | ||
1417 | follow_fallthru = dir_p && dir != dir_forward; | |
1418 | walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru); | |
1419 | if (did_fallthru) | |
1420 | dir = dir_backward; | |
1421 | ||
1422 | follow_fallthru = dir_p && dir != dir_backward; | |
1423 | walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru); | |
1424 | if (did_fallthru) | |
1425 | dir = dir_forward; | |
c2fc5456 R |
1426 | |
1427 | if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2)) | |
1428 | break; | |
1429 | ||
ba61fc53 JH |
1430 | /* Do not turn a crossing edge into a non-crossing one, or vice
1431 | versa, after reload. */
1432 | if (BB_PARTITION (BLOCK_FOR_INSN (i1)) | |
1433 | != BB_PARTITION (BLOCK_FOR_INSN (i2)) | |
1434 | && reload_completed) | |
1435 | break; | |
1436 | ||
472c95f5 TV |
1437 | dir = merge_dir (dir, old_insns_match_p (0, i1, i2)); |
1438 | if (dir == dir_none || (!dir_p && dir != dir_both)) | |
c2fc5456 R |
1439 | break; |
1440 | ||
1441 | merge_memattrs (i1, i2); | |
1442 | ||
1443 | /* Don't begin a cross-jump with a NOTE insn. */ | |
1444 | if (INSN_P (i1)) | |
1445 | { | |
31ce8a53 | 1446 | merge_notes (i1, i2); |
c2fc5456 R |
1447 | |
1448 | afterlast1 = last1, afterlast2 = last2; | |
1449 | last1 = i1, last2 = i2; | |
472c95f5 TV |
1450 | afterlast_dir = last_dir; |
1451 | last_dir = dir; | |
a0cbe71e | 1452 | if (active_insn_p (i1)) |
2a562b0a | 1453 | ninsns++; |
c2fc5456 R |
1454 | } |
1455 | ||
1456 | i1 = PREV_INSN (i1); | |
1457 | i2 = PREV_INSN (i2); | |
1458 | } | |
1459 | ||
c2fc5456 R |
1460 | /* Don't allow the insn after a compare to be shared by |
1461 | cross-jumping unless the compare is also shared. */ | |
618f4073 TS |
1462 | if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1) |
1463 | && ! sets_cc0_p (last1)) | |
472c95f5 | 1464 | last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--; |
c2fc5456 R |
1465 | |
1466 | /* Include preceding notes and labels in the cross-jump. One, | |
1467 | this may bring us to the head of the blocks as requested above. | |
1468 | Two, it keeps line number notes as closely matched as possible. */
1469 | if (ninsns) | |
1470 | { | |
823918ae | 1471 | bb1 = BLOCK_FOR_INSN (last1); |
b5b8b0ac | 1472 | while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1))) |
c2fc5456 R |
1473 | last1 = PREV_INSN (last1); |
1474 | ||
1475 | if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1))) | |
1476 | last1 = PREV_INSN (last1); | |
1477 | ||
823918ae | 1478 | bb2 = BLOCK_FOR_INSN (last2); |
b5b8b0ac | 1479 | while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2))) |
c2fc5456 R |
1480 | last2 = PREV_INSN (last2); |
1481 | ||
1482 | if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2))) | |
1483 | last2 = PREV_INSN (last2); | |
1484 | ||
1485 | *f1 = last1; | |
1486 | *f2 = last2; | |
1487 | } | |
1488 | ||
472c95f5 TV |
1489 | if (dir_p) |
1490 | *dir_p = last_dir; | |
c2fc5456 R |
1491 | return ninsns; |
1492 | } | |
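
/* Example call (a sketch, modeled on try_crossjump_to_edge below; not
   part of the original sources):

     rtx_insn *newpos1, *newpos2;
     enum replace_direction dir = dir_both;
     int nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

   On return, NMATCH active insns at the ends of the two blocks match,
   NEWPOS1 and NEWPOS2 point at the first insns of the matched sequences,
   and DIR says whether SRC1 may replace SRC2, the reverse, or both.  */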
1493 | ||
31ce8a53 BS |
1494 | /* Like flow_find_cross_jump, except start looking for a matching sequence from |
1495 | the head of the two blocks. Do not include jumps at the end. | |
1496 | If STOP_AFTER is nonzero, stop after finding that many matching | |
b59e0455 JJ |
1497 | instructions. If STOP_AFTER is zero, count all INSN_P insns; if it is
1498 | nonzero, count only active insns. */
31ce8a53 BS |
1499 | |
1500 | int | |
da5477a9 DM |
1501 | flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1, |
1502 | rtx_insn **f2, int stop_after) | |
31ce8a53 | 1503 | { |
da5477a9 | 1504 | rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2; |
31ce8a53 BS |
1505 | int ninsns = 0; |
1506 | edge e; | |
1507 | edge_iterator ei; | |
1508 | int nehedges1 = 0, nehedges2 = 0; | |
1509 | ||
1510 | FOR_EACH_EDGE (e, ei, bb1->succs) | |
1511 | if (e->flags & EDGE_EH) | |
1512 | nehedges1++; | |
1513 | FOR_EACH_EDGE (e, ei, bb2->succs) | |
1514 | if (e->flags & EDGE_EH) | |
1515 | nehedges2++; | |
1516 | ||
1517 | i1 = BB_HEAD (bb1); | |
1518 | i2 = BB_HEAD (bb2); | |
da5477a9 | 1519 | last1 = beforelast1 = last2 = beforelast2 = NULL; |
31ce8a53 BS |
1520 | |
1521 | while (true) | |
1522 | { | |
4ec5d4f5 | 1523 | /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */ |
31ce8a53 | 1524 | while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1)) |
4ec5d4f5 BS |
1525 | { |
1526 | if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG) | |
1527 | break; | |
1528 | i1 = NEXT_INSN (i1); | |
1529 | } | |
31ce8a53 BS |
1530 | |
1531 | while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2)) | |
4ec5d4f5 BS |
1532 | { |
1533 | if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG) | |
1534 | break; | |
1535 | i2 = NEXT_INSN (i2); | |
1536 | } | |
31ce8a53 | 1537 | |
662592e1 BS |
1538 | if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1)) |
1539 | || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2))) | |
1540 | break; | |
1541 | ||
31ce8a53 BS |
1542 | if (NOTE_P (i1) || NOTE_P (i2) |
1543 | || JUMP_P (i1) || JUMP_P (i2)) | |
1544 | break; | |
1545 | ||
1546 | /* A sanity check to make sure we're not merging insns with different | |
1547 | effects on EH. If only one of them ends a basic block, it shouldn't | |
1548 | have an EH edge; if both end a basic block, there should be the same | |
1549 | number of EH edges. */ | |
1550 | if ((i1 == BB_END (bb1) && i2 != BB_END (bb2) | |
1551 | && nehedges1 > 0) | |
1552 | || (i2 == BB_END (bb2) && i1 != BB_END (bb1) | |
1553 | && nehedges2 > 0) | |
1554 | || (i1 == BB_END (bb1) && i2 == BB_END (bb2) | |
1555 | && nehedges1 != nehedges2)) | |
1556 | break; | |
1557 | ||
472c95f5 | 1558 | if (old_insns_match_p (0, i1, i2) != dir_both) |
31ce8a53 BS |
1559 | break; |
1560 | ||
1561 | merge_memattrs (i1, i2); | |
1562 | ||
1563 | /* Don't begin a cross-jump with a NOTE insn. */ | |
1564 | if (INSN_P (i1)) | |
1565 | { | |
1566 | merge_notes (i1, i2); | |
1567 | ||
1568 | beforelast1 = last1, beforelast2 = last2; | |
1569 | last1 = i1, last2 = i2; | |
b59e0455 | 1570 | if (!stop_after || active_insn_p (i1)) |
a0cbe71e | 1571 | ninsns++; |
31ce8a53 BS |
1572 | } |
1573 | ||
1574 | if (i1 == BB_END (bb1) || i2 == BB_END (bb2) | |
1575 | || (stop_after > 0 && ninsns == stop_after)) | |
1576 | break; | |
1577 | ||
1578 | i1 = NEXT_INSN (i1); | |
1579 | i2 = NEXT_INSN (i2); | |
1580 | } | |
1581 | ||
31ce8a53 BS |
1582 | /* Don't allow a compare to be shared by cross-jumping unless the insn |
1583 | after the compare is also shared. */ | |
618f4073 TS |
1584 | if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1) |
1585 | && sets_cc0_p (last1)) | |
31ce8a53 | 1586 | last1 = beforelast1, last2 = beforelast2, ninsns--; |
31ce8a53 BS |
1587 | |
1588 | if (ninsns) | |
1589 | { | |
1590 | *f1 = last1; | |
1591 | *f2 = last2; | |
1592 | } | |
1593 | ||
1594 | return ninsns; | |
1595 | } | |
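
/* Example call (a sketch, modeled on try_head_merge_bb below; not part
   of the original sources):

     rtx_insn *e0_last, *e_last;
     int nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						    &e0_last, &e_last, 0);

   With STOP_AFTER == 0 this returns the length of the longest matching
   head sequence of the two blocks, ending at E0_LAST and E_LAST.  */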
1596 | ||
c2fc5456 R |
1597 | /* Return true iff outgoing edges of BB1 and BB2 match, together with |
1598 | the branch instruction. This means that if we commonize the control
1599 | flow before the end of the basic block, the semantics remain unchanged.
402209ff JH |
1600 | |
1601 | We may assume that there exists one edge with a common destination. */ | |
1602 | ||
1603 | static bool | |
c2fc5456 | 1604 | outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) |
402209ff | 1605 | { |
0dd0e980 JH |
1606 | int nehedges1 = 0, nehedges2 = 0; |
1607 | edge fallthru1 = 0, fallthru2 = 0; | |
1608 | edge e1, e2; | |
628f6a4e | 1609 | edge_iterator ei; |
0dd0e980 | 1610 | |
6626665f | 1611 | /* If we performed shrink-wrapping, edges to the exit block can |
484db665 BS |
1612 | only be distinguished for JUMP_INSNs. The two paths may differ in |
1613 | whether they went through the prologue. Sibcalls are fine, we know | |
1614 | that we either didn't need or inserted an epilogue before them. */ | |
1615 | if (crtl->shrink_wrapped | |
fefa31b5 DM |
1616 | && single_succ_p (bb1) |
1617 | && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun) | |
484db665 BS |
1618 | && !JUMP_P (BB_END (bb1)) |
1619 | && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1)))) | |
1620 | return false; | |
1621 | ||
c04cf67b RH |
1622 | /* If BB1 has only one successor, we may be looking at either an |
1623 | unconditional jump, or a fake edge to exit. */ | |
c5cbcccf ZD |
1624 | if (single_succ_p (bb1) |
1625 | && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
4b4bf941 | 1626 | && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1)))) |
c5cbcccf ZD |
1627 | return (single_succ_p (bb2) |
1628 | && (single_succ_edge (bb2)->flags | |
1629 | & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
4b4bf941 | 1630 | && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2)))); |
402209ff JH |
1631 | |
1632 | /* Match conditional jumps - this may get tricky when fallthru and branch | |
1633 | edges are crossed. */ | |
628f6a4e | 1634 | if (EDGE_COUNT (bb1->succs) == 2 |
a813c111 SB |
1635 | && any_condjump_p (BB_END (bb1)) |
1636 | && onlyjump_p (BB_END (bb1))) | |
402209ff | 1637 | { |
c2fc5456 R |
1638 | edge b1, f1, b2, f2; |
1639 | bool reverse, match; | |
1640 | rtx set1, set2, cond1, cond2; | |
1641 | enum rtx_code code1, code2; | |
1642 | ||
628f6a4e | 1643 | if (EDGE_COUNT (bb2->succs) != 2 |
a813c111 SB |
1644 | || !any_condjump_p (BB_END (bb2)) |
1645 | || !onlyjump_p (BB_END (bb2))) | |
0a2ed1f1 | 1646 | return false; |
c2fc5456 R |
1647 | |
1648 | b1 = BRANCH_EDGE (bb1); | |
1649 | b2 = BRANCH_EDGE (bb2); | |
1650 | f1 = FALLTHRU_EDGE (bb1); | |
1651 | f2 = FALLTHRU_EDGE (bb2); | |
1652 | ||
1653 | /* Get around possible forwarders on fallthru edges. Other cases | |
c22cacf3 | 1654 | should be optimized out already. */ |
c2fc5456 R |
1655 | if (FORWARDER_BLOCK_P (f1->dest)) |
1656 | f1 = single_succ_edge (f1->dest); | |
1657 | ||
1658 | if (FORWARDER_BLOCK_P (f2->dest)) | |
1659 | f2 = single_succ_edge (f2->dest); | |
1660 | ||
1661 | /* To simplify use of this function, return false if there are | |
1662 | unneeded forwarder blocks. These will get eliminated later | |
1663 | during cleanup_cfg. */ | |
1664 | if (FORWARDER_BLOCK_P (f1->dest) | |
1665 | || FORWARDER_BLOCK_P (f2->dest) | |
1666 | || FORWARDER_BLOCK_P (b1->dest) | |
1667 | || FORWARDER_BLOCK_P (b2->dest)) | |
1668 | return false; | |
1669 | ||
1670 | if (f1->dest == f2->dest && b1->dest == b2->dest) | |
1671 | reverse = false; | |
1672 | else if (f1->dest == b2->dest && b1->dest == f2->dest) | |
1673 | reverse = true; | |
1674 | else | |
1675 | return false; | |
1676 | ||
1677 | set1 = pc_set (BB_END (bb1)); | |
1678 | set2 = pc_set (BB_END (bb2)); | |
1679 | if ((XEXP (SET_SRC (set1), 1) == pc_rtx) | |
1680 | != (XEXP (SET_SRC (set2), 1) == pc_rtx)) | |
1681 | reverse = !reverse; | |
1682 | ||
1683 | cond1 = XEXP (SET_SRC (set1), 0); | |
1684 | cond2 = XEXP (SET_SRC (set2), 0); | |
1685 | code1 = GET_CODE (cond1); | |
1686 | if (reverse) | |
1687 | code2 = reversed_comparison_code (cond2, BB_END (bb2)); | |
1688 | else | |
1689 | code2 = GET_CODE (cond2); | |
1690 | ||
1691 | if (code2 == UNKNOWN) | |
1692 | return false; | |
1693 | ||
1694 | /* Verify codes and operands match. */ | |
1695 | match = ((code1 == code2 | |
1696 | && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
1697 | && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
1698 | || (code1 == swap_condition (code2) | |
1699 | && rtx_renumbered_equal_p (XEXP (cond1, 1), | |
1700 | XEXP (cond2, 0)) | |
1701 | && rtx_renumbered_equal_p (XEXP (cond1, 0), | |
1702 | XEXP (cond2, 1)))); | |
1703 | ||
1704 | /* If we return true, we will join the blocks. Which means that | |
1705 | we will only have one branch prediction bit to work with. Thus | |
1706 | we require the existing branches to have probabilities that are | |
1707 | roughly similar. */ | |
1708 | if (match | |
efd8f750 JH |
1709 | && optimize_bb_for_speed_p (bb1) |
1710 | && optimize_bb_for_speed_p (bb2)) | |
c2fc5456 | 1711 | { |
357067f2 | 1712 | profile_probability prob2; |
c2fc5456 R |
1713 | |
1714 | if (b1->dest == b2->dest) | |
1715 | prob2 = b2->probability; | |
1716 | else | |
1717 | /* Do not use f2 probability as f2 may be forwarded. */ | |
357067f2 | 1718 | prob2 = b2->probability.invert (); |
c2fc5456 R |
1719 | |
1720 | /* Fail if the difference in probabilities is greater than 50%. | |
1721 | This rules out two well-predicted branches with opposite | |
1722 | outcomes. */ | |
357067f2 | 1723 | if (b1->probability.differs_lot_from_p (prob2)) |
c2fc5456 R |
1724 | { |
1725 | if (dump_file) | |
357067f2 JH |
1726 | { |
1727 | fprintf (dump_file, | |
1728 | "Outcomes of branch in bb %i and %i differ too" | |
1729 | " much (", bb1->index, bb2->index); | |
1730 | b1->probability.dump (dump_file); | |
1731 | prob2.dump (dump_file); | |
1732 | fprintf (dump_file, ")\n"); | |
1733 | } | |
c2fc5456 R |
1734 | return false; |
1735 | } | |
1736 | } | |
1737 | ||
1738 | if (dump_file && match) | |
1739 | fprintf (dump_file, "Conditionals in bb %i and %i match.\n", | |
1740 | bb1->index, bb2->index); | |
1741 | ||
1742 | return match; | |
402209ff JH |
1743 | } |
1744 | ||
09da1532 | 1745 | /* Generic case - we are seeing a computed jump, table jump or trapping |
0dd0e980 JH |
1746 | instruction. */ |
1747 | ||
39811184 JZ |
1748 | /* Check whether there are tablejumps in the end of BB1 and BB2. |
1749 | Return true if they are identical. */ | |
1750 | { | |
dfe08bc4 | 1751 | rtx_insn *label1, *label2; |
8942ee0f | 1752 | rtx_jump_table_data *table1, *table2; |
39811184 | 1753 | |
a813c111 SB |
1754 | if (tablejump_p (BB_END (bb1), &label1, &table1) |
1755 | && tablejump_p (BB_END (bb2), &label2, &table2) | |
39811184 JZ |
1756 | && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2))) |
1757 | { | |
1758 | /* The labels should never be the same rtx. If they really are the same,
1759 | the jump tables are the same too. So disable crossjumping of blocks BB1
1760 | and BB2 because when deleting the common insns in the end of BB1 | |
6de9cd9a | 1761 | by delete_basic_block () the jump table would be deleted too. */ |
4af16369 | 1762 | /* If LABEL2 is referenced in BB1->END do not do anything |
39811184 JZ |
1763 | because we would lose information when replacing
1764 | LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ | |
a813c111 | 1765 | if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1))) |
39811184 JZ |
1766 | { |
1767 | /* Set IDENTICAL to true when the tables are identical. */ | |
1768 | bool identical = false; | |
1769 | rtx p1, p2; | |
1770 | ||
1771 | p1 = PATTERN (table1); | |
1772 | p2 = PATTERN (table2); | |
1773 | if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2)) | |
1774 | { | |
1775 | identical = true; | |
1776 | } | |
1777 | else if (GET_CODE (p1) == ADDR_DIFF_VEC | |
1778 | && (XVECLEN (p1, 1) == XVECLEN (p2, 1)) | |
1779 | && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2)) | |
1780 | && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3))) | |
1781 | { | |
1782 | int i; | |
1783 | ||
1784 | identical = true; | |
1785 | for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--) | |
1786 | if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i))) | |
1787 | identical = false; | |
1788 | } | |
1789 | ||
c2fc5456 | 1790 | if (identical) |
39811184 | 1791 | { |
39811184 JZ |
1792 | bool match; |
1793 | ||
c2fc5456 | 1794 | /* Temporarily replace references to LABEL1 with LABEL2 |
39811184 | 1795 | in BB1->END so that we could compare the instructions. */ |
a2b7026c | 1796 | replace_label_in_insn (BB_END (bb1), label1, label2, false); |
39811184 | 1797 | |
472c95f5 TV |
1798 | match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) |
1799 | == dir_both); | |
c263766c RH |
1800 | if (dump_file && match) |
1801 | fprintf (dump_file, | |
39811184 JZ |
1802 | "Tablejumps in bb %i and %i match.\n", |
1803 | bb1->index, bb2->index); | |
1804 | ||
c2fc5456 R |
1805 | /* Set the original label in BB1->END because when deleting |
1806 | a block whose end is a tablejump, the tablejump referenced | |
1807 | from the instruction is deleted too. */ | |
a2b7026c | 1808 | replace_label_in_insn (BB_END (bb1), label2, label1, false); |
c2fc5456 | 1809 | |
39811184 JZ |
1810 | return match; |
1811 | } | |
1812 | } | |
1813 | return false; | |
1814 | } | |
1815 | } | |
39811184 | 1816 | |
d41d6122 TJ |
1817 | /* Find the last non-debug non-note instruction in each bb, except |
1818 | stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p | |
1819 | handles that case specially. old_insns_match_p does not handle | |
1820 | other types of instruction notes. */ | |
da5477a9 DM |
1821 | rtx_insn *last1 = BB_END (bb1); |
1822 | rtx_insn *last2 = BB_END (bb2); | |
d41d6122 TJ |
1823 | while (!NOTE_INSN_BASIC_BLOCK_P (last1) && |
1824 | (DEBUG_INSN_P (last1) || NOTE_P (last1))) | |
1825 | last1 = PREV_INSN (last1); | |
1826 | while (!NOTE_INSN_BASIC_BLOCK_P (last2) && | |
1827 | (DEBUG_INSN_P (last2) || NOTE_P (last2))) | |
1828 | last2 = PREV_INSN (last2); | |
1829 | gcc_assert (last1 && last2); | |
1830 | ||
0dd0e980 | 1831 | /* First ensure that the instructions match. There may be many outgoing |
39811184 | 1832 | edges so this test is generally cheaper. */ |
206604dc | 1833 | if (old_insns_match_p (mode, last1, last2) != dir_both) |
0dd0e980 JH |
1834 | return false; |
1835 | ||
1836 | /* Search the outgoing edges, ensure that the counts do match, find possible | |
1837 | fallthru and exception handling edges since these need more
1838 | validation. */ | |
628f6a4e BE |
1839 | if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) |
1840 | return false; | |
1841 | ||
206604dc | 1842 | bool nonfakeedges = false; |
628f6a4e | 1843 | FOR_EACH_EDGE (e1, ei, bb1->succs) |
0dd0e980 | 1844 | { |
628f6a4e | 1845 | e2 = EDGE_SUCC (bb2, ei.index); |
c22cacf3 | 1846 | |
206604dc JJ |
1847 | if ((e1->flags & EDGE_FAKE) == 0) |
1848 | nonfakeedges = true; | |
1849 | ||
0dd0e980 JH |
1850 | if (e1->flags & EDGE_EH) |
1851 | nehedges1++; | |
5f0d2358 | 1852 | |
0dd0e980 JH |
1853 | if (e2->flags & EDGE_EH) |
1854 | nehedges2++; | |
5f0d2358 | 1855 | |
0dd0e980 JH |
1856 | if (e1->flags & EDGE_FALLTHRU) |
1857 | fallthru1 = e1; | |
1858 | if (e2->flags & EDGE_FALLTHRU) | |
1859 | fallthru2 = e2; | |
1860 | } | |
5f0d2358 | 1861 | |
0dd0e980 | 1862 | /* If number of edges of various types does not match, fail. */ |
628f6a4e | 1863 | if (nehedges1 != nehedges2 |
5f0d2358 | 1864 | || (fallthru1 != 0) != (fallthru2 != 0)) |
0dd0e980 JH |
1865 | return false; |
1866 | ||
206604dc JJ |
1867 | /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors |
1868 | and the last real insn doesn't have a REG_ARGS_SIZE note, don't
1869 | attempt to optimize, as the two basic blocks might have different
1870 | REG_ARGS_SIZE depths. For noreturn calls and unconditional
1871 | traps there should be REG_ARGS_SIZE notes; they could be missing
1872 | for __builtin_unreachable () uses though. */ | |
1873 | if (!nonfakeedges | |
1874 | && !ACCUMULATE_OUTGOING_ARGS | |
1875 | && (!INSN_P (last1) | |
1876 | || !find_reg_note (last1, REG_ARGS_SIZE, NULL))) | |
1877 | return false; | |
1878 | ||
0dd0e980 JH |
1879 | /* Fallthru edges must be forwarded to the same destination. */
1880 | if (fallthru1) | |
1881 | { | |
1882 | basic_block d1 = (forwarder_block_p (fallthru1->dest) | |
c5cbcccf | 1883 | ? single_succ (fallthru1->dest): fallthru1->dest); |
0dd0e980 | 1884 | basic_block d2 = (forwarder_block_p (fallthru2->dest) |
c5cbcccf | 1885 | ? single_succ (fallthru2->dest): fallthru2->dest); |
5f0d2358 | 1886 | |
0dd0e980 JH |
1887 | if (d1 != d2) |
1888 | return false; | |
1889 | } | |
5f0d2358 | 1890 | |
5f77fbd4 JJ |
1891 | /* Ensure the same EH region. */ |
1892 | { | |
a813c111 SB |
1893 | rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0); |
1894 | rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0); | |
5f0d2358 | 1895 | |
5f77fbd4 JJ |
1896 | if (!n1 && n2) |
1897 | return false; | |
1898 | ||
1899 | if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) | |
1900 | return false; | |
1901 | } | |
5f0d2358 | 1902 | |
38109dab GL |
1903 | /* The same checks as in try_crossjump_to_edge. It is required for RTL |
1904 | version of sequence abstraction. */ | |
1905 | FOR_EACH_EDGE (e1, ei, bb2->succs) | |
1906 | { | |
1907 | edge e2; | |
1908 | edge_iterator ei; | |
1909 | basic_block d1 = e1->dest; | |
1910 | ||
1911 | if (FORWARDER_BLOCK_P (d1)) | |
1912 | d1 = EDGE_SUCC (d1, 0)->dest; | |
1913 | ||
1914 | FOR_EACH_EDGE (e2, ei, bb1->succs) | |
1915 | { | |
1916 | basic_block d2 = e2->dest; | |
1917 | if (FORWARDER_BLOCK_P (d2)) | |
1918 | d2 = EDGE_SUCC (d2, 0)->dest; | |
1919 | if (d1 == d2) | |
1920 | break; | |
1921 | } | |
1922 | ||
1923 | if (!e2) | |
1924 | return false; | |
1925 | } | |
1926 | ||
0dd0e980 | 1927 | return true; |
402209ff JH |
1928 | } |
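
/* Illustration (a sketch with hypothetical insns, not from the original
   sources): outgoing_edges_match accepts a pair such as

     bb1:  ...                       bb2:  ...
           if (r0 == 0) goto L;            if (r0 == 0) goto L;
           fallthru to M                   fallthru to M

   because the conditions match and the branch/fallthru edges reach the
   same destinations (possibly through forwarder blocks); when optimizing
   for speed it rejects the pair if the branch probabilities differ too
   much.  */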
1929 | ||
38109dab GL |
1930 | /* Returns true if basic block BB has a preserve label. */
1931 | ||
1932 | static bool | |
1933 | block_has_preserve_label (basic_block bb) | |
1934 | { | |
1935 | return (bb | |
1936 | && block_label (bb) | |
1937 | && LABEL_PRESERVE_P (block_label (bb))); | |
1938 | } | |
1939 | ||
402209ff JH |
1940 | /* E1 and E2 are edges with the same destination block. Search their |
1941 | predecessors for common code. If found, redirect control flow from | |
bf22920b TV |
1942 | (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward), |
1943 | or the other way around (dir_backward). DIR specifies the allowed | |
1944 | replacement direction. */ | |
402209ff JH |
1945 | |
1946 | static bool | |
bf22920b TV |
1947 | try_crossjump_to_edge (int mode, edge e1, edge e2, |
1948 | enum replace_direction dir) | |
402209ff | 1949 | { |
c2fc5456 | 1950 | int nmatch; |
402209ff | 1951 | basic_block src1 = e1->src, src2 = e2->src; |
39587bb9 | 1952 | basic_block redirect_to, redirect_from, to_remove; |
823918ae | 1953 | basic_block osrc1, osrc2, redirect_edges_to, tmp; |
da5477a9 | 1954 | rtx_insn *newpos1, *newpos2; |
402209ff | 1955 | edge s; |
628f6a4e | 1956 | edge_iterator ei; |
c2fc5456 | 1957 | |
da5477a9 | 1958 | newpos1 = newpos2 = NULL; |
6de9cd9a | 1959 | |
402209ff JH |
1960 | /* Search backward through forwarder blocks. We don't need to worry |
1961 | about multiple entry or chained forwarders, as they will be optimized | |
1962 | away. We do this to look past the unconditional jump following a | |
1963 | conditional jump that is required due to the current CFG shape. */ | |
c5cbcccf | 1964 | if (single_pred_p (src1) |
635559ab | 1965 | && FORWARDER_BLOCK_P (src1)) |
c5cbcccf | 1966 | e1 = single_pred_edge (src1), src1 = e1->src; |
5f0d2358 | 1967 | |
c5cbcccf | 1968 | if (single_pred_p (src2) |
635559ab | 1969 | && FORWARDER_BLOCK_P (src2)) |
c5cbcccf | 1970 | e2 = single_pred_edge (src2), src2 = e2->src; |
402209ff JH |
1971 | |
1972 | /* Nothing to do if we reach ENTRY, or a common source block. */ | |
fefa31b5 DM |
1973 | if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2 |
1974 | == ENTRY_BLOCK_PTR_FOR_FN (cfun)) | |
402209ff JH |
1975 | return false; |
1976 | if (src1 == src2) | |
1977 | return false; | |
1978 | ||
1979 | /* Seeing more than one forwarder block would confuse us later... */
635559ab | 1980 | if (FORWARDER_BLOCK_P (e1->dest) |
c5cbcccf | 1981 | && FORWARDER_BLOCK_P (single_succ (e1->dest))) |
402209ff | 1982 | return false; |
5f0d2358 | 1983 | |
635559ab | 1984 | if (FORWARDER_BLOCK_P (e2->dest) |
c5cbcccf | 1985 | && FORWARDER_BLOCK_P (single_succ (e2->dest))) |
402209ff JH |
1986 | return false; |
1987 | ||
1988 | /* Likewise with dead code (possibly newly created by the other optimizations | |
1989 | of cfg_cleanup). */ | |
628f6a4e | 1990 | if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0) |
402209ff JH |
1991 | return false; |
1992 | ||
ba61fc53 JH |
1993 | /* Do not turn a crossing edge into a non-crossing one, or vice versa, after reload. */
1994 | if (BB_PARTITION (src1) != BB_PARTITION (src2) | |
1995 | && reload_completed) | |
1996 | return false; | |
1997 | ||
402209ff | 1998 | /* Look for the common insn sequence, part the first ... */ |
c2fc5456 | 1999 | if (!outgoing_edges_match (mode, src1, src2)) |
402209ff JH |
2000 | return false; |
2001 | ||
2002 | /* ... and part the second. */ | |
472c95f5 | 2003 | nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir); |
12183e0f | 2004 | |
823918ae TV |
2005 | osrc1 = src1; |
2006 | osrc2 = src2; | |
2007 | if (newpos1 != NULL_RTX) | |
2008 | src1 = BLOCK_FOR_INSN (newpos1); | |
2009 | if (newpos2 != NULL_RTX) | |
2010 | src2 = BLOCK_FOR_INSN (newpos2); | |
2011 | ||
dd68669b JL |
2012 | /* Check that SRC1 and SRC2 have preds again. They may have changed |
2013 | above due to the call to flow_find_cross_jump. */ | |
2014 | if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0) | |
2015 | return false; | |
2016 | ||
bf22920b TV |
2017 | if (dir == dir_backward) |
2018 | { | |
ba61fc53 JH |
2019 | std::swap (osrc1, osrc2); |
2020 | std::swap (src1, src2); | |
2021 | std::swap (e1, e2); | |
2022 | std::swap (newpos1, newpos2); | |
bf22920b TV |
2023 | } |
2024 | ||
12183e0f PH |
2025 | /* Don't proceed with the crossjump unless we found a sufficient number |
2026 | of matching instructions or the 'from' block was totally matched | |
2027 | (such that its predecessors will hopefully be redirected and the | |
2028 | block removed). */ | |
c2fc5456 R |
2029 | if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS)) |
2030 | && (newpos1 != BB_HEAD (src1))) | |
7d22e898 | 2031 | return false; |
402209ff | 2032 | |
75c40d56 | 2033 | /* Avoid deleting the preserve label when redirecting ABNORMAL edges. */
38109dab GL |
2034 | if (block_has_preserve_label (e1->dest) |
2035 | && (e1->flags & EDGE_ABNORMAL)) | |
2036 | return false; | |
2037 | ||
39811184 JZ |
2038 | /* Here we know that the insns in the end of SRC1 which are common with SRC2 |
2039 | will be deleted. | |
2040 | If we have tablejumps in the end of SRC1 and SRC2 | |
2041 | they have been already compared for equivalence in outgoing_edges_match () | |
2042 | so replace the references to TABLE1 by references to TABLE2. */ | |
21c0a521 | 2043 | { |
dfe08bc4 | 2044 | rtx_insn *label1, *label2; |
8942ee0f | 2045 | rtx_jump_table_data *table1, *table2; |
39811184 | 2046 | |
823918ae TV |
2047 | if (tablejump_p (BB_END (osrc1), &label1, &table1) |
2048 | && tablejump_p (BB_END (osrc2), &label2, &table2) | |
39811184 JZ |
2049 | && label1 != label2) |
2050 | { | |
da5477a9 | 2051 | rtx_insn *insn; |
39811184 JZ |
2052 | |
2053 | /* Replace references to LABEL1 with LABEL2. */ | |
39811184 JZ |
2054 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
2055 | { | |
2056 | /* Do not replace the label in SRC1->END because when deleting | |
2057 | a block whose end is a tablejump, the tablejump referenced | |
2058 | from the instruction is deleted too. */ | |
823918ae | 2059 | if (insn != BB_END (osrc1)) |
a2b7026c | 2060 | replace_label_in_insn (insn, label1, label2, true); |
39811184 JZ |
2061 | } |
2062 | } | |
21c0a521 | 2063 | } |
10d6c0d0 | 2064 | |
b604fe9b SB |
2065 | /* Avoid splitting if possible. We must always split when SRC2 has |
2066 | EH predecessor edges, or we may end up with basic blocks with both | |
2067 | normal and EH predecessor edges. */ | |
c2fc5456 | 2068 | if (newpos2 == BB_HEAD (src2) |
b604fe9b | 2069 | && !(EDGE_PRED (src2, 0)->flags & EDGE_EH)) |
402209ff JH |
2070 | redirect_to = src2; |
2071 | else | |
2072 | { | |
c2fc5456 | 2073 | if (newpos2 == BB_HEAD (src2)) |
b604fe9b SB |
2074 | { |
2075 | /* Skip possible basic block header. */ | |
c2fc5456 R |
2076 | if (LABEL_P (newpos2)) |
2077 | newpos2 = NEXT_INSN (newpos2); | |
b5b8b0ac AO |
2078 | while (DEBUG_INSN_P (newpos2)) |
2079 | newpos2 = NEXT_INSN (newpos2); | |
c2fc5456 R |
2080 | if (NOTE_P (newpos2)) |
2081 | newpos2 = NEXT_INSN (newpos2); | |
b5b8b0ac AO |
2082 | while (DEBUG_INSN_P (newpos2)) |
2083 | newpos2 = NEXT_INSN (newpos2); | |
b604fe9b SB |
2084 | } |
2085 | ||
c263766c RH |
2086 | if (dump_file) |
2087 | fprintf (dump_file, "Splitting bb %i before %i insns\n", | |
0b17ab2f | 2088 | src2->index, nmatch); |
c2fc5456 | 2089 | redirect_to = split_block (src2, PREV_INSN (newpos2))->dest; |
402209ff JH |
2090 | } |
2091 | ||
c263766c | 2092 | if (dump_file) |
c2fc5456 R |
2093 | fprintf (dump_file, |
2094 | "Cross jumping from bb %i to bb %i; %i common insns\n", | |
2095 | src1->index, src2->index, nmatch); | |
402209ff | 2096 | |
6fc0bb99 | 2097 | /* We may have some registers visible through the block. */ |
6fb5fa3c | 2098 | df_set_bb_dirty (redirect_to); |
402209ff | 2099 | |
823918ae TV |
2100 | if (osrc2 == src2) |
2101 | redirect_edges_to = redirect_to; | |
2102 | else | |
2103 | redirect_edges_to = osrc2; | |
2104 | ||
e7a74006 | 2105 | /* Recompute the counts of destinations of outgoing edges. */ |
823918ae | 2106 | FOR_EACH_EDGE (s, ei, redirect_edges_to->succs) |
402209ff JH |
2107 | { |
2108 | edge s2; | |
628f6a4e | 2109 | edge_iterator ei; |
402209ff JH |
2110 | basic_block d = s->dest; |
2111 | ||
635559ab | 2112 | if (FORWARDER_BLOCK_P (d)) |
c5cbcccf | 2113 | d = single_succ (d); |
5f0d2358 | 2114 | |
628f6a4e | 2115 | FOR_EACH_EDGE (s2, ei, src1->succs) |
402209ff JH |
2116 | { |
2117 | basic_block d2 = s2->dest; | |
635559ab | 2118 | if (FORWARDER_BLOCK_P (d2)) |
c5cbcccf | 2119 | d2 = single_succ (d2); |
402209ff JH |
2120 | if (d == d2) |
2121 | break; | |
2122 | } | |
5f0d2358 | 2123 | |
402209ff | 2124 | /* Take care to update possible forwarder blocks. We verified |
c22cacf3 MS |
2125 | that there is no more than one in the chain, so we can't run |
2126 | into an infinite loop. */
635559ab | 2127 | if (FORWARDER_BLOCK_P (s->dest)) |
e7a74006 | 2128 | s->dest->count += s->count (); |
5f0d2358 | 2129 | |
635559ab | 2130 | if (FORWARDER_BLOCK_P (s2->dest)) |
e7a74006 | 2131 | s2->dest->count -= s->count (); |
5f0d2358 | 2132 | |
e7a74006 JH |
2133 | /* FIXME: Is this correct? Should be rewritten to count API. */ |
2134 | if (redirect_edges_to->count.nonzero_p () && src1->count.nonzero_p ()) | |
357067f2 | 2135 | s->probability = s->probability.combine_with_freq |
e7a74006 JH |
2136 | (redirect_edges_to->count.to_frequency (cfun), |
2137 | s2->probability, src1->count.to_frequency (cfun)); | |
402209ff JH |
2138 | } |
2139 | ||
e7a74006 | 2140 | /* Adjust count for the block. An earlier jump |
52982a97 EB |
2141 | threading pass may have left the profile in an inconsistent |
2142 | state (see update_bb_profile_for_threading) so we must be | |
2143 | prepared for overflows. */ | |
823918ae TV |
2144 | tmp = redirect_to; |
2145 | do | |
2146 | { | |
2147 | tmp->count += src1->count; | |
823918ae TV |
2148 | if (tmp == redirect_edges_to) |
2149 | break; | |
2150 | tmp = find_fallthru_edge (tmp->succs)->dest; | |
2151 | } | |
2152 | while (true); | |
2153 | update_br_prob_note (redirect_edges_to); | |
402209ff JH |
2154 | |
2155 | /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */ | |
2156 | ||
c2fc5456 R |
2157 | /* Skip possible basic block header. */ |
2158 | if (LABEL_P (newpos1)) | |
2159 | newpos1 = NEXT_INSN (newpos1); | |
b5b8b0ac AO |
2160 | |
2161 | while (DEBUG_INSN_P (newpos1)) | |
2162 | newpos1 = NEXT_INSN (newpos1); | |
2163 | ||
cd9c1ca8 | 2164 | if (NOTE_INSN_BASIC_BLOCK_P (newpos1)) |
c2fc5456 R |
2165 | newpos1 = NEXT_INSN (newpos1); |
2166 | ||
b5b8b0ac AO |
2167 | while (DEBUG_INSN_P (newpos1)) |
2168 | newpos1 = NEXT_INSN (newpos1); | |
2169 | ||
c2fc5456 | 2170 | redirect_from = split_block (src1, PREV_INSN (newpos1))->src; |
c5cbcccf | 2171 | to_remove = single_succ (redirect_from); |
402209ff | 2172 | |
c5cbcccf | 2173 | redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to); |
f470c378 | 2174 | delete_basic_block (to_remove); |
402209ff | 2175 | |
39587bb9 | 2176 | update_forwarder_flag (redirect_from); |
7cbd12b8 JJ |
2177 | if (redirect_to != src2) |
2178 | update_forwarder_flag (src2); | |
635559ab | 2179 | |
402209ff JH |
2180 | return true; |
2181 | } | |
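
/* Shape of the transformation (a sketch with hypothetical insns, not
   from the original sources):

     before:                         after:
       src1: insnA                     src1: jump L2
             insnB
             jump L                    src2: L2: insnA
       src2: insnA                               insnB
             insnB                               jump L
             jump L

   The matched tail of SRC1 is deleted and its control flow is redirected
   to REDIRECT_TO, which is SRC2 itself when the whole block matched, or
   the block obtained by splitting SRC2 before NEWPOS2 otherwise.  */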
2182 | ||
2183 | /* Search the predecessors of BB for common insn sequences. When found, | |
2184 | share code between them by redirecting control flow. Return true if | |
2185 | any changes were made. */
2186 | ||
2187 | static bool | |
d329e058 | 2188 | try_crossjump_bb (int mode, basic_block bb) |
402209ff | 2189 | { |
628f6a4e | 2190 | edge e, e2, fallthru; |
402209ff | 2191 | bool changed; |
628f6a4e | 2192 | unsigned max, ix, ix2; |
402209ff | 2193 | |
f63d1bf7 | 2194 | /* Nothing to do if there are not at least two incoming edges. */
628f6a4e | 2195 | if (EDGE_COUNT (bb->preds) < 2) |
402209ff JH |
2196 | return false; |
2197 | ||
bbcb0c05 SB |
2198 | /* Don't crossjump if this block ends in a computed jump, |
2199 | unless we are optimizing for size. */ | |
efd8f750 | 2200 | if (optimize_bb_for_size_p (bb) |
fefa31b5 | 2201 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun) |
bbcb0c05 SB |
2202 | && computed_jump_p (BB_END (bb))) |
2203 | return false; | |
2204 | ||
750054a2 CT |
2205 | /* If we are partitioning hot/cold basic blocks, we don't want to |
2206 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 MS |
2207 | and cold sections. |
2208 | ||
8e8d5162 | 2209 | Basic block partitioning may result in some jumps that appear to |
c22cacf3 MS |
2210 | be optimizable (or blocks that appear to be mergeable), but which really |
2211 | must be left untouched (they are required to make it safely across | |
2212 | partition boundaries). See the comments at the top of | |
8e8d5162 CT |
2213 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
2214 | ||
c22cacf3 MS |
2215 | if (BB_PARTITION (EDGE_PRED (bb, 0)->src) != |
2216 | BB_PARTITION (EDGE_PRED (bb, 1)->src) | |
87c8b4be | 2217 | || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)) |
750054a2 CT |
2218 | return false; |
2219 | ||
402209ff JH |
2220 | /* It is always cheapest to redirect a block that ends in a branch to |
2221 | a block that falls through into BB, as that adds no branches to the | |
2222 | program. We'll try that combination first. */ | |
5f24e0dc RH |
2223 | fallthru = NULL; |
2224 | max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES); | |
628f6a4e BE |
2225 | |
2226 | if (EDGE_COUNT (bb->preds) > max) | |
2227 | return false; | |
2228 | ||
0fd4b31d | 2229 | fallthru = find_fallthru_edge (bb->preds); |
402209ff JH |
2230 | |
2231 | changed = false; | |
0248bceb | 2232 | for (ix = 0; ix < EDGE_COUNT (bb->preds);) |
402209ff | 2233 | { |
0248bceb | 2234 | e = EDGE_PRED (bb, ix); |
628f6a4e | 2235 | ix++; |
402209ff | 2236 | |
c1e3e2d9 SB |
2237 | /* As noted above, first try with the fallthru predecessor (or a
2238 | fallthru predecessor if we are in cfglayout mode). */ | |
402209ff JH |
2239 | if (fallthru) |
2240 | { | |
2241 | /* Don't combine the fallthru edge into anything else. | |
2242 | If there is a match, we'll do it the other way around. */ | |
2243 | if (e == fallthru) | |
2244 | continue; | |
7cf240d5 JH |
2245 | /* If nothing changed since the last attempt, there is nothing |
2246 | we can do. */ | |
2247 | if (!first_pass | |
4ec5d4f5 BS |
2248 | && !((e->src->flags & BB_MODIFIED) |
2249 | || (fallthru->src->flags & BB_MODIFIED))) | |
7cf240d5 | 2250 | continue; |
402209ff | 2251 | |
bf22920b | 2252 | if (try_crossjump_to_edge (mode, e, fallthru, dir_forward)) |
402209ff JH |
2253 | { |
2254 | changed = true; | |
628f6a4e | 2255 | ix = 0; |
402209ff JH |
2256 | continue; |
2257 | } | |
2258 | } | |
2259 | ||
2260 | /* Non-obvious work limiting check: Recognize that we're going | |
2261 | to call try_crossjump_bb on every basic block. So if we have | |
2262 | two blocks with lots of outgoing edges (a switch) and they | |
2263 | share lots of common destinations, then we would do the | |
2264 | cross-jump check once for each common destination. | |
2265 | ||
2266 | Now, if the blocks actually are cross-jump candidates, then | |
2267 | all of their destinations will be shared. Which means that | |
2268 | we only need check them for cross-jump candidacy once. We | |
2269 | can eliminate redundant checks of crossjump(A,B) by arbitrarily | |
2270 | choosing to do the check from the block for which the edge | |
2271 | in question is the first successor of A. */ | |
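/* For instance (an illustration, not from the original sources): if
   blocks A and B both branch to destinations C and D, the pair (A, B)
   is examined only from the destination that is A's first successor,
   and, by the index test further below, only with A as the
   lower-numbered block.  */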
628f6a4e | 2272 | if (EDGE_SUCC (e->src, 0) != e) |
402209ff JH |
2273 | continue; |
2274 | ||
0248bceb | 2275 | for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++) |
402209ff | 2276 | { |
0248bceb | 2277 | e2 = EDGE_PRED (bb, ix2); |
402209ff JH |
2278 | |
2279 | if (e2 == e) | |
2280 | continue; | |
2281 | ||
2282 | /* We've already checked the fallthru edge above. */ | |
2283 | if (e2 == fallthru) | |
2284 | continue; | |
2285 | ||
402209ff JH |
2286 | /* The "first successor" check above only prevents multiple |
2287 | checks of crossjump(A,B). In order to prevent redundant | |
2288 | checks of crossjump(B,A), require that A be the block | |
2289 | with the lowest index. */ | |
0b17ab2f | 2290 | if (e->src->index > e2->src->index) |
402209ff JH |
2291 | continue; |
2292 | ||
7cf240d5 JH |
2293 | /* If nothing changed since the last attempt, there is nothing |
2294 | we can do. */ | |
2295 | if (!first_pass | |
4ec5d4f5 BS |
2296 | && !((e->src->flags & BB_MODIFIED) |
2297 | || (e2->src->flags & BB_MODIFIED))) | |
7cf240d5 JH |
2298 | continue; |
2299 | ||
bf22920b TV |
2300 | /* Both e and e2 are not fallthru edges, so we can crossjump in either |
2301 | direction. */ | |
2302 | if (try_crossjump_to_edge (mode, e, e2, dir_both)) | |
402209ff JH |
2303 | { |
2304 | changed = true; | |
628f6a4e | 2305 | ix = 0; |
402209ff JH |
2306 | break; |
2307 | } | |
2308 | } | |
2309 | } | |
2310 | ||
c1e3e2d9 | 2311 | if (changed) |
bd2c6270 | 2312 | crossjumps_occurred = true; |
c1e3e2d9 | 2313 | |
402209ff JH |
2314 | return changed; |
2315 | } | |
2316 | ||
4ec5d4f5 BS |
2317 | /* Search the successors of BB for common insn sequences. When found, |
2318 | share code between them by moving it across the basic block | |
2319 | boundary. Return true if any changes were made. */
2320 | ||
2321 | static bool | |
2322 | try_head_merge_bb (basic_block bb) | |
2323 | { | |
2324 | basic_block final_dest_bb = NULL; | |
2325 | int max_match = INT_MAX; | |
2326 | edge e0; | |
da5477a9 | 2327 | rtx_insn **headptr, **currptr, **nextptr; |
4ec5d4f5 BS |
2328 | bool changed, moveall; |
2329 | unsigned ix; | |
da5477a9 | 2330 | rtx_insn *e0_last_head; |
61aa0978 DM |
2331 | rtx cond; |
2332 | rtx_insn *move_before; | |
4ec5d4f5 | 2333 | unsigned nedges = EDGE_COUNT (bb->succs); |
da5477a9 | 2334 | rtx_insn *jump = BB_END (bb); |
4ec5d4f5 BS |
2335 | regset live, live_union; |
2336 | ||
2337 | /* Nothing to do if there are not at least two outgoing edges. */
2338 | if (nedges < 2) | |
2339 | return false; | |
2340 | ||
2341 | /* Don't crossjump if this block ends in a computed jump and we are
2342 | optimizing for size. */
2343 | if (optimize_bb_for_size_p (bb) | |
fefa31b5 | 2344 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun) |
4ec5d4f5 BS |
2345 | && computed_jump_p (BB_END (bb))) |
2346 | return false; | |
2347 | ||
2348 | cond = get_condition (jump, &move_before, true, false); | |
2349 | if (cond == NULL_RTX) | |
43052d45 | 2350 | { |
618f4073 | 2351 | if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump)) |
43052d45 BS |
2352 | move_before = prev_nonnote_nondebug_insn (jump); |
2353 | else | |
43052d45 BS |
2354 | move_before = jump; |
2355 | } | |
4ec5d4f5 BS |
2356 | |
2357 | for (ix = 0; ix < nedges; ix++) | |
fefa31b5 | 2358 | if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
4ec5d4f5 BS |
2359 | return false; |
2360 | ||
2361 | for (ix = 0; ix < nedges; ix++) | |
2362 | { | |
2363 | edge e = EDGE_SUCC (bb, ix); | |
2364 | basic_block other_bb = e->dest; | |
2365 | ||
2366 | if (df_get_bb_dirty (other_bb)) | |
2367 | { | |
2368 | block_was_dirty = true; | |
2369 | return false; | |
2370 | } | |
2371 | ||
2372 | if (e->flags & EDGE_ABNORMAL) | |
2373 | return false; | |
2374 | ||
2375 | /* Normally, all destination blocks must only be reachable from this | |
2376 | block, i.e. they must have one incoming edge. | |
2377 | ||
2378 | There is one special case we can handle, that of multiple consecutive | |
2379 | jumps where the first jumps to one of the targets of the second jump. | |
2380 | This happens frequently in switch statements for default labels. | |
2381 | The structure is as follows: | |
2382 | FINAL_DEST_BB | |
2383 | .... | |
2384 | if (cond) jump A; | |
2385 | fall through | |
2386 | BB | |
2387 | jump with targets A, B, C, D... | |
2388 | A | |
2389 | has two incoming edges, from FINAL_DEST_BB and BB | |
2390 | ||
2391 | In this case, we can try to move the insns through BB and into | |
2392 | FINAL_DEST_BB. */ | |
2393 | if (EDGE_COUNT (other_bb->preds) != 1) | |
2394 | { | |
2395 | edge incoming_edge, incoming_bb_other_edge; | |
2396 | edge_iterator ei; | |
2397 | ||
2398 | if (final_dest_bb != NULL | |
2399 | || EDGE_COUNT (other_bb->preds) != 2) | |
2400 | return false; | |
2401 | ||
2402 | /* We must be able to move the insns across the whole block. */ | |
2403 | move_before = BB_HEAD (bb); | |
2404 | while (!NONDEBUG_INSN_P (move_before)) | |
2405 | move_before = NEXT_INSN (move_before); | |
2406 | ||
2407 | if (EDGE_COUNT (bb->preds) != 1) | |
2408 | return false; | |
2409 | incoming_edge = EDGE_PRED (bb, 0); | |
2410 | final_dest_bb = incoming_edge->src; | |
2411 | if (EDGE_COUNT (final_dest_bb->succs) != 2) | |
2412 | return false; | |
2413 | FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs) | |
2414 | if (incoming_bb_other_edge != incoming_edge) | |
2415 | break; | |
2416 | if (incoming_bb_other_edge->dest != other_bb) | |
2417 | return false; | |
2418 | } | |
2419 | } | |
2420 | ||
2421 | e0 = EDGE_SUCC (bb, 0); | |
da5477a9 | 2422 | e0_last_head = NULL; |
4ec5d4f5 BS |
2423 | changed = false; |
2424 | ||
2425 | for (ix = 1; ix < nedges; ix++) | |
2426 | { | |
2427 | edge e = EDGE_SUCC (bb, ix); | |
da5477a9 | 2428 | rtx_insn *e0_last, *e_last; |
4ec5d4f5 BS |
2429 | int nmatch; |
2430 | ||
2431 | nmatch = flow_find_head_matching_sequence (e0->dest, e->dest, | |
2432 | &e0_last, &e_last, 0); | |
2433 | if (nmatch == 0) | |
2434 | return false; | |
2435 | ||
2436 | if (nmatch < max_match) | |
2437 | { | |
2438 | max_match = nmatch; | |
2439 | e0_last_head = e0_last; | |
2440 | } | |
2441 | } | |
2442 | ||
2443 | /* If we matched an entire block, we probably have to avoid moving the | |
2444 | last insn. */ | |
2445 | if (max_match > 0 | |
2446 | && e0_last_head == BB_END (e0->dest) | |
2447 | && (find_reg_note (e0_last_head, REG_EH_REGION, 0) | |
2448 | || control_flow_insn_p (e0_last_head))) | |
2449 | { | |
2450 | max_match--; | |
2451 | if (max_match == 0) | |
2452 | return false; | |
b59e0455 JJ |
2453 | do |
2454 | e0_last_head = prev_real_insn (e0_last_head); | |
2455 | while (DEBUG_INSN_P (e0_last_head)); | |
4ec5d4f5 BS |
2456 | } |
2457 | ||
2458 | if (max_match == 0) | |
2459 | return false; | |
2460 | ||
2461 | /* We must find a union of the live registers at each of the end points. */ | |
2462 | live = BITMAP_ALLOC (NULL); | |
2463 | live_union = BITMAP_ALLOC (NULL); | |
2464 | ||
da5477a9 DM |
2465 | currptr = XNEWVEC (rtx_insn *, nedges); |
2466 | headptr = XNEWVEC (rtx_insn *, nedges); | |
2467 | nextptr = XNEWVEC (rtx_insn *, nedges); | |
4ec5d4f5 BS |
2468 | |
2469 | for (ix = 0; ix < nedges; ix++) | |
2470 | { | |
2471 | int j; | |
2472 | basic_block merge_bb = EDGE_SUCC (bb, ix)->dest; | |
da5477a9 | 2473 | rtx_insn *head = BB_HEAD (merge_bb); |
4ec5d4f5 | 2474 | |
b59e0455 JJ |
2475 | while (!NONDEBUG_INSN_P (head)) |
2476 | head = NEXT_INSN (head); | |
4ec5d4f5 BS |
2477 | headptr[ix] = head; |
2478 | currptr[ix] = head; | |
2479 | ||
2480 | /* Compute the end point and live information. */
2481 | for (j = 1; j < max_match; j++) | |
b59e0455 JJ |
2482 | do |
2483 | head = NEXT_INSN (head); | |
2484 | while (!NONDEBUG_INSN_P (head)); | |
4ec5d4f5 BS |
2485 | simulate_backwards_to_point (merge_bb, live, head); |
2486 | IOR_REG_SET (live_union, live); | |
2487 | } | |
2488 | ||
2489 | /* If we're moving across two blocks, verify the validity of the | |
2490 | first move, then adjust the target and let the loop below deal | |
2491 | with the final move. */ | |
2492 | if (final_dest_bb != NULL) | |
2493 | { | |
61aa0978 | 2494 | rtx_insn *move_upto; |
4ec5d4f5 BS |
2495 | |
2496 | moveall = can_move_insns_across (currptr[0], e0_last_head, move_before, | |
2497 | jump, e0->dest, live_union, | |
2498 | NULL, &move_upto); | |
2499 | if (!moveall) | |
2500 | { | |
2501 | if (move_upto == NULL_RTX) | |
2502 | goto out; | |
2503 | ||
2504 | while (e0_last_head != move_upto) | |
2505 | { | |
2506 | df_simulate_one_insn_backwards (e0->dest, e0_last_head, | |
2507 | live_union); | |
2508 | e0_last_head = PREV_INSN (e0_last_head); | |
2509 | } | |
2510 | } | |
2511 | if (e0_last_head == NULL_RTX) | |
2512 | goto out; | |
2513 | ||
2514 | jump = BB_END (final_dest_bb); | |
2515 | cond = get_condition (jump, &move_before, true, false); | |
2516 | if (cond == NULL_RTX) | |
43052d45 | 2517 | { |
618f4073 | 2518 | if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump)) |
43052d45 BS |
2519 | move_before = prev_nonnote_nondebug_insn (jump); |
2520 | else | |
43052d45 BS |
2521 | move_before = jump; |
2522 | } | |
4ec5d4f5 BS |
2523 | } |
2524 | ||
2525 | do | |
2526 | { | |
61aa0978 | 2527 | rtx_insn *move_upto; |
4ec5d4f5 BS |
2528 | moveall = can_move_insns_across (currptr[0], e0_last_head, |
2529 | move_before, jump, e0->dest, live_union, | |
2530 | NULL, &move_upto); | |
2531 | if (!moveall && move_upto == NULL_RTX) | |
2532 | { | |
2533 | if (jump == move_before) | |
2534 | break; | |
2535 | ||
2536 | /* Try again, using a different insertion point. */ | |
2537 | move_before = jump; | |
2538 | ||
4ec5d4f5 BS |
2539 | /* Don't try moving before a cc0 user, as that may invalidate |
2540 | the cc0. */ | |
618f4073 | 2541 | if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump)) |
4ec5d4f5 | 2542 | break; |
4ec5d4f5 BS |
2543 | |
2544 | continue; | |
2545 | } | |
2546 | ||
2547 | if (final_dest_bb && !moveall) | |
2548 | /* We haven't checked whether a partial move would be OK for the first | |
2549 | move, so we have to fail this case. */ | |
2550 | break; | |
2551 | ||
2552 | changed = true; | |
2553 | for (;;) | |
2554 | { | |
2555 | if (currptr[0] == move_upto) | |
2556 | break; | |
2557 | for (ix = 0; ix < nedges; ix++) | |
2558 | { | |
da5477a9 | 2559 | rtx_insn *curr = currptr[ix]; |
4ec5d4f5 BS |
2560 | do |
2561 | curr = NEXT_INSN (curr); | |
2562 | while (!NONDEBUG_INSN_P (curr)); | |
2563 | currptr[ix] = curr; | |
2564 | } | |
2565 | } | |
2566 | ||
2567 | /* If we can't currently move all of the identical insns, remember | |
2568 | each insn after the range that we'll merge. */ | |
2569 | if (!moveall) | |
2570 | for (ix = 0; ix < nedges; ix++) | |
2571 | { | |
da5477a9 | 2572 | rtx_insn *curr = currptr[ix]; |
4ec5d4f5 BS |
2573 | do |
2574 | curr = NEXT_INSN (curr); | |
2575 | while (!NONDEBUG_INSN_P (curr)); | |
2576 | nextptr[ix] = curr; | |
2577 | } | |
2578 | ||
2579 | reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before)); | |
2580 | df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest); | |
2581 | if (final_dest_bb != NULL) | |
2582 | df_set_bb_dirty (final_dest_bb); | |
2583 | df_set_bb_dirty (bb); | |
2584 | for (ix = 1; ix < nedges; ix++) | |
2585 | { | |
2586 | df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest); | |
2587 | delete_insn_chain (headptr[ix], currptr[ix], false); | |
2588 | } | |
2589 | if (!moveall) | |
2590 | { | |
2591 | if (jump == move_before) | |
2592 | break; | |
2593 | ||
2594 | /* For the unmerged insns, try a different insertion point. */ | |
2595 | move_before = jump; | |
2596 | ||
4ec5d4f5 BS |
2597 | /* Don't try moving before a cc0 user, as that may invalidate |
2598 | the cc0. */ | |
618f4073 | 2599 | if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump)) |
4ec5d4f5 | 2600 | break; |
4ec5d4f5 BS |
2601 | |
2602 | for (ix = 0; ix < nedges; ix++) | |
2603 | currptr[ix] = headptr[ix] = nextptr[ix]; | |
2604 | } | |
2605 | } | |
2606 | while (!moveall); | |
2607 | ||
2608 | out: | |
2609 | free (currptr); | |
2610 | free (headptr); | |
2611 | free (nextptr); | |
2612 | ||
bd2c6270 | 2613 | crossjumps_occurred |= changed; |
4ec5d4f5 BS |
2614 | |
2615 | return changed; | |
2616 | } | |
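
/* Shape of the transformation (a sketch with hypothetical insns, not
   from the original sources):

     before:                         after:
       bb:  if (cond) goto B;          bb:  r0 = r1
       A:   r0 = r1                         if (cond) goto B;
            ...                        A:   ...
       B:   r0 = r1                    B:   ...
            ...

   The common head of the successor blocks is hoisted into BB, before its
   branch, once can_move_insns_across shows that the move preserves
   liveness.  */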
2617 | ||
7752e522 JJ |
2618 | /* Return true if BB contains just a bb note, or a bb note followed
2619 | only by DEBUG_INSNs. */
2620 | ||
2621 | static bool | |
2622 | trivially_empty_bb_p (basic_block bb) | |
2623 | { | |
da5477a9 | 2624 | rtx_insn *insn = BB_END (bb); |
7752e522 JJ |
2625 | |
2626 | while (1) | |
2627 | { | |
2628 | if (insn == BB_HEAD (bb)) | |
2629 | return true; | |
2630 | if (!DEBUG_INSN_P (insn)) | |
2631 | return false; | |
2632 | insn = PREV_INSN (insn); | |
2633 | } | |
2634 | } | |
2635 | ||
45676a7c SB |
2636 | /* Return true if BB contains just a return and possibly a USE of the |
2637 | return value. Fill in *RET and *USE with the return and use insns | |
2ea0d750 | 2638 | if any found, otherwise NULL. All CLOBBERs are ignored. */ |
45676a7c SB |
2639 | |
2640 | static bool | |
2641 | bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use) | |
2642 | { | |
2643 | *ret = *use = NULL; | |
2644 | rtx_insn *insn; | |
2645 | ||
2646 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
2647 | return false; | |
2648 | ||
2649 | FOR_BB_INSNS (bb, insn) | |
2650 | if (NONDEBUG_INSN_P (insn)) | |
2651 | { | |
2ea0d750 SB |
2652 | rtx pat = PATTERN (insn); |
2653 | ||
2654 | if (!*ret && ANY_RETURN_P (pat)) | |
45676a7c | 2655 | *ret = insn; |
2ea0d750 SB |
2656 | else if (!*ret && !*use && GET_CODE (pat) == USE |
2657 | && REG_P (XEXP (pat, 0)) | |
2658 | && REG_FUNCTION_VALUE_P (XEXP (pat, 0))) | |
45676a7c | 2659 | *use = insn; |
2ea0d750 | 2660 | else if (GET_CODE (pat) != CLOBBER) |
45676a7c SB |
2661 | return false; |
2662 | } | |
2663 | ||
2664 | return !!*ret; | |
2665 | } | |
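
/* Example of a block this recognizes (a sketch in RTL-like form, not
   from the original sources):

     (insn (use (reg:SI <retval>)))   <- returned in *USE
     (jump_insn (simple_return))      <- returned in *RET

   Any CLOBBERs in the block are skipped, as stated above.  */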
2666 | ||
402209ff JH |
2667 | /* Do simple CFG optimizations - basic block merging, simplifying of jump |
2668 | instructions etc. Return nonzero if changes were made. */ | |
2669 | ||
2670 | static bool | |
d329e058 | 2671 | try_optimize_cfg (int mode) |
402209ff | 2672 | { |
402209ff JH |
2673 | bool changed_overall = false; |
2674 | bool changed; | |
2675 | int iterations = 0; | |
ec3ae3da | 2676 | basic_block bb, b, next; |
402209ff | 2677 | |
6fb5fa3c | 2678 | if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING)) |
38c1593d JH |
2679 | clear_bb_flags (); |
2680 | ||
bd2c6270 | 2681 | crossjumps_occurred = false; |
c1e3e2d9 | 2682 | |
11cd3bed | 2683 | FOR_EACH_BB_FN (bb, cfun) |
2dd2d53e SB |
2684 | update_forwarder_flag (bb); |
2685 | ||
245f1bfa | 2686 | if (! targetm.cannot_modify_jumps_p ()) |
402209ff | 2687 | { |
7cf240d5 | 2688 | first_pass = true; |
e4ec2cac AO |
2689 | /* Attempt to merge blocks as made possible by edge removal. If |
2690 | a block has only one successor, and the successor has only | |
2691 | one predecessor, they may be combined. */ | |
2692 | do | |
402209ff | 2693 | { |
4ec5d4f5 | 2694 | block_was_dirty = false; |
e4ec2cac AO |
2695 | changed = false; |
2696 | iterations++; | |
2697 | ||
c263766c RH |
2698 | if (dump_file) |
2699 | fprintf (dump_file, | |
e4ec2cac AO |
2700 | "\n\ntry_optimize_cfg iteration %i\n\n", |
2701 | iterations); | |
402209ff | 2702 | |
fefa31b5 DM |
2703 | for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b |
2704 | != EXIT_BLOCK_PTR_FOR_FN (cfun);) | |
402209ff | 2705 | { |
e0082a72 | 2706 | basic_block c; |
e4ec2cac AO |
2707 | edge s; |
2708 | bool changed_here = false; | |
5f0d2358 | 2709 | |
468059bc DD |
2710 | /* Delete trivially dead basic blocks. These are either
2711 | blocks with no predecessors, or empty blocks with no | |
1e211590 DD |
2712 | successors. However if the empty block with no |
2713 | successors is the successor of the ENTRY_BLOCK, it is | |
2714 | kept. This ensures that the ENTRY_BLOCK will have a | |
2715 | successor which is a precondition for many RTL | |
2716 | passes. Empty blocks may result from expanding | |
468059bc DD |
2717 | __builtin_unreachable (). */ |
2718 | if (EDGE_COUNT (b->preds) == 0 | |
1e211590 | 2719 | || (EDGE_COUNT (b->succs) == 0 |
7752e522 | 2720 | && trivially_empty_bb_p (b) |
fefa31b5 DM |
2721 | && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest |
2722 | != b)) | |
e4ec2cac | 2723 | { |
f6366fc7 | 2724 | c = b->prev_bb; |
f1de5107 | 2725 | if (EDGE_COUNT (b->preds) > 0) |
3b5fda81 JJ |
2726 | { |
2727 | edge e; | |
2728 | edge_iterator ei; | |
2729 | ||
f1de5107 JJ |
2730 | if (current_ir_type () == IR_RTL_CFGLAYOUT) |
2731 | { | |
bcc708fc MM |
2732 | if (BB_FOOTER (b) |
2733 | && BARRIER_P (BB_FOOTER (b))) | |
f1de5107 JJ |
2734 | FOR_EACH_EDGE (e, ei, b->preds) |
2735 | if ((e->flags & EDGE_FALLTHRU) | |
bcc708fc | 2736 | && BB_FOOTER (e->src) == NULL) |
f1de5107 | 2737 | { |
bcc708fc | 2738 | if (BB_FOOTER (b)) |
f1de5107 | 2739 | { |
d8ce2eae DM |
2740 | BB_FOOTER (e->src) = BB_FOOTER (b); |
2741 | BB_FOOTER (b) = NULL; | |
f1de5107 JJ |
2742 | } |
2743 | else | |
2744 | { | |
2745 | start_sequence (); | |
d8ce2eae | 2746 | BB_FOOTER (e->src) = emit_barrier (); |
f1de5107 JJ |
2747 | end_sequence (); |
2748 | } | |
2749 | } | |
2750 | } | |
2751 | else | |
2752 | { | |
da5477a9 | 2753 | rtx_insn *last = get_last_bb_insn (b); |
f1de5107 JJ |
2754 | if (last && BARRIER_P (last)) |
2755 | FOR_EACH_EDGE (e, ei, b->preds) | |
2756 | if ((e->flags & EDGE_FALLTHRU)) | |
2757 | emit_barrier_after (BB_END (e->src)); | |
2758 | } | |
3b5fda81 | 2759 | } |
f470c378 | 2760 | delete_basic_block (b); |
bef16e87 | 2761 | changed = true; |
6626665f | 2762 | /* Avoid trying to remove the exit block. */ |
fefa31b5 | 2763 | b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c); |
83bd032b | 2764 | continue; |
e4ec2cac | 2765 | } |
402209ff | 2766 | |
6ce2bcb7 | 2767 | /* Remove code labels no longer used. */ |
c5cbcccf ZD |
2768 | if (single_pred_p (b) |
2769 | && (single_pred_edge (b)->flags & EDGE_FALLTHRU) | |
2770 | && !(single_pred_edge (b)->flags & EDGE_COMPLEX) | |
4b4bf941 | 2771 | && LABEL_P (BB_HEAD (b)) |
6c979aa1 | 2772 | && !LABEL_PRESERVE_P (BB_HEAD (b)) |
e4ec2cac AO |
2773 | /* If the previous block ends with a branch to this |
2774 | block, we can't delete the label. Normally this | |
2775 | is a condjump that is yet to be simplified, but | |
2776 | if CASE_DROPS_THRU, this can be a tablejump with | |
2777 | some element going to the same place as the | |
2778 | default (fallthru). */ | |
fefa31b5 | 2779 | && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
c5cbcccf | 2780 | || !JUMP_P (BB_END (single_pred (b))) |
a813c111 | 2781 | || ! label_is_jump_target_p (BB_HEAD (b), |
c5cbcccf | 2782 | BB_END (single_pred (b))))) |
e4ec2cac | 2783 | { |
03fbe718 | 2784 | delete_insn (BB_HEAD (b)); |
c263766c RH |
2785 | if (dump_file) |
2786 | fprintf (dump_file, "Deleted label in block %i.\n", | |
0b17ab2f | 2787 | b->index); |
e4ec2cac | 2788 | } |

              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
                  && single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                       ? b->next_bb : b->prev_bb);
                  redirect_edge_succ_nodup (single_pred_edge (b),
                                            single_succ (b));
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                  continue;
                }

              /* Merge B with its single successor, if any.  */
              if (single_succ_p (b)
                  && (s = single_succ_edge (b))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
                  && single_pred_p (c)
                  && b != c)
                {
                  /* When not in cfg_layout mode, use code that is aware
                     of INSN reordering.  That code can create new basic
                     blocks, so it does not fit the merge_blocks
                     interface; it is kept here in the hope that it will
                     become useless once more of the compiler is
                     converted to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Try to change a branch to a return to just that return.  */
              rtx_insn *ret, *use;
              if (single_succ_p (b)
                  && onlyjump_p (BB_END (b))
                  && bb_is_just_return (single_succ (b), &ret, &use))
                {
                  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
                                     PATTERN (ret), 0))
                    {
                      if (use)
                        emit_insn_before (copy_insn (PATTERN (use)),
                                          BB_END (b));
                      if (dump_file)
                        fprintf (dump_file, "Changed jump %d->%d to return.\n",
                                 b->index, single_succ (b)->index);
                      redirect_edge_succ (single_succ_edge (b),
                                          EXIT_BLOCK_PTR_FOR_FN (cfun));
                      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
                      changed_here = true;
                    }
                }

              /* Try to change a conditional branch to a return to the
                 respective conditional return.  */
              if (EDGE_COUNT (b->succs) == 2
                  && any_condjump_p (BB_END (b))
                  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
                {
                  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
                                     PATTERN (ret), 0))
                    {
                      if (use)
                        emit_insn_before (copy_insn (PATTERN (use)),
                                          BB_END (b));
                      if (dump_file)
                        fprintf (dump_file, "Changed conditional jump %d->%d "
                                 "to conditional return.\n",
                                 b->index, BRANCH_EDGE (b)->dest->index);
                      redirect_edge_succ (BRANCH_EDGE (b),
                                          EXIT_BLOCK_PTR_FOR_FN (cfun));
                      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
                      changed_here = true;
                    }
                }

              /* Try to flip a conditional branch that falls through to
                 a return so that it becomes a conditional return and a
                 new jump to the original branch target.  */
              if (EDGE_COUNT (b->succs) == 2
                  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                  && any_condjump_p (BB_END (b))
                  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
                {
                  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
                                   JUMP_LABEL (BB_END (b)), 0))
                    {
                      basic_block new_ft = BRANCH_EDGE (b)->dest;
                      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
                                         PATTERN (ret), 0))
                        {
                          if (use)
                            emit_insn_before (copy_insn (PATTERN (use)),
                                              BB_END (b));
                          if (dump_file)
                            fprintf (dump_file, "Changed conditional jump "
                                     "%d->%d to conditional return, adding "
                                     "fall-through jump.\n",
                                     b->index, BRANCH_EDGE (b)->dest->index);
                          redirect_edge_succ (BRANCH_EDGE (b),
                                              EXIT_BLOCK_PTR_FOR_FN (cfun));
                          BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
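                          /* The condition was inverted, so the original
                             branch target is now reached by falling
                             through; the two edges trade probabilities,
                             and the REG_BR_PROB note must be updated to
                             match.  */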
                          std::swap (BRANCH_EDGE (b)->probability,
                                     FALLTHRU_EDGE (b)->probability);
                          update_br_prob_note (b);
                          basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
                          notice_new_block (jb);
                          if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
                                              block_label (new_ft), 0))
                            gcc_unreachable ();
                          redirect_edge_succ (single_succ_edge (jb), new_ft);
                          changed_here = true;
                        }
                      else
                        {
                          /* Invert the jump back to what it was.  This should
                             never fail.  */
                          if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
                                            JUMP_LABEL (BB_END (b)), 0))
                            gcc_unreachable ();
                        }
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (single_succ_p (b)
                  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
                  && onlyjump_p (BB_END (b))
                  && !CROSSING_JUMP_P (BB_END (b))
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
                                                     single_succ (b),
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              if ((mode & CLEANUP_CROSSJUMP)
                  /* This can lengthen register lifetimes.  Do it only after
                     reload.  */
                  && reload_completed
                  && try_head_merge_bb (b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

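          /* Predecessors of the exit block that end in identical insns
             (for instance the same return sequence) can have their tails
             merged by cross-jumping "into" the exit block.  */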
          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
            changed = true;

          if (block_was_dirty)
            {
              /* This should only be set by head-merging.  */
              gcc_assert (mode & CLEANUP_CROSSJUMP);
              df_analyze ();
            }

          if (changed)
            {
              /* Edge forwarding in particular can cause hot blocks previously
                 reached by both hot and cold blocks to become dominated only
                 by cold blocks.  This will cause the verification below to
                 fail, and lead to now cold code in the hot section.  This is
                 not easy to detect and fix during edge forwarding, and in
                 some cases is only visible after newly unreachable blocks
                 are deleted, which will be done in fixup_partitions.  */
              fixup_partitions ();
              checking_verify_flow_info ();
            }

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

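  /* BB_FORWARDER_BLOCK and BB_NONTHREADABLE_BLOCK are scratch flags
     used only while this cleanup runs; clear them before returning.  */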
  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug bind insns, we
     should delete blocks in reverse dominator order, so as to get a
     chance to substitute all released DEFs into debug bind stmts.  If
     we don't have dominators information, walking blocks backward
     gets us a better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
           b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
        {
          prev_bb = b->prev_bb;

          if (!(b->flags & BB_REACHABLE))
            {
              /* Speed up the removal of blocks that don't dominate
                 others.  Walking backwards, this should be the common
                 case.  */
              if (!first_dom_son (CDI_DOMINATORS, b))
                delete_basic_block (b);
              else
                {
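                  /* B dominates other blocks.  Collect B together with
                     everything it dominates and delete the lot: any
                     block dominated by an unreachable block is
                     unreachable itself.  */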
                  vec<basic_block> h
                    = get_all_dominated_blocks (CDI_DOMINATORS, b);

                  while (h.length ())
                    {
                      b = h.pop ();

                      prev_bb = b->prev_bb;

                      gcc_assert (!(b->flags & BB_REACHABLE));

                      delete_basic_block (b);
                    }

                  h.release ();
                }

              changed = true;
            }
        }
    }
  else
    {
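      /* No usable dominator information: just walk the blocks backwards
         and delete each unreachable block as it is found.  */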
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
           b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
        {
          prev_bb = b->prev_bb;

          if (!(b->flags & BB_REACHABLE))
            {
              delete_basic_block (b);
              changed = true;
            }
        }
    }

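  /* Deleting blocks may have made some jumps redundant by bringing
     their targets directly after them; tidy_fallthru_edges removes
     such jumps in favor of plain fallthrough.  */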
  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Delete any jump tables never referenced.  We can't delete them when
   the tablejump insn is removed, because they are still referenced by
   the preceding insns that compute the destination, so we delay
   deleting them and garbage-collect them once life information has
   been computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
           insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
           insn = next)
        {
          next = NEXT_INSN (insn);
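          /* A label immediately followed by jump-table data anchors
             that table in the insn stream.  Once nothing references
             the label beyond (at most) its own preserve bit, i.e.
             LABEL_NUSES == LABEL_PRESERVE_P, the table can no longer
             be reached and both insns are dead.  */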
          if (LABEL_P (insn)
              && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
              && JUMP_TABLE_DATA_P (next))
            {
              rtx_insn *label = insn, *jump = next;

              if (dump_file)
                fprintf (dump_file, "Dead jumptable %i removed\n",
                         INSN_UID (insn));

              next = NEXT_INSN (next);
              delete_insn (jump);
              delete_insn (label);
            }
        }
    }
}

\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }
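  /* Deleting blocks leaves holes in the basic-block index space;
     renumber the remaining blocks so the index space is dense again.  */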
  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

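  /* Give up once the cfg_cleanup debug counter runs out; this makes it
     possible to bisect a problematic cleanup with -fdbg-cnt.  */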
  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
        {
          /* Try to remove some trivially dead insns when doing an expensive
             cleanup.  But delete_trivially_dead_insns doesn't work after
             reload (it only handles pseudos) and run_fast_dce is too costly
             to run in every iteration.

             For effective cross jumping, we really want to run a fast DCE to
             clean up any dead conditions, or they get in the way of performing
             useful tail merges.

             Other transformations in cleanup_cfg are not so sensitive to dead
             code, so delete_trivially_dead_insns or even doing nothing at all
             is good enough.  */
          if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
              && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
          if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
            run_fast_dce ();
        }
      else
        break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ??? We probably do this way too often.  */
  if (current_loops
      && (changed
          || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
\f
namespace {

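/* The "jump" pass: delete trivially dead insns, then clean up the CFG,
   enabling the expensive simplifications when optimizing and jump
   threading when -fthread-jumps is in effect.  */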
const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
\f
namespace {

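/* The "jump2" pass: a later jump-optimization pass that only runs a
   CFG cleanup, with cross-jumping (tail merging) enabled when
   -fcrossjumping is in effect.  */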
const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}