/* SSA Jump Threading
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;
/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;
/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
/* Initialize the per SSA_NAME value-handles array.  */

void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array.  */

void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || !gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        return gimple_assign_lhs (use_stmt);
    }
  return op;
}
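
/* For example (an illustrative fragment; the names are made up):

     x_3 = ASSERT_EXPR <x_1, x_1 > 5>;
     ...
     if (x_1 > 7)    <-- in a block dominated by the ASSERT_EXPR

   Rewriting the use of x_1 in the condition as x_3 lets the caller
   exploit the range information that was attached to x_3.  */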
/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
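
/* An illustrative sketch of the unwind stack this routine consumes.
   Entries are pushed by record_temporary_equivalence below; the SSA
   names are made up:

     ... | NULL_TREE | prev(x_1) | x_1 | prev(y_2) | y_2  <- top

   Names and their previous values are popped in pairs and restored
   until the NULL_TREE marker pushed by our caller is reached.  */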
/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
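
/* For instance (an illustrative fragment): given the PHI

     # x_1 = PHI <7(E), y_2(F)>

   at E->dest, traversing E lets us record the context sensitive
   equivalence x_1 == 7 for the duration of this walk.  */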
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
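
/* As an illustration (a made-up statement): for `x_1 = y_2 + 3', a
   GIMPLE_BINARY_RHS, the switch above reduces to
   fold_binary (PLUS_EXPR, TREE_TYPE (x_1), y_2, 3), which yields a
   constant once y_2 has been temporarily replaced by a constant.  */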
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

           if (x < 6)
             goto l;
           else
             goto l;

         l:
           r = PHI <&w[2].a[1](2), &a.a[6](3)>
           __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

           r = PHI <1(2), 2(3)>
           __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */
      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
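
/* To illustrate the walk above (a made-up fragment): if E->dest
   contains

     a_4 = b_1 + 3;

   and traversing E already gave us b_1 == 2, then copy propagation
   and folding produce 5, so we record a_4 == 5, which may in turn
   simplify the control statement ending the block.  */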
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be done.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition.  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
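
/* An illustrative example of the canonicalization above: for
   `if (5 < x_2)' the operands are swapped and the comparison code
   reversed, giving `if (x_2 > 5)', so that a value recorded for
   x_2 can actually match.  */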
/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.   Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
        return true;
    }
  return false;
}
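
/* For example (illustrative): when threading the backedge E of a
   loop whose body is BB, a condition `if (i_5 != 0)' at the end of
   E->dest, where i_5 is set by a non-PHI statement in BB, must not
   be judged using BB's temporary equivalences; this function
   returns TRUE for it so the caller can punt.  */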
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */

static void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  stack_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */

static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statements this block can have are a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
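
/* An illustrative CFG sketch for the routine above (blocks and
   names are made up):

     A: if (x_1 > 5) goto B; else goto E;
     B: goto C;                            <- empty forwarding block
     C: if (x_1 > 5) goto D; else goto E;

   When threading reaches B, the block is recorded with
   EDGE_NO_COPY_SRC_BLOCK (it need not be duplicated) and we recurse
   into C, where the recorded equivalences or dominating asserts can
   fold the condition and extend the path to D.  */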
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.  */

static bool
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have crossed a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (*backedge_seen_p
      && cond_arg_set_in_bb (e, e->dest))
    return false;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return false;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    return false;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return false;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.  */
          if (!*backedge_seen_p
              || ! cond_arg_set_in_bb (taken_edge, e->dest))
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              thread_around_empty_blocks (taken_edge,
                                          dummy_cond,
                                          handle_dominating_asserts,
                                          simplify,
                                          visited,
                                          path,
                                          backedge_seen_p);
            }
          return true;
        }
    }
  return false;
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
                                   stack, simplify, path, visited,
                                   &backedge_seen))
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* There should be no edges on the path, so no need to walk through
         the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
    }
  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
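
  /* An illustrative sketch of a joiner block (a made-up CFG):

       A: if (a_1) goto J; else ...
       B: if (b_2) goto J; else ...
       J: x_3 = PHI <0(A), 1(B)>     <- joiner; out edge unknown
          if (x_3) goto T; else goto F;

     Via A the PHI gives x_3 == 0, so the copy of J made for the A->J
     edge (EDGE_COPY_SRC_JOINER_BLOCK) can thread straight to F and
     the runtime test disappears on that path.  */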
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          remove_temporary_equivalences (stack);
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        stack->safe_push (NULL_TREE);

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (!backedge_seen
            || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
          found = thread_around_empty_blocks (taken_edge,
                                              dummy_cond,
                                              handle_dominating_asserts,
                                              simplify,
                                              visited,
                                              path,
                                              &backedge_seen);

        if (!found
            && (!backedge_seen
                || ! cond_arg_set_in_bb (path->last ()->e, e->dest)))
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               stack, simplify, path, visited,
                                               &backedge_seen);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}