/* SSA Jump Threading
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;
/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;
/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
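/* Editorial note: the SSA_NAME_VALUE accessor used throughout this file
   is the read side of this table.  A sketch of the lookup (assumed from
   the behavior above, not the exact macro definition):

     SSA_NAME_VERSION (name) < ssa_name_values.length ()
       ? ssa_name_values[SSA_NAME_VERSION (name)]
       : NULL_TREE

   so set_ssa_name_value and SSA_NAME_VALUE form a get/set pair keyed
   by the SSA_NAME's version number.  */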
/* Initialize the per SSA_NAME value-handles array.  */

void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array.  */

void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || !gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
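/* Editorial illustration (not from the original sources): a classic
   threadable shape in source form.

     if (a > 0)          // predecessor blocks set x differently
       x = 1;
     else
       x = 0;
     if (x != 0)         // BB: multiple preds, ends in GIMPLE_COND
       foo ();

   Reached via the "then" arm, the second condition is known to be
   true, so that incoming edge can be threaded straight to the call
   of foo, eliminating a runtime test.  */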
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        return gimple_assign_lhs (use_stmt);
    }
  return op;
}
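/* Illustrative GIMPLE (editorial, not from the sources): after VRP
   inserts range assertions, e.g.

     x_5 = ASSERT_EXPR <x_3, x_3 != 0>;

   a use of x_3 in a block dominated by that assignment can be replaced
   by x_5, whose SSA_NAME carries the asserted range information.  */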
/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
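/* Editorial illustration of the unwind stack layout: recording the
   equivalence X == Y pushes the pair (previous value of X, X), and
   callers push a NULL_TREE marker to delimit the state for one edge:

     bottom  ...  NULL_TREE  prev(a_1) a_1  prev(b_2) b_2   <- top

   Unwinding pops (name, previous-value) pairs, restoring each
   SSA_NAME's value until the NULL_TREE marker is consumed.  */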
/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
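/* Editorial illustration: for a PHI at the head of E->dest such as

     x_1 = PHI <0 (E), y_2 (E')>

   traversing E lets us temporarily record the equivalence x_1 == 0,
   which may make a later test like "if (x_1 == 0)" statically
   resolvable along this path.  */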
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.  */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack)
{
  for (unsigned int i = 1; i < num_ssa_names; i++)
    if (ssa_name (i) && SSA_NAME_VALUE (ssa_name (i)) == lhs)
      record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);

  if (SSA_NAME_VALUE (lhs))
    record_temporary_equivalence (lhs, NULL_TREE, stack);
}
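/* Editorial illustration: suppose we recorded a_1 == b_2 while walking
   the block, and a later statement (say a GIMPLE_ASM output) redefines
   the value feeding b_2.  Recording (name, NULL_TREE) both retracts the
   stale equivalence now and leaves an unwind entry so the pre-walk
   state is still restored correctly afterwards.  */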
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple),
                                                  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              invalidate_equivalences (op, stack);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  invalidate_equivalences (lhs, stack);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
        invalidate_equivalences (gimple_get_lhs (stmt), stack);
    }
  return stmt;
}
/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
         care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}
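/* Editorial illustration for the routine below (not from the original
   sources): when a threaded path reaches an empty forwarding block E,

     ... -> E -> F

   E is appended to the path as an EDGE_NO_COPY_SRC_BLOCK entry and the
   search continues into F.  If F ends in a conditional whose value is
   known along the path, the whole chain becomes a profitable jump
   thread without E ever being duplicated.  */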
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */

static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
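/* Editorial illustration: a completed jump thread path is a vector of
   jump_thread_edge entries, for example

     [ (e1, EDGE_START_JUMP_THREAD),
       (e2, EDGE_COPY_SRC_BLOCK),
       (e3, EDGE_NO_COPY_SRC_BLOCK) ]

   where, roughly, the source blocks of EDGE_COPY_SRC_BLOCK entries are
   duplicated by the CFG updater while EDGE_NO_COPY_SRC_BLOCK blocks
   are threaded through unchanged (see tree-ssa-threadupdate.c).  */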
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value indicates success.  A zero return value is
   failure, but the block can still be duplicated as a joiner in a jump
   thread path; a negative value indicates the block should not be
   duplicated and thus is not suitable for a joiner in a jump threading
   path.  */

static int
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
                                                        *backedge_seen_p);

  /* If we didn't look at all the statements, the most likely reason is
     there were too many and thus duplicating this block is not profitable.

     Also note if we do not look at all the statements, then we may not
     have invalidated equivalences that are no longer valid if we threaded
     around a loop.  Thus we must signal to our caller that this block
     is not suitable for use as a joiner in a threading path.  */
  if (!stmt)
    return -1;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is the
             first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }
    }
  return 0;
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              stack, simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          remove_temporary_equivalences (stack);
          return;
        }
    }
 /* We were unable to determine which outgoing edge from E->dest is taken.
    However, we might still be able to thread through successors of E->dest.
    This often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
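  /* Editorial illustration of a joiner block (not from the original
     sources):

           B1    B2
             \  /
              J        <- E->dest: condition at J is not resolvable here
             /  \
           C1    C2

     Even though J's condition is unknown at J itself, copying J for the
     B1->J predecessor may let the copy's outgoing condition be resolved
     using the equivalences that hold along B1->J.  */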
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          remove_temporary_equivalences (stack);
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        stack->safe_push (NULL_TREE);

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);

        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               stack, simplify, path, visited,
                                               &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          delete_jump_thread_path (path);

        /* And unwind the equivalence table.  */
        remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}