/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-threadbackward.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;
/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;
typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);
/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
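
/* For illustration (a hypothetical sketch, not from any particular
   testcase): after recording an equivalence with

     set_ssa_name_value (x_1, integer_zero_node);

   a later SSA_NAME_VALUE (x_1) lookup yields zero, allowing a
   condition such as "if (x_1 == 0)" in the block being threaded to
   fold to a constant.  */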
/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || !gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
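
/* An illustrative example (hypothetical GIMPLE, for exposition only):
   a block such as

     <bb 4>:
     x_3 = PHI <0(2), 1(3)>
     if (x_3 != 0)
       goto <bb 5>;
     else
       goto <bb 6>;

   has multiple predecessors and successors and ends in a GIMPLE_COND,
   so it is potentially threadable: reaching it via the edge from bb 2
   forces the condition false, via bb 3 forces it true.  */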
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
	  && gimple_assign_single_p (use_stmt)
	  && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	return gimple_assign_lhs (use_stmt);
    }
  return op;
}
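
/* For example (an illustrative fragment): VRP may insert

     x_5 = ASSERT_EXPR <x_1, x_1 > 0>;

   on an edge.  For a use of x_1 in a block dominated by that
   statement, this function returns x_5, whose range information
   encodes the x_1 > 0 predicate.  */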
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.

   If SRC_MAP/DST_MAP exist, then mark the source and destination SSA_NAMEs
   of any equivalences recorded.  We use this to make invalidation after
   traversing back edges less painful.  */

static bool
record_temporary_equivalences_from_phis (edge e,
					 const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}
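
/* For instance (illustrative GIMPLE): when traversing the edge from
   bb 2 into a block containing

     x_2 = PHI <7(2), y_4(3)>

   we temporarily record the equivalence x_2 == 7.  If the conditional
   ending the block tests x_2, it may then fold to a constant.  */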
/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
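
/* As an example of the form we look for (a hypothetical fragment):
   given

     a_7 = b_3 + 5;

   if b_3 carries the temporary value 2 on this path, the statement
   folds to a_7 = 7 and we record the context sensitive equivalence
   a_7 == 7 for use by later statements in the block.  */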
static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
    const_and_copies *const_and_copies,
    avail_exprs_stack *avail_exprs_stack,
    pfn_simplify simplify,
    bool backedge_seen)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;
      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we can not thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;
      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	{
	  /* STMT might still have DEFS and we need to invalidate any known
	     equivalences for them.

	     Consider if STMT is a GIMPLE_ASM with one or more outputs that
	     feeds a conditional inside a loop.  We might derive an equivalence
	     due to the conditional.  */
	  tree op;
	  ssa_op_iter iter;

	  if (backedge_seen)
	    FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
	      const_and_copies->invalidate (op);

	  continue;
	}
      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	   r = PHI <&w[2].a[1](2), &a.a[6](3)>
	   __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	   r = PHI <1(2), 2(3)>
	   __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    {
	      if (backedge_seen)
		{
		  tree lhs = gimple_get_lhs (stmt);
		  const_and_copies->invalidate (lhs);
		}
	      continue;
	    }
	}
      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     Try to fold the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  /* ??? The DOM callback below can be changed to setting
	     the mprts_hook around the call to thread_across_edge,
	     avoiding the use substitution.  The VRP hook should be
	     changed to properly valueize operands itself using
	     SSA_NAME_VALUE in addition to its own lattice.  */
	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						       threadedge_valueize);
	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    {
	      /* We're going to temporarily copy propagate the operands
		 and see if that allows us to simplify this statement.  */
	      tree *copy;
	      ssa_op_iter iter;
	      use_operand_p use_p;
	      unsigned int num, i = 0;

	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	      copy = XALLOCAVEC (tree, num);

	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
		 the operands.  */
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		{
		  tree tmp = NULL;
		  tree use = USE_FROM_PTR (use_p);

		  copy[i++] = use;
		  if (TREE_CODE (use) == SSA_NAME)
		    tmp = SSA_NAME_VALUE (use);
		  if (tmp)
		    SET_USE (use_p, tmp);
		}

	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

	      /* Restore the statement's original uses/defs.  */
	      i = 0;
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		SET_USE (use_p, copy[i++]);
	    }
	}
      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.

	 If we have traversed a backedge at some point during threading,
	 then always enter something here.  Either a real equivalence,
	 or a NULL_TREE equivalence which is effectively invalidation of
	 prior equivalences.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
						cached_lhs);
      else if (backedge_seen)
	const_and_copies->invalidate (gimple_get_lhs (stmt));
    }

  return stmt;
}
/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */

static tree
dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
		class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
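
/* Why equivalences die past a backedge (an illustrative sketch): if a
   loop header records i_1 == 0 from the preheader edge, that fact is
   wrong when the header is reached again via the latch, after the
   increment has executed.  Substituting DUMMY_SIMPLIFY keeps us from
   consulting such stale equivalences.  */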
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */
static tree
simplify_control_stmt_condition (edge e,
				 gimple *stmt,
				 class avail_exprs_stack *avail_exprs_stack,
				 gcond *dummy_cond,
				 pfn_simplify simplify,
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);
  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}
      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}
      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  cond_code = swap_tree_comparison (cond_code);
	  std::swap (op0, op1);
	}
      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);
      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);
      /* If we were testing an integer/pointer against a constant, then
	 we can use the FSM code to trace the value of the SSA_NAME.  If
	 a value is found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths found by the FSM optimizer.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }
  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;
      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      for (int i = 0; i < 2; i++)
	{
	  if (TREE_CODE (cached_lhs) == SSA_NAME
	      && SSA_NAME_VALUE (cached_lhs))
	    cached_lhs = SSA_NAME_VALUE (cached_lhs);
	  else
	    break;
	}
      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
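
/* A short illustration of the two outcomes (hypothetical): for
   "if (x_1 == 0)" where this path records x_1 == 0, the condition
   folds to the invariant true.  For "if (x_1 != 42)" with no known
   value, folding yields nothing, so for an integral x_1 compared
   against a constant we instead return x_1 itself and let the FSM
   threader try to trace its value.  */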
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }
  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     vector.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;
  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }
  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();
	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}
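
/* Debug bind stmts print in GIMPLE dumps as, e.g.,

     # DEBUG x => x_3

   and copying them along the chain of predecessors is what keeps the
   user variable "x" visible to the debugger after the threaded path
   is duplicated (an illustrative dump fragment, not from a specific
   testcase).  */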
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    class avail_exprs_stack *avail_exprs_stack,
			    bool handle_dominating_asserts,
			    pfn_simplify simplify,
			    bitmap visited,
			    vec<jump_thread_edge *> *path,
			    bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;
  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);
  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	      if (*backedge_seen_p)
		simplify = dummy_simplify;
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 avail_exprs_stack,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path,
						 backedge_seen_p);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }
  /* The only real statement this block can have is a control flow
     altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;
  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
					  avail_exprs_stack, dummy_cond,
					  simplify, handle_dominating_asserts);
  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
	simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  avail_exprs_stack,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path,
				  backedge_seen_p);
      return true;
    }

  return false;
}
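
/* A minimal illustration (hypothetical CFG): if bb 5 contains only

     <bb 5>:
     goto <bb 7>;

   then a thread reaching bb 5 extends through it with an
   EDGE_NO_COPY_SRC_BLOCK entry; the block forwards control without
   needing to be duplicated.  */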
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */
static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     bool handle_dominating_asserts,
			     const_and_copies *const_and_copies,
			     avail_exprs_stack *avail_exprs_stack,
			     pfn_simplify simplify,
			     vec<jump_thread_edge *> *path,
			     bitmap visited,
			     bool *backedge_seen_p)
{
  /* If we have seen a backedge, then we rely solely on the FSM threader
     to find jump threads.  */
  if (*backedge_seen_p)
    return 0;

  /* We want to record any equivalences created by traversing E.  */
  if (!handle_dominating_asserts)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return 0;
  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
							avail_exprs_stack,
							simplify,
							*backedge_seen_p);
  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any instructions, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }
  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
					      dummy_cond, simplify,
					      handle_dominating_asserts);

      if (!cond)
	return 0;
      if (is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || bitmap_bit_p (visited, dest->index))
	    return 0;
	  /* Only push the EDGE_START_JUMP_THREAD marker if this is the
	     first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	      *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);
	  *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	  if (*backedge_seen_p)
	    simplify = dummy_simplify;
	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      avail_exprs_stack,
				      handle_dominating_asserts,
				      simplify,
				      visited,
				      path,
				      backedge_seen_p);
	  return 1;
	}
    }
  return 0;
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */
void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    class const_and_copies *const_and_copies,
		    class avail_exprs_stack *avail_exprs_stack,
		    tree (*simplify) (gimple *, gimple *,
				      class avail_exprs_stack *))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;
  int threaded = thread_through_normal_block (e, dummy_cond,
					      handle_dominating_asserts,
					      const_and_copies,
					      avail_exprs_stack,
					      simplify, path,
					      visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A failure to thread here might still be an opportunity for the
	 backward (FSM) threader.  */
      find_jump_threads_backwards (e);

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  const_and_copies->pop_to_marker ();
	  return;
	}
    }
  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;
    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  const_and_copies->pop_to_marker ();
	  BITMAP_FREE (visited);
	  return;
	}
    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	const_and_copies->push_marker ();
	if (avail_exprs_stack)
	  avail_exprs_stack->push_marker ();

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);

	found = false;
	backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
	backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	if (backedge_seen)
	  simplify = dummy_simplify;
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    avail_exprs_stack,
					    handle_dominating_asserts,
					    simplify,
					    visited,
					    path,
					    &backedge_seen);

	if (backedge_seen)
	  simplify = dummy_simplify;

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       handle_dominating_asserts,
					       const_and_copies,
					       avail_exprs_stack,
					       simplify, path,
					       visited, &backedge_seen) > 0;
	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    find_jump_threads_backwards (path->last ()->e);
	    delete_jump_thread_path (path);
	  }

	/* And unwind the equivalence table.  */
	if (avail_exprs_stack)
	  avail_exprs_stack->pop_to_marker ();
	const_and_copies->pop_to_marker ();
      }

    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}
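
/* A sketch of how a client pass might drive this (illustrative only;
   the actual callers live in DOM and VRP):

     thread_across_edge (dummy_cond, e, false,
			 const_and_copies, avail_exprs_stack,
			 simplify_stmt_for_jump_threading);

   with FALSE for HANDLE_DOMINATING_ASSERTS in the DOM case, and the
   pass's statement simplification callback as the final argument.  */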