/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hard-reg-set.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "tree-ssa-threadupdate.h"
#include "tree-pass.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */
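/* As an illustrative sketch (not part of GCC; the source fragment below is
   hypothetical), the transformation is easiest to see at the source level:

       if (x > 0)      // block A: taking this edge tells us x > 0
	 y = f ();
       if (x > 0)      // block B: predictable when entered from A
	 g ();         // block C
       else
	 h ();

   When B is entered from A, its condition is known to be true, so we make a
   copy B' of B that keeps B's side effects but unconditionally falls through
   to C, then redirect A->B to A->B'.  The second test disappears on the
   threaded path.  */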
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};
/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};
/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}
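/* For example (illustrative only): two registered paths A->B->C->D and
   E->B->C->D both end with an edge into block D, so both hash to D's block
   index and land in the same bucket, where EQUAL below then compares the
   paths edge by edge.  */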
/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}
/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;
};
/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;
/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
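/* For instance, the typical idiom used throughout this file is:

     vec<jump_thread_edge *> *path = THREAD_PATH (e);
     edge elast = path->last ()->e;

   where E is an incoming edge carrying a registered jump thread in its
   AUX field.  */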
/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;
/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	remove_edge (e);
      else
	ei_next (&ei);
    }
}
/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->frequency = 0;
  rd->dup_blocks[count]->count = 0;
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}
/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;
/* Given an outgoing edge E lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
	 to the list of incoming edges associated with E.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}
/* Similar to copy_phi_args, except that the PHI arg exists, it just
   does not have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in e->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
		     gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      source_location locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}
/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has constant value in a flow sensitive manner.  Set
   LOCUS to location of the constant phi arg and return the value.
   Return DEF directly if either PATH or idx is ZERO.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, source_location *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
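/* A minimal illustration (hypothetical SSA names): suppose DEF is x_1,
   defined by "x_1 = PHI <0(E1), n_5(E2)>" in block DEF_BB, and the
   threading path enters DEF_BB through E1.  Backtracking finds that the
   PHI argument for E1 is the constant 0, so 0 is returned (along with
   its location) and can be used instead of x_1 when filling in PHI
   arguments along the duplicated path.  */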
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has constant value, copy constant value instead of argument itself
   if yes.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      source_location locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}
/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has constant
   value in a flow sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}
/* Given a duplicate block and its single destination (both stored
   in RD).  Create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the start node in jump threading path
   we start to check to see if the new PHI argument has constant
   value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}
/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }
  return false;
}
/* Compute the amount of profile count/frequency coming into the jump threading
   path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
   PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if path contains
   a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts/frequency
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go offpath at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go offpath at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

		A	  B	  C
		 \	  |	 /
	       Ea \	  |Eb	/ Ec
		   \	  |    /
		    v	  v   v
		      J	      <-- joiner block
		     / \
		Eoff/	\Eon
		   /	 \
		  v	  v
		Soff	 Son  <-- "normal" copy block
			 / \
		      Ed/   \ Ee
		       /     \
		      v	      v
		      D	      E

	    Jump threading paths: A -> J -> Son -> D (path 1)
				  C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
     Ea could represent multiple incoming blocks/edges that are included in
     path 1.

   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
     before or after the "normal" copy block).  These are not duplicated onto
     the jump threading path, as they are single-successor.

   - Any of the blocks along the path may have other incoming edges that
     are not part of any jump threading path, but add profile counts along
     the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

	       A	  B	      C
	       |	  |	      |
	     Ea|	  |Eb	      |Ec
	       |	  |	      |
	       v	  v	      v
	      Ja	  J	     Jc
	      / \	 / \Eon'    / \
	 Eona/	 \   ---/---\--------  \Eonc
	    /	  \ /  /     \		\
	   v	   v  v	      v		 v
	  Sona	  Soff	     Son	Sonc
	    \		      /\	 /
	     \___________    /	\  _____/
			 \  /	 \ /
			  vv	  v
			   D	  E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remain Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping track
   of all counts coming into the original path that are not in a jump
   thread (Eb in the above example, but as noted earlier, there could
   be other predecessors incoming to the path at various points, such
   as at Son).  Call this cumulative non-path count coming into the path
   before D as Enonpath.  We then ensure that the count from Sona->D is at
   least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
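/* A small worked example (numbers hypothetical): suppose J has incoming
   counts Ea=60 (on path 1), Eb=30 (not on any path) and Ec=10, so
   path_in_count for path 1 is 60 out of a total of 100, giving an
   onpath_scale of 60%.  If Eon has count 80, we initially estimate that
   60% of it, i.e. 48, belongs to path 1's duplicate.  If Ed is 70 and
   the non-path count reaching D is 30, the duplicated Sona->D edge must
   still carry at least 70 - 30 = 40, and no more than the minimum count
   seen along the path.  */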
static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     gcov_type *path_in_count_ptr,
		     gcov_type *path_out_count_ptr,
		     int *path_in_freq_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type nonpath_count = 0;
  bool has_joiner = false;
  gcov_type path_in_count = 0;
  int path_in_freq = 0;

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple buckets:
	path_in_count: total count of incoming edges that flow into the
		  current path.
	nonpath_count: total count of incoming edges that are not
		  flowing along *any* path.  These are the counts
		  that will still flow along the original path after
		  all path duplication is done by potentially multiple
		  calls to this routine.
     (any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)

     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }

  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count;
	  path_in_freq += EDGE_FREQUENCY (ein);
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any jump-threading
	     path.  These counts will still flow out of original path after all
	     jump threading is complete.  */
	  nonpath_count += ein->count;
	}
    }

  /* This is needed due to insane incoming frequencies.  */
  if (path_in_freq > BB_FREQ_MAX)
    path_in_freq = BB_FREQ_MAX;

  BITMAP_FREE (in_edge_srcs);

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  gcov_type total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path
     bb was from the threading paths (computed above in onpath_scale).
     Afterwards, we will need to do some fixup to account for other threading
     paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  gcov_type path_out_count = path_in_count;
  gcov_type min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      gcov_type cur_count = epath->count;
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = apply_probability (cur_count, onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count;
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count < min_path_count)
	min_path_count = epath->count;
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (has_joiner && path_out_count < elast->count - nonpath_count)
    {
      path_out_count = elast->count - nonpath_count;
      /* But neither can we go above the minimum count along the path
	 we are duplicating.  This can be an issue due to profile
	 insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  *path_in_freq_ptr = path_in_freq;
  return has_joiner;
}
/* Update the counts and frequencies for both an original path
   edge EPATH and its duplicate EDUP.  The duplicate source block
   will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
   and the duplicate edge EDUP will have a count of PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, gcov_type path_in_count,
		gcov_type path_out_count, int path_in_freq)
{

  /* First update the duplicated block's count / frequency.  */
  if (edup)
    {
      basic_block dup_block = edup->src;
      gcc_assert (dup_block->count == 0);
      gcc_assert (dup_block->frequency == 0);
      dup_block->count = path_in_count;
      dup_block->frequency = path_in_freq;
    }

  /* Now update the original block's count and frequency in the
     opposite manner - remove the counts/freq that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;
  if (epath->src->count < 0)
    epath->src->count = 0;
  epath->src->frequency -= path_in_freq;
  if (epath->src->frequency < 0)
    epath->src->frequency = 0;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */
  if (edup)
    edup->count = path_out_count;
  epath->count -= path_out_count;
  gcc_assert (epath->count >= 0);
}


/* The duplicate and original joiner blocks may end up with different
   probabilities (different from both the original and from each other).
   Recompute the probabilities here once we have updated the edge
   counts and frequencies.  */

static void
recompute_probabilities (basic_block bb)
{
  edge esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (esucc, ei, bb->succs)
    {
      if (!bb->count)
	continue;

      /* Prevent overflow computation due to insane profiles.  */
      if (esucc->count < bb->count)
	esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
						 bb->count);
      else
	/* Can happen with missing/guessed probabilities, since we
	   may determine that more is flowing along duplicated
	   path than joiner succ probabilities allowed.
	   Counts and freqs will be insane after jump threading,
	   at least make sure probability is sane or we will
	   get a flow verification error.
	   Not much we can do to make counts/freqs sane without
	   redoing the profile estimation.  */
	esucc->probability = REG_BR_PROB_BASE;
    }
}
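/* For example (hypothetical counts): if BB ends up with count 90 and its
   two successor edges end up with counts 60 and 30, their probabilities
   are recomputed as 60/90 and 30/90 of REG_BR_PROB_BASE.  If an edge
   count exceeds the block count (a profile insanity), its probability is
   clamped to REG_BR_PROB_BASE.  */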
/* Update the counts of the original and duplicated edges from a joiner
   that go off path, given that we have already determined that the
   duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
   outgoing count along the path PATH_OUT_COUNT.  The original (on-)path
   edge from joiner is EPATH.  */

static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
			      gcov_type path_in_count,
			      gcov_type path_out_count)
{
  /* Compute the count that currently flows off path from the joiner.
     In other words, the total count of joiner's out edges other than
     epath.  Compute this by walking the successors instead of
     subtracting epath's count from the joiner bb count, since there
     are sometimes slight insanities where the total out edge count is
     larger than the bb count (possibly due to rounding/truncation
     errors).  */
  gcov_type total_orig_off_path_count = 0;
  edge enonpath;
  edge_iterator ei;
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      if (enonpath == epath)
	continue;
      total_orig_off_path_count += enonpath->count;
    }

  /* For the path that we are duplicating, the amount that will flow
     off path from the duplicated joiner is the delta between the
     path's cumulative in count and the portion of that count we
     estimated above as flowing from the joiner along the duplicated
     path.  */
  gcov_type total_dup_off_path_count = path_in_count - path_out_count;

  /* Now do the actual updates of the off-path edges.  */
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      /* Look for edges going off of the threading path.  */
      if (enonpath == epath)
	continue;

      /* Find the corresponding edge out of the duplicated joiner.  */
      edge enonpathdup = find_edge (dup_bb, enonpath->dest);
      gcc_assert (enonpathdup);

      /* We can't use the original probability of the joiner's out
	 edges, since the probabilities of the original branch
	 and the duplicated branches may vary after all threading is
	 complete.  But apportion the duplicated joiner's off-path
	 total edge count computed earlier (total_dup_off_path_count)
	 among the duplicated off-path edges based on their original
	 ratio to the full off-path count (total_orig_off_path_count).
	 */
      int scale = GCOV_COMPUTE_SCALE (enonpath->count,
				      total_orig_off_path_count);
      /* Give the duplicated offpath edge a portion of the duplicated
	 total.  */
      enonpathdup->count = apply_scale (scale,
					total_dup_off_path_count);
      /* Now update the original offpath edge count, handling underflow
	 due to rounding errors.  */
      enonpath->count -= enonpathdup->count;
      if (enonpath->count < 0)
	enonpath->count = 0;
    }
}
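/* Worked example (hypothetical counts): if the duplicated joiner came in
   with path_in_count = 60 and sends path_out_count = 48 along the path,
   then 12 must flow off path.  If the original joiner's two off-path
   edges carried 20 and 5 (total 25), the duplicated off-path edges get
   roughly 12 * 20/25 = 10 and 12 * 5/25 = 2, and those amounts are then
   subtracted from the corresponding original edges.  */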
/* Check if the paths through RD all have estimated frequencies but zero
   profile counts.  This is more accurate than checking the entry block
   for a zero profile count, since profile insanities sometimes creep in.  */

static bool
estimated_freqs_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  bool non_zero_freq = false;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      if (ein->count)
	return false;
      non_zero_freq |= ein->src->frequency != 0;
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      if (epath->src->count)
	return false;
      non_zero_freq |= epath->src->frequency != 0;
      edge esucc;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	{
	  if (esucc->count)
	    return false;
	  non_zero_freq |= esucc->src->frequency != 0;
	}
    }
  return non_zero_freq;
}
/* Invoked for routines that have guessed frequencies and no profile
   counts to record the block and edge frequencies for paths through RD
   in the profile count fields of those blocks and edges.  This is because
   ssa_fix_duplicate_block_edges incrementally updates the block and
   edge counts as edges are redirected, and it is difficult to do that
   for edge frequencies which are computed on the fly from the source
   block frequency and probability.  When a block frequency is updated
   its outgoing edge frequencies are affected and become difficult to
   adjust.  */

static void
freqs_to_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the probability when the frequencies are very
	 small.  */
      ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
				      ein->probability);
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      edge esucc;
      edge_iterator ei;
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the edge probability when the frequencies are very
	 small.  */
      epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = apply_probability (esucc->src->count,
					  esucc->probability);
    }
}
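/* For instance (hypothetical numbers): a block with frequency 100 gets a
   temporary count of 100 * REG_BR_PROB_BASE, and a successor edge with
   probability 3000 (30% of REG_BR_PROB_BASE) then receives a temporary
   count of 30 * REG_BR_PROB_BASE.  Working at this scale keeps the
   incremental updates from losing everything to integer rounding when
   the frequencies are small.  */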
/* For routines that have guessed frequencies and no profile counts, where we
   used freqs_to_counts_path to record block and edge frequencies for paths
   through RD, we clear the counts after completing all updates for RD.
   The updates in ssa_fix_duplicate_block_edges are based off the count fields,
   but the block frequencies and edge probabilities were updated as well,
   so we can simply clear the count fields.  */

static void
clear_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein, esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    ein->count = 0;

  /* First clear counts along original path.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = 0;
      epath->src->count = 0;
    }
  /* Also need to clear the counts along duplicated path.  */
  for (unsigned int i = 0; i < 2; i++)
    {
      basic_block dup = rd->dup_blocks[i];
      if (!dup)
	continue;
      FOR_EACH_EDGE (esucc, ei, dup->succs)
	esucc->count = 0;
      dup->count = 0;
    }
}
/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
static void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type path_in_count = 0;
  gcov_type path_out_count = 0;
  int path_in_freq = 0;

  /* This routine updates profile counts, frequencies, and probabilities
     incrementally.  Since it is difficult to do the incremental updates
     using frequencies/probabilities alone, for routines without profile
     data we first take a snapshot of the existing block and edge frequencies
     by copying them into the empty profile count fields.  These counts are
     then used to do the incremental updates, and cleared at the end of this
     routine.  If the function is marked as having a profile, we still check
     to see if the paths through RD are using estimated frequencies because
     the routine had zero profile counts.  */
  bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
			     || estimated_freqs_path (rd));
  if (do_freqs_to_counts)
    freqs_to_counts_path (rd);

  /* First determine how much profile count to move from original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count,
					 &path_in_freq);

  int cur_path_freq = path_in_freq;
  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count and frequency are the totals for all edges
	     incoming to this jump threading path computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count,
			  path_in_freq);

	  /* Next we need to update the counts of the original and duplicated
	     edges from the joiner that go off path.  */
	  update_joiner_offpath_counts (epath, e2->src, path_in_count,
					path_out_count);

	  /* Finally, we need to set the probabilities on the duplicated
	     edges out of the duplicated joiner (e2->src).  The probabilities
	     along the original path will all be updated below after we finish
	     processing the whole path.  */
	  recompute_probabilities (e2->src);

	  /* Record the frequency flowing to the downstream duplicated
	     path blocks.  */
	  cur_path_freq = EDGE_FREQUENCY (e2);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count,
			  cur_path_freq);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts/freqs that were moved
	     to the duplicated path from the counts/freqs flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, NULL, path_out_count, path_out_count,
			  cur_path_freq);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	count++;
    }

  /* Now walk orig blocks and update their probabilities, since the
     counts and freqs should be updated properly by above loop.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      recompute_probabilities (epath->src);
    }

  /* Done with all profile and frequency updates, clear counts if they
     were copied.  */
  if (do_freqs_to_counts)
    clear_counts_path (rd);
}
/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* The second duplicated block in a jump threading path is specific
     to the path.  So it gets stored in RD rather than in LOCAL_DATA.

     Each time we're called, we have to look through the path and see
     if a second block needs to be duplicated.

     Note the search starts with the third edge on the path.  The first
     edge is the incoming edge, the second edge always has its source
     duplicated.  Thus we start our search with the third edge.  */
  vec<jump_thread_edge *> *path = rd->path;
  for (unsigned int i = 2; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
				      &local_info->duplicate_blocks);
	  break;
	}
    }

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading ((*path)[1]->e->src, rd, 0,
				  &local_info->duplicate_blocks);
      local_info->template_block = rd->dup_blocks[0];

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd, 0,
				  &local_info->duplicate_blocks);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}
/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_blocks[0])
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);

	  /* If we redirect a loop latch edge cancel its loop.  */
	  if (e->src == e->src->loop_father->latch)
	    mark_loop_for_removal (e->src->loop_father);

	  /* Redirect the incoming edge (possibly to the joiner block) to the
	     appropriate duplicate block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      delete_jump_thread_path (path);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || is_gimple_debug (gsi_stmt (gsi))
	     || gimple_nop_p (gsi_stmt (gsi))
	     || gimple_clobber_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
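/* As an illustration (hypothetical GIMPLE), a block containing only

     <L1>:
       # DEBUG y => 42
       goto <L2>;

   satisfies this predicate, since labels, debug statements, nops and
   clobbers are skipped, while a block performing a real assignment
   before its terminating GIMPLE_COND does not.  */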
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.

   If JOINERS is true, then thread through joiner blocks as well.  */

static bool
thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;

  local_info.duplicate_blocks = BITMAP_ALLOC (NULL);

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data
    = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
	continue;

      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
	continue;

      e2 = path->last ()->e;
      if (!e2 || noloop_only)
	{
	  /* If NOLOOP_ONLY is true, we only allow threading through the
	     header of a loop to exit edges.  */

	  /* One case occurs when there was loop header buried in a jump
	     threading path that crosses loop boundaries.  We do not try
	     and thread this elsewhere, so just cancel the jump threading
	     request by clearing the AUX field now.  */
	  if ((bb->loop_father != e2->src->loop_father
	       && !loop_exit_edge_p (e2->src->loop_father, e2))
	      || (e2->src->loop_father != e2->dest->loop_father
		  && !loop_exit_edge_p (e2->src->loop_father, e2)))
	    {
	      /* Since this case is not handled by our special code
		 to thread through a loop header, we must explicitly
		 cancel the threading request here.  */
	      delete_jump_thread_path (path);
	      e->aux = NULL;
	      continue;
	    }

	  /* Another case occurs when trying to thread through our
	     own loop header, possibly from inside the loop.  We will
	     thread these later.  */
	  unsigned int i;
	  for (i = 1; i < path->length (); i++)
	    {
	      if ((*path)[i]->e->src == bb->loop_father->header
		  && (!loop_exit_edge_p (bb->loop_father, e2)
		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
		break;
	    }

	  if (i != path->length ())
	    continue;
	}

      /* Insert the outgoing edge into the hash table if it is not
	 already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
			    (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes as the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
			    (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
			    (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  delete redirection_data;
  redirection_data = NULL;

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  BITMAP_FREE (local_info.duplicate_blocks);
  local_info.duplicate_blocks = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}
/* Wrapper for thread_block_1 so that we can first handle jump
   thread paths which do not involve copying joiner blocks, then
   handle jump thread paths which have joiner blocks.

   By doing things this way we can be as aggressive as possible and
   not worry that copying a joiner block will create a jump threading
   opportunity.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  bool retval;
  retval = thread_block_1 (bb, noloop_only, false);
  retval |= thread_block_1 (bb, noloop_only, true);
  return retval;
}
/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  struct redirection_data rd;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge eto = (*path)[1]->e;

  delete_jump_thread_path (path);
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
	 control statements at its end, and successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
  jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
  npath->safe_push (x);

  x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
  npath->safe_push (x);
  rd.path = npath;

  create_block_for_threading (bb, &rd, 0, NULL);
  remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0], 0);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
	     e->src->index, e->dest->index, rd.dup_blocks[0]->index);

  rd.dup_blocks[0]->count = e->count;
  rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_blocks[0])->count = e->count;
  redirect_edge_and_branch (e, rd.dup_blocks[0]);
  flush_pending_stmts (e);

  delete_jump_thread_path (npath);
  return rd.dup_blocks[0];
}
/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
	  && bb != dbds_ce_stop);
}
/* Evaluates the dominance relationship of latch of the LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,
  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,
  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};
static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (e->src == loop->header)
	    {
	      ok = true;
	      break;
	    }
	}

      if (!ok)
	return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
				bblocks, loop->num_nodes, bb);
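  /* A block in this enumeration with a predecessor edge from the loop
     header witnesses a header-to-latch path that bypasses BB, so BB does
     not dominate the latch; an edge from BB itself shows that the latch
     is back-reachable from BB.  */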
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
	if (e->src == loop->header)
	  {
	    free (bblocks);
	    return DOMST_NONDOMINATING;
	  }
	if (e->src == bb)
	  bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop
     (although there are cases where it might be appropriate, it is difficult
     to decide, and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which also updating
     of the loop-carried information should be feasible:

     1) Propagation of latch edge to a block that dominates the latch block
	of a loop.  This aims to handle the following idiom:

	first = 1;
	while (1)
	  {
	    if (first)
	      initialize;
	    first = 0;
	    body;
	  }

	After threading the latch edge, this becomes

	first = 1;
	if (first)
	  initialize;
	while (1)
	  {
	    first = 0;
	    body;
	  }

	The original header of the loop is moved out of it, and we may thread
	the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
	the latch block of the loop.  This aims to handle the following idiom
	(normally created for "for" loops):

	i = 0;
	while (1)
	  {
	    if (i >= 100)
	      break;
	    body;
	    i++;
	  }
  */
  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  /* If we threaded the latch using a joiner block, we cancel the
     threading opportunity out of an abundance of caution.  However,
     still allow threading from outside to inside the loop.  */
  if (latch->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (latch);
      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  delete_jump_thread_path (path);
	  latch->aux = NULL;
	}
    }
  if (latch->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (latch);
      tgt_edge = (*path)[1]->e;
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
	   && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  if (!e->aux)
	    {
	      if (e == latch)
		continue;

	      /* If latch is not threaded, and there is a header
		 edge that is not threaded, we would create a loop
		 with multiple entries.  */
	      goto fail;
	    }

	  vec<jump_thread_edge *> *path = THREAD_PATH (e);

	  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	    goto fail;
	  tgt_edge = (*path)[1]->e;
	  atgt_bb = tgt_edge->dest;
	  if (!tgt_bb)
	    tgt_bb = atgt_bb;
	  /* Two targets of threading would make us create a loop
	     with multiple entries.  */
	  else if (tgt_bb != atgt_bb)
	    goto fail;
	}

      if (!tgt_bb)
	{
	  /* There are no threading requests.  */
	  return false;
	}

      /* Redirecting to empty loop latch is useless.  */
      if (tgt_bb == loop->latch
	  && empty_block_p (loop->latch))
	goto fail;
    }
  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
	 original header.  */
      mark_loop_for_removal (loop);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
	 to create a preheader for it, so that the headers of the two loops
	 do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
	{
	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
	  gcc_assert (tgt_bb != NULL);
	}
      else
	tgt_bb = split_edge (tgt_edge);
    }
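  /* The request is now known to be safe to realize.  The latch-threaded
     case (case 1 above) and the entry-threaded case (case 2 above) are
     handled separately below.  */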
  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
	 copying the loop header but not creating a multiple entry loop.
	 Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
				    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
	if (bblocks[i]->loop_father == loop)
	  {
	    remove_bb_from_loops (bblocks[i]);
	    add_bb_to_loop (bblocks[i], loop_outer (loop));
	  }
      free (bblocks);
      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src->loop_father == loop
	    && e->src != loop->latch)
	  {
	    loop->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }

      /* Cancel remaining threading requests that would make the
	 loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  edge e2;

	  if (e->aux == NULL)
	    continue;

	  vec<jump_thread_edge *> *path = THREAD_PATH (e);
	  e2 = path->last ()->e;

	  if (e->src->loop_father != e2->dest->loop_father
	      && e2->dest != loop->header)
	    {
	      delete_jump_thread_path (path);
	      e->aux = NULL;
	    }
	}

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where entry edges are redirected to the new
	 entry block.  Remember one entry edge, so that we can find the new
	 preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  if (e->aux)
	    break;
	}

      /* The duplicate of the header is the new preheader of the loop.  Ensure
	 that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
	 must have only a single successor, but the original header had at
	 least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;
fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (path)
	{
	  delete_jump_thread_path (path);
	  e->aux = NULL;
	}
    }
  return false;
}
/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gphi_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
			    gimple_phi_arg_def (phi, indx2), 0))
	return false;
    }
  return true;
}
/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCK set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */
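/* For instance, a request to thread A->B to the outgoing edge B->C is
   recorded by storing the path on edge A->B's AUX field and setting B's
   index in the bitmap of blocks to process.  */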
static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < paths.length (); i++)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge e = (*path)[0]->e;
	  e->aux = (void *)path;
	  bitmap_set_bit (tmp, e->dest->index);
	}
    }
  /* Now iterate again, converting cases where we want to thread
     through a joiner block, but only if no other edge on the path
     already has a jump thread attached to it.  We do this in two passes,
     to avoid situations where the order in the paths vec can hide overlapping
     threads (the path is recorded on the incoming edge, so we would miss
     cases where the second path starts at a downstream edge on the same
     path).  First record all joiner paths, deleting any in the unexpected
     case where there is already a path for that incoming edge.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  /* Attach the path to the starting edge if none is yet recorded.  */
	  if ((*path)[0]->e->aux == NULL)
	    {
	      (*path)[0]->e->aux = path;
	      i++;
	    }
	  else
	    {
	      paths.unordered_remove (i);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		dump_jump_thread_path (dump_file, *path, false);
	      delete_jump_thread_path (path);
	    }
	}
      else
	i++;
    }
  /* Second, look for paths that have any other jump thread attached to
     them, and either finish converting them or cancel them.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge e = (*path)[0]->e;

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
	{
	  unsigned int j;
	  for (j = 1; j < path->length (); j++)
	    if ((*path)[j]->e->aux != NULL)
	      break;

	  /* If we iterated through the entire path without exiting the loop,
	     then we are good to go, record it.  */
	  if (j == path->length ())
	    {
	      bitmap_set_bit (tmp, e->dest->index);
	      i++;
	    }
	  else
	    {
	      e->aux = NULL;
	      paths.unordered_remove (i);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		dump_jump_thread_path (dump_file, *path, false);
	      delete_jump_thread_path (path);
	    }
	}
      else
	i++;
    }
  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
	{
	  bb = BASIC_BLOCK_FOR_FN (cfun, i);
	  if (EDGE_COUNT (bb->preds) > 1
	      && !redirection_block_p (bb))
	    {
	      FOR_EACH_EDGE (e, ei, bb->preds)
		{
		  if (e->aux)
		    {
		      vec<jump_thread_edge *> *path = THREAD_PATH (e);
		      delete_jump_thread_path (path);
		      e->aux = NULL;
		    }
		}
	    }
	  else
	    bitmap_set_bit (threaded_blocks, i);
	}
    }
  else
    bitmap_copy (threaded_blocks, tmp);
  /* Look for jump threading paths which cross multiple loop headers.

     The code to thread through loop headers will change the CFG in ways
     that break assumptions made by the loop optimization code.

     We don't want to blindly cancel the requests.  We can instead do better
     by trimming off the end of the jump thread path.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (e->aux)
	    {
	      vec<jump_thread_edge *> *path = THREAD_PATH (e);

	      for (unsigned int i = 0, crossed_headers = 0;
		   i < path->length ();
		   i++)
		{
		  basic_block dest = (*path)[i]->e->dest;
		  crossed_headers += (dest == dest->loop_father->header);
		  if (crossed_headers > 1)
		    {
		      /* Trim from entry I onwards.  */
		      for (unsigned int j = i; j < path->length (); j++)
			delete (*path)[j];
		      path->truncate (i);

		      /* Now that we've truncated the path, make sure
			 what's left is still valid.  We need at least
			 two edges on the path and the last edge cannot
			 be a joiner.  This should never happen, but let's
			 be safe.  */
		      if (path->length () < 2
			  || (path->last ()->type
			      == EDGE_COPY_SRC_JOINER_BLOCK))
			{
			  delete_jump_thread_path (path);
			  e->aux = NULL;
			}
		      break;
		    }
		}
	    }
	}
    }
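  /* E.g., a path that would cross a second loop header at entry I is cut
     back to end just before that header; the shortened path is cancelled
     outright if fewer than two edges remain or it would now end in a
     joiner block.  */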
  /* If we have a joiner block (J) which has two successors S1 and S2 and
     we are threading through S1 and the final destination of the thread
     is S2, then we must verify that any PHI nodes in S2 have the same
     PHI arguments for the edge J->S2 and J->S1->...->S2.

     We used to detect this prior to registering the jump thread, but
     that prohibits propagation of edge equivalences into non-dominated
     PHI nodes as the equivalency test might occur before propagation.

     This must also occur after we truncate any jump threading paths
     as this scenario may only show up after truncation.

     This works for now, but will need improvement as part of the FSA
     optimization.

     Note since we've moved the thread request data to the edges,
     we have to iterate on those rather than the threaded_edges vector.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (e->aux)
	    {
	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
	      bool have_joiner = ((*path)[1]->type
				  == EDGE_COPY_SRC_JOINER_BLOCK);

	      if (have_joiner)
		{
		  basic_block joiner = e->dest;
		  edge final_edge = path->last ()->e;
		  basic_block final_dest = final_edge->dest;
		  edge e2 = find_edge (joiner, final_dest);

		  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
		    {
		      delete_jump_thread_path (path);
		      e->aux = NULL;
		    }
		}
	    }
	}
    }

  BITMAP_FREE (tmp);
}
/* Return true if BB ends with a switch statement or a computed goto.
   Otherwise return false.  */
static bool
bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
{
  gimple *stmt = last_stmt (bb);
  if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
    return true;
  if (stmt && gimple_code (stmt) == GIMPLE_GOTO
      && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
    return true;
  return false;
}
/* Verify that the REGION is a valid jump thread.  A jump thread is a special
   case of a SEME (Single Entry Multiple Exits) region in which all nodes in
   the REGION have exactly one incoming edge.  The only exception is the first
   block that may not have been connected to the rest of the cfg yet.  */

static void
verify_jump_thread (basic_block *region, unsigned n_region)
{
  for (unsigned i = 0; i < n_region; i++)
    gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
}

/* Return true when BB is one of the first N items in BBS.  */

static bool
bb_in_bbs (basic_block bb, basic_block *bbs, int n)
{
  for (int i = 0; i < n; i++)
    if (bb == bbs[i])
      return true;

  return false;
}
/* Duplicates a jump-thread path of N_REGION basic blocks.
   The ENTRY edge is redirected to the duplicate of the region.

   Remove the last conditional statement in the last basic block in the REGION,
   and create a single fallthru edge pointing to the same destination as the
   EXIT edge.

   The new basic blocks are stored to REGION_COPY in the same order as they had
   in REGION, provided that REGION_COPY is not NULL.

   Returns false if it is unable to copy the region, true otherwise.  */

static bool
duplicate_thread_path (edge entry, edge exit,
		       basic_block *region, unsigned n_region,
		       basic_block *region_copy)
{
  unsigned i;
  bool free_region_copy = false;
  struct loop *loop = entry->dest->loop_father;
  edge exit_copy;
  edge redirected;
  int total_freq = 0, entry_freq = 0;
  gcov_type total_count = 0, entry_count = 0;

  if (!can_copy_bbs_p (region, n_region))
    return false;

  /* Some sanity checking.  Note that we do not check for all possible
     misuses of the functions.  I.e. if you ask to copy something weird,
     it will work, but the state of structures probably will not be
     correct.  */
  for (i = 0; i < n_region; i++)
    {
      /* We do not handle subloops, i.e. all the blocks must belong to the
	 same loop.  */
      if (region[i]->loop_father != loop)
	return false;
    }
  initialize_original_copy_tables ();

  set_loop_copy (loop, loop);

  if (!region_copy)
    {
      region_copy = XNEWVEC (basic_block, n_region);
      free_region_copy = true;
    }

  if (entry->dest->count)
    {
      total_count = entry->dest->count;
      entry_count = entry->count;
      /* Fix up corner cases, to avoid division by zero or creation of negative
	 frequencies.  */
      if (entry_count > total_count)
	entry_count = total_count;
    }
  else
    {
      total_freq = entry->dest->frequency;
      entry_freq = EDGE_FREQUENCY (entry);
      /* Fix up corner cases, to avoid division by zero or creation of negative
	 frequencies.  */
      if (total_freq == 0)
	total_freq = 1;
      else if (entry_freq > total_freq)
	entry_freq = total_freq;
    }
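  /* Duplicate the region next to ENTRY; the copy of EXIT inside the
     duplicated region is returned in EXIT_COPY.  */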
  copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
	    split_edge_bb_loc (entry), false);

  /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
     following code ensures that all the edges exiting the jump-thread path are
     redirected back to the original code: these edges are exceptions
     invalidating the property that is propagated by executing all the blocks of
     the jump-thread path in order.  */

  for (i = 0; i < n_region; i++)
    {
      edge e;
      edge_iterator ei;
      basic_block bb = region_copy[i];

      if (single_succ_p (bb))
	{
	  /* Make sure the successor is the next node in the path.  */
	  gcc_assert (i + 1 == n_region
		      || region_copy[i + 1] == single_succ_edge (bb)->dest);
	  continue;
	}

      /* Special case the last block on the path: make sure that it does not
	 jump back on the copied path.  */
      if (i + 1 == n_region)
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (bb_in_bbs (e->dest, region_copy, n_region - 1))
	      {
		basic_block orig = get_bb_original (e->dest);
		if (orig)
		  redirect_edge_and_branch_force (e, orig);
	      }
	  continue;
	}

      /* Redirect all other edges jumping to non-adjacent blocks back to the
	 original code.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (region_copy[i + 1] != e->dest)
	  {
	    basic_block orig = get_bb_original (e->dest);
	    if (orig)
	      redirect_edge_and_branch_force (e, orig);
	  }
    }
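  /* Scale the profile: the original region keeps the flow that does not
     enter through ENTRY, while the copies receive ENTRY's share of the
     counts and frequencies.  */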
  if (total_count)
    {
      scale_bbs_frequencies_gcov_type (region, n_region,
				       total_count - entry_count,
				       total_count);
      scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
				       total_count);
    }
  else
    {
      scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
				 total_freq);
      scale_bbs_frequencies_int (region_copy, n_region, entry_freq,
				 total_freq);
    }

#ifdef ENABLE_CHECKING
  verify_jump_thread (region_copy, n_region);
#endif
  /* Remove the last branch in the jump thread path.  */
  remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);

  /* And fixup the flags on the single remaining edge.  */
  edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
  fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
  fix_e->flags |= EDGE_FALLTHRU;
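  /* make_edge returns NULL when the two blocks are already connected, so
     E may be NULL below; in that case the flags fixup above already
     covered the existing edge.  */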
  edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);

  if (e)
    {
      rescan_loop_exit (e, true, false);
      e->probability = REG_BR_PROB_BASE;
      e->count = region_copy[n_region - 1]->count;
    }

  /* Redirect the entry and add the phi node arguments.  */
  if (entry->dest == loop->header)
    mark_loop_for_removal (loop);
  redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
  gcc_assert (redirected != NULL);
  flush_pending_stmts (entry);

  /* Add the other PHI node arguments.  */
  add_phi_args_after_copy (region_copy, n_region, NULL);

  if (free_region_copy)
    free (region_copy);

  free_original_copy_tables ();
  return true;
}
/* Return true when PATH is a valid jump-thread path.  */

static bool
valid_jump_thread_path (vec<jump_thread_edge *> *path)
{
  unsigned len = path->length ();

  /* Check that the path is connected.  */
  for (unsigned int j = 0; j < len - 1; j++)
    if ((*path)[j]->e->dest != (*path)[j + 1]->e->src)
      return false;

  return true;
}
/* Remove any queued jump threads that include edge E.

   We don't actually remove them here, just record the edges into a
   hash table.  That way we can do the search once per iteration of
   DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR.  */

void
remove_jump_threads_including (edge_def *e)
{
  if (!paths.exists ())
    return;

  if (!removed_edges)
    removed_edges = new hash_table<struct removed_edges> (17);

  edge *slot = removed_edges->find_slot (e, INSERT);
  *slot = e;
}
/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;

  if (!paths.exists ())
    {
      retval = false;
      goto out;
    }

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  /* Remove any paths that referenced removed edges.  */
  if (removed_edges)
    for (i = 0; i < paths.length (); )
      {
	unsigned int j;
	vec<jump_thread_edge *> *path = paths[i];

	for (j = 0; j < path->length (); j++)
	  {
	    edge e = (*path)[j]->e;
	    if (removed_edges->find_slot (e, NO_INSERT))
	      break;
	  }

	if (j != path->length ())
	  {
	    delete_jump_thread_path (path);
	    paths.unordered_remove (i);
	    continue;
	  }
	i++;
      }
  /* Jump-thread all FSM threads before other jump-threads.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Only code-generate FSM jump-threads in this loop.  */
      if ((*path)[0]->type != EDGE_FSM_THREAD)
	{
	  i++;
	  continue;
	}

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index)
	  /* Verify that the jump thread path is still valid: a
	     previous jump-thread may have changed the CFG, and
	     invalidated the current path.  */
	  || !valid_jump_thread_path (path))
	{
	  /* Remove invalid FSM jump-thread paths.  */
	  delete_jump_thread_path (path);
	  paths.unordered_remove (i);
	  continue;
	}

      unsigned len = path->length ();
      edge exit = (*path)[len - 1]->e;
      basic_block *region = XNEWVEC (basic_block, len - 1);

      for (unsigned int j = 0; j < len - 1; j++)
	region[j] = (*path)[j]->e->dest;
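      /* The region to duplicate is every block the path runs through
	 except the final destination: ENTRY leads into region[0] and
	 EXIT leaves region[len - 2].  */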
      if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
	{
	  /* We do not update dominance info.  */
	  free_dominance_info (CDI_DOMINATORS);
	  bitmap_set_bit (threaded_blocks, entry->src->index);
	  retval = true;
	}

      delete_jump_thread_path (path);
      paths.unordered_remove (i);
      /* Release the scratch array of source blocks.  */
      free (region);
    }
  /* Remove from PATHS all the jump-threads starting with an edge already
     jump-threaded.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index))
	{
	  delete_jump_thread_path (path);
	  paths.unordered_remove (i);
	}
      else
	i++;
    }

  bitmap_clear (threaded_blocks);

  mark_threaded_blocks (threaded_blocks);
  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);

      if (EDGE_COUNT (bb->preds) > 0)
	retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
	continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }
  /* Any jump threading paths that are still attached to edges at this
     point must be one of two cases.

     First, we could have a jump threading path which went from outside
     a loop to inside a loop that was ignored because a prior jump thread
     across a backedge was realized (which indirectly causes the loop
     above to ignore the latter thread).  We can detect these because the
     loop structures will be different and we do not currently try to
     invalidate the loop structure.

     Second, we could be threading across a backedge to a point within the
     same loop.  This occurs for the FSA/FSM optimization and we would
     like to optimize it.  However, we have to be very careful as this
     may completely scramble the loop structures, with the result being
     irreducible loops causing us to throw away our loop structure.

     As a compromise for the latter case, if the thread path ends in
     a block where the last statement is a multiway branch, then go
     ahead and thread it, else ignore it.  */
  basic_block bb;
  edge e;
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* If we do end up threading here, we can remove elements from
	 BB->preds.  Thus we cannot use the FOR_EACH_EDGE iterator.  */
      for (edge_iterator ei = ei_start (bb->preds);
	   (e = ei_safe_edge (ei));)
	if (e->aux)
	  {
	    vec<jump_thread_edge *> *path = THREAD_PATH (e);

	    /* Case 1, threading from outside to inside the loop
	       after we'd already threaded through the header.  */
	    if ((*path)[0]->e->dest->loop_father
		!= path->last ()->e->src->loop_father)
	      {
		delete_jump_thread_path (path);
		e->aux = NULL;
		ei_next (&ei);
	      }
	    else if (bb_ends_with_multiway_branch (path->last ()->e->src))
	      {
		/* The code to thread through loop headers may have
		   split a block with jump threads attached to it.

		   We can identify this with a disjoint jump threading
		   path.  If found, just remove it.  */
		for (unsigned int i = 0; i < path->length () - 1; i++)
		  if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
		    {
		      delete_jump_thread_path (path);
		      e->aux = NULL;
		      ei_next (&ei);
		      break;
		    }

		/* Our path is still valid, thread it.  */
		if (e->aux)
		  {
		    if (thread_block ((*path)[0]->e->dest, false))
		      e->aux = NULL;
		    else
		      {
			delete_jump_thread_path (path);
			e->aux = NULL;
			ei_next (&ei);
		      }
		  }
	      }
	    else
	      {
		delete_jump_thread_path (path);
		e->aux = NULL;
		ei_next (&ei);
	      }
	  }
	else
	  ei_next (&ei);
    }
  statistics_counter_event (cfun, "Jumps threaded",
			    thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  paths.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

 out:
  delete removed_edges;
  removed_edges = NULL;

  return retval;
}
/* Delete the jump threading path PATH.  We have to explicitly delete
   each entry in the vector, then the container.  */

void
delete_jump_thread_path (vec<jump_thread_edge *> *path)
{
  for (unsigned int i = 0; i < path->length (); i++)
    delete (*path)[i];
  path->release ();
  delete path;
}
/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge, i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  */

void
register_jump_thread (vec<jump_thread_edge *> *path)
{
  if (!dbg_cnt (registered_jump_thread))
    {
      delete_jump_thread_path (path);
      return;
    }

  /* First make sure there are no NULL outgoing edges on the jump threading
     path.  That can happen for jumping to a constant address.  */
  for (unsigned int i = 0; i < path->length (); i++)
    if ((*path)[i]->e == NULL)
      {
	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    fprintf (dump_file,
		     "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
	    dump_jump_thread_path (dump_file, *path, false);
	  }

	delete_jump_thread_path (path);
	return;
      }

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_jump_thread_path (dump_file, *path, true);

  if (!paths.exists ())
    paths.create (5);

  paths.safe_push (path);
}
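/* A typical caller (sketch only; the edges E and E2 stand for whatever
   the client pass discovered) allocates the path on the heap and
   transfers ownership here:

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   Paths that are cancelled or consumed are released with
   delete_jump_thread_path.  */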