1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "alias.h"
24 #include "backend.h"
25 #include "cfghooks.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "hard-reg-set.h"
29 #include "ssa.h"
30 #include "options.h"
31 #include "fold-const.h"
32 #include "flags.h"
33 #include "cfganal.h"
34 #include "internal-fn.h"
35 #include "gimple-iterator.h"
36 #include "tree-ssa.h"
37 #include "tree-ssa-threadupdate.h"
38 #include "dumpfile.h"
39 #include "cfgloop.h"
40 #include "dbgcnt.h"
41 #include "tree-cfg.h"
42 #include "tree-pass.h"
43
44 /* Given a block B, update the CFG and SSA graph to reflect redirecting
45 one or more in-edges to B to instead reach the destination of an
46 out-edge from B while preserving any side effects in B.
47
48 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
49 side effects of executing B.
50
51 1. Make a copy of B (including its outgoing edges and statements). Call
52 the copy B'. Note B' has no incoming edges or PHIs at this time.
53
54 2. Remove the control statement at the end of B' and all outgoing edges
55 except B'->C.
56
57 3. Add a new argument to each PHI in C with the same value as the existing
58 argument associated with edge B->C. Associate the new PHI arguments
59 with the edge B'->C.
60
61 4. For each PHI in B, find or create a PHI in B' with an identical
62 PHI_RESULT. Add an argument to the PHI in B' which has the same
63 value as the PHI in B associated with the edge A->B. Associate
64 the new argument in the PHI in B' with the edge A->B.
65
66 5. Change the edge A->B to A->B'.
67
68 5a. This automatically deletes any PHI arguments associated with the
69 edge A->B in B.
70
71 5b. This automatically associates each new argument added in step 4
72 with the edge A->B'.
73
74 6. Repeat for other incoming edges into B.
75
76 7. Put the duplicated resources in B and all the B' blocks into SSA form.
77
78 Note that block duplication can be minimized by first collecting the
79 set of unique destination blocks that the incoming edges should
80 be threaded to.
81
82 We reduce the number of edges and statements we create by not copying all
83 the outgoing edges and the control statement in step #1. We instead create
84 a template block without the outgoing edges and duplicate the template.
85
86 Another case this code handles is threading through a "joiner" block. In
87 this case, we do not know the destination of the joiner block, but one
88 of the outgoing edges from the joiner block leads to a threadable path. This
89 case largely works as outlined above, except the duplicate of the joiner
90 block still contains a full set of outgoing edges and its control statement.
91 We just redirect one of its outgoing edges to our jump threading path. */
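
/* An illustrative sketch of the basic transformation (hypothetical blocks,
   not taken from any particular testcase): suppose A->B is threaded to
   B's successor C because reaching B via A makes B's conditional provably
   take the edge B->C.

        A   X                A   X
         \ /                 |   |
          B        ==>       B'  B
         / \                 |  / \
        C   D                | /   \
                             C      D

   B' is the duplicate of B: it keeps B's side effects, loses B's control
   statement, and falls through to C.  The original B remains for the
   other incoming edge X->B.  */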
92
93
94 /* Steps #5 and #6 of the above algorithm are best implemented by walking
95 all the incoming edges which thread to the same destination edge at
96 the same time. That avoids lots of table lookups to get information
97 for the destination edge.
98
99 To realize that implementation we create a list of incoming edges
100 which thread to the same outgoing edge. Thus to implement steps
101 #5 and #6 we traverse our hash table of outgoing edge information.
102 For each entry we walk the list of incoming edges which thread to
103 the current outgoing edge. */
104
105 struct el
106 {
107 edge e;
108 struct el *next;
109 };
110
111 /* Main data structure recording information regarding B's duplicate
112 blocks. */
113
114 /* We need to efficiently record the unique thread destinations of this
115 block and specific information associated with those destinations. We
116 may have many incoming edges threaded to the same outgoing edge. This
117 can be naturally implemented with a hash table. */
118
119 struct redirection_data : free_ptr_hash<redirection_data>
120 {
121 /* We support wiring up two block duplicates in a jump threading path.
122
123 One is a normal block copy where we remove the control statement
124 and wire up its single remaining outgoing edge to the thread path.
125
126 The other is a joiner block where we leave the control statement
127 in place, but wire one of the outgoing edges to a thread path.
128
129 In theory we could have multiple block duplicates in a jump
130 threading path, but I haven't tried that.
131
132 The duplicate blocks appear in this array in the same order in
133 which they appear in the jump thread path. */
134 basic_block dup_blocks[2];
135
136 /* The jump threading path. */
137 vec<jump_thread_edge *> *path;
138
139 /* A list of incoming edges which we want to thread to the
140 same path. */
141 struct el *incoming_edges;
142
143 /* hash_table support. */
144 static inline hashval_t hash (const redirection_data *);
145 static inline int equal (const redirection_data *, const redirection_data *);
146 };
147
148 /* Dump a jump threading path, including annotations about each
149 edge in the path. */
150
151 static void
152 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
153 bool registering)
154 {
155 fprintf (dump_file,
156 " %s%s jump thread: (%d, %d) incoming edge; ",
157 (registering ? "Registering" : "Cancelling"),
158 (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
159 path[0]->e->src->index, path[0]->e->dest->index);
160
161 for (unsigned int i = 1; i < path.length (); i++)
162 {
163 /* We can get paths with a NULL edge when the final destination
164 of a jump thread turns out to be a constant address. We dump
165 those paths when debugging, so we have to be prepared for that
166 possibility here. */
167 if (path[i]->e == NULL)
168 continue;
169
170 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
171 fprintf (dump_file, " (%d, %d) joiner; ",
172 path[i]->e->src->index, path[i]->e->dest->index);
173 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
174 fprintf (dump_file, " (%d, %d) normal;",
175 path[i]->e->src->index, path[i]->e->dest->index);
176 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
177 fprintf (dump_file, " (%d, %d) nocopy;",
178 path[i]->e->src->index, path[i]->e->dest->index);
179 if (path[0]->type == EDGE_FSM_THREAD)
180 fprintf (dump_file, " (%d, %d) ",
181 path[i]->e->src->index, path[i]->e->dest->index);
182 }
183 fputc ('\n', dump_file);
184 }
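
/* For reference, the output of the routine above looks roughly like the
   following (hypothetical block indices, spacing approximate):

     Registering jump thread: (2, 4) incoming edge;  (4, 6) joiner;  (6, 8) normal;

   with "Cancelling" instead of "Registering" when REGISTERING is false and
   an extra " FSM" marker for EDGE_FSM_THREAD paths.  */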
185
186 /* Simple hashing function. For any given incoming edge E, we're going
187 to be most concerned with the final destination of its jump thread
188 path. So hash on the block index of the final edge in the path. */
189
190 inline hashval_t
191 redirection_data::hash (const redirection_data *p)
192 {
193 vec<jump_thread_edge *> *path = p->path;
194 return path->last ()->e->dest->index;
195 }
196
197 /* Given two hash table entries, return true if they have the same
198 jump threading path. */
199 inline int
200 redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
201 {
202 vec<jump_thread_edge *> *path1 = p1->path;
203 vec<jump_thread_edge *> *path2 = p2->path;
204
205 if (path1->length () != path2->length ())
206 return false;
207
208 for (unsigned int i = 1; i < path1->length (); i++)
209 {
210 if ((*path1)[i]->type != (*path2)[i]->type
211 || (*path1)[i]->e != (*path2)[i]->e)
212 return false;
213 }
214
215 return true;
216 }
217
218 /* Rather than search all the edges in jump thread paths each time
219 DOM is able to simplify a control statement, we build a hash table
220 with the deleted edges. We only care about the address of the edge,
221 not its contents. */
222 struct removed_edges : nofree_ptr_hash<edge_def>
223 {
224 static hashval_t hash (edge e) { return htab_hash_pointer (e); }
225 static bool equal (edge e1, edge e2) { return e1 == e2; }
226 };
227
228 static hash_table<removed_edges> *removed_edges;
229
230 /* Data structure of information to pass to hash table traversal routines. */
231 struct ssa_local_info_t
232 {
233 /* The current block we are working on. */
234 basic_block bb;
235
236 /* We only create a template block for the first duplicated block in a
237 jump threading path as we may need many duplicates of that block.
238
239 The second duplicate block in a path is specific to that path. Creating
240 and sharing a template for that block is considerably more difficult. */
241 basic_block template_block;
242
243 /* TRUE if we thread one or more jumps, FALSE otherwise. */
244 bool jumps_threaded;
245
246 /* Blocks duplicated for the thread. */
247 bitmap duplicate_blocks;
248 };
249
250 /* Passes which use the jump threading code register jump threading
251 opportunities as they are discovered. We keep the registered
252 jump threading opportunities in this vector, one entry per
253 registered jump threading path. */
254 static vec<vec<jump_thread_edge *> *> paths;
255
256 /* When we start updating the CFG for threading, data necessary for jump
257 threading is attached to the AUX field for the incoming edge. Use these
258 macros to access the underlying structure attached to the AUX field. */
259 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
260
261 /* Jump threading statistics. */
262
263 struct thread_stats_d
264 {
265 unsigned long num_threaded_edges;
266 };
267
268 struct thread_stats_d thread_stats;
269
270
271 /* Remove the last statement in block BB if it is a control statement.
272 Also remove all outgoing edges except the edge which reaches DEST_BB.
273 If DEST_BB is NULL, then remove all outgoing edges. */
274
275 void
276 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
277 {
278 gimple_stmt_iterator gsi;
279 edge e;
280 edge_iterator ei;
281
282 gsi = gsi_last_bb (bb);
283
284 /* If the duplicate ends with a control statement, then remove it.
285
286 Note that if we are duplicating the template block rather than the
287 original basic block, then the duplicate might not have any real
288 statements in it. */
289 if (!gsi_end_p (gsi)
290 && gsi_stmt (gsi)
291 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
292 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
293 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
294 gsi_remove (&gsi, true);
295
296 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
297 {
298 if (e->dest != dest_bb)
299 remove_edge (e);
300 else
301 ei_next (&ei);
302 }
303 }
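
/* For example (hypothetical): if the duplicate ends with
   "if (x_1 != 0) goto C; else goto D;" and DEST_BB is C, the conditional
   is removed and the outgoing edge to D is discarded, leaving only the
   edge to C; with DEST_BB == NULL both outgoing edges would be removed.  */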
304
305 /* Create a duplicate of BB. Record the duplicate block in an array
306 indexed by COUNT stored in RD. */
307
308 static void
309 create_block_for_threading (basic_block bb,
310 struct redirection_data *rd,
311 unsigned int count,
312 bitmap *duplicate_blocks)
313 {
314 edge_iterator ei;
315 edge e;
316
317 /* We can use the generic block duplication code and simply remove
318 the stuff we do not need. */
319 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
320
321 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
322 e->aux = NULL;
323
324 /* Zero out the profile, since the block is unreachable for now. */
325 rd->dup_blocks[count]->frequency = 0;
326 rd->dup_blocks[count]->count = 0;
327 if (duplicate_blocks)
328 bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
329 }
330
331 /* Main data structure to hold information for duplicates of BB. */
332
333 static hash_table<redirection_data> *redirection_data;
334
335 /* Given an edge E, look up and return its path's entry in our hash table.
336
337 If INSERT is true, then we insert the entry into the hash table if
338 it is not already present. E is also added to the list of incoming
339 edges associated with its path's entry in the hash table. */
340
341 static struct redirection_data *
342 lookup_redirection_data (edge e, enum insert_option insert)
343 {
344 struct redirection_data **slot;
345 struct redirection_data *elt;
346 vec<jump_thread_edge *> *path = THREAD_PATH (e);
347
348 /* Build a hash table element so we can see if E is already
349 in the table. */
350 elt = XNEW (struct redirection_data);
351 elt->path = path;
352 elt->dup_blocks[0] = NULL;
353 elt->dup_blocks[1] = NULL;
354 elt->incoming_edges = NULL;
355
356 slot = redirection_data->find_slot (elt, insert);
357
358 /* This will only happen if INSERT is false and the entry is not
359 in the hash table. */
360 if (slot == NULL)
361 {
362 free (elt);
363 return NULL;
364 }
365
366 /* This will only happen if E was not in the hash table and
367 INSERT is true. */
368 if (*slot == NULL)
369 {
370 *slot = elt;
371 elt->incoming_edges = XNEW (struct el);
372 elt->incoming_edges->e = e;
373 elt->incoming_edges->next = NULL;
374 return elt;
375 }
376 /* E was in the hash table. */
377 else
378 {
379 /* Free ELT as we do not need it anymore, we will extract the
380 relevant entry from the hash table itself. */
381 free (elt);
382
383 /* Get the entry stored in the hash table. */
384 elt = *slot;
385
386 /* If insertion was requested, then we need to add INCOMING_EDGE
387 to the list of incoming edges associated with E. */
388 if (insert)
389 {
390 struct el *el = XNEW (struct el);
391 el->next = elt->incoming_edges;
392 el->e = e;
393 elt->incoming_edges = el;
394 }
395
396 return elt;
397 }
398 }
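
/* To summarize the contract above (a hedged restatement, not additional
   behavior): with INSERT, the call either creates a fresh entry whose
   incoming_edges list starts with E, or prepends E to the list of an
   existing entry for an equal path; with the non-inserting option
   (NO_INSERT) it returns the existing entry unmodified, or NULL if no
   equal path has been recorded.  */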
399
400 /* Similar to copy_phi_args, except that the PHI arg already exists; it just
401 does not have a value associated with it. */
402
403 static void
404 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
405 {
406 int src_idx = src_e->dest_idx;
407 int tgt_idx = tgt_e->dest_idx;
408
409 /* Iterate over the PHIs in SRC_E->dest and TGT_E->dest in lockstep. */
410 for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
411 gsi2 = gsi_start_phis (tgt_e->dest);
412 !gsi_end_p (gsi);
413 gsi_next (&gsi), gsi_next (&gsi2))
414 {
415 gphi *src_phi = gsi.phi ();
416 gphi *dest_phi = gsi2.phi ();
417 tree val = gimple_phi_arg_def (src_phi, src_idx);
418 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
419
420 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
421 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
422 }
423 }
424
425 /* Given ssa_name DEF, backtrack jump threading PATH from node IDX
426 to see if it has a constant value in a flow-sensitive manner. Set
427 LOCUS to the location of the constant PHI arg and return the value.
428 Return DEF directly if PATH is NULL or IDX is zero. */
429
430 static tree
431 get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
432 basic_block bb, int idx, source_location *locus)
433 {
434 tree arg;
435 gphi *def_phi;
436 basic_block def_bb;
437
438 if (path == NULL || idx == 0)
439 return def;
440
441 def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
442 if (!def_phi)
443 return def;
444
445 def_bb = gimple_bb (def_phi);
446 /* Don't propagate loop invariants into deeper loops. */
447 if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
448 return def;
449
450 /* Backtrack jump threading path from IDX to see if def has constant
451 value. */
452 for (int j = idx - 1; j >= 0; j--)
453 {
454 edge e = (*path)[j]->e;
455 if (e->dest == def_bb)
456 {
457 arg = gimple_phi_arg_def (def_phi, e->dest_idx);
458 if (is_gimple_min_invariant (arg))
459 {
460 *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
461 return arg;
462 }
463 break;
464 }
465 }
466
467 return def;
468 }
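
/* A small illustrative sketch (hypothetical SSA names and blocks): assume
   DEF is x_3, defined by "x_3 = PHI <7(e1), y_5(e2)>" in block D, where e1
   and e2 are D's incoming edges, and that the threading path enters D via
   e1 somewhere before IDX.  Backtracking from IDX - 1 finds that edge,
   sees that the corresponding argument 7 is a minimal invariant, records
   its location in *LOCUS and returns 7.  Had the path entered D via e2,
   y_5 is not invariant, so DEF itself would be returned.  */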
469
470 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
471 Try to backtrack along jump threading PATH from node IDX to see if the
472 arg has a constant value; if so, copy the constant value instead of the
473 argument itself. */
474
475 static void
476 copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
477 vec<jump_thread_edge *> *path, int idx)
478 {
479 gphi_iterator gsi;
480 int src_indx = src_e->dest_idx;
481
482 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
483 {
484 gphi *phi = gsi.phi ();
485 tree def = gimple_phi_arg_def (phi, src_indx);
486 source_location locus = gimple_phi_arg_location (phi, src_indx);
487
488 if (TREE_CODE (def) == SSA_NAME
489 && !virtual_operand_p (gimple_phi_result (phi)))
490 def = get_value_locus_in_path (def, path, bb, idx, &locus);
491
492 add_phi_arg (phi, def, tgt_e, locus);
493 }
494 }
495
496 /* We have recently made a copy of ORIG_BB, including its outgoing
497 edges. The copy is NEW_BB. Every PHI node in every direct successor of
498 ORIG_BB has a new argument associated with edge from NEW_BB to the
499 successor. Initialize the PHI argument so that it is equal to the PHI
500 argument associated with the edge from ORIG_BB to the successor.
501 PATH and IDX are used to check if the new PHI argument has constant
502 value in a flow sensitive manner. */
503
504 static void
505 update_destination_phis (basic_block orig_bb, basic_block new_bb,
506 vec<jump_thread_edge *> *path, int idx)
507 {
508 edge_iterator ei;
509 edge e;
510
511 FOR_EACH_EDGE (e, ei, orig_bb->succs)
512 {
513 edge e2 = find_edge (new_bb, e->dest);
514 copy_phi_args (e->dest, e, e2, path, idx);
515 }
516 }
517
518 /* Given a duplicate block and its single destination (both stored
519 in RD), create an edge between the duplicate and its single
520 destination.
521
522 Add an additional argument to any PHI nodes at the single
523 destination. IDX is the node in the jump threading path at which
524 we start checking whether the new PHI argument has a constant
525 value along the jump threading path. */
526
527 static void
528 create_edge_and_update_destination_phis (struct redirection_data *rd,
529 basic_block bb, int idx)
530 {
531 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
532
533 rescan_loop_exit (e, true, false);
534 e->probability = REG_BR_PROB_BASE;
535 e->count = bb->count;
536
537 /* We used to copy the thread path here. That was added in 2007
538 and dutifully updated through the representation changes in 2013.
539
540 In 2013 we added code to thread from an interior node through
541 the backedge to another interior node. That runs after the code
542 to thread through loop headers from outside the loop.
543
544 The latter may delete edges in the CFG, including those
545 which appeared in the jump threading path we copied here. Thus
546 we'd end up using a dangling pointer.
547
548 After reviewing the 2007/2011 code, I can't see how anything
549 depended on copying the AUX field and clearly copying the jump
550 threading path is problematical due to embedded edge pointers.
551 It has been removed. */
552 e->aux = NULL;
553
554 /* If there are any PHI nodes at the destination of the outgoing edge
555 from the duplicate block, then we will need to add a new argument
556 to them. The argument should have the same value as the argument
557 associated with the outgoing edge stored in RD. */
558 copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
559 }
560
561 /* Look through PATH beginning at START and return TRUE if there are
562 any additional blocks that need to be duplicated. Otherwise,
563 return FALSE. */
564 static bool
565 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
566 unsigned int start)
567 {
568 for (unsigned int i = start + 1; i < path->length (); i++)
569 {
570 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
571 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
572 return true;
573 }
574 return false;
575 }
576
577
578 /* Compute the amount of profile count/frequency coming into the jump threading
579 path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
580 PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
581 duplicated path, returned in PATH_OUT_COUNT_PTR. LOCAL_INFO is used to
582 identify blocks duplicated for jump threading, which have duplicated
583 edges that need to be ignored in the analysis. Return true if path contains
584 a joiner, false otherwise.
585
586 In the non-joiner case, this is straightforward - all the counts/frequency
587 flowing into the jump threading path should flow through the duplicated
588 block and out of the duplicated path.
589
590 In the joiner case, it is very tricky. Some of the counts flowing into
591 the original path go offpath at the joiner. The problem is that while
592 we know how much total count goes off-path in the original control flow,
593 we don't know how many of the counts corresponding to just the jump
594 threading path go offpath at the joiner.
595
596 For example, assume we have the following control flow and identified
597 jump threading paths:
598
599 A B C
600 \ | /
601 Ea \ |Eb / Ec
602 \ | /
603 v v v
604 J <-- Joiner
605 / \
606 Eoff/ \Eon
607 / \
608 v v
609 Soff Son <--- Normal
610 /\
611 Ed/ \ Ee
612 / \
613 v v
614 D E
615
616 Jump threading paths: A -> J -> Son -> D (path 1)
617 C -> J -> Son -> E (path 2)
618
619 Note that the control flow could be more complicated:
620 - Each jump threading path may have more than one incoming edge. I.e. A and
621 Ea could represent multiple incoming blocks/edges that are included in
622 path 1.
623 - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
624 before or after the "normal" copy block). These are not duplicated onto
625 the jump threading path, as they are single-successor.
626 - Any of the blocks along the path may have other incoming edges that
627 are not part of any jump threading path, but add profile counts along
628 the path.
629
630 In the above example, after all jump threading is complete, we will
631 end up with the following control flow:
632
633 A B C
634 | | |
635 Ea| |Eb |Ec
636 | | |
637 v v v
638 Ja J Jc
639 / \ / \Eon' / \
640 Eona/ \ ---/---\-------- \Eonc
641 / \ / / \ \
642 v v v v v
643 Sona Soff Son Sonc
644 \ /\ /
645 \___________ / \ _____/
646 \ / \/
647 vv v
648 D E
649
650 The main issue to notice here is that when we are processing path 1
651 (A->J->Son->D) we need to figure out the outgoing edge weights to
652 the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
653 sum of the incoming weights to D remains Ed. The problem with simply
654 assuming that Ja (and Jc when processing path 2) has the same outgoing
655 probabilities to its successors as the original block J, is that after
656 all paths are processed and other edges/counts removed (e.g. none
657 of Ec will reach D after processing path 2), we may end up with not
658 enough count flowing along duplicated edge Sona->D.
659
660 Therefore, in the case of a joiner, we keep track of all counts
661 coming in along the current path, as well as from predecessors not
662 on any jump threading path (Eb in the above example). While we
663 first assume that the duplicated Eona for Ja->Sona has the same
664 probability as the original, we later compensate for other jump
665 threading paths that may eliminate edges. We do that by keeping track
666 of all counts coming into the original path that are not in a jump
667 thread (Eb in the above example, but as noted earlier, there could
668 be other predecessors incoming to the path at various points, such
669 as at Son). Call this cumulative non-path count coming into the path
670 before D Enonpath. We then ensure that the count from Sona->D is at
671 least as big as (Ed - Enonpath), but no bigger than the minimum
672 weight along the jump threading path. The probabilities of both the
673 original and duplicated joiner block J and Ja will be adjusted
674 accordingly after the updates. */
675
676 static bool
677 compute_path_counts (struct redirection_data *rd,
678 ssa_local_info_t *local_info,
679 gcov_type *path_in_count_ptr,
680 gcov_type *path_out_count_ptr,
681 int *path_in_freq_ptr)
682 {
683 edge e = rd->incoming_edges->e;
684 vec<jump_thread_edge *> *path = THREAD_PATH (e);
685 edge elast = path->last ()->e;
686 gcov_type nonpath_count = 0;
687 bool has_joiner = false;
688 gcov_type path_in_count = 0;
689 int path_in_freq = 0;
690
691 /* Start by accumulating incoming edge counts to the path's first bb
692 into a couple buckets:
693 path_in_count: total count of incoming edges that flow into the
694 current path.
695 nonpath_count: total count of incoming edges that are not
696 flowing along *any* path. These are the counts
697 that will still flow along the original path after
698 all path duplication is done by potentially multiple
699 calls to this routine.
700 (any other incoming edge counts are for a different jump threading
701 path that will be handled by a later call to this routine.)
702 To make this easier, start by recording all incoming edges that flow into
703 the current path in a bitmap. We could add up the path's incoming edge
704 counts here, but we still need to walk all the first bb's incoming edges
705 below to add up the counts of the other edges not included in this jump
706 threading path. */
707 struct el *next, *el;
708 bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
709 for (el = rd->incoming_edges; el; el = next)
710 {
711 next = el->next;
712 bitmap_set_bit (in_edge_srcs, el->e->src->index);
713 }
714 edge ein;
715 edge_iterator ei;
716 FOR_EACH_EDGE (ein, ei, e->dest->preds)
717 {
718 vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
719 /* Simply check the incoming edge src against the set captured above. */
720 if (ein_path
721 && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
722 {
723 /* It is necessary but not sufficient that the last path edges
724 are identical. There may be different paths that share the
725 same last path edge in the case where the last edge has a nocopy
726 source block. */
727 gcc_assert (ein_path->last ()->e == elast);
728 path_in_count += ein->count;
729 path_in_freq += EDGE_FREQUENCY (ein);
730 }
731 else if (!ein_path)
732 {
733 /* Keep track of the incoming edges that are not on any jump-threading
734 path. These counts will still flow out of original path after all
735 jump threading is complete. */
736 nonpath_count += ein->count;
737 }
738 }
739
740 /* This is needed due to insane incoming frequencies. */
741 if (path_in_freq > BB_FREQ_MAX)
742 path_in_freq = BB_FREQ_MAX;
743
744 BITMAP_FREE (in_edge_srcs);
745
746 /* Now compute the fraction of the total count coming into the first
747 path bb that is from the current threading path. */
748 gcov_type total_count = e->dest->count;
749 /* Handle incoming profile insanities. */
750 if (total_count < path_in_count)
751 path_in_count = total_count;
752 int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);
753
754 /* Walk the entire path to do some more computation in order to estimate
755 how much of the path_in_count will flow out of the duplicated threading
756 path. In the non-joiner case this is straightforward (it should be
757 the same as path_in_count, although we will handle incoming profile
758 insanities by setting it equal to the minimum count along the path).
759
760 In the joiner case, we need to estimate how much of the path_in_count
761 will stay on the threading path after the joiner's conditional branch.
762 We don't really know for sure how much of the counts
763 associated with this path go to each successor of the joiner, but we'll
764 estimate based on the fraction of the total count coming into the path
765 bb that was from the threading paths (computed above in onpath_scale).
766 Afterwards, we will need to do some fixup to account for other threading
767 paths and possible profile insanities.
768
769 In order to estimate the joiner case's counts we also need to update
770 nonpath_count with any additional counts coming into the path. Other
771 blocks along the path may have additional predecessors from outside
772 the path. */
773 gcov_type path_out_count = path_in_count;
774 gcov_type min_path_count = path_in_count;
775 for (unsigned int i = 1; i < path->length (); i++)
776 {
777 edge epath = (*path)[i]->e;
778 gcov_type cur_count = epath->count;
779 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
780 {
781 has_joiner = true;
782 cur_count = apply_probability (cur_count, onpath_scale);
783 }
784 /* In the joiner case we need to update nonpath_count for any edges
785 coming into the path that will contribute to the count flowing
786 into the path successor. */
787 if (has_joiner && epath != elast)
788 {
789 /* Look for other incoming edges after joiner. */
790 FOR_EACH_EDGE (ein, ei, epath->dest->preds)
791 {
792 if (ein != epath
793 /* Ignore in edges from blocks we have duplicated for a
794 threading path, which have duplicated edge counts until
795 they are redirected by an invocation of this routine. */
796 && !bitmap_bit_p (local_info->duplicate_blocks,
797 ein->src->index))
798 nonpath_count += ein->count;
799 }
800 }
801 if (cur_count < path_out_count)
802 path_out_count = cur_count;
803 if (epath->count < min_path_count)
804 min_path_count = epath->count;
805 }
806
807 /* We computed path_out_count above assuming that this path targeted
808 the joiner's on-path successor with the same likelihood as it
809 reached the joiner. However, other thread paths through the joiner
810 may take a different path through the normal copy source block
811 (i.e. they have a different elast), meaning that they do not
812 contribute any counts to this path's elast. As a result, it may
813 turn out that this path must have more count flowing to the on-path
814 successor of the joiner. Essentially, all of this path's elast
815 count must be contributed by this path and any nonpath counts
816 (since any path through the joiner with a different elast will not
817 include a copy of this elast in its duplicated path).
818 So ensure that this path's path_out_count is at least the
819 difference between elast->count and nonpath_count. Otherwise the edge
820 counts after threading will not be sane. */
821 if (has_joiner && path_out_count < elast->count - nonpath_count)
822 {
823 path_out_count = elast->count - nonpath_count;
824 /* But neither can we go above the minimum count along the path
825 we are duplicating. This can be an issue due to profile
826 insanities coming in to this pass. */
827 if (path_out_count > min_path_count)
828 path_out_count = min_path_count;
829 }
830
831 *path_in_count_ptr = path_in_count;
832 *path_out_count_ptr = path_out_count;
833 *path_in_freq_ptr = path_in_freq;
834 return has_joiner;
835 }
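
/* A worked sketch with hypothetical numbers (ignoring any extra off-path
   predecessors after the joiner): say the path's first block has one
   incoming path edge with count 60 and one non-path edge with count 40,
   so path_in_count = 60, nonpath_count = 40, total_count = 100 and
   onpath_scale is 60%.  If the joiner's on-path successor edge has count
   80, its scaled contribution is 48, so path_out_count starts at 48 and
   min_path_count is 60.  If the final path edge ELAST has count 95, then
   at least 95 - 40 = 55 of that count must be supplied by this path, so
   path_out_count is raised from 48 to 55; it stays below min_path_count
   (60), so no further capping is needed.  */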
836
837
838 /* Update the counts and frequencies for both an original path
839 edge EPATH and its duplicate EDUP. The duplicate source block
840 will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
841 and the duplicate edge EDUP will have a count of PATH_OUT_COUNT. */
842 static void
843 update_profile (edge epath, edge edup, gcov_type path_in_count,
844 gcov_type path_out_count, int path_in_freq)
845 {
846
847 /* First update the duplicated block's count / frequency. */
848 if (edup)
849 {
850 basic_block dup_block = edup->src;
851 gcc_assert (dup_block->count == 0);
852 gcc_assert (dup_block->frequency == 0);
853 dup_block->count = path_in_count;
854 dup_block->frequency = path_in_freq;
855 }
856
857 /* Now update the original block's count and frequency in the
858 opposite manner - remove the counts/freq that will flow
859 into the duplicated block. Handle underflow due to precision/
860 rounding issues. */
861 epath->src->count -= path_in_count;
862 if (epath->src->count < 0)
863 epath->src->count = 0;
864 epath->src->frequency -= path_in_freq;
865 if (epath->src->frequency < 0)
866 epath->src->frequency = 0;
867
868 /* Next update this path edge's original and duplicated counts. We know
869 that the duplicated path will have path_out_count flowing
870 out of it (in the joiner case this is the count along the duplicated path
871 out of the duplicated joiner). This count can then be removed from the
872 original path edge. */
873 if (edup)
874 edup->count = path_out_count;
875 epath->count -= path_out_count;
876 gcc_assert (epath->count >= 0);
877 }
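
/* A numeric sketch (hypothetical values): with path_in_count = 60,
   path_in_freq = 600 and path_out_count = 55, the duplicate source block
   gets count 60 and frequency 600, EPATH's source block loses the same
   amounts (clamped at zero on underflow), EDUP's count becomes 55, and
   EPATH's count is reduced by 55.  */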
878
879
880 /* The duplicate and original joiner blocks may end up with different
881 probabilities (different from both the original and from each other).
882 Recompute the probabilities here once we have updated the edge
883 counts and frequencies. */
884
885 static void
886 recompute_probabilities (basic_block bb)
887 {
888 edge esucc;
889 edge_iterator ei;
890 FOR_EACH_EDGE (esucc, ei, bb->succs)
891 {
892 if (!bb->count)
893 continue;
894
895 /* Prevent overflow computation due to insane profiles. */
896 if (esucc->count < bb->count)
897 esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
898 bb->count);
899 else
900 /* Can happen with missing/guessed probabilities, since we
901 may determine that more is flowing along duplicated
902 path than joiner succ probabilities allowed.
903 Counts and freqs will be insane after jump threading;
904 at least make sure the probability is sane or we will
905 get a flow verification error.
906 Not much we can do to make counts/freqs sane without
907 redoing the profile estimation. */
908 esucc->probability = REG_BR_PROB_BASE;
909 }
910 }
911
912
913 /* Update the counts of the original and duplicated edges from a joiner
914 that go off path, given that we have already determined that the
915 duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
916 outgoing count along the path PATH_OUT_COUNT. The original (on-)path
917 edge from joiner is EPATH. */
918
919 static void
920 update_joiner_offpath_counts (edge epath, basic_block dup_bb,
921 gcov_type path_in_count,
922 gcov_type path_out_count)
923 {
924 /* Compute the count that currently flows off path from the joiner.
925 In other words, the total count of joiner's out edges other than
926 epath. Compute this by walking the successors instead of
927 subtracting epath's count from the joiner bb count, since there
928 are sometimes slight insanities where the total out edge count is
929 larger than the bb count (possibly due to rounding/truncation
930 errors). */
931 gcov_type total_orig_off_path_count = 0;
932 edge enonpath;
933 edge_iterator ei;
934 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
935 {
936 if (enonpath == epath)
937 continue;
938 total_orig_off_path_count += enonpath->count;
939 }
940
941 /* For the path that we are duplicating, the amount that will flow
942 off path from the duplicated joiner is the delta between the
943 path's cumulative in count and the portion of that count we
944 estimated above as flowing from the joiner along the duplicated
945 path. */
946 gcov_type total_dup_off_path_count = path_in_count - path_out_count;
947
948 /* Now do the actual updates of the off-path edges. */
949 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
950 {
951 /* Look for edges going off of the threading path. */
952 if (enonpath == epath)
953 continue;
954
955 /* Find the corresponding edge out of the duplicated joiner. */
956 edge enonpathdup = find_edge (dup_bb, enonpath->dest);
957 gcc_assert (enonpathdup);
958
959 /* We can't use the original probability of the joiner's out
960 edges, since the probabilities of the original branch
961 and the duplicated branches may vary after all threading is
962 complete. But apportion the duplicated joiner's off-path
963 total edge count computed earlier (total_dup_off_path_count)
964 among the duplicated off-path edges based on their original
965 ratio to the full off-path count (total_orig_off_path_count).
966 */
967 int scale = GCOV_COMPUTE_SCALE (enonpath->count,
968 total_orig_off_path_count);
969 /* Give the duplicated offpath edge a portion of the duplicated
970 total. */
971 enonpathdup->count = apply_scale (scale,
972 total_dup_off_path_count);
973 /* Now update the original offpath edge count, handling underflow
974 due to rounding errors. */
975 enonpath->count -= enonpathdup->count;
976 if (enonpath->count < 0)
977 enonpath->count = 0;
978 }
979 }
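
/* Continuing the hypothetical numbers used above: if the original joiner
   has two off-path edges with counts 30 and 10 (total 40), and the
   duplicate joiner received path_in_count = 60 while passing
   path_out_count = 55 along the path, then 5 flows off path from the
   duplicate.  The duplicated off-path edges get roughly 30/40 and 10/40
   of that, i.e. about 4 and 1, and the original off-path edges drop to
   about 26 and 9 (exact values depend on the fixed-point scaling and
   rounding above).  */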
980
981
982 /* Check if the paths through RD all have estimated frequencies but zero
983 profile counts. This is more accurate than checking the entry block
984 for a zero profile count, since profile insanities sometimes creep in. */
985
986 static bool
987 estimated_freqs_path (struct redirection_data *rd)
988 {
989 edge e = rd->incoming_edges->e;
990 vec<jump_thread_edge *> *path = THREAD_PATH (e);
991 edge ein;
992 edge_iterator ei;
993 bool non_zero_freq = false;
994 FOR_EACH_EDGE (ein, ei, e->dest->preds)
995 {
996 if (ein->count)
997 return false;
998 non_zero_freq |= ein->src->frequency != 0;
999 }
1000
1001 for (unsigned int i = 1; i < path->length (); i++)
1002 {
1003 edge epath = (*path)[i]->e;
1004 if (epath->src->count)
1005 return false;
1006 non_zero_freq |= epath->src->frequency != 0;
1007 edge esucc;
1008 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1009 {
1010 if (esucc->count)
1011 return false;
1012 non_zero_freq |= esucc->src->frequency != 0;
1013 }
1014 }
1015 return non_zero_freq;
1016 }
1017
1018
1019 /* Invoked for routines that have guessed frequencies and no profile
1020 counts to record the block and edge frequencies for paths through RD
1021 in the profile count fields of those blocks and edges. This is because
1022 ssa_fix_duplicate_block_edges incrementally updates the block and
1023 edge counts as edges are redirected, and it is difficult to do that
1024 for edge frequencies which are computed on the fly from the source
1025 block frequency and probability. When a block frequency is updated
1026 its outgoing edge frequencies are affected and become difficult to
1027 adjust. */
1028
1029 static void
1030 freqs_to_counts_path (struct redirection_data *rd)
1031 {
1032 edge e = rd->incoming_edges->e;
1033 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1034 edge ein;
1035 edge_iterator ei;
1036 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1037 {
1038 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
1039 errors applying the probability when the frequencies are very
1040 small. */
1041 ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
1042 ein->probability);
1043 }
1044
1045 for (unsigned int i = 1; i < path->length (); i++)
1046 {
1047 edge epath = (*path)[i]->e;
1048 edge esucc;
1049 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
1050 errors applying the edge probability when the frequencies are very
1051 small. */
1052 epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
1053 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1054 esucc->count = apply_probability (esucc->src->count,
1055 esucc->probability);
1056 }
1057 }
1058
1059
1060 /* For routines that have guessed frequencies and no profile counts, where we
1061 used freqs_to_counts_path to record block and edge frequencies for paths
1062 through RD, we clear the counts after completing all updates for RD.
1063 The updates in ssa_fix_duplicate_block_edges are based off the count fields,
1064 but the block frequencies and edge probabilities were updated as well,
1065 so we can simply clear the count fields. */
1066
1067 static void
1068 clear_counts_path (struct redirection_data *rd)
1069 {
1070 edge e = rd->incoming_edges->e;
1071 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1072 edge ein, esucc;
1073 edge_iterator ei;
1074 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1075 ein->count = 0;
1076
1077 /* First clear counts along original path. */
1078 for (unsigned int i = 1; i < path->length (); i++)
1079 {
1080 edge epath = (*path)[i]->e;
1081 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1082 esucc->count = 0;
1083 epath->src->count = 0;
1084 }
1085 /* Also need to clear the counts along duplicated path. */
1086 for (unsigned int i = 0; i < 2; i++)
1087 {
1088 basic_block dup = rd->dup_blocks[i];
1089 if (!dup)
1090 continue;
1091 FOR_EACH_EDGE (esucc, ei, dup->succs)
1092 esucc->count = 0;
1093 dup->count = 0;
1094 }
1095 }
1096
1097 /* Wire up the outgoing edges from the duplicate blocks and
1098 update any PHIs as needed. Also update the profile counts
1099 on the original and duplicate blocks and edges. */
1100 void
1101 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
1102 ssa_local_info_t *local_info)
1103 {
1104 bool multi_incomings = (rd->incoming_edges->next != NULL);
1105 edge e = rd->incoming_edges->e;
1106 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1107 edge elast = path->last ()->e;
1108 gcov_type path_in_count = 0;
1109 gcov_type path_out_count = 0;
1110 int path_in_freq = 0;
1111
1112 /* This routine updates profile counts, frequencies, and probabilities
1113 incrementally. Since it is difficult to do the incremental updates
1114 using frequencies/probabilities alone, for routines without profile
1115 data we first take a snapshot of the existing block and edge frequencies
1116 by copying them into the empty profile count fields. These counts are
1117 then used to do the incremental updates, and cleared at the end of this
1118 routine. If the function is marked as having a profile, we still check
1119 to see if the paths through RD are using estimated frequencies because
1120 the routine had zero profile counts. */
1121 bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
1122 || estimated_freqs_path (rd));
1123 if (do_freqs_to_counts)
1124 freqs_to_counts_path (rd);
1125
1126 /* First determine how much profile count to move from original
1127 path to the duplicate path. This is tricky in the presence of
1128 a joiner (see comments for compute_path_counts), where some portion
1129 of the path's counts will flow off-path from the joiner. In the
1130 non-joiner case the path_in_count and path_out_count should be the
1131 same. */
1132 bool has_joiner = compute_path_counts (rd, local_info,
1133 &path_in_count, &path_out_count,
1134 &path_in_freq);
1135
1136 int cur_path_freq = path_in_freq;
1137 for (unsigned int count = 0, i = 1; i < path->length (); i++)
1138 {
1139 edge epath = (*path)[i]->e;
1140
1141 /* If we were threading through a joiner block, then we want
1142 to keep its control statement and redirect an outgoing edge.
1143 Else we want to remove the control statement & edges, then create
1144 a new outgoing edge. In both cases we may need to update PHIs. */
1145 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1146 {
1147 edge victim;
1148 edge e2;
1149
1150 gcc_assert (has_joiner);
1151
1152 /* This updates the PHIs at the destination of the duplicate
1153 block. Pass 0 instead of i if we are threading a path which
1154 has multiple incoming edges. */
1155 update_destination_phis (local_info->bb, rd->dup_blocks[count],
1156 path, multi_incomings ? 0 : i);
1157
1158 /* Find the edge from the duplicate block to the block we're
1159 threading through. That's the edge we want to redirect. */
1160 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
1161
1162 /* If there are no remaining blocks on the path to duplicate,
1163 then redirect VICTIM to the final destination of the jump
1164 threading path. */
1165 if (!any_remaining_duplicated_blocks (path, i))
1166 {
1167 e2 = redirect_edge_and_branch (victim, elast->dest);
1168 /* If we redirected the edge, then we need to copy PHI arguments
1169 at the target. If the edge already existed (e2 != victim
1170 case), then the PHIs in the target already have the correct
1171 arguments. */
1172 if (e2 == victim)
1173 copy_phi_args (e2->dest, elast, e2,
1174 path, multi_incomings ? 0 : i);
1175 }
1176 else
1177 {
1178 /* Redirect VICTIM to the next duplicated block in the path. */
1179 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
1180
1181 /* We need to update the PHIs in the next duplicated block. We
1182 want the new PHI args to have the same value as they had
1183 in the source of the next duplicate block.
1184
1185 Thus, we need to know which edge we traversed into the
1186 source of the duplicate. Furthermore, we may have
1187 traversed many edges to reach the source of the duplicate.
1188
1189 Walk through the path starting at element I until we
1190 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
1191 the edge from the prior element. */
1192 for (unsigned int j = i + 1; j < path->length (); j++)
1193 {
1194 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
1195 {
1196 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
1197 break;
1198 }
1199 }
1200 }
1201
1202 /* Update the counts and frequency of both the original block
1203 and path edge, and the duplicates. The path duplicate's
1204 incoming count and frequency are the totals for all edges
1205 incoming to this jump threading path computed earlier.
1206 And we know that the duplicated path will have path_out_count
1207 flowing out of it (i.e. along the duplicated path out of the
1208 duplicated joiner). */
1209 update_profile (epath, e2, path_in_count, path_out_count,
1210 path_in_freq);
1211
1212 /* Next we need to update the counts of the original and duplicated
1213 edges from the joiner that go off path. */
1214 update_joiner_offpath_counts (epath, e2->src, path_in_count,
1215 path_out_count);
1216
1217 /* Finally, we need to set the probabilities on the duplicated
1218 edges out of the duplicated joiner (e2->src). The probabilities
1219 along the original path will all be updated below after we finish
1220 processing the whole path. */
1221 recompute_probabilities (e2->src);
1222
1223 /* Record the frequency flowing to the downstream duplicated
1224 path blocks. */
1225 cur_path_freq = EDGE_FREQUENCY (e2);
1226 }
1227 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1228 {
1229 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
1230 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
1231 multi_incomings ? 0 : i);
1232 if (count == 1)
1233 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
1234
1235 /* Update the counts and frequency of both the original block
1236 and path edge, and the duplicates. Since we are now after
1237 any joiner that may have existed on the path, the count
1238 flowing along the duplicated threaded path is path_out_count.
1239 If we didn't have a joiner, then cur_path_freq was the sum
1240 of the total frequencies along all incoming edges to the
1241 thread path (path_in_freq). If we had a joiner, it would have
1242 been updated at the end of that handling to the edge frequency
1243 along the duplicated joiner path edge. */
1244 update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
1245 path_out_count, path_out_count,
1246 cur_path_freq);
1247 }
1248 else
1249 {
1250 /* No copy case. In this case we don't have an equivalent block
1251 on the duplicated thread path to update, but we do need
1252 to remove the portion of the counts/freqs that were moved
1253 to the duplicated path from the counts/freqs flowing through
1254 this block on the original path. Since all the no-copy edges
1255 are after any joiner, the removed count is the same as
1256 path_out_count.
1257
1258 If we didn't have a joiner, then cur_path_freq was the sum
1259 of the total frequencies along all incoming edges to the
1260 thread path (path_in_freq). If we had a joiner, it would have
1261 been updated at the end of that handling to the edge frequency
1262 along the duplicated joiner path edge. */
1263 update_profile (epath, NULL, path_out_count, path_out_count,
1264 cur_path_freq);
1265 }
1266
1267 /* Increment the index into the duplicated path when we processed
1268 a duplicated block. */
1269 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
1270 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1271 {
1272 count++;
1273 }
1274 }
1275
1276 /* Now walk the original blocks and update their probabilities, since the
1277 counts and freqs should have been updated properly by the above loop. */
1278 for (unsigned int i = 1; i < path->length (); i++)
1279 {
1280 edge epath = (*path)[i]->e;
1281 recompute_probabilities (epath->src);
1282 }
1283
1284 /* Done with all profile and frequency updates, clear counts if they
1285 were copied. */
1286 if (do_freqs_to_counts)
1287 clear_counts_path (rd);
1288 }
1289
1290 /* Hash table traversal callback routine to create duplicate blocks. */
1291
1292 int
1293 ssa_create_duplicates (struct redirection_data **slot,
1294 ssa_local_info_t *local_info)
1295 {
1296 struct redirection_data *rd = *slot;
1297
1298 /* The second duplicated block in a jump threading path is specific
1299 to the path. So it gets stored in RD rather than in LOCAL_DATA.
1300
1301 Each time we're called, we have to look through the path and see
1302 if a second block needs to be duplicated.
1303
1304 Note the search starts with the third edge on the path. The first
1305 edge is the incoming edge, the second edge always has its source
1306 duplicated. Thus we start our search with the third edge. */
1307 vec<jump_thread_edge *> *path = rd->path;
1308 for (unsigned int i = 2; i < path->length (); i++)
1309 {
1310 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1311 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1312 {
1313 create_block_for_threading ((*path)[i]->e->src, rd, 1,
1314 &local_info->duplicate_blocks);
1315 break;
1316 }
1317 }
1318
1319 /* Create a template block if we have not done so already. Otherwise
1320 use the template to create a new block. */
1321 if (local_info->template_block == NULL)
1322 {
1323 create_block_for_threading ((*path)[1]->e->src, rd, 0,
1324 &local_info->duplicate_blocks);
1325 local_info->template_block = rd->dup_blocks[0];
1326
1327 /* We do not create any outgoing edges for the template. We will
1328 take care of that in a later traversal. That way we do not
1329 create edges that are going to just be deleted. */
1330 }
1331 else
1332 {
1333 create_block_for_threading (local_info->template_block, rd, 0,
1334 &local_info->duplicate_blocks);
1335
1336 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
1337 block. */
1338 ssa_fix_duplicate_block_edges (rd, local_info);
1339 }
1340
1341 /* Keep walking the hash table. */
1342 return 1;
1343 }
1344
1345 /* We did not create any outgoing edges for the template block during
1346 block creation. This hash table traversal callback creates the
1347 outgoing edge for the template block. */
1348
1349 inline int
1350 ssa_fixup_template_block (struct redirection_data **slot,
1351 ssa_local_info_t *local_info)
1352 {
1353 struct redirection_data *rd = *slot;
1354
1355 /* If this is the template block, halt the traversal after updating
1356 it appropriately.
1357
1358 If we were threading through a joiner block, then we want
1359 to keep its control statement and redirect an outgoing edge.
1360 Else we want to remove the control statement & edges, then create
1361 a new outgoing edge. In both cases we may need to update PHIs. */
1362 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
1363 {
1364 ssa_fix_duplicate_block_edges (rd, local_info);
1365 return 0;
1366 }
1367
1368 return 1;
1369 }
1370
1371 /* Hash table traversal callback to redirect each incoming edge
1372 associated with this hash table element to its new destination. */
1373
1374 int
1375 ssa_redirect_edges (struct redirection_data **slot,
1376 ssa_local_info_t *local_info)
1377 {
1378 struct redirection_data *rd = *slot;
1379 struct el *next, *el;
1380
1381 /* Walk over all the incoming edges associated with this hash table
1382 entry. */
1383 for (el = rd->incoming_edges; el; el = next)
1384 {
1385 edge e = el->e;
1386 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1387
1388 /* Go ahead and free this element from the list. Doing this now
1389 avoids the need for another list walk when we destroy the hash
1390 table. */
1391 next = el->next;
1392 free (el);
1393
1394 thread_stats.num_threaded_edges++;
1395
1396 if (rd->dup_blocks[0])
1397 {
1398 edge e2;
1399
1400 if (dump_file && (dump_flags & TDF_DETAILS))
1401 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1402 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
1403
1404 /* If we redirect a loop latch edge cancel its loop. */
1405 if (e->src == e->src->loop_father->latch)
1406 mark_loop_for_removal (e->src->loop_father);
1407
1408 /* Redirect the incoming edge (possibly to the joiner block) to the
1409 appropriate duplicate block. */
1410 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
1411 gcc_assert (e == e2);
1412 flush_pending_stmts (e2);
1413 }
1414
1415 /* Go ahead and clear E->aux. It's not needed anymore and failure
1416 to clear it will cause all kinds of unpleasant problems later. */
1417 delete_jump_thread_path (path);
1418 e->aux = NULL;
1419
1420 }
1421
1422 /* Indicate that we actually threaded one or more jumps. */
1423 if (rd->incoming_edges)
1424 local_info->jumps_threaded = true;
1425
1426 return 1;
1427 }
1428
1429 /* Return true if this block has no executable statements other than
1430 a simple ctrl flow instruction. When the number of outgoing edges
1431 is one, this is equivalent to a "forwarder" block. */
1432
1433 static bool
1434 redirection_block_p (basic_block bb)
1435 {
1436 gimple_stmt_iterator gsi;
1437
1438 /* Advance to the first executable statement. */
1439 gsi = gsi_start_bb (bb);
1440 while (!gsi_end_p (gsi)
1441 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
1442 || is_gimple_debug (gsi_stmt (gsi))
1443 || gimple_nop_p (gsi_stmt (gsi))
1444 || gimple_clobber_p (gsi_stmt (gsi))))
1445 gsi_next (&gsi);
1446
1447 /* Check if this is an empty block. */
1448 if (gsi_end_p (gsi))
1449 return true;
1450
1451 /* Test that we've reached the terminating control statement. */
1452 return gsi_stmt (gsi)
1453 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1454 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1455 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
1456 }
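
/* For instance (hypothetical GIMPLE), a block containing only

     <L5>:
     # DEBUG x => x_1
     if (x_1 > 0) goto <L7>; else goto <L9>;

   satisfies redirection_block_p, as would a completely empty forwarder
   block; a block that also contains a store or a call does not.  */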
1457
1458 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1459 is reached via one or more specific incoming edges, we know which
1460 outgoing edge from BB will be traversed.
1461
1462 We want to redirect those incoming edges to the target of the
1463 appropriate outgoing edge. Doing so avoids a conditional branch
1464 and may expose new optimization opportunities. Note that we have
1465 to update dominator tree and SSA graph after such changes.
1466
1467 The key to keeping the SSA graph update manageable is to duplicate
1468 the side effects occurring in BB so that those side effects still
1469 occur on the paths which bypass BB after redirecting edges.
1470
1471 We accomplish this by creating duplicates of BB and arranging for
1472 the duplicates to unconditionally pass control to one specific
1473 successor of BB. We then revector the incoming edges into BB to
1474 the appropriate duplicate of BB.
1475
1476 If NOLOOP_ONLY is true, we only perform the threading as long as it
1477 does not affect the structure of the loops in a nontrivial way.
1478
1479 If JOINERS is true, then thread through joiner blocks as well. */
1480
1481 static bool
1482 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
1483 {
1484 /* E is an incoming edge into BB that we may or may not want to
1485 redirect to a duplicate of BB. */
1486 edge e, e2;
1487 edge_iterator ei;
1488 ssa_local_info_t local_info;
1489
1490 local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1491
1492 /* To avoid scanning a linear array for the element we need, we instead
1493 use a hash table. For normal code there should be no noticeable
1494 difference. However, if we have a block with a large number of
1495 incoming and outgoing edges such linear searches can get expensive. */
1496 redirection_data
1497 = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1498
1499 /* Record each unique threaded destination into a hash table for
1500 efficient lookups. */
1501 FOR_EACH_EDGE (e, ei, bb->preds)
1502 {
1503 if (e->aux == NULL)
1504 continue;
1505
1506 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1507
1508 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1509 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1510 continue;
1511
1512 e2 = path->last ()->e;
1513 if (!e2 || noloop_only)
1514 {
1515 /* If NOLOOP_ONLY is true, we only allow threading through the
1516 header of a loop to exit edges. */
1517
1518 /* One case occurs when there was a loop header buried in a jump
1519 threading path that crosses loop boundaries. We do not try
1520 to thread this elsewhere, so just cancel the jump threading
1521 request by clearing the AUX field now. */
1522 if ((bb->loop_father != e2->src->loop_father
1523 && !loop_exit_edge_p (e2->src->loop_father, e2))
1524 || (e2->src->loop_father != e2->dest->loop_father
1525 && !loop_exit_edge_p (e2->src->loop_father, e2)))
1526 {
1527 /* Since this case is not handled by our special code
1528 to thread through a loop header, we must explicitly
1529 cancel the threading request here. */
1530 delete_jump_thread_path (path);
1531 e->aux = NULL;
1532 continue;
1533 }
1534
1535 /* Another case occurs when trying to thread through our
1536 own loop header, possibly from inside the loop. We will
1537 thread these later. */
1538 unsigned int i;
1539 for (i = 1; i < path->length (); i++)
1540 {
1541 if ((*path)[i]->e->src == bb->loop_father->header
1542 && (!loop_exit_edge_p (bb->loop_father, e2)
1543 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1544 break;
1545 }
1546
1547 if (i != path->length ())
1548 continue;
1549 }
1550
1551 /* Insert the outgoing edge into the hash table if it is not
1552 already in the hash table. */
1553 lookup_redirection_data (e, INSERT);
1554 }
1555
1556 /* We do not update dominance info. */
1557 free_dominance_info (CDI_DOMINATORS);
1558
1559 /* We know we only thread through the loop header to loop exits.
1560 Let the basic block duplication hook know we are not creating
1561 a multiple entry loop. */
1562 if (noloop_only
1563 && bb == bb->loop_father->header)
1564 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1565
1566 /* Now create duplicates of BB.
1567
1568 Note that for a block with a high outgoing degree we can waste
1569 a lot of time and memory creating and destroying useless edges.
1570
1571 So we first duplicate BB and remove the control structure at the
1572 tail of the duplicate as well as all outgoing edges from the
1573 duplicate. We then use that duplicate block as a template for
1574 the rest of the duplicates. */
1575 local_info.template_block = NULL;
1576 local_info.bb = bb;
1577 local_info.jumps_threaded = false;
1578 redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1579 (&local_info);
1580
1581 /* The template does not have an outgoing edge. Create that outgoing
1582 edge and update PHI nodes at the edge's target as necessary.
1583
1584 We do this after creating all the duplicates to avoid creating
1585 unnecessary edges. */
1586 redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1587 (&local_info);
1588
1589 /* The hash table traversals above created the duplicate blocks (and the
1590 statements within the duplicate blocks). This loop creates PHI nodes for
1591 the duplicated blocks and redirects the incoming edges into BB to reach
1592 the duplicates of BB. */
1593 redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1594 (&local_info);
1595
1596 /* Done with this block. Clear REDIRECTION_DATA. */
1597 delete redirection_data;
1598 redirection_data = NULL;
1599
1600 if (noloop_only
1601 && bb == bb->loop_father->header)
1602 set_loop_copy (bb->loop_father, NULL);
1603
1604 BITMAP_FREE (local_info.duplicate_blocks);
1605 local_info.duplicate_blocks = NULL;
1606
1607 /* Indicate to our caller whether or not any jumps were threaded. */
1608 return local_info.jumps_threaded;
1609 }
1610
1611 /* Wrapper for thread_block_1 so that we can first handle jump
1612 thread paths which do not involve copying joiner blocks, then
1613 handle jump thread paths which have joiner blocks.
1614
1615 By doing things this way we can be as aggressive as possible and
1616 not worry that copying a joiner block will create a jump threading
1617 opportunity. */
1618
1619 static bool
1620 thread_block (basic_block bb, bool noloop_only)
1621 {
1622 bool retval;
1623 retval = thread_block_1 (bb, noloop_only, false);
1624 retval |= thread_block_1 (bb, noloop_only, true);
1625 return retval;
1626 }
1627
1628
1629 /* Threads edge E through E->dest to the outgoing edge recorded in E's jump
1630 thread path. Returns the copy of E->dest created during threading, or
1631 E->dest if it was not necessary to copy it (E is its single predecessor). */
1632
1633 static basic_block
1634 thread_single_edge (edge e)
1635 {
1636 basic_block bb = e->dest;
1637 struct redirection_data rd;
1638 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1639 edge eto = (*path)[1]->e;
1640
1641 delete_jump_thread_path (path);
1642 e->aux = NULL;
1643
1644 thread_stats.num_threaded_edges++;
1645
1646 if (single_pred_p (bb))
1647 {
1648 /* If BB has just a single predecessor, we should only remove the
1649 control statement at its end and all successors except for ETO. */
1650 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
1651
1652 /* And fixup the flags on the single remaining edge. */
1653 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
1654 eto->flags |= EDGE_FALLTHRU;
1655
1656 return bb;
1657 }
1658
1659 /* Otherwise, we need to create a copy. */
1660 if (e->dest == eto->src)
1661 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
1662
1663 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
1664 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1665 npath->safe_push (x);
1666
1667 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
1668 npath->safe_push (x);
1669 rd.path = npath;
1670
1671 create_block_for_threading (bb, &rd, 0, NULL);
1672 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
1673 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0], 0);
1674
1675 if (dump_file && (dump_flags & TDF_DETAILS))
1676 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1677 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
1678
1679 rd.dup_blocks[0]->count = e->count;
1680 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
1681 single_succ_edge (rd.dup_blocks[0])->count = e->count;
1682 redirect_edge_and_branch (e, rd.dup_blocks[0]);
1683 flush_pending_stmts (e);
1684
1685 delete_jump_thread_path (npath);
1686 return rd.dup_blocks[0];
1687 }
1688
1689 /* Callback for dfs_enumerate_from. Returns true if BB is different
1690 from STOP and DBDS_CE_STOP. */
1691
1692 static basic_block dbds_ce_stop;
1693 static bool
1694 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1695 {
1696 return (bb != (const_basic_block) stop
1697 && bb != dbds_ce_stop);
1698 }
1699
1700 /* Evaluates the dominance relationship of the latch of LOOP and BB, and
1701 returns the state. */
1702
1703 enum bb_dom_status
1704 {
1705 /* BB does not dominate latch of the LOOP. */
1706 DOMST_NONDOMINATING,
1707 /* The LOOP is broken (there is no path from the header to its latch). */
1708 DOMST_LOOP_BROKEN,
1709 /* BB dominates the latch of the LOOP. */
1710 DOMST_DOMINATING
1711 };
1712
1713 static enum bb_dom_status
1714 determine_bb_domination_status (struct loop *loop, basic_block bb)
1715 {
1716 basic_block *bblocks;
1717 unsigned nblocks, i;
1718 bool bb_reachable = false;
1719 edge_iterator ei;
1720 edge e;
1721
1722 /* This function assumes BB is a successor of LOOP->header.
1723 If that is not the case return DOMST_NONDOMINATING which
1724 is always safe. */
1725 {
1726 bool ok = false;
1727
1728 FOR_EACH_EDGE (e, ei, bb->preds)
1729 {
1730 if (e->src == loop->header)
1731 {
1732 ok = true;
1733 break;
1734 }
1735 }
1736
1737 if (!ok)
1738 return DOMST_NONDOMINATING;
1739 }
1740
1741 if (bb == loop->latch)
1742 return DOMST_DOMINATING;
1743
1744 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1745 from it. */
1746
1747 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1748 dbds_ce_stop = loop->header;
1749 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1750 bblocks, loop->num_nodes, bb);
1751 for (i = 0; i < nblocks; i++)
1752 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1753 {
1754 if (e->src == loop->header)
1755 {
1756 free (bblocks);
1757 return DOMST_NONDOMINATING;
1758 }
1759 if (e->src == bb)
1760 bb_reachable = true;
1761 }
1762
1763 free (bblocks);
1764 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1765 }
1766
1767 /* Return true if BB is part of the new pre-header that is created
1768 when threading the latch to DATA. */
1769
1770 static bool
1771 def_split_header_continue_p (const_basic_block bb, const void *data)
1772 {
1773 const_basic_block new_header = (const_basic_block) data;
1774 const struct loop *l;
1775
1776 if (bb == new_header
1777 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1778 return false;
1779 for (l = bb->loop_father; l; l = loop_outer (l))
1780 if (l == new_header->loop_father)
1781 return true;
1782 return false;
1783 }
1784
1785 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1786 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1787 to the inside of the loop. */
1788
1789 static bool
1790 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1791 {
1792 basic_block header = loop->header;
1793 edge e, tgt_edge, latch = loop_latch_edge (loop);
1794 edge_iterator ei;
1795 basic_block tgt_bb, atgt_bb;
1796 enum bb_dom_status domst;
1797
1798 /* We have already threaded through headers to exits, so all the threading
1799 requests now are to the inside of the loop. We need to avoid creating
1800 irreducible regions (i.e., loops with more than one entry block), and
1801 also loops with several latch edges, or new subloops of the loop (although
1802 there are cases where it might be appropriate, it is difficult to decide,
1803 and doing it wrongly may confuse other optimizers).
1804
1805 We could handle more general cases here. However, the intention is to
1806 preserve some information about the loop, which is impossible if its
1807 structure changes significantly, in a way that is not well understood.
1808 Thus we only handle a few important special cases, in which updating
1809 the loop-carried information should also be feasible:
1810
1811 1) Propagation of the latch edge to a block that dominates the latch block
1812 of a loop. This aims to handle the following idiom:
1813
1814 first = 1;
1815 while (1)
1816 {
1817 if (first)
1818 initialize;
1819 first = 0;
1820 body;
1821 }
1822
1823 After threading the latch edge, this becomes
1824
1825 first = 1;
1826 if (first)
1827 initialize;
1828 while (1)
1829 {
1830 first = 0;
1831 body;
1832 }
1833
1834 The original header of the loop is moved out of it, and we may thread
1835 the remaining edges through it without further constraints.
1836
1837 2) All entry edges are propagated to a single basic block that dominates
1838 the latch block of the loop. This aims to handle the following idiom
1839 (normally created for "for" loops):
1840
1841 i = 0;
1842 while (1)
1843 {
1844 if (i >= 100)
1845 break;
1846 body;
1847 i++;
1848 }
1849
1850 This becomes
1851
1852 i = 0;
1853 while (1)
1854 {
1855 body;
1856 i++;
1857 if (i >= 100)
1858 break;
1859 }
1860 */
1861
1862 /* Threading through the header won't improve the code if the header has just
1863 one successor. */
1864 if (single_succ_p (header))
1865 goto fail;
1866
1867 /* If we threaded the latch using a joiner block, we cancel the
1868 threading opportunity out of an abundance of caution. However,
1869 still allow threading from outside to inside the loop. */
1870 if (latch->aux)
1871 {
1872 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1873 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1874 {
1875 delete_jump_thread_path (path);
1876 latch->aux = NULL;
1877 }
1878 }
1879
1880 if (latch->aux)
1881 {
1882 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1883 tgt_edge = (*path)[1]->e;
1884 tgt_bb = tgt_edge->dest;
1885 }
1886 else if (!may_peel_loop_headers
1887 && !redirection_block_p (loop->header))
1888 goto fail;
1889 else
1890 {
1891 tgt_bb = NULL;
1892 tgt_edge = NULL;
1893 FOR_EACH_EDGE (e, ei, header->preds)
1894 {
1895 if (!e->aux)
1896 {
1897 if (e == latch)
1898 continue;
1899
1900 /* If the latch is not threaded and there is a header
1901 edge that is not threaded, we would create a loop
1902 with multiple entries. */
1903 goto fail;
1904 }
1905
1906 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1907
1908 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1909 goto fail;
1910 tgt_edge = (*path)[1]->e;
1911 atgt_bb = tgt_edge->dest;
1912 if (!tgt_bb)
1913 tgt_bb = atgt_bb;
1914 /* Two targets of threading would make us create a loop
1915 with multiple entries. */
1916 else if (tgt_bb != atgt_bb)
1917 goto fail;
1918 }
1919
1920 if (!tgt_bb)
1921 {
1922 /* There are no threading requests. */
1923 return false;
1924 }
1925
1926 /* Redirecting to empty loop latch is useless. */
1927 if (tgt_bb == loop->latch
1928 && empty_block_p (loop->latch))
1929 goto fail;
1930 }
1931
1932 /* The target block must dominate the loop latch, otherwise we would be
1933 creating a subloop. */
1934 domst = determine_bb_domination_status (loop, tgt_bb);
1935 if (domst == DOMST_NONDOMINATING)
1936 goto fail;
1937 if (domst == DOMST_LOOP_BROKEN)
1938 {
1939 /* If the loop ceased to exist, mark it as such, and thread through its
1940 original header. */
1941 mark_loop_for_removal (loop);
1942 return thread_block (header, false);
1943 }
1944
1945 if (tgt_bb->loop_father->header == tgt_bb)
1946 {
1947 /* If the target of the threading is a header of a subloop, we need
1948 to create a preheader for it, so that the headers of the two loops
1949 do not merge. */
1950 if (EDGE_COUNT (tgt_bb->preds) > 2)
1951 {
1952 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1953 gcc_assert (tgt_bb != NULL);
1954 }
1955 else
1956 tgt_bb = split_edge (tgt_edge);
1957 }
1958
1959 if (latch->aux)
1960 {
1961 basic_block *bblocks;
1962 unsigned nblocks, i;
1963
1964 /* First handle the case where the latch edge is redirected. We are copying
1965 the loop header but not creating a multiple entry loop. Make the
1966 cfg manipulation code aware of that fact. */
1967 set_loop_copy (loop, loop);
1968 loop->latch = thread_single_edge (latch);
1969 set_loop_copy (loop, NULL);
1970 gcc_assert (single_succ (loop->latch) == tgt_bb);
1971 loop->header = tgt_bb;
1972
1973 /* Remove the new pre-header blocks from our loop. */
1974 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1975 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1976 bblocks, loop->num_nodes, tgt_bb);
1977 for (i = 0; i < nblocks; i++)
1978 if (bblocks[i]->loop_father == loop)
1979 {
1980 remove_bb_from_loops (bblocks[i]);
1981 add_bb_to_loop (bblocks[i], loop_outer (loop));
1982 }
1983 free (bblocks);
1984
1985 /* If the new header has multiple latches mark it so. */
1986 FOR_EACH_EDGE (e, ei, loop->header->preds)
1987 if (e->src->loop_father == loop
1988 && e->src != loop->latch)
1989 {
1990 loop->latch = NULL;
1991 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1992 }
1993
1994 /* Cancel remaining threading requests that would make the
1995 loop a multiple entry loop. */
1996 FOR_EACH_EDGE (e, ei, header->preds)
1997 {
1998 edge e2;
1999
2000 if (e->aux == NULL)
2001 continue;
2002
2003 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2004 e2 = path->last ()->e;
2005
2006 if (e->src->loop_father != e2->dest->loop_father
2007 && e2->dest != loop->header)
2008 {
2009 delete_jump_thread_path (path);
2010 e->aux = NULL;
2011 }
2012 }
2013
2014 /* Thread the remaining edges through the former header. */
2015 thread_block (header, false);
2016 }
2017 else
2018 {
2019 basic_block new_preheader;
2020
2021 /* Now consider the case where entry edges are redirected to the new entry
2022 block. Remember one entry edge, so that we can find the new
2023 preheader (its destination after threading). */
2024 FOR_EACH_EDGE (e, ei, header->preds)
2025 {
2026 if (e->aux)
2027 break;
2028 }
2029
2030 /* The duplicate of the header is the new preheader of the loop. Ensure
2031 that it is placed correctly in the loop hierarchy. */
2032 set_loop_copy (loop, loop_outer (loop));
2033
2034 thread_block (header, false);
2035 set_loop_copy (loop, NULL);
2036 new_preheader = e->dest;
2037
2038 /* Create the new latch block. This is always necessary, as the latch
2039 must have only a single successor, but the original header had at
2040 least two successors. */
2041 loop->latch = NULL;
2042 mfb_kj_edge = single_succ_edge (new_preheader);
2043 loop->header = mfb_kj_edge->dest;
2044 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
2045 loop->header = latch->dest;
2046 loop->latch = latch->src;
2047 }
2048
2049 return true;
2050
2051 fail:
2052 /* We failed to thread anything. Cancel the requests. */
2053 FOR_EACH_EDGE (e, ei, header->preds)
2054 {
2055 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2056
2057 if (path)
2058 {
2059 delete_jump_thread_path (path);
2060 e->aux = NULL;
2061 }
2062 }
2063 return false;
2064 }
2065
2066 /* E1 and E2 are edges into the same basic block. Return TRUE if the
2067 PHI arguments associated with those edges are equal or there are no
2068 PHI arguments, otherwise return FALSE. */
2069
2070 static bool
2071 phi_args_equal_on_edges (edge e1, edge e2)
2072 {
2073 gphi_iterator gsi;
2074 int indx1 = e1->dest_idx;
2075 int indx2 = e2->dest_idx;
2076
2077 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
2078 {
2079 gphi *phi = gsi.phi ();
2080
2081 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
2082 gimple_phi_arg_def (phi, indx2), 0))
2083 return false;
2084 }
2085 return true;
2086 }
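/* Editorial example (not from the GCC sources): for a destination block
   containing

     x_1 = PHI <0 (J), 1 (D)>

   the argument arriving from block J is 0 and the one arriving from block D
   is 1, so phi_args_equal_on_edges returns false for the two corresponding
   edges; mark_threaded_blocks below uses this to cancel such a thread
   request.  */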
2087
2088 /* Walk through the registered jump threads and convert them into a
2089 form convenient for this pass.
2090
2091 Any block which has incoming edges threaded to outgoing edges
2092 will have its entry in THREADED_BLOCKS set.
2093
2094 Any threaded edge will have its jump thread path stored in the
2095 original edge's AUX field.
2096
2097 This form avoids the need to walk all the edges in the CFG to
2098 discover blocks which need processing and avoids unnecessary
2099 hash table lookups to map from threaded edge to new target. */
2100
2101 static void
2102 mark_threaded_blocks (bitmap threaded_blocks)
2103 {
2104 unsigned int i;
2105 bitmap_iterator bi;
2106 bitmap tmp = BITMAP_ALLOC (NULL);
2107 basic_block bb;
2108 edge e;
2109 edge_iterator ei;
2110
2111 /* It is possible to have jump threads in which one is a subpath
2112 of the other, i.e., (A, B), (B, C), (C, D) where B is a joiner
2113 block and (B, C), (C, D) where no joiner block exists.
2114
2115 When this occurs, ignore the jump thread request with the joiner
2116 block. It's totally subsumed by the simpler jump thread request.
2117
2118 This results in less block copying and simpler CFGs. More importantly,
2119 if we duplicated the joiner block, B, in this case we would create
2120 a new threading opportunity that we wouldn't be able to optimize
2121 until the next jump threading iteration.
2122
2123 So first convert the jump thread requests which do not require a
2124 joiner block. */
2125 for (i = 0; i < paths.length (); i++)
2126 {
2127 vec<jump_thread_edge *> *path = paths[i];
2128
2129 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
2130 {
2131 edge e = (*path)[0]->e;
2132 e->aux = (void *)path;
2133 bitmap_set_bit (tmp, e->dest->index);
2134 }
2135 }
2136
2137 /* Now iterate again, converting cases where we want to thread
2138 through a joiner block, but only if no other edge on the path
2139 already has a jump thread attached to it. We do this in two passes,
2140 to avoid situations where the order in the paths vec can hide overlapping
2141 threads (the path is recorded on the incoming edge, so we would miss
2142 cases where the second path starts at a downstream edge on the same
2143 path). First record all joiner paths, deleting any in the unexpected
2144 case where there is already a path for that incoming edge. */
2145 for (i = 0; i < paths.length ();)
2146 {
2147 vec<jump_thread_edge *> *path = paths[i];
2148
2149 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
2150 {
2151 /* Attach the path to the starting edge if none is yet recorded. */
2152 if ((*path)[0]->e->aux == NULL)
2153 {
2154 (*path)[0]->e->aux = path;
2155 i++;
2156 }
2157 else
2158 {
2159 paths.unordered_remove (i);
2160 if (dump_file && (dump_flags & TDF_DETAILS))
2161 dump_jump_thread_path (dump_file, *path, false);
2162 delete_jump_thread_path (path);
2163 }
2164 }
2165 else
2166 {
2167 i++;
2168 }
2169 }
2170
2171 /* Second, look for paths that have any other jump thread attached to
2172 them, and either finish converting them or cancel them. */
2173 for (i = 0; i < paths.length ();)
2174 {
2175 vec<jump_thread_edge *> *path = paths[i];
2176 edge e = (*path)[0]->e;
2177
2178 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
2179 {
2180 unsigned int j;
2181 for (j = 1; j < path->length (); j++)
2182 if ((*path)[j]->e->aux != NULL)
2183 break;
2184
2185 /* If we iterated through the entire path without exiting the loop,
2186 then we are good to go, record it. */
2187 if (j == path->length ())
2188 {
2189 bitmap_set_bit (tmp, e->dest->index);
2190 i++;
2191 }
2192 else
2193 {
2194 e->aux = NULL;
2195 paths.unordered_remove (i);
2196 if (dump_file && (dump_flags & TDF_DETAILS))
2197 dump_jump_thread_path (dump_file, *path, false);
2198 delete_jump_thread_path (path);
2199 }
2200 }
2201 else
2202 {
2203 i++;
2204 }
2205 }
2206
2207 /* If optimizing for size, only thread through a block if we don't have
2208 to duplicate it or it's an otherwise empty redirection block. */
2209 if (optimize_function_for_size_p (cfun))
2210 {
2211 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2212 {
2213 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2214 if (EDGE_COUNT (bb->preds) > 1
2215 && !redirection_block_p (bb))
2216 {
2217 FOR_EACH_EDGE (e, ei, bb->preds)
2218 {
2219 if (e->aux)
2220 {
2221 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2222 delete_jump_thread_path (path);
2223 e->aux = NULL;
2224 }
2225 }
2226 }
2227 else
2228 bitmap_set_bit (threaded_blocks, i);
2229 }
2230 }
2231 else
2232 bitmap_copy (threaded_blocks, tmp);
2233
2234 /* Look for jump threading paths which cross multiple loop headers.
2235
2236 The code to thread through loop headers will change the CFG in ways
2237 that break assumptions made by the loop optimization code.
2238
2239 We don't want to blindly cancel the requests. We can instead do better
2240 by trimming off the end of the jump thread path. */
2241 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2242 {
2243 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2244 FOR_EACH_EDGE (e, ei, bb->preds)
2245 {
2246 if (e->aux)
2247 {
2248 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2249
2250 for (unsigned int i = 0, crossed_headers = 0;
2251 i < path->length ();
2252 i++)
2253 {
2254 basic_block dest = (*path)[i]->e->dest;
2255 crossed_headers += (dest == dest->loop_father->header);
2256 if (crossed_headers > 1)
2257 {
2258 /* Trim from entry I onwards. */
2259 for (unsigned int j = i; j < path->length (); j++)
2260 delete (*path)[j];
2261 path->truncate (i);
2262
2263 /* Now that we've truncated the path, make sure
2264 what's left is still valid. We need at least
2265 two edges on the path and the last edge cannot
2266 be a joiner. This should never happen, but let's
2267 be safe. */
2268 if (path->length () < 2
2269 || (path->last ()->type
2270 == EDGE_COPY_SRC_JOINER_BLOCK))
2271 {
2272 delete_jump_thread_path (path);
2273 e->aux = NULL;
2274 }
2275 break;
2276 }
2277 }
2278 }
2279 }
2280 }
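  /* Editorial example (not from the GCC sources): for a requested path
     whose edge destinations are B, C, D, E with C and E being loop
     headers, CROSSED_HEADERS reaches 2 at the entry targeting E, so the
     loop above drops that entry and keeps the first three edges; if the
     truncated path had fewer than two edges or ended in a joiner copy,
     it would be cancelled instead.  */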
2281
2282 /* If we have a joiner block (J) which has two successors S1 and S2 and
2283 we are threading through S1 and the final destination of the thread
2284 is S2, then we must verify that any PHI nodes in S2 have the same
2285 PHI arguments for the edge J->S2 and J->S1->...->S2.
2286
2287 We used to detect this prior to registering the jump thread, but
2288 that prohibits propagation of edge equivalences into non-dominated
2289 PHI nodes as the equivalency test might occur before propagation.
2290
2291 This must also occur after we truncate any jump threading paths
2292 as this scenario may only show up after truncation.
2293
2294 This works for now, but will need improvement as part of the FSA
2295 optimization.
2296
2297 Note since we've moved the thread request data to the edges,
2298 we have to iterate on those rather than the threaded_edges vector. */
2299 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2300 {
2301 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2302 FOR_EACH_EDGE (e, ei, bb->preds)
2303 {
2304 if (e->aux)
2305 {
2306 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2307 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2308
2309 if (have_joiner)
2310 {
2311 basic_block joiner = e->dest;
2312 edge final_edge = path->last ()->e;
2313 basic_block final_dest = final_edge->dest;
2314 edge e2 = find_edge (joiner, final_dest);
2315
2316 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2317 {
2318 delete_jump_thread_path (path);
2319 e->aux = NULL;
2320 }
2321 }
2322 }
2323 }
2324 }
2325
2326 BITMAP_FREE (tmp);
2327 }
2328
2329
2330 /* Return TRUE if BB ends with a switch statement or a computed goto.
2331 Otherwise return false. */
2332 static bool
2333 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
2334 {
2335 gimple *stmt = last_stmt (bb);
2336 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2337 return true;
2338 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
2339 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
2340 return true;
2341 return false;
2342 }
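/* Editorial example (not from the GCC sources): besides a switch statement,
   the GNU C computed-goto extension

     static void *ops[] = { &&do_add, &&do_sub };
     goto *ops[opcode];

   ends its block with a GIMPLE_GOTO whose destination is an SSA_NAME, so
   bb_ends_with_multiway_branch also returns true for that block.  */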
2343
2344 /* Verify that the REGION is a valid jump thread. A jump thread is a special
2345 case of a SEME (Single Entry Multiple Exits) region in which all nodes in
2346 the REGION have exactly one incoming edge. The only exception is the first
2347 block, which may not have been connected to the rest of the cfg yet. */
2348
2349 DEBUG_FUNCTION void
2350 verify_jump_thread (basic_block *region, unsigned n_region)
2351 {
2352 for (unsigned i = 0; i < n_region; i++)
2353 gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2354 }
2355
2356 /* Return true when BB is one of the first N items in BBS. */
2357
2358 static inline bool
2359 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2360 {
2361 for (int i = 0; i < n; i++)
2362 if (bb == bbs[i])
2363 return true;
2364
2365 return false;
2366 }
2367
2368 /* Duplicates a jump-thread path of N_REGION basic blocks.
2369 The ENTRY edge is redirected to the duplicate of the region.
2370
2371 Remove the last conditional statement in the last basic block in the REGION,
2372 and create a single fallthru edge pointing to the same destination as the
2373 EXIT edge.
2374
2375 The new basic blocks are stored to REGION_COPY in the same order as they had
2376 in REGION, provided that REGION_COPY is not NULL.
2377
2378 Returns false if it is unable to copy the region, true otherwise. */
2379
2380 static bool
2381 duplicate_thread_path (edge entry, edge exit,
2382 basic_block *region, unsigned n_region,
2383 basic_block *region_copy)
2384 {
2385 unsigned i;
2386 bool free_region_copy = false;
2387 struct loop *loop = entry->dest->loop_father;
2388 edge exit_copy;
2389 edge redirected;
2390 int total_freq = 0, entry_freq = 0;
2391 gcov_type total_count = 0, entry_count = 0;
2392
2393 if (!can_copy_bbs_p (region, n_region))
2394 return false;
2395
2396 /* Some sanity checking. Note that we do not check for all possible
2397 misuses of the functions. I.e., if you ask to copy something weird,
2398 it will work, but the state of structures probably will not be
2399 correct. */
2400 for (i = 0; i < n_region; i++)
2401 {
2402 /* We do not handle subloops, i.e. all the blocks must belong to the
2403 same loop. */
2404 if (region[i]->loop_father != loop)
2405 return false;
2406 }
2407
2408 initialize_original_copy_tables ();
2409
2410 set_loop_copy (loop, loop);
2411
2412 if (!region_copy)
2413 {
2414 region_copy = XNEWVEC (basic_block, n_region);
2415 free_region_copy = true;
2416 }
2417
2418 if (entry->dest->count)
2419 {
2420 total_count = entry->dest->count;
2421 entry_count = entry->count;
2422 /* Fix up corner cases, to avoid division by zero or creation of negative
2423 frequencies. */
2424 if (entry_count > total_count)
2425 entry_count = total_count;
2426 }
2427 else
2428 {
2429 total_freq = entry->dest->frequency;
2430 entry_freq = EDGE_FREQUENCY (entry);
2431 /* Fix up corner cases, to avoid division by zero or creation of negative
2432 frequencies. */
2433 if (total_freq == 0)
2434 total_freq = 1;
2435 else if (entry_freq > total_freq)
2436 entry_freq = total_freq;
2437 }
2438
2439 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2440 split_edge_bb_loc (entry), false);
2441
2442 /* Fix up: copy_bbs redirects all edges pointing to copied blocks. The
2443 following code ensures that all the edges exiting the jump-thread path are
2444 redirected back to the original code: these edges are exceptions
2445 invalidating the property that is propagated by executing all the blocks of
2446 the jump-thread path in order. */
2447
2448 for (i = 0; i < n_region; i++)
2449 {
2450 edge e;
2451 edge_iterator ei;
2452 basic_block bb = region_copy[i];
2453
2454 if (single_succ_p (bb))
2455 {
2456 /* Make sure the successor is the next node in the path. */
2457 gcc_assert (i + 1 == n_region
2458 || region_copy[i + 1] == single_succ_edge (bb)->dest);
2459 continue;
2460 }
2461
2462 /* Special case the last block on the path: make sure that it does not
2463 jump back on the copied path. */
2464 if (i + 1 == n_region)
2465 {
2466 FOR_EACH_EDGE (e, ei, bb->succs)
2467 if (bb_in_bbs (e->dest, region_copy, n_region - 1))
2468 {
2469 basic_block orig = get_bb_original (e->dest);
2470 if (orig)
2471 redirect_edge_and_branch_force (e, orig);
2472 }
2473 continue;
2474 }
2475
2476 /* Redirect all other edges jumping to non-adjacent blocks back to the
2477 original code. */
2478 FOR_EACH_EDGE (e, ei, bb->succs)
2479 if (region_copy[i + 1] != e->dest)
2480 {
2481 basic_block orig = get_bb_original (e->dest);
2482 if (orig)
2483 redirect_edge_and_branch_force (e, orig);
2484 }
2485 }
2486
2487 if (total_count)
2488 {
2489 scale_bbs_frequencies_gcov_type (region, n_region,
2490 total_count - entry_count,
2491 total_count);
2492 scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
2493 total_count);
2494 }
2495 else
2496 {
2497 scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
2498 total_freq);
2499 scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
2500 }
2501
2502 #ifdef ENABLE_CHECKING
2503 verify_jump_thread (region_copy, n_region);
2504 #endif
2505
2506 /* Remove the last branch in the jump thread path. */
2507 remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2508
2509 /* And fixup the flags on the single remaining edge. */
2510 edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2511 fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2512 fix_e->flags |= EDGE_FALLTHRU;
2513
2514 edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2515
2516 if (e) {
2517 rescan_loop_exit (e, true, false);
2518 e->probability = REG_BR_PROB_BASE;
2519 e->count = region_copy[n_region - 1]->count;
2520 }
2521
2522 /* Redirect the entry and add the phi node arguments. */
2523 if (entry->dest == loop->header)
2524 mark_loop_for_removal (loop);
2525 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2526 gcc_assert (redirected != NULL);
2527 flush_pending_stmts (entry);
2528
2529 /* Add the other PHI node arguments. */
2530 add_phi_args_after_copy (region_copy, n_region, NULL);
2531
2532 if (free_region_copy)
2533 free (region_copy);
2534
2535 free_original_copy_tables ();
2536 return true;
2537 }
2538
2539 /* Return true when PATH is a valid jump-thread path. */
2540
2541 static bool
2542 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2543 {
2544 unsigned len = path->length ();
2545
2546 /* Check that the path is connected. */
2547 for (unsigned int j = 0; j < len - 1; j++)
2548 if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
2549 return false;
2550
2551 return true;
2552 }
2553
2554 /* Remove any queued jump threads that include edge E.
2555
2556 We don't actually remove them here, just record the edges into a
2557 hash table. That way we can do the search once per iteration of
2558 DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR. */
2559
2560 void
2561 remove_jump_threads_including (edge_def *e)
2562 {
2563 if (!paths.exists ())
2564 return;
2565
2566 if (!removed_edges)
2567 removed_edges = new hash_table<struct removed_edges> (17);
2568
2569 edge *slot = removed_edges->find_slot (e, INSERT);
2570 *slot = e;
2571 }
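
/* Editorial sketch (not part of GCC): a caller such as DOM that is about to
   remove a conditional's dead outgoing edge could queue it here first, so
   that thread_through_all_blocks later drops any registered path using it.
   EXAMPLE_REMOVE_EDGE_AND_THREADS and DEAD_EDGE are hypothetical names
   introduced only for this illustration.  */

static void example_remove_edge_and_threads (edge) ATTRIBUTE_UNUSED;

static void
example_remove_edge_and_threads (edge dead_edge)
{
  /* Record the edge for filtering, then take it out of the CFG.  */
  remove_jump_threads_including (dead_edge);
  remove_edge (dead_edge);
}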
2572
2573 /* Walk through all blocks and thread incoming edges to the appropriate
2574 outgoing edge for each edge pair recorded in THREADED_EDGES.
2575
2576 It is the caller's responsibility to fix the dominance information
2577 and rewrite duplicated SSA_NAMEs back into SSA form.
2578
2579 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2580 loop headers if it does not simplify the loop.
2581
2582 Returns true if one or more edges were threaded, false otherwise. */
2583
2584 bool
2585 thread_through_all_blocks (bool may_peel_loop_headers)
2586 {
2587 bool retval = false;
2588 unsigned int i;
2589 bitmap_iterator bi;
2590 bitmap threaded_blocks;
2591 struct loop *loop;
2592
2593 if (!paths.exists ())
2594 {
2595 retval = false;
2596 goto out;
2597 }
2598
2599 threaded_blocks = BITMAP_ALLOC (NULL);
2600 memset (&thread_stats, 0, sizeof (thread_stats));
2601
2602 /* Remove any paths that referenced removed edges. */
2603 if (removed_edges)
2604 for (i = 0; i < paths.length (); )
2605 {
2606 unsigned int j;
2607 vec<jump_thread_edge *> *path = paths[i];
2608
2609 for (j = 0; j < path->length (); j++)
2610 {
2611 edge e = (*path)[j]->e;
2612 if (removed_edges->find_slot (e, NO_INSERT))
2613 break;
2614 }
2615
2616 if (j != path->length ())
2617 {
2618 delete_jump_thread_path (path);
2619 paths.unordered_remove (i);
2620 continue;
2621 }
2622 i++;
2623 }
2624
2625 /* Jump-thread all FSM threads before other jump-threads. */
2626 for (i = 0; i < paths.length ();)
2627 {
2628 vec<jump_thread_edge *> *path = paths[i];
2629 edge entry = (*path)[0]->e;
2630
2631 /* Only code-generate FSM jump-threads in this loop. */
2632 if ((*path)[0]->type != EDGE_FSM_THREAD)
2633 {
2634 i++;
2635 continue;
2636 }
2637
2638 /* Do not jump-thread twice from the same block. */
2639 if (bitmap_bit_p (threaded_blocks, entry->src->index)
2640 /* Verify that the jump thread path is still valid: a
2641 previous jump-thread may have changed the CFG, and
2642 invalidated the current path. */
2643 || !valid_jump_thread_path (path))
2644 {
2645 /* Remove invalid FSM jump-thread paths. */
2646 delete_jump_thread_path (path);
2647 paths.unordered_remove (i);
2648 continue;
2649 }
2650
2651 unsigned len = path->length ();
2652 edge exit = (*path)[len - 1]->e;
2653 basic_block *region = XNEWVEC (basic_block, len - 1);
2654
2655 for (unsigned int j = 0; j < len - 1; j++)
2656 region[j] = (*path)[j]->e->dest;
2657
2658 if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
2659 {
2660 /* We do not update dominance info. */
2661 free_dominance_info (CDI_DOMINATORS);
2662 bitmap_set_bit (threaded_blocks, entry->src->index);
2663 retval = true;
2664 }
2665
2666 delete_jump_thread_path (path);
2667 paths.unordered_remove (i);
2668 }
2669
2670 /* Remove from PATHS all the jump-threads starting with an edge already
2671 jump-threaded. */
2672 for (i = 0; i < paths.length ();)
2673 {
2674 vec<jump_thread_edge *> *path = paths[i];
2675 edge entry = (*path)[0]->e;
2676
2677 /* Do not jump-thread twice from the same block. */
2678 if (bitmap_bit_p (threaded_blocks, entry->src->index))
2679 {
2680 delete_jump_thread_path (path);
2681 paths.unordered_remove (i);
2682 }
2683 else
2684 i++;
2685 }
2686
2687 bitmap_clear (threaded_blocks);
2688
2689 mark_threaded_blocks (threaded_blocks);
2690
2691 initialize_original_copy_tables ();
2692
2693 /* First perform the threading requests that do not affect
2694 loop structure. */
2695 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
2696 {
2697 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2698
2699 if (EDGE_COUNT (bb->preds) > 0)
2700 retval |= thread_block (bb, true);
2701 }
2702
2703 /* Then perform the threading through loop headers. We start with the
2704 innermost loop, so that the changes in cfg we perform won't affect
2705 further threading. */
2706 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2707 {
2708 if (!loop->header
2709 || !bitmap_bit_p (threaded_blocks, loop->header->index))
2710 continue;
2711
2712 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2713 }
2714
2715 /* Any jump threading paths that are still attached to edges at this
2716 point must be one of two cases.
2717
2718 First, we could have a jump threading path which went from outside
2719 a loop to inside a loop that was ignored because a prior jump thread
2720 across a backedge was realized (which indirectly causes the loop
2721 above to ignore the latter thread). We can detect these because the
2722 loop structures will be different and we do not currently try to
2723 optimize this case.
2724
2725 Second, we could be threading across a backedge to a point within the
2726 same loop. This occurs for the FSA/FSM optimization and we would
2727 like to optimize it. However, we have to be very careful as this
2728 may completely scramble the loop structures, with the result being
2729 irreducible loops causing us to throw away our loop structure.
2730
2731 As a compromise for the latter case, if the thread path ends in
2732 a block where the last statement is a multiway branch, then go
2733 ahead and thread it, else ignore it. */
2734 basic_block bb;
2735 edge e;
2736 FOR_EACH_BB_FN (bb, cfun)
2737 {
2738 /* If we do end up threading here, we can remove elements from
2739 BB->preds. Thus we cannot use the FOR_EACH_EDGE iterator. */
2740 for (edge_iterator ei = ei_start (bb->preds);
2741 (e = ei_safe_edge (ei));)
2742 if (e->aux)
2743 {
2744 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2745
2746 /* Case 1, threading from outside to inside the loop
2747 after we'd already threaded through the header. */
2748 if ((*path)[0]->e->dest->loop_father
2749 != path->last ()->e->src->loop_father)
2750 {
2751 delete_jump_thread_path (path);
2752 e->aux = NULL;
2753 ei_next (&ei);
2754 }
2755 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
2756 {
2757 /* The code to thread through loop headers may have
2758 split a block with jump threads attached to it.
2759
2760 We can identify this with a disjoint jump threading
2761 path. If found, just remove it. */
2762 for (unsigned int i = 0; i < path->length () - 1; i++)
2763 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
2764 {
2765 delete_jump_thread_path (path);
2766 e->aux = NULL;
2767 ei_next (&ei);
2768 break;
2769 }
2770
2771 /* Our path is still valid, thread it. */
2772 if (e->aux)
2773 {
2774 if (thread_block ((*path)[0]->e->dest, false))
2775 e->aux = NULL;
2776 else
2777 {
2778 delete_jump_thread_path (path);
2779 e->aux = NULL;
2780 ei_next (&ei);
2781 }
2782 }
2783 }
2784 else
2785 {
2786 delete_jump_thread_path (path);
2787 e->aux = NULL;
2788 ei_next (&ei);
2789 }
2790 }
2791 else
2792 ei_next (&ei);
2793 }
2794
2795 statistics_counter_event (cfun, "Jumps threaded",
2796 thread_stats.num_threaded_edges);
2797
2798 free_original_copy_tables ();
2799
2800 BITMAP_FREE (threaded_blocks);
2801 threaded_blocks = NULL;
2802 paths.release ();
2803
2804 if (retval)
2805 loops_state_set (LOOPS_NEED_FIXUP);
2806
2807 out:
2808 delete removed_edges;
2809 removed_edges = NULL;
2810 return retval;
2811 }
2812
2813 /* Delete the jump threading path PATH. We have to explicitly delete
2814 each entry in the vector, then the container. */
2815
2816 void
2817 delete_jump_thread_path (vec<jump_thread_edge *> *path)
2818 {
2819 for (unsigned int i = 0; i < path->length (); i++)
2820 delete (*path)[i];
2821 path->release ();
2822 delete path;
2823 }
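
/* Editorial sketch (not part of GCC): how a client pass might build and
   register a minimal two-edge jump thread from incoming edge E through
   E->dest along the known outgoing edge TAKEN.  It uses only the path
   representation from tree-ssa-threadupdate.h and mirrors the construction
   done in thread_single_edge above; EXAMPLE_REGISTER_SIMPLE_THREAD is a
   hypothetical name introduced only for this illustration.  */

static void example_register_simple_thread (edge, edge) ATTRIBUTE_UNUSED;

static void
example_register_simple_thread (edge e, edge taken)
{
  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  /* The first entry records where the thread starts.  */
  path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
  /* The second records the block to copy and the edge taken out of it.  */
  path->safe_push (new jump_thread_edge (taken, EDGE_COPY_SRC_BLOCK));
  register_jump_thread (path);
}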
2824
2825 /* Register a jump threading opportunity. We queue up all the jump
2826 threading opportunities discovered by a pass and update the CFG
2827 and SSA form all at once.
2828
2829 PATH is the series of edges forming the jump thread. Effectively we
2830 are recording that the destination of the path's first edge can be
2831 changed to the destination of its last edge after fixing the SSA graph. */
2832
2833 void
2834 register_jump_thread (vec<jump_thread_edge *> *path)
2835 {
2836 if (!dbg_cnt (registered_jump_thread))
2837 {
2838 delete_jump_thread_path (path);
2839 return;
2840 }
2841
2842 /* First make sure there are no NULL outgoing edges on the jump threading
2843 path. That can happen for jumping to a constant address. */
2844 for (unsigned int i = 0; i < path->length (); i++)
2845 if ((*path)[i]->e == NULL)
2846 {
2847 if (dump_file && (dump_flags & TDF_DETAILS))
2848 {
2849 fprintf (dump_file,
2850 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
2851 dump_jump_thread_path (dump_file, *path, false);
2852 }
2853
2854 delete_jump_thread_path (path);
2855 return;
2856 }
2857
2858 if (dump_file && (dump_flags & TDF_DETAILS))
2859 dump_jump_thread_path (dump_file, *path, true);
2860
2861 if (!paths.exists ())
2862 paths.create (5);
2863
2864 paths.safe_push (path);
2865 }