1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "gimple.h"
28 #include "gimple-iterator.h"
29 #include "gimple-ssa.h"
30 #include "tree-phinodes.h"
31 #include "tree-ssa.h"
32 #include "tree-ssa-threadupdate.h"
33 #include "ssa-iterators.h"
34 #include "dumpfile.h"
35 #include "cfgloop.h"
36 #include "hash-table.h"
37 #include "dbgcnt.h"
38
39 /* Given a block B, update the CFG and SSA graph to reflect redirecting
40 one or more in-edges to B to instead reach the destination of an
41 out-edge from B while preserving any side effects in B.
42
43 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
44 side effects of executing B.
45
46 1. Make a copy of B (including its outgoing edges and statements). Call
47 the copy B'. Note B' has no incoming edges or PHIs at this time.
48
49 2. Remove the control statement at the end of B' and all outgoing edges
50 except B'->C.
51
52 3. Add a new argument to each PHI in C with the same value as the existing
53 argument associated with edge B->C. Associate the new PHI arguments
54 with the edge B'->C.
55
56 4. For each PHI in B, find or create a PHI in B' with an identical
57 PHI_RESULT. Add an argument to the PHI in B' which has the same
58 value as the PHI in B associated with the edge A->B. Associate
59 the new argument in the PHI in B' with the edge A->B.
60
61 5. Change the edge A->B to A->B'.
62
63 5a. This automatically deletes any PHI arguments associated with the
64 edge A->B in B.
65
66 5b. This automatically associates each new argument added in step 4
67 with the edge A->B'.
68
69 6. Repeat for other incoming edges into B.
70
71 7. Put the duplicated resources in B and all the B' blocks into SSA form.
72
73 Note that block duplication can be minimized by first collecting the
74 set of unique destination blocks that the incoming edges should
75 be threaded to.
76
77 We reduce the number of edges and statements we create by not copying all
78 the outgoing edges and the control statement in step #1. We instead create
79 a template block without the outgoing edges and duplicate the template.
80
81 Another case this code handles is threading through a "joiner" block. In
82 this case, we do not know the destination of the joiner block, but one
83 of the outgoing edges from the joiner block leads to a threadable path. This
84 case largely works as outlined above, except the duplicate of the joiner
85 block still contains a full set of outgoing edges and its control statement.
86 We just redirect one of its outgoing edges to our jump threading path. */
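/* As an illustration only (a hedged sketch, not output of this pass),
   consider a source-level fragment where the value tested in B is known
   on the edge from A:

       x = 1;              // block A: sets x, falls into B
       if (x == 1)         // block B: condition is known when coming from A
         foo ();           // block C
       else
         bar ();           // block D

   Threading A->B over B->C creates B', a copy of B holding only B's side
   effects (here none beyond the test), redirects A to B', and lets B' fall
   through to C.  Any other predecessors of B still reach the original block
   and its conditional.  */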
87
88
89 /* Steps #5 and #6 of the above algorithm are best implemented by walking
90 all the incoming edges which thread to the same destination edge at
91 the same time. That avoids lots of table lookups to get information
92 for the destination edge.
93
94 To realize that implementation we create a list of incoming edges
95 which thread to the same outgoing edge. Thus to implement steps
96 #5 and #6 we traverse our hash table of outgoing edge information.
97 For each entry we walk the list of incoming edges which thread to
98 the current outgoing edge. */
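/* In sketch form (illustrative only; ssa_redirect_edges below is the real
   implementation), steps #5 and #6 become:

     for each redirection_data entry RD in the hash table
       for (el = RD->incoming_edges; el; el = el->next)
         redirect el->e to the duplicate block recorded in RD  */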
99
100 struct el
101 {
102 edge e;
103 struct el *next;
104 };
105
106 /* Main data structure recording information regarding B's duplicate
107 blocks. */
108
109 /* We need to efficiently record the unique thread destinations of this
110 block and specific information associated with those destinations. We
111 may have many incoming edges threaded to the same outgoing edge. This
112 can be naturally implemented with a hash table. */
113
114 struct redirection_data : typed_free_remove<redirection_data>
115 {
116 /* We support wiring up two block duplicates in a jump threading path.
117
118 One is a normal block copy where we remove the control statement
119 and wire up its single remaining outgoing edge to the thread path.
120
121 The other is a joiner block where we leave the control statement
122 in place, but wire one of the outgoing edges to a thread path.
123
124 In theory we could have multiple block duplicates in a jump
125 threading path, but I haven't tried that.
126
127 The duplicate blocks appear in this array in the same order in
128 which they appear in the jump thread path. */
129 basic_block dup_blocks[2];
130
131 /* The jump threading path. */
132 vec<jump_thread_edge *> *path;
133
134 /* A list of incoming edges which we want to thread to the
135 same path. */
136 struct el *incoming_edges;
137
138 /* hash_table support. */
139 typedef redirection_data value_type;
140 typedef redirection_data compare_type;
141 static inline hashval_t hash (const value_type *);
142 static inline int equal (const value_type *, const compare_type *);
143 };
144
145 /* Simple hashing function. For any given incoming edge E, we're going
146 to be most concerned with the final destination of its jump thread
147 path. So hash on the block index of the final edge in the path. */
148
149 inline hashval_t
150 redirection_data::hash (const value_type *p)
151 {
152 vec<jump_thread_edge *> *path = p->path;
153 return path->last ()->e->dest->index;
154 }
155
156 /* Given two hash table entries, return true if they have the same
157 jump threading path. */
158 inline int
159 redirection_data::equal (const value_type *p1, const compare_type *p2)
160 {
161 vec<jump_thread_edge *> *path1 = p1->path;
162 vec<jump_thread_edge *> *path2 = p2->path;
163
164 if (path1->length () != path2->length ())
165 return false;
166
167 for (unsigned int i = 1; i < path1->length (); i++)
168 {
169 if ((*path1)[i]->type != (*path2)[i]->type
170 || (*path1)[i]->e != (*path2)[i]->e)
171 return false;
172 }
173
174 return true;
175 }
176
177 /* Data structure of information to pass to hash table traversal routines. */
178 struct ssa_local_info_t
179 {
180 /* The current block we are working on. */
181 basic_block bb;
182
183 /* We only create a template block for the first duplicated block in a
184 jump threading path as we may need many duplicates of that block.
185
186 The second duplicate block in a path is specific to that path. Creating
187 and sharing a template for that block is considerably more difficult. */
188 basic_block template_block;
189
190 /* TRUE if we thread one or more jumps, FALSE otherwise. */
191 bool jumps_threaded;
192 };
193
194 /* Passes which use the jump threading code register jump threading
195 opportunities as they are discovered. We keep the registered
196 jump threading opportunities in this vector.  Each element is a jump
197 thread path, i.e., a vector of jump_thread_edge records. */
198 static vec<vec<jump_thread_edge *> *> paths;
199
200 /* When we start updating the CFG for threading, data necessary for jump
201 threading is attached to the AUX field for the incoming edge. Use these
202 macros to access the underlying structure attached to the AUX field. */
203 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
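/* For example (illustrative only), given an edge E carrying a jump thread
   path in its AUX field, the final destination of the thread is:

     vec<jump_thread_edge *> *path = THREAD_PATH (E);
     basic_block final_dest = path->last ()->e->dest;  */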
204
205 /* Jump threading statistics. */
206
207 struct thread_stats_d
208 {
209 unsigned long num_threaded_edges;
210 };
211
212 struct thread_stats_d thread_stats;
213
214
215 /* Remove the last statement in block BB if it is a control statement.
216 Also remove all outgoing edges except the edge which reaches DEST_BB.
217 If DEST_BB is NULL, then remove all outgoing edges. */
218
219 static void
220 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
221 {
222 gimple_stmt_iterator gsi;
223 edge e;
224 edge_iterator ei;
225
226 gsi = gsi_last_bb (bb);
227
228 /* If the duplicate ends with a control statement, then remove it.
229
230 Note that if we are duplicating the template block rather than the
231 original basic block, then the duplicate might not have any real
232 statements in it. */
233 if (!gsi_end_p (gsi)
234 && gsi_stmt (gsi)
235 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
236 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
237 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
238 gsi_remove (&gsi, true);
239
240 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
241 {
242 if (e->dest != dest_bb)
243 remove_edge (e);
244 else
245 ei_next (&ei);
246 }
247 }
248
249 /* Create a duplicate of BB.  Record the duplicate block in RD's
250    dup_blocks array at index COUNT. */
251
252 static void
253 create_block_for_threading (basic_block bb,
254 struct redirection_data *rd,
255 unsigned int count)
256 {
257 edge_iterator ei;
258 edge e;
259
260 /* We can use the generic block duplication code and simply remove
261 the stuff we do not need. */
262 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
263
264 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
265 e->aux = NULL;
266
267 /* Zero out the profile, since the block is unreachable for now. */
268 rd->dup_blocks[count]->frequency = 0;
269 rd->dup_blocks[count]->count = 0;
270 }
271
272 /* Main data structure to hold information for duplicates of BB. */
273
274 static hash_table <redirection_data> redirection_data;
275
276 /* Given an edge E whose AUX field holds a jump thread path, look up and
277    return the entry for that path in our hash table.
278 
279    If INSERT is true, then we insert the entry into the hash table if it is
280    not already present, and E is added to the entry's list of incoming edges. */
281
282 static struct redirection_data *
283 lookup_redirection_data (edge e, enum insert_option insert)
284 {
285 struct redirection_data **slot;
286 struct redirection_data *elt;
287 vec<jump_thread_edge *> *path = THREAD_PATH (e);
288
289 /* Build a hash table element so we can see if E is already
290 in the table. */
291 elt = XNEW (struct redirection_data);
292 elt->path = path;
293 elt->dup_blocks[0] = NULL;
294 elt->dup_blocks[1] = NULL;
295 elt->incoming_edges = NULL;
296
297 slot = redirection_data.find_slot (elt, insert);
298
299 /* This will only happen if INSERT is false and the entry is not
300 in the hash table. */
301 if (slot == NULL)
302 {
303 free (elt);
304 return NULL;
305 }
306
307 /* This will only happen if E was not in the hash table and
308 INSERT is true. */
309 if (*slot == NULL)
310 {
311 *slot = elt;
312 elt->incoming_edges = XNEW (struct el);
313 elt->incoming_edges->e = e;
314 elt->incoming_edges->next = NULL;
315 return elt;
316 }
317 /* E was in the hash table. */
318 else
319 {
320 /* Free ELT as we do not need it anymore, we will extract the
321 relevant entry from the hash table itself. */
322 free (elt);
323
324 /* Get the entry stored in the hash table. */
325 elt = *slot;
326
327 /* If insertion was requested, then we need to add INCOMING_EDGE
328 to the list of incoming edges associated with E. */
329 if (insert)
330 {
331 struct el *el = XNEW (struct el);
332 el->next = elt->incoming_edges;
333 el->e = e;
334 elt->incoming_edges = el;
335 }
336
337 return elt;
338 }
339 }
340
341 /* Similar to copy_phi_args, except that the PHI arg already exists; it
342    just does not yet have a value associated with it. */
343
344 static void
345 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
346 {
347 int src_idx = src_e->dest_idx;
348 int tgt_idx = tgt_e->dest_idx;
349
350 /* Iterate over each PHI in e->dest. */
351 for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
352 gsi2 = gsi_start_phis (tgt_e->dest);
353 !gsi_end_p (gsi);
354 gsi_next (&gsi), gsi_next (&gsi2))
355 {
356 gimple src_phi = gsi_stmt (gsi);
357 gimple dest_phi = gsi_stmt (gsi2);
358 tree val = gimple_phi_arg_def (src_phi, src_idx);
359 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
360
361 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
362 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
363 }
364 }
365
366 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
367
368 static void
369 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
370 {
371 gimple_stmt_iterator gsi;
372 int src_indx = src_e->dest_idx;
373
374 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
375 {
376 gimple phi = gsi_stmt (gsi);
377 source_location locus = gimple_phi_arg_location (phi, src_indx);
378 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
379 }
380 }
381
382 /* We have recently made a copy of ORIG_BB, including its outgoing
383 edges. The copy is NEW_BB. Every PHI node in every direct successor of
384 ORIG_BB has a new argument associated with the edge from NEW_BB to the
385 successor. Initialize the PHI argument so that it is equal to the PHI
386 argument associated with the edge from ORIG_BB to the successor. */
387
388 static void
389 update_destination_phis (basic_block orig_bb, basic_block new_bb)
390 {
391 edge_iterator ei;
392 edge e;
393
394 FOR_EACH_EDGE (e, ei, orig_bb->succs)
395 {
396 edge e2 = find_edge (new_bb, e->dest);
397 copy_phi_args (e->dest, e, e2);
398 }
399 }
400
401 /* Given a duplicate block and its single destination (both stored
402    in RD), create an edge between the duplicate and its single
403    destination.
404
405 Add an additional argument to any PHI nodes at the single
406 destination. */
407
408 static void
409 create_edge_and_update_destination_phis (struct redirection_data *rd,
410 basic_block bb)
411 {
412 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
413
414 rescan_loop_exit (e, true, false);
415 e->probability = REG_BR_PROB_BASE;
416 e->count = bb->count;
417
418 /* We have to copy path -- which means creating a new vector as well
419 as all the jump_thread_edge entries. */
420 if (rd->path->last ()->e->aux)
421 {
422 vec<jump_thread_edge *> *path = THREAD_PATH (rd->path->last ()->e);
423 vec<jump_thread_edge *> *copy = new vec<jump_thread_edge *> ();
424
425 /* Sadly, the elements of the vector are pointers and need to
426 be copied as well. */
427 for (unsigned int i = 0; i < path->length (); i++)
428 {
429 jump_thread_edge *x
430 = new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
431 copy->safe_push (x);
432 }
433 e->aux = (void *)copy;
434 }
435 else
436 {
437 e->aux = NULL;
438 }
439
440 /* If there are any PHI nodes at the destination of the outgoing edge
441 from the duplicate block, then we will need to add a new argument
442 to them. The argument should have the same value as the argument
443 associated with the outgoing edge stored in RD. */
444 copy_phi_args (e->dest, rd->path->last ()->e, e);
445 }
446
447 /* Look through PATH beginning at START and return TRUE if there are
448 any additional blocks that need to be duplicated. Otherwise,
449 return FALSE. */
450 static bool
451 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
452 unsigned int start)
453 {
454 for (unsigned int i = start + 1; i < path->length (); i++)
455 {
456 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
457 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
458 return true;
459 }
460 return false;
461 }
462
463 /* Wire up the outgoing edges from the duplicate blocks and
464 update any PHIs as needed. */
465 void
466 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
467 ssa_local_info_t *local_info)
468 {
469 edge e = rd->incoming_edges->e;
470 vec<jump_thread_edge *> *path = THREAD_PATH (e);
471
472 for (unsigned int count = 0, i = 1; i < path->length (); i++)
473 {
474 /* If we were threading through a joiner block, then we want
475 to keep its control statement and redirect an outgoing edge.
476 Else we want to remove the control statement & edges, then create
477 a new outgoing edge. In both cases we may need to update PHIs. */
478 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
479 {
480 edge victim;
481 edge e2;
482
483 /* This updates the PHIs at the destination of the duplicate
484 block. */
485 update_destination_phis (local_info->bb, rd->dup_blocks[count]);
486
487 /* Find the edge from the duplicate block to the block we're
488 threading through. That's the edge we want to redirect. */
489 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
490
491 /* If there are no remaining blocks on the path to duplicate,
492 then redirect VICTIM to the final destination of the jump
493 threading path. */
494 if (!any_remaining_duplicated_blocks (path, i))
495 {
496 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
497 e2->count = path->last ()->e->count;
498 /* If we redirected the edge, then we need to copy PHI arguments
499 at the target. If the edge already existed (e2 != victim
500 case), then the PHIs in the target already have the correct
501 arguments. */
502 if (e2 == victim)
503 copy_phi_args (e2->dest, path->last ()->e, e2);
504 }
505 else
506 {
507 /* Redirect VICTIM to the next duplicated block in the path. */
508 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
509
510 /* We need to update the PHIs in the next duplicated block. We
511 want the new PHI args to have the same value as they had
512 in the source of the next duplicate block.
513
514 Thus, we need to know which edge we traversed into the
515 source of the duplicate. Furthermore, we may have
516 traversed many edges to reach the source of the duplicate.
517
518 Walk through the path starting at element I until we
519 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
520 the edge from the prior element. */
521 for (unsigned int j = i + 1; j < path->length (); j++)
522 {
523 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
524 {
525 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
526 break;
527 }
528 }
529 }
530 count++;
531 }
532 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
533 {
534 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
535 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count]);
536 if (count == 1)
537 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
538 count++;
539 }
540 }
541 }
542
543 /* Hash table traversal callback routine to create duplicate blocks. */
544
545 int
546 ssa_create_duplicates (struct redirection_data **slot,
547 ssa_local_info_t *local_info)
548 {
549 struct redirection_data *rd = *slot;
550
551 /* The second duplicated block in a jump threading path is specific
552 to the path.  So it gets stored in RD rather than in LOCAL_INFO.
553
554 Each time we're called, we have to look through the path and see
555 if a second block needs to be duplicated.
556
557 Note the search starts with the third edge on the path. The first
558 edge is the incoming edge, the second edge always has its source
559 duplicated. Thus we start our search with the third edge. */
560 vec<jump_thread_edge *> *path = rd->path;
561 for (unsigned int i = 2; i < path->length (); i++)
562 {
563 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
564 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
565 {
566 create_block_for_threading ((*path)[i]->e->src, rd, 1);
567 break;
568 }
569 }
570
571 /* Create a template block if we have not done so already. Otherwise
572 use the template to create a new block. */
573 if (local_info->template_block == NULL)
574 {
575 create_block_for_threading ((*path)[1]->e->src, rd, 0);
576 local_info->template_block = rd->dup_blocks[0];
577
578 /* We do not create any outgoing edges for the template. We will
579 take care of that in a later traversal. That way we do not
580 create edges that are going to just be deleted. */
581 }
582 else
583 {
584 create_block_for_threading (local_info->template_block, rd, 0);
585
586 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
587 block. */
588 ssa_fix_duplicate_block_edges (rd, local_info);
589 }
590
591 /* Keep walking the hash table. */
592 return 1;
593 }
594
595 /* We did not create any outgoing edges for the template block during
596 block creation. This hash table traversal callback creates the
597 outgoing edge for the template block. */
598
599 inline int
600 ssa_fixup_template_block (struct redirection_data **slot,
601 ssa_local_info_t *local_info)
602 {
603 struct redirection_data *rd = *slot;
604
605 /* If this is the template block, halt the traversal after updating
606 it appropriately.
607
608 If we were threading through a joiner block, then we want
609 to keep its control statement and redirect an outgoing edge.
610 Else we want to remove the control statement & edges, then create
611 a new outgoing edge. In both cases we may need to update PHIs. */
612 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
613 {
614 ssa_fix_duplicate_block_edges (rd, local_info);
615 return 0;
616 }
617
618 return 1;
619 }
620
621 /* Hash table traversal callback to redirect each incoming edge
622 associated with this hash table element to its new destination. */
623
624 int
625 ssa_redirect_edges (struct redirection_data **slot,
626 ssa_local_info_t *local_info)
627 {
628 struct redirection_data *rd = *slot;
629 struct el *next, *el;
630
631 /* Walk over all the incoming edges associated with this
632 hash table entry. */
633 for (el = rd->incoming_edges; el; el = next)
634 {
635 edge e = el->e;
636 vec<jump_thread_edge *> *path = THREAD_PATH (e);
637
638 /* Go ahead and free this element from the list. Doing this now
639 avoids the need for another list walk when we destroy the hash
640 table. */
641 next = el->next;
642 free (el);
643
644 thread_stats.num_threaded_edges++;
645
646 if (rd->dup_blocks[0])
647 {
648 edge e2;
649
650 if (dump_file && (dump_flags & TDF_DETAILS))
651 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
652 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
653
654 rd->dup_blocks[0]->count += e->count;
655
656 /* Excessive jump threading may make frequencies large enough so
657 the computation overflows. */
658 if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
659 rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
660
661 /* In the case of threading through a joiner block, the outgoing
662 edges from the duplicate block were updated when they were
663 redirected during ssa_fix_duplicate_block_edges. */
664 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
665 EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
666
667 /* Redirect the incoming edge (possibly to the joiner block) to the
668 appropriate duplicate block. */
669 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
670 gcc_assert (e == e2);
671 flush_pending_stmts (e2);
672 }
673
674 /* Go ahead and clear E->aux. It's not needed anymore and failure
675 to clear it will cause all kinds of unpleasant problems later. */
676 delete_jump_thread_path (path);
677 e->aux = NULL;
678
679 }
680
681 /* Indicate that we actually threaded one or more jumps. */
682 if (rd->incoming_edges)
683 local_info->jumps_threaded = true;
684
685 return 1;
686 }
687
688 /* Return true if this block has no executable statements other than
689 a simple control flow instruction.  When the number of outgoing edges
690 is one, this is equivalent to a "forwarder" block. */
691
692 static bool
693 redirection_block_p (basic_block bb)
694 {
695 gimple_stmt_iterator gsi;
696
697 /* Advance to the first executable statement. */
698 gsi = gsi_start_bb (bb);
699 while (!gsi_end_p (gsi)
700 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
701 || is_gimple_debug (gsi_stmt (gsi))
702 || gimple_nop_p (gsi_stmt (gsi))))
703 gsi_next (&gsi);
704
705 /* Check if this is an empty block. */
706 if (gsi_end_p (gsi))
707 return true;
708
709 /* Test that we've reached the terminating control statement. */
710 return gsi_stmt (gsi)
711 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
712 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
713 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
714 }
715
716 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
717 is reached via one or more specific incoming edges, we know which
718 outgoing edge from BB will be traversed.
719
720 We want to redirect those incoming edges to the target of the
721 appropriate outgoing edge. Doing so avoids a conditional branch
722 and may expose new optimization opportunities. Note that we have
723 to update dominator tree and SSA graph after such changes.
724
725 The key to keeping the SSA graph update manageable is to duplicate
726 the side effects occurring in BB so that those side effects still
727 occur on the paths which bypass BB after redirecting edges.
728
729 We accomplish this by creating duplicates of BB and arranging for
730 the duplicates to unconditionally pass control to one specific
731 successor of BB. We then revector the incoming edges into BB to
732 the appropriate duplicate of BB.
733
734 If NOLOOP_ONLY is true, we only perform the threading as long as it
735 does not affect the structure of the loops in a nontrivial way.
736
737 If JOINERS is true, then thread through joiner blocks as well. */
738
739 static bool
740 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
741 {
742 /* E is an incoming edge into BB that we may or may not want to
743 redirect to a duplicate of BB. */
744 edge e, e2;
745 edge_iterator ei;
746 ssa_local_info_t local_info;
747 struct loop *loop = bb->loop_father;
748
749 /* To avoid scanning a linear array for the element we need, we instead
750 use a hash table. For normal code there should be no noticeable
751 difference. However, if we have a block with a large number of
752 incoming and outgoing edges such linear searches can get expensive. */
753 redirection_data.create (EDGE_COUNT (bb->succs));
754
755 /* If we thread the latch of the loop to its exit, the loop ceases to
756 exist. Make sure we do not restrict ourselves in order to preserve
757 this loop. */
758 if (loop->header == bb)
759 {
760 e = loop_latch_edge (loop);
761 vec<jump_thread_edge *> *path = THREAD_PATH (e);
762
763 if (path
764 && (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && joiners)
765 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && !joiners)))
766 {
767 for (unsigned int i = 1; i < path->length (); i++)
768 {
769 edge e2 = (*path)[i]->e;
770
771 if (loop_exit_edge_p (loop, e2))
772 {
773 loop->header = NULL;
774 loop->latch = NULL;
775 loops_state_set (LOOPS_NEED_FIXUP);
776 }
777 }
778 }
779 }
780
781 /* Record each unique threaded destination into a hash table for
782 efficient lookups. */
783 FOR_EACH_EDGE (e, ei, bb->preds)
784 {
785 if (e->aux == NULL)
786 continue;
787
788 vec<jump_thread_edge *> *path = THREAD_PATH (e);
789
790 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
791 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
792 continue;
793
794 e2 = path->last ()->e;
795 if (!e2 || noloop_only)
796 {
797 /* If NOLOOP_ONLY is true, we only allow threading through the
798 header of a loop to exit edges.
799
800 There are two cases to consider.  The first is when BB is the
801 loop header. We will attempt to thread this elsewhere, so
802 we can just continue here. */
803
804 if (bb == bb->loop_father->header
805 && (!loop_exit_edge_p (bb->loop_father, e2)
806 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
807 continue;
808
809
810 /* The second occurs when there was a loop header buried in a jump
811 threading path.  We do not try to thread this elsewhere, so
812 just cancel the jump threading request by clearing the AUX
813 field now. */
814 if ((bb->loop_father != e2->src->loop_father
815 && !loop_exit_edge_p (e2->src->loop_father, e2))
816 || (e2->src->loop_father != e2->dest->loop_father
817 && !loop_exit_edge_p (e2->src->loop_father, e2)))
818 {
819 /* Since this case is not handled by our special code
820 to thread through a loop header, we must explicitly
821 cancel the threading request here. */
822 delete_jump_thread_path (path);
823 e->aux = NULL;
824 continue;
825 }
826 }
827
828 if (e->dest == e2->src)
829 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
830 e->count, (*THREAD_PATH (e))[1]->e);
831
832 /* Insert the outgoing edge into the hash table if it is not
833 already in the hash table. */
834 lookup_redirection_data (e, INSERT);
835 }
836
837 /* We do not update dominance info. */
838 free_dominance_info (CDI_DOMINATORS);
839
840 /* We know we only thread through the loop header to loop exits.
841 Let the basic block duplication hook know we are not creating
842 a multiple entry loop. */
843 if (noloop_only
844 && bb == bb->loop_father->header)
845 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
846
847 /* Now create duplicates of BB.
848
849 Note that for a block with a high outgoing degree we can waste
850 a lot of time and memory creating and destroying useless edges.
851
852 So we first duplicate BB and remove the control structure at the
853 tail of the duplicate as well as all outgoing edges from the
854 duplicate. We then use that duplicate block as a template for
855 the rest of the duplicates. */
856 local_info.template_block = NULL;
857 local_info.bb = bb;
858 local_info.jumps_threaded = false;
859 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
860 (&local_info);
861
862 /* The template does not have an outgoing edge. Create that outgoing
863 edge and update PHI nodes at the edge's target as necessary.
864
865 We do this after creating all the duplicates to avoid creating
866 unnecessary edges. */
867 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
868 (&local_info);
869
870 /* The hash table traversals above created the duplicate blocks (and the
871 statements within the duplicate blocks). This loop creates PHI nodes for
872 the duplicated blocks and redirects the incoming edges into BB to reach
873 the duplicates of BB. */
874 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
875 (&local_info);
876
877 /* Done with this block. Clear REDIRECTION_DATA. */
878 redirection_data.dispose ();
879
880 if (noloop_only
881 && bb == bb->loop_father->header)
882 set_loop_copy (bb->loop_father, NULL);
883
884 /* Indicate to our caller whether or not any jumps were threaded. */
885 return local_info.jumps_threaded;
886 }
887
888 /* Wrapper for thread_block_1 so that we can first handle jump
889 thread paths which do not involve copying joiner blocks, then
890 handle jump thread paths which have joiner blocks.
891
892 By doing things this way we can be as aggressive as possible and
893 not worry that copying a joiner block will create a jump threading
894 opportunity. */
895
896 static bool
897 thread_block (basic_block bb, bool noloop_only)
898 {
899 bool retval;
900 retval = thread_block_1 (bb, noloop_only, false);
901 retval |= thread_block_1 (bb, noloop_only, true);
902 return retval;
903 }
904
905
906 /* Thread edge E through E->dest to the second edge on E's jump thread path
907    (THREAD_PATH (E)).  Return the copy of E->dest created during threading, or
908    E->dest if it was not necessary to copy it (E is its single predecessor). */
909
910 static basic_block
911 thread_single_edge (edge e)
912 {
913 basic_block bb = e->dest;
914 struct redirection_data rd;
915 vec<jump_thread_edge *> *path = THREAD_PATH (e);
916 edge eto = (*path)[1]->e;
917
918 for (unsigned int i = 0; i < path->length (); i++)
919 delete (*path)[i];
920 delete path;
921 e->aux = NULL;
922
923 thread_stats.num_threaded_edges++;
924
925 if (single_pred_p (bb))
926 {
927 /* If BB has just a single predecessor, we should only remove the
928    control statement at its end and all outgoing edges except ETO. */
929 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
930
931 /* And fixup the flags on the single remaining edge. */
932 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
933 eto->flags |= EDGE_FALLTHRU;
934
935 return bb;
936 }
937
938 /* Otherwise, we need to create a copy. */
939 if (e->dest == eto->src)
940 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
941
942 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
943 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
944 npath->safe_push (x);
945
946 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
947 npath->safe_push (x);
948 rd.path = npath;
949
950 create_block_for_threading (bb, &rd, 0);
951 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
952 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0]);
953
954 if (dump_file && (dump_flags & TDF_DETAILS))
955 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
956 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
957
958 rd.dup_blocks[0]->count = e->count;
959 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
960 single_succ_edge (rd.dup_blocks[0])->count = e->count;
961 redirect_edge_and_branch (e, rd.dup_blocks[0]);
962 flush_pending_stmts (e);
963
964 return rd.dup_blocks[0];
965 }
966
967 /* Callback for dfs_enumerate_from. Returns true if BB is different
968 from STOP and DBDS_CE_STOP. */
969
970 static basic_block dbds_ce_stop;
971 static bool
972 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
973 {
974 return (bb != (const_basic_block) stop
975 && bb != dbds_ce_stop);
976 }
977
978 /* Evaluates the dominance relationship of the latch of LOOP and BB, and
979 returns the state. */
980
981 enum bb_dom_status
982 {
983 /* BB does not dominate latch of the LOOP. */
984 DOMST_NONDOMINATING,
985 /* The LOOP is broken (there is no path from the header to its latch). */
986 DOMST_LOOP_BROKEN,
987 /* BB dominates the latch of the LOOP. */
988 DOMST_DOMINATING
989 };
990
991 static enum bb_dom_status
992 determine_bb_domination_status (struct loop *loop, basic_block bb)
993 {
994 basic_block *bblocks;
995 unsigned nblocks, i;
996 bool bb_reachable = false;
997 edge_iterator ei;
998 edge e;
999
1000 /* This function assumes BB is a successor of LOOP->header.
1001 If that is not the case return DOMST_NONDOMINATING which
1002 is always safe. */
1003 {
1004 bool ok = false;
1005
1006 FOR_EACH_EDGE (e, ei, bb->preds)
1007 {
1008 if (e->src == loop->header)
1009 {
1010 ok = true;
1011 break;
1012 }
1013 }
1014
1015 if (!ok)
1016 return DOMST_NONDOMINATING;
1017 }
1018
1019 if (bb == loop->latch)
1020 return DOMST_DOMINATING;
1021
1022 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1023 from it. */
1024
1025 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1026 dbds_ce_stop = loop->header;
1027 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1028 bblocks, loop->num_nodes, bb);
1029 for (i = 0; i < nblocks; i++)
1030 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1031 {
1032 if (e->src == loop->header)
1033 {
1034 free (bblocks);
1035 return DOMST_NONDOMINATING;
1036 }
1037 if (e->src == bb)
1038 bb_reachable = true;
1039 }
1040
1041 free (bblocks);
1042 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1043 }
1044
1045 /* Return true if BB is part of the new pre-header that is created
1046 when threading the latch to DATA. */
1047
1048 static bool
1049 def_split_header_continue_p (const_basic_block bb, const void *data)
1050 {
1051 const_basic_block new_header = (const_basic_block) data;
1052 const struct loop *l;
1053
1054 if (bb == new_header
1055 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1056 return false;
1057 for (l = bb->loop_father; l; l = loop_outer (l))
1058 if (l == new_header->loop_father)
1059 return true;
1060 return false;
1061 }
1062
1063 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1064 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1065 to the inside of the loop. */
1066
1067 static bool
1068 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1069 {
1070 basic_block header = loop->header;
1071 edge e, tgt_edge, latch = loop_latch_edge (loop);
1072 edge_iterator ei;
1073 basic_block tgt_bb, atgt_bb;
1074 enum bb_dom_status domst;
1075
1076 /* We have already threaded through headers to exits, so all the threading
1077 requests now are to the inside of the loop. We need to avoid creating
1078 irreducible regions (i.e., loops with more than one entry block), and
1079 also loops with several latch edges, or new subloops of the loop (although
1080 there are cases where it might be appropriate, it is difficult to decide,
1081 and doing it wrongly may confuse other optimizers).
1082
1083 We could handle more general cases here. However, the intention is to
1084 preserve some information about the loop, which is impossible if its
1085 structure changes significantly, in a way that is not well understood.
1086 Thus we only handle a few important special cases, in which updating
1087 the loop-carried information should also be feasible:
1088
1089 1) Propagation of latch edge to a block that dominates the latch block
1090 of a loop. This aims to handle the following idiom:
1091
1092 first = 1;
1093 while (1)
1094 {
1095 if (first)
1096 initialize;
1097 first = 0;
1098 body;
1099 }
1100
1101 After threading the latch edge, this becomes
1102
1103 first = 1;
1104 if (first)
1105 initialize;
1106 while (1)
1107 {
1108 first = 0;
1109 body;
1110 }
1111
1112 The original header of the loop is moved out of it, and we may thread
1113 the remaining edges through it without further constraints.
1114
1115 2) All entry edges are propagated to a single basic block that dominates
1116 the latch block of the loop. This aims to handle the following idiom
1117 (normally created for "for" loops):
1118
1119 i = 0;
1120 while (1)
1121 {
1122 if (i >= 100)
1123 break;
1124 body;
1125 i++;
1126 }
1127
1128 This becomes
1129
1130 i = 0;
1131 while (1)
1132 {
1133 body;
1134 i++;
1135 if (i >= 100)
1136 break;
1137 }
1138 */
1139
1140 /* Threading through the header won't improve the code if the header has just
1141 one successor. */
1142 if (single_succ_p (header))
1143 goto fail;
1144
1145 /* If we threaded the latch using a joiner block, we cancel the
1146 threading opportunity out of an abundance of caution. However,
1147 still allow threading from outside to inside the loop. */
1148 if (latch->aux)
1149 {
1150 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1151 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1152 {
1153 delete_jump_thread_path (path);
1154 latch->aux = NULL;
1155 }
1156 }
1157
1158 if (latch->aux)
1159 {
1160 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1161 tgt_edge = (*path)[1]->e;
1162 tgt_bb = tgt_edge->dest;
1163 }
1164 else if (!may_peel_loop_headers
1165 && !redirection_block_p (loop->header))
1166 goto fail;
1167 else
1168 {
1169 tgt_bb = NULL;
1170 tgt_edge = NULL;
1171 FOR_EACH_EDGE (e, ei, header->preds)
1172 {
1173 if (!e->aux)
1174 {
1175 if (e == latch)
1176 continue;
1177
1178 /* If the latch is not threaded, and there is a header
1179 edge that is not threaded, we would create a loop
1180 with multiple entries. */
1181 goto fail;
1182 }
1183
1184 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1185
1186 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1187 goto fail;
1188 tgt_edge = (*path)[1]->e;
1189 atgt_bb = tgt_edge->dest;
1190 if (!tgt_bb)
1191 tgt_bb = atgt_bb;
1192 /* Two targets of threading would make us create a loop
1193 with multiple entries. */
1194 else if (tgt_bb != atgt_bb)
1195 goto fail;
1196 }
1197
1198 if (!tgt_bb)
1199 {
1200 /* There are no threading requests. */
1201 return false;
1202 }
1203
1204 /* Redirecting to empty loop latch is useless. */
1205 if (tgt_bb == loop->latch
1206 && empty_block_p (loop->latch))
1207 goto fail;
1208 }
1209
1210 /* The target block must dominate the loop latch, otherwise we would be
1211 creating a subloop. */
1212 domst = determine_bb_domination_status (loop, tgt_bb);
1213 if (domst == DOMST_NONDOMINATING)
1214 goto fail;
1215 if (domst == DOMST_LOOP_BROKEN)
1216 {
1217 /* If the loop ceased to exist, mark it as such, and thread through its
1218 original header. */
1219 loop->header = NULL;
1220 loop->latch = NULL;
1221 loops_state_set (LOOPS_NEED_FIXUP);
1222 return thread_block (header, false);
1223 }
1224
1225 if (tgt_bb->loop_father->header == tgt_bb)
1226 {
1227 /* If the target of the threading is a header of a subloop, we need
1228 to create a preheader for it, so that the headers of the two loops
1229 do not merge. */
1230 if (EDGE_COUNT (tgt_bb->preds) > 2)
1231 {
1232 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1233 gcc_assert (tgt_bb != NULL);
1234 }
1235 else
1236 tgt_bb = split_edge (tgt_edge);
1237 }
1238
1239 if (latch->aux)
1240 {
1241 basic_block *bblocks;
1242 unsigned nblocks, i;
1243
1244 /* First handle the case where the latch edge is redirected.  We are copying
1245 the loop header but not creating a multiple entry loop. Make the
1246 cfg manipulation code aware of that fact. */
1247 set_loop_copy (loop, loop);
1248 loop->latch = thread_single_edge (latch);
1249 set_loop_copy (loop, NULL);
1250 gcc_assert (single_succ (loop->latch) == tgt_bb);
1251 loop->header = tgt_bb;
1252
1253 /* Remove the new pre-header blocks from our loop. */
1254 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1255 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1256 bblocks, loop->num_nodes, tgt_bb);
1257 for (i = 0; i < nblocks; i++)
1258 if (bblocks[i]->loop_father == loop)
1259 {
1260 remove_bb_from_loops (bblocks[i]);
1261 add_bb_to_loop (bblocks[i], loop_outer (loop));
1262 }
1263 free (bblocks);
1264
1265 /* If the new header has multiple latches, mark it so. */
1266 FOR_EACH_EDGE (e, ei, loop->header->preds)
1267 if (e->src->loop_father == loop
1268 && e->src != loop->latch)
1269 {
1270 loop->latch = NULL;
1271 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1272 }
1273
1274 /* Cancel remaining threading requests that would make the
1275 loop a multiple entry loop. */
1276 FOR_EACH_EDGE (e, ei, header->preds)
1277 {
1278 edge e2;
1279
1280 if (e->aux == NULL)
1281 continue;
1282
1283 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1284 e2 = path->last ()->e;
1285
1286 if (e->src->loop_father != e2->dest->loop_father
1287 && e2->dest != loop->header)
1288 {
1289 delete_jump_thread_path (path);
1290 e->aux = NULL;
1291 }
1292 }
1293
1294 /* Thread the remaining edges through the former header. */
1295 thread_block (header, false);
1296 }
1297 else
1298 {
1299 basic_block new_preheader;
1300
1301 /* Now consider the case where entry edges are redirected to the new entry
1302 block. Remember one entry edge, so that we can find the new
1303 preheader (its destination after threading). */
1304 FOR_EACH_EDGE (e, ei, header->preds)
1305 {
1306 if (e->aux)
1307 break;
1308 }
1309
1310 /* The duplicate of the header is the new preheader of the loop. Ensure
1311 that it is placed correctly in the loop hierarchy. */
1312 set_loop_copy (loop, loop_outer (loop));
1313
1314 thread_block (header, false);
1315 set_loop_copy (loop, NULL);
1316 new_preheader = e->dest;
1317
1318 /* Create the new latch block. This is always necessary, as the latch
1319 must have only a single successor, but the original header had at
1320 least two successors. */
1321 loop->latch = NULL;
1322 mfb_kj_edge = single_succ_edge (new_preheader);
1323 loop->header = mfb_kj_edge->dest;
1324 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1325 loop->header = latch->dest;
1326 loop->latch = latch->src;
1327 }
1328
1329 return true;
1330
1331 fail:
1332 /* We failed to thread anything. Cancel the requests. */
1333 FOR_EACH_EDGE (e, ei, header->preds)
1334 {
1335 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1336
1337 if (path)
1338 {
1339 delete_jump_thread_path (path);
1340 e->aux = NULL;
1341 }
1342 }
1343 return false;
1344 }
1345
1346 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1347 PHI arguments associated with those edges are equal or there are no
1348 PHI arguments, otherwise return FALSE. */
1349
1350 static bool
1351 phi_args_equal_on_edges (edge e1, edge e2)
1352 {
1353 gimple_stmt_iterator gsi;
1354 int indx1 = e1->dest_idx;
1355 int indx2 = e2->dest_idx;
1356
1357 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1358 {
1359 gimple phi = gsi_stmt (gsi);
1360
1361 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1362 gimple_phi_arg_def (phi, indx2), 0))
1363 return false;
1364 }
1365 return true;
1366 }
1367
1368 /* Walk through the registered jump threads and convert them into a
1369 form convenient for this pass.
1370
1371 Any block which has incoming edges threaded to outgoing edges
1372 will have its entry in THREADED_BLOCKS set.
1373
1374 Any threaded edge will have its jump thread path stored in its
1375 AUX field.
1376
1377 This form avoids the need to walk all the edges in the CFG to
1378 discover blocks which need processing and avoids unnecessary
1379 hash table lookups to map from threaded edge to new target. */
1380
1381 static void
1382 mark_threaded_blocks (bitmap threaded_blocks)
1383 {
1384 unsigned int i;
1385 bitmap_iterator bi;
1386 bitmap tmp = BITMAP_ALLOC (NULL);
1387 basic_block bb;
1388 edge e;
1389 edge_iterator ei;
1390
1391 /* Move the jump threading requests from PATHS to each edge
1392 which starts a jump thread path. */
1393 for (i = 0; i < paths.length (); i++)
1394 {
1395 vec<jump_thread_edge *> *path = paths[i];
1396 edge e = (*path)[0]->e;
1397 e->aux = (void *)path;
1398 bitmap_set_bit (tmp, e->dest->index);
1399 }
1400
1401
1402
1403 /* If optimizing for size, only thread through a block if we don't have
1404 to duplicate it or it's an otherwise empty redirection block. */
1405 if (optimize_function_for_size_p (cfun))
1406 {
1407 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1408 {
1409 bb = BASIC_BLOCK (i);
1410 if (EDGE_COUNT (bb->preds) > 1
1411 && !redirection_block_p (bb))
1412 {
1413 FOR_EACH_EDGE (e, ei, bb->preds)
1414 {
1415 if (e->aux)
1416 {
1417 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1418 delete_jump_thread_path (path);
1419 e->aux = NULL;
1420 }
1421 }
1422 }
1423 else
1424 bitmap_set_bit (threaded_blocks, i);
1425 }
1426 }
1427 else
1428 bitmap_copy (threaded_blocks, tmp);
1429
1430 /* Look for jump threading paths which cross multiple loop headers.
1431
1432 The code to thread through loop headers will change the CFG in ways
1433 that break assumptions made by the loop optimization code.
1434
1435 We don't want to blindly cancel the requests. We can instead do better
1436 by trimming off the end of the jump thread path. */
1437 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1438 {
1439 basic_block bb = BASIC_BLOCK (i);
1440 FOR_EACH_EDGE (e, ei, bb->preds)
1441 {
1442 if (e->aux)
1443 {
1444 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1445
1446 /* Basically we're looking for a situation where we can see
1447 3 or more loop structures on a jump threading path. */
1448
1449 struct loop *first_father = (*path)[0]->e->src->loop_father;
1450 struct loop *second_father = NULL;
1451 for (unsigned int i = 0; i < path->length (); i++)
1452 {
1453 /* See if this is a loop father we have not seen before. */
1454 if ((*path)[i]->e->dest->loop_father != first_father
1455 && (*path)[i]->e->dest->loop_father != second_father)
1456 {
1457 /* We've already seen two loop fathers, so we
1458 need to trim this jump threading path. */
1459 if (second_father != NULL)
1460 {
1461 /* Trim from entry I onwards. */
1462 for (unsigned int j = i; j < path->length (); j++)
1463 delete (*path)[j];
1464 path->truncate (i);
1465
1466 /* Now that we've truncated the path, make sure
1467 what's left is still valid. We need at least
1468 two edges on the path and the last edge cannot
1469 be a joiner. This should never happen, but let's
1470 be safe. */
1471 if (path->length () < 2
1472 || (path->last ()->type
1473 == EDGE_COPY_SRC_JOINER_BLOCK))
1474 {
1475 delete_jump_thread_path (path);
1476 e->aux = NULL;
1477 }
1478 break;
1479 }
1480 else
1481 {
1482 second_father = (*path)[i]->e->dest->loop_father;
1483 }
1484 }
1485 }
1486 }
1487 }
1488 }
1489
1490 /* If we have a joiner block (J) which has two successors S1 and S2 and
1491 we are threading through S1 and the final destination of the thread
1492 is S2, then we must verify that any PHI nodes in S2 have the same
1493 PHI arguments for the edge J->S2 and J->S1->...->S2.
1494
1495 We used to detect this prior to registering the jump thread, but
1496 that prohibits propagation of edge equivalences into non-dominated
1497 PHI nodes as the equivalency test might occur before propagation.
1498
1499 This must also occur after we truncate any jump threading paths
1500 as this scenario may only show up after truncation.
1501
1502 This works for now, but will need improvement as part of the FSA
1503 optimization.
1504
1505 Note since we've moved the thread request data to the edges,
1506 we have to iterate on those rather than the threaded_edges vector. */
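/* Illustrative example only: if S2 contains a PHI node such as

     x_3 = PHI <0 (J), 1 (S1)>

   then the argument supplied by the direct edge J->S2 (0) differs from the
   one supplied along the threaded path arriving via S1 (1), so the check
   below cancels the jump thread request.  */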
1507 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1508 {
1509 bb = BASIC_BLOCK (i);
1510 FOR_EACH_EDGE (e, ei, bb->preds)
1511 {
1512 if (e->aux)
1513 {
1514 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1515 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1516
1517 if (have_joiner)
1518 {
1519 basic_block joiner = e->dest;
1520 edge final_edge = path->last ()->e;
1521 basic_block final_dest = final_edge->dest;
1522 edge e2 = find_edge (joiner, final_dest);
1523
1524 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1525 {
1526 delete_jump_thread_path (path);
1527 e->aux = NULL;
1528 }
1529 }
1530 }
1531 }
1532 }
1533
1534 BITMAP_FREE (tmp);
1535 }
1536
1537
1538 /* Walk through all blocks and thread incoming edges to the appropriate
1539 outgoing edge for each jump thread path recorded in PATHS.
1540
1541 It is the caller's responsibility to fix the dominance information
1542 and rewrite duplicated SSA_NAMEs back into SSA form.
1543
1544 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1545 loop headers if it does not simplify the loop.
1546
1547 Returns true if one or more edges were threaded, false otherwise. */
1548
1549 bool
1550 thread_through_all_blocks (bool may_peel_loop_headers)
1551 {
1552 bool retval = false;
1553 unsigned int i;
1554 bitmap_iterator bi;
1555 bitmap threaded_blocks;
1556 struct loop *loop;
1557 loop_iterator li;
1558
1559 /* We must know about loops in order to preserve them. */
1560 gcc_assert (current_loops != NULL);
1561
1562 if (!paths.exists ())
1563 return false;
1564
1565 threaded_blocks = BITMAP_ALLOC (NULL);
1566 memset (&thread_stats, 0, sizeof (thread_stats));
1567
1568 mark_threaded_blocks (threaded_blocks);
1569
1570 initialize_original_copy_tables ();
1571
1572 /* First perform the threading requests that do not affect
1573 loop structure. */
1574 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1575 {
1576 basic_block bb = BASIC_BLOCK (i);
1577
1578 if (EDGE_COUNT (bb->preds) > 0)
1579 retval |= thread_block (bb, true);
1580 }
1581
1582 /* Then perform the threading through loop headers. We start with the
1583 innermost loop, so that the changes in cfg we perform won't affect
1584 further threading. */
1585 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1586 {
1587 if (!loop->header
1588 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1589 continue;
1590
1591 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1592 }
1593
1594 /* Assume we had a jump thread path which went from the latch to the exit
1595 and a path which goes from outside to inside the same loop.
1596
1597 If the latch to exit was handled first, we will thread it and clear
1598 loop->header.
1599
1600 The second path will be ignored by thread_block because we're going
1601 through a loop header. It will also be ignored by the loop above
1602 because loop->header is NULL.
1603
1604 This results in the second path never being threaded. The failure
1605 mode is a dangling AUX field.
1606
1607 This is inherently a bit of a pain to fix, so we just walk all the
1608 blocks and all the incoming edges to those blocks and clear their
1609 AUX fields. */
1610 basic_block bb;
1611 edge_iterator ei;
1612 edge e;
1613 FOR_EACH_BB (bb)
1614 {
1615 FOR_EACH_EDGE (e, ei, bb->preds)
1616 if (e->aux)
1617 {
1618 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1619
1620 delete_jump_thread_path (path);
1621 e->aux = NULL;
1622 }
1623 }
1624
1625 statistics_counter_event (cfun, "Jumps threaded",
1626 thread_stats.num_threaded_edges);
1627
1628 free_original_copy_tables ();
1629
1630 BITMAP_FREE (threaded_blocks);
1631 threaded_blocks = NULL;
1632 paths.release ();
1633
1634 if (retval)
1635 loops_state_set (LOOPS_NEED_FIXUP);
1636
1637 return retval;
1638 }
1639
1640 /* Delete the jump threading path PATH.  We have to explicitly delete
1641 each entry in the vector, then the container. */
1642
1643 void
1644 delete_jump_thread_path (vec<jump_thread_edge *> *path)
1645 {
1646 for (unsigned int i = 0; i < path->length (); i++)
1647 delete (*path)[i];
1648 path->release();
1649 }
1650
1651 /* Dump a jump threading path, including annotations about each
1652 edge in the path. */
1653
1654 static void
1655 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
1656 {
1657 fprintf (dump_file,
1658 " Registering jump thread: (%d, %d) incoming edge; ",
1659 path[0]->e->src->index, path[0]->e->dest->index);
1660
1661 for (unsigned int i = 1; i < path.length (); i++)
1662 {
1663 /* We can get paths with a NULL edge when the final destination
1664 of a jump thread turns out to be a constant address. We dump
1665 those paths when debugging, so we have to be prepared for that
1666 possibility here. */
1667 if (path[i]->e == NULL)
1668 continue;
1669
1670 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1671 fprintf (dump_file, " (%d, %d) joiner; ",
1672 path[i]->e->src->index, path[i]->e->dest->index);
1673 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
1674 fprintf (dump_file, " (%d, %d) normal;",
1675 path[i]->e->src->index, path[i]->e->dest->index);
1676 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
1677 fprintf (dump_file, " (%d, %d) nocopy;",
1678 path[i]->e->src->index, path[i]->e->dest->index);
1679 }
1680 fputc ('\n', dump_file);
1681 }
1682
1683 /* Register a jump threading opportunity. We queue up all the jump
1684 threading opportunities discovered by a pass and update the CFG
1685 and SSA form all at once.
1686
1687 PATH describes the jump thread.  Its first element is the incoming edge
1688 we can thread; the remaining elements describe the blocks and edges the
1689 thread passes through on the way to its final destination. */
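/* A minimal usage sketch (hypothetical caller, illustration only; the edge
   names are placeholders):

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (incoming_edge,
                                            EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);  */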
1690
1691 void
1692 register_jump_thread (vec<jump_thread_edge *> *path)
1693 {
1694 if (!dbg_cnt (registered_jump_thread))
1695 {
1696 delete_jump_thread_path (path);
1697 return;
1698 }
1699
1700 /* First make sure there are no NULL outgoing edges on the jump threading
1701 path. That can happen for jumping to a constant address. */
1702 for (unsigned int i = 0; i < path->length (); i++)
1703 if ((*path)[i]->e == NULL)
1704 {
1705 if (dump_file && (dump_flags & TDF_DETAILS))
1706 {
1707 fprintf (dump_file,
1708 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1709 dump_jump_thread_path (dump_file, *path);
1710 }
1711
1712 delete_jump_thread_path (path);
1713 return;
1714 }
1715
1716 if (dump_file && (dump_flags & TDF_DETAILS))
1717 dump_jump_thread_path (dump_file, *path);
1718
1719 if (!paths.exists ())
1720 paths.create (5);
1721
1722 paths.safe_push (path);
1723 }