gcc/tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 201
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
32 #include "tree-pass.h"
33 #include "cfgloop.h"
34
35 /* Given a block B, update the CFG and SSA graph to reflect redirecting
36 one or more in-edges to B to instead reach the destination of an
37 out-edge from B while preserving any side effects in B.
38
39 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
40 side effects of executing B.
41
42 1. Make a copy of B (including its outgoing edges and statements). Call
43 the copy B'. Note B' has no incoming edges or PHIs at this time.
44
45 2. Remove the control statement at the end of B' and all outgoing edges
46 except B'->C.
47
48 3. Add a new argument to each PHI in C with the same value as the existing
49 argument associated with edge B->C. Associate the new PHI arguments
50 with the edge B'->C.
51
52 4. For each PHI in B, find or create a PHI in B' with an identical
53 PHI_RESULT. Add an argument to the PHI in B' which has the same
54 value as the PHI in B associated with the edge A->B. Associate
55 the new argument in the PHI in B' with the edge A->B.
56
57 5. Change the edge A->B to A->B'.
58
59 5a. This automatically deletes any PHI arguments associated with the
60 edge A->B in B.
61
62 5b. This automatically associates each new argument added in step 4
63 with the edge A->B'.
64
65 6. Repeat for other incoming edges into B.
66
67 7. Put the duplicated resources in B and all the B' blocks into SSA form.
68
69 Note that block duplication can be minimized by first collecting the
70 set of unique destination blocks that the incoming edges should
71 be threaded to.
72
73 Block duplication can be further minimized by using B instead of
74 creating B' for one destination if all edges into B are going to be
75 threaded to a successor of B. We had code to do this at one time, but
76 I'm not convinced it is correct with the changes to avoid mucking up
77 the loop structure (which may cancel threading requests, thus a block
78 which we thought was going to become unreachable may still be reachable).
79 This code was also going to get ugly with the introduction of the ability
80 for a single jump thread request to bypass multiple blocks.
81
82 We further reduce the number of edges and statements we create by
83 not copying all the outgoing edges and the control statement in
84 step #1. We instead create a template block without the outgoing
85 edges and duplicate the template. */
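/* A small worked example (illustrative only; the block names A, B, C, D and
   the condition are hypothetical).  Suppose B ends in "if (cond) goto C;
   else goto D;" and we know that the path entering B through A->B always
   takes the B->C arm:

     before:                            after:

       A ---> B ---> C                    A ---> B' ---> C
              |                                          ^
       X ---> +----> D                    X ---> B ------+
                                                 |
                                                 +------> D

   B' is a copy of B: it contains B's side effects but no conditional, and
   has the single outgoing edge B'->C.  The edge A->B has been redirected
   to A->B', while the other predecessor X still reaches the original B,
   which keeps its conditional and both outgoing edges.  */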
86
87
88 /* Steps #5 and #6 of the above algorithm are best implemented by walking
89 all the incoming edges which thread to the same destination edge at
90 the same time. That avoids lots of table lookups to get information
91 for the destination edge.
92
93 To realize that implementation we create a list of incoming edges
94 which thread to the same outgoing edge. Thus to implement steps
95 #5 and #6 we traverse our hash table of outgoing edge information.
96 For each entry we walk the list of incoming edges which thread to
97 the current outgoing edge. */
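/* For instance (hypothetical block names), if the incoming edges A->B and
   X->B both thread to the outgoing edge B->C while Y->B threads to B->D,
   the hash table ends up with two entries, roughly:

     B->C : incoming edges { A->B, X->B }
     B->D : incoming edges { Y->B }

   and steps #5 and #6 become a single walk over each incoming-edge list.  */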
98
99 struct el
100 {
101 edge e;
102 struct el *next;
103 };
104
105 /* Main data structure recording information regarding B's duplicate
106 blocks. */
107
108 /* We need to efficiently record the unique thread destinations of this
109 block and specific information associated with those destinations. We
110 may have many incoming edges threaded to the same outgoing edge. This
111 can be naturally implemented with a hash table. */
112
113 struct redirection_data
114 {
115 /* A duplicate of B with the trailing control statement removed and which
116 targets a single successor of B. */
117 basic_block dup_block;
118
119 /* An outgoing edge from B. DUP_BLOCK will have OUTGOING_EDGE->dest as
120 its single successor. */
121 edge outgoing_edge;
122
123 edge intermediate_edge; /* Non-NULL only when threading through a joiner block. */
124
125 /* A list of incoming edges which we want to thread to
126 OUTGOING_EDGE->dest. */
127 struct el *incoming_edges;
128 };
129
130 /* Main data structure to hold information for duplicates of BB. */
131 static htab_t redirection_data;
132
133 /* Data structure of information to pass to hash table traversal routines. */
134 struct local_info
135 {
136 /* The current block we are working on. */
137 basic_block bb;
138
139 /* A template copy of BB with no outgoing edges or control statement that
140 we use for creating copies. */
141 basic_block template_block;
142
143 /* TRUE if we thread one or more jumps, FALSE otherwise. */
144 bool jumps_threaded;
145 };
146
147 /* Passes which use the jump threading code register jump threading
148 opportunities as they are discovered. We keep them in this vector
149 as groups of three edges (incoming_edge, target_edge, second_target_edge);
150 the third edge is NULL unless the thread passes through a joiner block. */
151 static VEC(edge,heap) *threaded_edges;
152
153 /* When we start updating the CFG for threading, data necessary for jump
154 threading is attached to the AUX field for the incoming edge. Use these
155 macros to access the underlying structure attached to the AUX field. */
156 #define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
157 #define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]
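/* A sketch of the expected AUX layout (this mirrors what mark_threaded_blocks
   below actually does; TAKEN and FINAL are placeholder edge variables):

     edge *x = (edge *) XNEWVEC (edge, 2);
     e->aux = x;
     THREAD_TARGET (e) = taken;     (always set)
     THREAD_TARGET2 (e) = final;    (NULL unless the thread passes through
                                     a joiner block)

   Code throughout this file tests THREAD_TARGET2 (E) to recognize the
   joiner-block case.  */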
158
159 /* Jump threading statistics. */
160
161 struct thread_stats_d
162 {
163 unsigned long num_threaded_edges;
164 };
165
166 struct thread_stats_d thread_stats;
167
168
169 /* Remove the last statement in block BB if it is a control statement.
170 Also remove all outgoing edges except the edge which reaches DEST_BB.
171 If DEST_BB is NULL, then remove all outgoing edges. */
172
173 static void
174 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
175 {
176 gimple_stmt_iterator gsi;
177 edge e;
178 edge_iterator ei;
179
180 gsi = gsi_last_bb (bb);
181
182 /* If the duplicate ends with a control statement, then remove it.
183
184 Note that if we are duplicating the template block rather than the
185 original basic block, then the duplicate might not have any real
186 statements in it. */
187 if (!gsi_end_p (gsi)
188 && gsi_stmt (gsi)
189 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
190 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
191 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
192 gsi_remove (&gsi, true);
193
194 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
195 {
196 if (e->dest != dest_bb)
197 remove_edge (e);
198 else
199 ei_next (&ei);
200 }
201 }
202
203 /* Create a duplicate of BB. Record the duplicate block in RD. */
204
205 static void
206 create_block_for_threading (basic_block bb, struct redirection_data *rd)
207 {
208 edge_iterator ei;
209 edge e;
210
211 /* We can use the generic block duplication code and simply remove
212 the stuff we do not need. */
213 rd->dup_block = duplicate_block (bb, NULL, NULL);
214
215 FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
216 e->aux = NULL;
217
218 /* Zero out the profile, since the block is unreachable for now. */
219 rd->dup_block->frequency = 0;
220 rd->dup_block->count = 0;
221 }
222
223 /* Hashing and equality routines for our hash table. */
224 static hashval_t
225 redirection_data_hash (const void *p)
226 {
227 edge e = ((const struct redirection_data *)p)->outgoing_edge;
228 return e->dest->index;
229 }
230
231 static int
232 redirection_data_eq (const void *p1, const void *p2)
233 {
234 edge e1 = ((const struct redirection_data *)p1)->outgoing_edge;
235 edge e2 = ((const struct redirection_data *)p2)->outgoing_edge;
236 edge e3 = ((const struct redirection_data *)p1)->intermediate_edge;
237 edge e4 = ((const struct redirection_data *)p2)->intermediate_edge;
238
239 return e1 == e2 && e3 == e4;
240 }
241
242 /* Given an incoming edge E whose AUX field records its thread target(s),
243 look up and return the hash table entry for those target edges.
244
245 If INSERT is true, then we insert the entry into the hash table if it is
246 not already present, and E is added to the entry's list of incoming edges. */
247
248 static struct redirection_data *
249 lookup_redirection_data (edge e, enum insert_option insert)
250 {
251 void **slot;
252 struct redirection_data *elt;
253
254 /* Build a hash table element so we can see if E is already
255 in the table. */
256 elt = XNEW (struct redirection_data);
257 elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
258 elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
259 : THREAD_TARGET (e);
260 elt->dup_block = NULL;
261 elt->incoming_edges = NULL;
262
263 slot = htab_find_slot (redirection_data, elt, insert);
264
265 /* This will only happen if INSERT is false and the entry is not
266 in the hash table. */
267 if (slot == NULL)
268 {
269 free (elt);
270 return NULL;
271 }
272
273 /* This will only happen if E was not in the hash table and
274 INSERT is true. */
275 if (*slot == NULL)
276 {
277 *slot = (void *)elt;
278 elt->incoming_edges = XNEW (struct el);
279 elt->incoming_edges->e = e;
280 elt->incoming_edges->next = NULL;
281 return elt;
282 }
283 /* E was in the hash table. */
284 else
285 {
286 /* Free ELT as we do not need it anymore, we will extract the
287 relevant entry from the hash table itself. */
288 free (elt);
289
290 /* Get the entry stored in the hash table. */
291 elt = (struct redirection_data *) *slot;
292
293 /* If insertion was requested, then we need to add E to the list
294 of incoming edges associated with this hash table entry. */
295 if (insert)
296 {
297 struct el *el = XNEW (struct el);
298 el->next = elt->incoming_edges;
299 el->e = e;
300 elt->incoming_edges = el;
301 }
302
303 return elt;
304 }
305 }
306
307 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
308
309 static void
310 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
311 {
312 gimple_stmt_iterator gsi;
313 int src_indx = src_e->dest_idx;
314
315 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
316 {
317 gimple phi = gsi_stmt (gsi);
318 source_location locus = gimple_phi_arg_location (phi, src_indx);
319 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
320 }
321 }
322
323 /* We have recently made a copy of ORIG_BB, including its outgoing
324 edges. The copy is NEW_BB. Every PHI node in every direct successor of
325 ORIG_BB has a new argument associated with the edge from NEW_BB to the
326 successor. Initialize the PHI argument so that it is equal to the PHI
327 argument associated with the edge from ORIG_BB to the successor. */
328
329 static void
330 update_destination_phis (basic_block orig_bb, basic_block new_bb)
331 {
332 edge_iterator ei;
333 edge e;
334
335 FOR_EACH_EDGE (e, ei, orig_bb->succs)
336 {
337 edge e2 = find_edge (new_bb, e->dest);
338 copy_phi_args (e->dest, e, e2);
339 }
340 }
341
342 /* Given a duplicate block and its single destination (both stored
343 in RD), create an edge between the duplicate and its single
344 destination.
345
346 Add an additional argument to any PHI nodes at the single
347 destination. */
348
349 static void
350 create_edge_and_update_destination_phis (struct redirection_data *rd,
351 basic_block bb)
352 {
353 edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);
354
355 rescan_loop_exit (e, true, false);
356 e->probability = REG_BR_PROB_BASE;
357 e->count = bb->count;
358
359 if (rd->outgoing_edge->aux)
360 {
361 e->aux = (edge *) XNEWVEC (edge, 2);
362 THREAD_TARGET(e) = THREAD_TARGET (rd->outgoing_edge);
363 THREAD_TARGET2(e) = THREAD_TARGET2 (rd->outgoing_edge);
364 }
365 else
366 {
367 e->aux = NULL;
368 }
369
370 /* If there are any PHI nodes at the destination of the outgoing edge
371 from the duplicate block, then we will need to add a new argument
372 to them. The argument should have the same value as the argument
373 associated with the outgoing edge stored in RD. */
374 copy_phi_args (e->dest, rd->outgoing_edge, e);
375 }
376
377 /* Wire up the outgoing edges from the duplicate block and
378 update any PHIs as needed. */
379 static void
380 fix_duplicate_block_edges (struct redirection_data *rd,
381 struct local_info *local_info)
382 {
383 /* If we were threading through a joiner block, then we want
384 to keep its control statement and redirect an outgoing edge.
385 Else we want to remove the control statement & edges, then create
386 a new outgoing edge. In both cases we may need to update PHIs. */
387 if (THREAD_TARGET2 (rd->incoming_edges->e))
388 {
389 edge victim;
390 edge e2;
391 edge e = rd->incoming_edges->e;
392
393 /* This updates the PHIs at the destination of the duplicate
394 block. */
395 update_destination_phis (local_info->bb, rd->dup_block);
396
397 /* Find the edge from the duplicate block to the block we're
398 threading through. That's the edge we want to redirect. */
399 victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
400 e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);
401
402 /* If we redirected the edge, then we need to copy PHI arguments
403 at the target. If the edge already existed (e2 != victim case),
404 then the PHIs in the target already have the correct arguments. */
405 if (e2 == victim)
406 copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
407 }
408 else
409 {
410 remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
411 create_edge_and_update_destination_phis (rd, rd->dup_block);
412 }
413 }
414 /* Hash table traversal callback routine to create duplicate blocks. */
415
416 static int
417 create_duplicates (void **slot, void *data)
418 {
419 struct redirection_data *rd = (struct redirection_data *) *slot;
420 struct local_info *local_info = (struct local_info *)data;
421
422 /* Create a template block if we have not done so already. Otherwise
423 use the template to create a new block. */
424 if (local_info->template_block == NULL)
425 {
426 create_block_for_threading (local_info->bb, rd);
427 local_info->template_block = rd->dup_block;
428
429 /* We do not create any outgoing edges for the template. We will
430 take care of that in a later traversal. That way we do not
431 create edges that are going to just be deleted. */
432 }
433 else
434 {
435 create_block_for_threading (local_info->template_block, rd);
436
437 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
438 block. */
439 fix_duplicate_block_edges (rd, local_info);
440 }
441
442 /* Keep walking the hash table. */
443 return 1;
444 }
445
446 /* We did not create any outgoing edges for the template block during
447 block creation. This hash table traversal callback creates the
448 outgoing edge for the template block. */
449
450 static int
451 fixup_template_block (void **slot, void *data)
452 {
453 struct redirection_data *rd = (struct redirection_data *) *slot;
454 struct local_info *local_info = (struct local_info *)data;
455
456 /* If this is the template block, halt the traversal after updating
457 it appropriately.
458
459 If we were threading through a joiner block, then we want
460 to keep its control statement and redirect an outgoing edge.
461 Else we want to remove the control statement & edges, then create
462 a new outgoing edge. In both cases we may need to update PHIs. */
463 if (rd->dup_block && rd->dup_block == local_info->template_block)
464 {
465 fix_duplicate_block_edges (rd, local_info);
466 return 0;
467 }
468
469 return 1;
470 }
471
472 /* Hash table traversal callback to redirect each incoming edge
473 associated with this hash table element to its new destination. */
474
475 static int
476 redirect_edges (void **slot, void *data)
477 {
478 struct redirection_data *rd = (struct redirection_data *) *slot;
479 struct local_info *local_info = (struct local_info *)data;
480 struct el *next, *el;
481
482 /* Walk over all the incoming edges associated with this
483 hash table entry. */
484 for (el = rd->incoming_edges; el; el = next)
485 {
486 edge e = el->e;
487
488 /* Go ahead and free this element from the list. Doing this now
489 avoids the need for another list walk when we destroy the hash
490 table. */
491 next = el->next;
492 free (el);
493
494 thread_stats.num_threaded_edges++;
495 /* If we are threading through a joiner block, then we have to
496 find the edge we want to redirect and update some PHI nodes. */
497 if (THREAD_TARGET2 (e))
498 {
499 edge e2;
500
501 /* We want to redirect the incoming edge to the joiner block (E)
502 to instead reach the duplicate of the joiner block. */
503 e2 = redirect_edge_and_branch (e, rd->dup_block);
504 flush_pending_stmts (e2);
505 }
506 else if (rd->dup_block)
507 {
508 edge e2;
509
510 if (dump_file && (dump_flags & TDF_DETAILS))
511 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
512 e->src->index, e->dest->index, rd->dup_block->index);
513
514 rd->dup_block->count += e->count;
515
516 /* Excessive jump threading may make frequencies large enough that
517 the computation overflows. */
518 if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
519 rd->dup_block->frequency += EDGE_FREQUENCY (e);
520 EDGE_SUCC (rd->dup_block, 0)->count += e->count;
521 /* Redirect the incoming edge to the appropriate duplicate
522 block. */
523 e2 = redirect_edge_and_branch (e, rd->dup_block);
524 gcc_assert (e == e2);
525 flush_pending_stmts (e2);
526 }
527
528 /* Go ahead and clear E->aux. It's not needed anymore and failure
529 to clear it will cause all kinds of unpleasant problems later. */
530 free (e->aux);
531 e->aux = NULL;
532
533 }
534
535 /* Indicate that we actually threaded one or more jumps. */
536 if (rd->incoming_edges)
537 local_info->jumps_threaded = true;
538
539 return 1;
540 }
541
542 /* Return true if this block has no executable statements other than
543 a simple ctrl flow instruction. When the number of outgoing edges
544 is one, this is equivalent to a "forwarder" block. */
545
546 static bool
547 redirection_block_p (basic_block bb)
548 {
549 gimple_stmt_iterator gsi;
550
551 /* Advance to the first executable statement. */
552 gsi = gsi_start_bb (bb);
553 while (!gsi_end_p (gsi)
554 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
555 || is_gimple_debug (gsi_stmt (gsi))
556 || gimple_nop_p (gsi_stmt (gsi))))
557 gsi_next (&gsi);
558
559 /* Check if this is an empty block. */
560 if (gsi_end_p (gsi))
561 return true;
562
563 /* Test that we've reached the terminating control statement. */
564 return gsi_stmt (gsi)
565 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
566 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
567 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
568 }
569
570 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
571 is reached via one or more specific incoming edges, we know which
572 outgoing edge from BB will be traversed.
573
574 We want to redirect those incoming edges to the target of the
575 appropriate outgoing edge. Doing so avoids a conditional branch
576 and may expose new optimization opportunities. Note that we have
577 to update dominator tree and SSA graph after such changes.
578
579 The key to keeping the SSA graph update manageable is to duplicate
580 the side effects occurring in BB so that those side effects still
581 occur on the paths which bypass BB after redirecting edges.
582
583 We accomplish this by creating duplicates of BB and arranging for
584 the duplicates to unconditionally pass control to one specific
585 successor of BB. We then revector the incoming edges into BB to
586 the appropriate duplicate of BB.
587
588 If NOLOOP_ONLY is true, we only perform the threading as long as it
589 does not affect the structure of the loops in a nontrivial way. */
590
591 static bool
592 thread_block (basic_block bb, bool noloop_only)
593 {
594 /* E is an incoming edge into BB that we may or may not want to
595 redirect to a duplicate of BB. */
596 edge e, e2;
597 edge_iterator ei;
598 struct local_info local_info;
599 struct loop *loop = bb->loop_father;
600
601 /* To avoid scanning a linear array for the element we need, we instead
602 use a hash table. For normal code there should be no noticeable
603 difference. However, if we have a block with a large number of
604 incoming and outgoing edges such linear searches can get expensive. */
605 redirection_data = htab_create (EDGE_COUNT (bb->succs),
606 redirection_data_hash,
607 redirection_data_eq,
608 free);
609
610 /* If we thread the latch of the loop to its exit, the loop ceases to
611 exist. Make sure we do not restrict ourselves in order to preserve
612 this loop. */
613 if (loop->header == bb)
614 {
615 e = loop_latch_edge (loop);
616
617 if (e->aux)
618 e2 = THREAD_TARGET (e);
619 else
620 e2 = NULL;
621
622 if (e2 && loop_exit_edge_p (loop, e2))
623 {
624 loop->header = NULL;
625 loop->latch = NULL;
626 loops_state_set (LOOPS_NEED_FIXUP);
627 }
628 }
629
630 /* Record each unique threaded destination into a hash table for
631 efficient lookups. */
632 FOR_EACH_EDGE (e, ei, bb->preds)
633 {
634 if (e->aux == NULL)
635 continue;
636
637 if (THREAD_TARGET2 (e))
638 e2 = THREAD_TARGET2 (e);
639 else
640 e2 = THREAD_TARGET (e);
641
642 if (!e2
643 /* If NOLOOP_ONLY is true, we only allow threading through the
644 header of a loop to exit edges. */
645 || (noloop_only
646 && bb == bb->loop_father->header
647 && (!loop_exit_edge_p (bb->loop_father, e2)
648 || THREAD_TARGET2 (e))))
649 continue;
650
651 if (e->dest == e2->src)
652 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
653 e->count, THREAD_TARGET (e));
654
655 /* Insert the outgoing edge into the hash table if it is not
656 already in the hash table. */
657 lookup_redirection_data (e, INSERT);
658 }
659
660 /* We do not update dominance info. */
661 free_dominance_info (CDI_DOMINATORS);
662
663 /* We know we only thread through the loop header to loop exits.
664 Let the basic block duplication hook know we are not creating
665 a multiple entry loop. */
666 if (noloop_only
667 && bb == bb->loop_father->header)
668 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
669
670 /* Now create duplicates of BB.
671
672 Note that for a block with a high outgoing degree we can waste
673 a lot of time and memory creating and destroying useless edges.
674
675 So we first duplicate BB and remove the control structure at the
676 tail of the duplicate as well as all outgoing edges from the
677 duplicate. We then use that duplicate block as a template for
678 the rest of the duplicates. */
679 local_info.template_block = NULL;
680 local_info.bb = bb;
681 local_info.jumps_threaded = false;
682 htab_traverse (redirection_data, create_duplicates, &local_info);
683
684 /* The template does not have an outgoing edge. Create that outgoing
685 edge and update PHI nodes at the edge's target as necessary.
686
687 We do this after creating all the duplicates to avoid creating
688 unnecessary edges. */
689 htab_traverse (redirection_data, fixup_template_block, &local_info);
690
691 /* The hash table traversals above created the duplicate blocks (and the
692 statements within the duplicate blocks). This loop creates PHI nodes for
693 the duplicated blocks and redirects the incoming edges into BB to reach
694 the duplicates of BB. */
695 htab_traverse (redirection_data, redirect_edges, &local_info);
696
697 /* Done with this block. Clear REDIRECTION_DATA. */
698 htab_delete (redirection_data);
699 redirection_data = NULL;
700
701 if (noloop_only
702 && bb == bb->loop_father->header)
703 set_loop_copy (bb->loop_father, NULL);
704
705 /* Indicate to our caller whether or not any jumps were threaded. */
706 return local_info.jumps_threaded;
707 }
708
709 /* Threads edge E through E->dest to the edge THREAD_TARGET (E). Returns the
710 copy of E->dest created during threading, or E->dest if it was not necessary
711 to copy it (E is its single predecessor). */
712
713 static basic_block
714 thread_single_edge (edge e)
715 {
716 basic_block bb = e->dest;
717 edge eto = THREAD_TARGET (e);
718 struct redirection_data rd;
719
720 free (e->aux);
721 e->aux = NULL;
722
723 thread_stats.num_threaded_edges++;
724
725 if (single_pred_p (bb))
726 {
727 /* If BB has just a single predecessor, we need only remove the
728 control statement at its end and the outgoing edges other than ETO. */
729 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
730
731 /* And fixup the flags on the single remaining edge. */
732 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
733 eto->flags |= EDGE_FALLTHRU;
734
735 return bb;
736 }
737
738 /* Otherwise, we need to create a copy. */
739 if (e->dest == eto->src)
740 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
741
742 rd.outgoing_edge = eto;
743
744 create_block_for_threading (bb, &rd);
745 remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
746 create_edge_and_update_destination_phis (&rd, rd.dup_block);
747
748 if (dump_file && (dump_flags & TDF_DETAILS))
749 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
750 e->src->index, e->dest->index, rd.dup_block->index);
751
752 rd.dup_block->count = e->count;
753 rd.dup_block->frequency = EDGE_FREQUENCY (e);
754 single_succ_edge (rd.dup_block)->count = e->count;
755 redirect_edge_and_branch (e, rd.dup_block);
756 flush_pending_stmts (e);
757
758 return rd.dup_block;
759 }
760
761 /* Callback for dfs_enumerate_from. Returns true if BB is different
762 from STOP and DBDS_CE_STOP. */
763
764 static basic_block dbds_ce_stop;
765 static bool
766 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
767 {
768 return (bb != (const_basic_block) stop
769 && bb != dbds_ce_stop);
770 }
771
772 /* Evaluates the dominance relationship between the latch of LOOP and BB,
773 and returns the resulting state. */
774
775 enum bb_dom_status
776 {
777 /* BB does not dominate latch of the LOOP. */
778 DOMST_NONDOMINATING,
779 /* The LOOP is broken (there is no path from the header to its latch). */
780 DOMST_LOOP_BROKEN,
781 /* BB dominates the latch of the LOOP. */
782 DOMST_DOMINATING
783 };
784
785 static enum bb_dom_status
786 determine_bb_domination_status (struct loop *loop, basic_block bb)
787 {
788 basic_block *bblocks;
789 unsigned nblocks, i;
790 bool bb_reachable = false;
791 edge_iterator ei;
792 edge e;
793
794 /* This function assumes BB is a successor of LOOP->header.
795 If that is not the case return DOMST_NONDOMINATING which
796 is always safe. */
797 {
798 bool ok = false;
799
800 FOR_EACH_EDGE (e, ei, bb->preds)
801 {
802 if (e->src == loop->header)
803 {
804 ok = true;
805 break;
806 }
807 }
808
809 if (!ok)
810 return DOMST_NONDOMINATING;
811 }
812
813 if (bb == loop->latch)
814 return DOMST_DOMINATING;
815
816 /* Check that BB dominates LOOP->latch, and that it is back-reachable
817 from it. */
818
819 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
820 dbds_ce_stop = loop->header;
821 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
822 bblocks, loop->num_nodes, bb);
823 for (i = 0; i < nblocks; i++)
824 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
825 {
826 if (e->src == loop->header)
827 {
828 free (bblocks);
829 return DOMST_NONDOMINATING;
830 }
831 if (e->src == bb)
832 bb_reachable = true;
833 }
834
835 free (bblocks);
836 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
837 }
838
839 /* Return true if BB is part of the new pre-header that is created
840 when threading the latch to DATA. */
841
842 static bool
843 def_split_header_continue_p (const_basic_block bb, const void *data)
844 {
845 const_basic_block new_header = (const_basic_block) data;
846 return (bb->loop_father == new_header->loop_father
847 && bb != new_header);
848 }
849
850 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
851 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
852 to the inside of the loop. */
853
854 static bool
855 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
856 {
857 basic_block header = loop->header;
858 edge e, tgt_edge, latch = loop_latch_edge (loop);
859 edge_iterator ei;
860 basic_block tgt_bb, atgt_bb;
861 enum bb_dom_status domst;
862
863 /* We have already threaded through headers to exits, so all the threading
864 requests now are to the inside of the loop. We need to avoid creating
865 irreducible regions (i.e., loops with more than one entry block), and
866 also loops with several latch edges, or new subloops of the loop (although
867 there are cases where it might be appropriate, it is difficult to decide,
868 and doing it wrongly may confuse other optimizers).
869
870 We could handle more general cases here. However, the intention is to
871 preserve some information about the loop, which is impossible if its
872 structure changes significantly, in a way that is not well understood.
873 Thus we handle only a few important special cases, in which updating
874 the loop-carried information should also be feasible:
875
876 1) Propagation of latch edge to a block that dominates the latch block
877 of a loop. This aims to handle the following idiom:
878
879 first = 1;
880 while (1)
881 {
882 if (first)
883 initialize;
884 first = 0;
885 body;
886 }
887
888 After threading the latch edge, this becomes
889
890 first = 1;
891 if (first)
892 initialize;
893 while (1)
894 {
895 first = 0;
896 body;
897 }
898
899 The original header of the loop is moved out of it, and we may thread
900 the remaining edges through it without further constraints.
901
902 2) All entry edges are propagated to a single basic block that dominates
903 the latch block of the loop. This aims to handle the following idiom
904 (normally created for "for" loops):
905
906 i = 0;
907 while (1)
908 {
909 if (i >= 100)
910 break;
911 body;
912 i++;
913 }
914
915 This becomes
916
917 i = 0;
918 while (1)
919 {
920 body;
921 i++;
922 if (i >= 100)
923 break;
924 }
925 */
926
927 /* Threading through the header won't improve the code if the header has just
928 one successor. */
929 if (single_succ_p (header))
930 goto fail;
931
932 if (latch->aux)
933 {
934 if (THREAD_TARGET2 (latch))
935 goto fail;
936 tgt_edge = THREAD_TARGET (latch);
937 tgt_bb = tgt_edge->dest;
938 }
939 else if (!may_peel_loop_headers
940 && !redirection_block_p (loop->header))
941 goto fail;
942 else
943 {
944 tgt_bb = NULL;
945 tgt_edge = NULL;
946 FOR_EACH_EDGE (e, ei, header->preds)
947 {
948 if (!e->aux)
949 {
950 if (e == latch)
951 continue;
952
953 /* If the latch is not threaded and there is a header
954 edge that is not threaded, we would create a loop
955 with multiple entries. */
956 goto fail;
957 }
958
959 if (THREAD_TARGET2 (e))
960 goto fail;
961 tgt_edge = THREAD_TARGET (e);
962 atgt_bb = tgt_edge->dest;
963 if (!tgt_bb)
964 tgt_bb = atgt_bb;
965 /* Two different threading targets would make us create a loop
966 with multiple entries. */
967 else if (tgt_bb != atgt_bb)
968 goto fail;
969 }
970
971 if (!tgt_bb)
972 {
973 /* There are no threading requests. */
974 return false;
975 }
976
977 /* Redirecting to empty loop latch is useless. */
978 if (tgt_bb == loop->latch
979 && empty_block_p (loop->latch))
980 goto fail;
981 }
982
983 /* The target block must dominate the loop latch, otherwise we would be
984 creating a subloop. */
985 domst = determine_bb_domination_status (loop, tgt_bb);
986 if (domst == DOMST_NONDOMINATING)
987 goto fail;
988 if (domst == DOMST_LOOP_BROKEN)
989 {
990 /* If the loop ceased to exist, mark it as such, and thread through its
991 original header. */
992 loop->header = NULL;
993 loop->latch = NULL;
994 loops_state_set (LOOPS_NEED_FIXUP);
995 return thread_block (header, false);
996 }
997
998 if (tgt_bb->loop_father->header == tgt_bb)
999 {
1000 /* If the target of the threading is a header of a subloop, we need
1001 to create a preheader for it, so that the headers of the two loops
1002 do not merge. */
1003 if (EDGE_COUNT (tgt_bb->preds) > 2)
1004 {
1005 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1006 gcc_assert (tgt_bb != NULL);
1007 }
1008 else
1009 tgt_bb = split_edge (tgt_edge);
1010 }
1011
1012 if (latch->aux)
1013 {
1014 basic_block *bblocks;
1015 unsigned nblocks, i;
1016
1017 /* First handle the case where the latch edge is redirected. We are copying
1018 the loop header but not creating a multiple entry loop. Make the
1019 cfg manipulation code aware of that fact. */
1020 set_loop_copy (loop, loop);
1021 loop->latch = thread_single_edge (latch);
1022 set_loop_copy (loop, NULL);
1023 gcc_assert (single_succ (loop->latch) == tgt_bb);
1024 loop->header = tgt_bb;
1025
1026 /* Remove the new pre-header blocks from our loop. */
1027 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1028 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1029 bblocks, loop->num_nodes, tgt_bb);
1030 for (i = 0; i < nblocks; i++)
1031 {
1032 remove_bb_from_loops (bblocks[i]);
1033 add_bb_to_loop (bblocks[i], loop_outer (loop));
1034 }
1035 free (bblocks);
1036
1037 /* Cancel remaining threading requests that would make the
1038 loop a multiple entry loop. */
1039 FOR_EACH_EDGE (e, ei, header->preds)
1040 {
1041 edge e2;
1042 if (e->aux == NULL)
1043 continue;
1044
1045 if (THREAD_TARGET2 (e))
1046 e2 = THREAD_TARGET2 (e);
1047 else
1048 e2 = THREAD_TARGET (e);
1049
1050 if (e->src->loop_father != e2->dest->loop_father
1051 && e2->dest != loop->header)
1052 {
1053 free (e->aux);
1054 e->aux = NULL;
1055 }
1056 }
1057
1058 /* Thread the remaining edges through the former header. */
1059 thread_block (header, false);
1060 }
1061 else
1062 {
1063 basic_block new_preheader;
1064
1065 /* Now consider the case where entry edges are redirected to the new entry
1066 block. Remember one entry edge, so that we can find the new
1067 preheader (its destination after threading). */
1068 FOR_EACH_EDGE (e, ei, header->preds)
1069 {
1070 if (e->aux)
1071 break;
1072 }
1073
1074 /* The duplicate of the header is the new preheader of the loop. Ensure
1075 that it is placed correctly in the loop hierarchy. */
1076 set_loop_copy (loop, loop_outer (loop));
1077
1078 thread_block (header, false);
1079 set_loop_copy (loop, NULL);
1080 new_preheader = e->dest;
1081
1082 /* Create the new latch block. This is always necessary, as the latch
1083 must have only a single successor, but the original header had at
1084 least two successors. */
1085 loop->latch = NULL;
1086 mfb_kj_edge = single_succ_edge (new_preheader);
1087 loop->header = mfb_kj_edge->dest;
1088 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1089 loop->header = latch->dest;
1090 loop->latch = latch->src;
1091 }
1092
1093 return true;
1094
1095 fail:
1096 /* We failed to thread anything. Cancel the requests. */
1097 FOR_EACH_EDGE (e, ei, header->preds)
1098 {
1099 free (e->aux);
1100 e->aux = NULL;
1101 }
1102 return false;
1103 }
1104
1105 /* Walk through the registered jump threads and convert them into a
1106 form convenient for this pass.
1107
1108 Any block which has incoming edges threaded to outgoing edges
1109 will have its bit set in THREADED_BLOCKS.
1110
1111 Any threaded edge will have its new outgoing edge stored in the
1112 original edge's AUX field.
1113
1114 This form avoids the need to walk all the edges in the CFG to
1115 discover blocks which need processing and avoids unnecessary
1116 hash table lookups to map from threaded edge to new target. */
1117
1118 static void
1119 mark_threaded_blocks (bitmap threaded_blocks)
1120 {
1121 unsigned int i;
1122 bitmap_iterator bi;
1123 bitmap tmp = BITMAP_ALLOC (NULL);
1124 basic_block bb;
1125 edge e;
1126 edge_iterator ei;
1127
1128 for (i = 0; i < VEC_length (edge, threaded_edges); i += 3)
1129 {
1130 edge e = VEC_index (edge, threaded_edges, i);
1131 edge *x = (edge *) XNEWVEC (edge, 2);
1132
1133 e->aux = x;
1134 THREAD_TARGET (e) = VEC_index (edge, threaded_edges, i + 1);
1135 THREAD_TARGET2 (e) = VEC_index (edge, threaded_edges, i + 2);
1136 bitmap_set_bit (tmp, e->dest->index);
1137 }
1138
1139 /* If optimizing for size, only thread through a block if we don't have
1140 to duplicate it or it's an otherwise empty redirection block. */
1141 if (optimize_function_for_size_p (cfun))
1142 {
1143 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1144 {
1145 bb = BASIC_BLOCK (i);
1146 if (EDGE_COUNT (bb->preds) > 1
1147 && !redirection_block_p (bb))
1148 {
1149 FOR_EACH_EDGE (e, ei, bb->preds)
1150 {
1151 free (e->aux);
1152 e->aux = NULL;
1153 }
1154 }
1155 else
1156 bitmap_set_bit (threaded_blocks, i);
1157 }
1158 }
1159 else
1160 bitmap_copy (threaded_blocks, tmp);
1161
1162 BITMAP_FREE(tmp);
1163 }
1164
1165
1166 /* Walk through all blocks and thread incoming edges to the appropriate
1167 outgoing edge for each jump thread recorded in THREADED_EDGES.
1168
1169 It is the caller's responsibility to fix the dominance information
1170 and rewrite duplicated SSA_NAMEs back into SSA form.
1171
1172 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1173 loop headers if it does not simplify the loop.
1174
1175 Returns true if one or more edges were threaded, false otherwise. */
1176
1177 bool
1178 thread_through_all_blocks (bool may_peel_loop_headers)
1179 {
1180 bool retval = false;
1181 unsigned int i;
1182 bitmap_iterator bi;
1183 bitmap threaded_blocks;
1184 struct loop *loop;
1185 loop_iterator li;
1186
1187 /* We must know about loops in order to preserve them. */
1188 gcc_assert (current_loops != NULL);
1189
1190 if (threaded_edges == NULL)
1191 return false;
1192
1193 threaded_blocks = BITMAP_ALLOC (NULL);
1194 memset (&thread_stats, 0, sizeof (thread_stats));
1195
1196 mark_threaded_blocks (threaded_blocks);
1197
1198 initialize_original_copy_tables ();
1199
1200 /* First perform the threading requests that do not affect
1201 loop structure. */
1202 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1203 {
1204 basic_block bb = BASIC_BLOCK (i);
1205
1206 if (EDGE_COUNT (bb->preds) > 0)
1207 retval |= thread_block (bb, true);
1208 }
1209
1210 /* Then perform the threading through loop headers. We start with the
1211 innermost loop, so that the changes in cfg we perform won't affect
1212 further threading. */
1213 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1214 {
1215 if (!loop->header
1216 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1217 continue;
1218
1219 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1220 }
1221
1222 statistics_counter_event (cfun, "Jumps threaded",
1223 thread_stats.num_threaded_edges);
1224
1225 free_original_copy_tables ();
1226
1227 BITMAP_FREE (threaded_blocks);
1228 threaded_blocks = NULL;
1229 VEC_free (edge, heap, threaded_edges);
1230 threaded_edges = NULL;
1231
1232 if (retval)
1233 loops_state_set (LOOPS_NEED_FIXUP);
1234
1235 return retval;
1236 }
1237
1238 /* Register a jump threading opportunity. We queue up all the jump
1239 threading opportunities discovered by a pass and update the CFG
1240 and SSA form all at once.
1241
1242 E is the edge we can thread, E2 is the new target edge, i.e., we are
1243 effectively recording that E->dest can be changed to E2->dest after fixing
1244 the SSA graph. E3 is NULL for a simple thread; for a thread through a joiner block it records the final target edge. */
1245
1246 void
1247 register_jump_thread (edge e, edge e2, edge e3)
1248 {
1249 /* This can occur if we're jumping to a constant address
1250 or something similar. Just get out now. */
1251 if (e2 == NULL)
1252 return;
1253
1254 if (threaded_edges == NULL)
1255 threaded_edges = VEC_alloc (edge, heap, 15);
1256
1257 if (dump_file && (dump_flags & TDF_DETAILS)
1258 && e->dest != e2->src)
1259 fprintf (dump_file,
1260 " Registering jump thread around one or more intermediate blocks\n");
1261
1262 VEC_safe_push (edge, heap, threaded_edges, e);
1263 VEC_safe_push (edge, heap, threaded_edges, e2);
1264 VEC_safe_push (edge, heap, threaded_edges, e3);
1265 }
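
/* Usage sketch (illustrative, not part of this file): a client pass such as
   the dominator optimizer queues jump threads as it discovers them and then
   performs all CFG and SSA updates at once.  The edge variables E, TAKEN and
   JOINER_EXIT below are placeholders.

     -- while analyzing the conditional reachable through edge E:
     register_jump_thread (e, taken, NULL);          simple thread
     register_jump_thread (e, taken, joiner_exit);   thread through a joiner

     -- after the whole function has been analyzed:
     if (thread_through_all_blocks (may_peel_loop_headers))
       the caller must then recompute dominators and rewrite the duplicated
       SSA_NAMEs back into SSA form, as noted above thread_through_all_blocks.  */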