]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/tree-ssa-threadupdate.c
Correct a function pre/postcondition [PR102403].
[thirdparty/gcc.git] / gcc / tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfganal.h"
31 #include "gimple-iterator.h"
32 #include "tree-ssa.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "cfgloop.h"
35 #include "dbgcnt.h"
36 #include "tree-cfg.h"
37 #include "tree-vectorizer.h"
38 #include "tree-pass.h"
39
40 /* Given a block B, update the CFG and SSA graph to reflect redirecting
41 one or more in-edges to B to instead reach the destination of an
42 out-edge from B while preserving any side effects in B.
43
44 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
45 side effects of executing B.
46
47 1. Make a copy of B (including its outgoing edges and statements). Call
48 the copy B'. Note B' has no incoming edges or PHIs at this time.
49
50 2. Remove the control statement at the end of B' and all outgoing edges
51 except B'->C.
52
53 3. Add a new argument to each PHI in C with the same value as the existing
54 argument associated with edge B->C. Associate the new PHI arguments
55 with the edge B'->C.
56
57 4. For each PHI in B, find or create a PHI in B' with an identical
58 PHI_RESULT. Add an argument to the PHI in B' which has the same
59 value as the PHI in B associated with the edge A->B. Associate
60 the new argument in the PHI in B' with the edge A->B.
61
62 5. Change the edge A->B to A->B'.
63
64 5a. This automatically deletes any PHI arguments associated with the
65 edge A->B in B.
66
67 5b. This automatically associates each new argument added in step 4
68 with the edge A->B'.
69
70 6. Repeat for other incoming edges into B.
71
72 7. Put the duplicated resources in B and all the B' blocks into SSA form.
73
74 Note that block duplication can be minimized by first collecting the
75 set of unique destination blocks that the incoming edges should
76 be threaded to.
77
78 We reduce the number of edges and statements we create by not copying all
79 the outgoing edges and the control statement in step #1. We instead create
80 a template block without the outgoing edges and duplicate the template.
81
82 Another case this code handles is threading through a "joiner" block. In
83 this case, we do not know the destination of the joiner block, but one
84 of the outgoing edges from the joiner block leads to a threadable path. This
85 case largely works as outlined above, except the duplicate of the joiner
86 block still contains a full set of outgoing edges and its control statement.
87 We just redirect one of its outgoing edges to our jump threading path. */
88
89
90 /* Steps #5 and #6 of the above algorithm are best implemented by walking
91 all the incoming edges which thread to the same destination edge at
92 the same time. That avoids lots of table lookups to get information
93 for the destination edge.
94
95 To realize that implementation we create a list of incoming edges
96 which thread to the same outgoing edge. Thus to implement steps
97 #5 and #6 we traverse our hash table of outgoing edge information.
98 For each entry we walk the list of incoming edges which thread to
99 the current outgoing edge. */
100
101 struct el
102 {
103 edge e;
104 struct el *next;
105 };
106
107 /* Main data structure recording information regarding B's duplicate
108 blocks. */
109
110 /* We need to efficiently record the unique thread destinations of this
111 block and specific information associated with those destinations. We
112 may have many incoming edges threaded to the same outgoing edge. This
113 can be naturally implemented with a hash table. */
114
115 struct redirection_data : free_ptr_hash<redirection_data>
116 {
117 /* We support wiring up two block duplicates in a jump threading path.
118
119 One is a normal block copy where we remove the control statement
120 and wire up its single remaining outgoing edge to the thread path.
121
122 The other is a joiner block where we leave the control statement
123 in place, but wire one of the outgoing edges to a thread path.
124
125 In theory we could have multiple block duplicates in a jump
126 threading path, but I haven't tried that.
127
128 The duplicate blocks appear in this array in the same order in
129 which they appear in the jump thread path. */
130 basic_block dup_blocks[2];
131
132 vec<jump_thread_edge *> *path;
133
134 /* A list of incoming edges which we want to thread to the
135 same path. */
136 struct el *incoming_edges;
137
138 /* hash_table support. */
139 static inline hashval_t hash (const redirection_data *);
140 static inline int equal (const redirection_data *, const redirection_data *);
141 };
142
143 jump_thread_path_allocator::jump_thread_path_allocator ()
144 {
145 obstack_init (&m_obstack);
146 }
147
148 jump_thread_path_allocator::~jump_thread_path_allocator ()
149 {
150 obstack_free (&m_obstack, NULL);
151 }
152
153 jump_thread_edge *
154 jump_thread_path_allocator::allocate_thread_edge (edge e,
155 jump_thread_edge_type type)
156 {
157 void *r = obstack_alloc (&m_obstack, sizeof (jump_thread_edge));
158 return new (r) jump_thread_edge (e, type);
159 }
160
161 vec<jump_thread_edge *> *
162 jump_thread_path_allocator::allocate_thread_path ()
163 {
164 // ?? Since the paths live in an obstack, we should be able to remove all
165 // references to path->release() throughout the code.
166 void *r = obstack_alloc (&m_obstack, sizeof (vec <jump_thread_edge *>));
167 return new (r) vec<jump_thread_edge *> ();
168 }
169
170 jt_path_registry::jt_path_registry (bool backedge_threads)
171 {
172 m_paths.create (5);
173 m_num_threaded_edges = 0;
174 m_backedge_threads = backedge_threads;
175 }
176
177 jt_path_registry::~jt_path_registry ()
178 {
179 m_paths.release ();
180 }
181
182 fwd_jt_path_registry::fwd_jt_path_registry ()
183 : jt_path_registry (/*backedge_threads=*/false)
184 {
185 m_removed_edges = new hash_table<struct removed_edges> (17);
186 m_redirection_data = NULL;
187 }
188
189 fwd_jt_path_registry::~fwd_jt_path_registry ()
190 {
191 delete m_removed_edges;
192 }
193
194 back_jt_path_registry::back_jt_path_registry ()
195 : jt_path_registry (/*backedge_threads=*/true)
196 {
197 }
198
199 void
200 jt_path_registry::push_edge (vec<jump_thread_edge *> *path,
201 edge e, jump_thread_edge_type type)
202 {
203 jump_thread_edge *x = m_allocator.allocate_thread_edge (e, type);
204 path->safe_push (x);
205 }
206
207 vec<jump_thread_edge *> *
208 jt_path_registry::allocate_thread_path ()
209 {
210 return m_allocator.allocate_thread_path ();
211 }
212
213 /* Dump a jump threading path, including annotations about each
214 edge in the path. */
215
216 static void
217 dump_jump_thread_path (FILE *dump_file,
218 const vec<jump_thread_edge *> &path,
219 bool registering)
220 {
221 fprintf (dump_file,
222 " %s jump thread: (%d, %d) incoming edge; ",
223 (registering ? "Registering" : "Cancelling"),
224 path[0]->e->src->index, path[0]->e->dest->index);
225
226 for (unsigned int i = 1; i < path.length (); i++)
227 {
228 /* We can get paths with a NULL edge when the final destination
229 of a jump thread turns out to be a constant address. We dump
230 those paths when debugging, so we have to be prepared for that
231 possibility here. */
232 if (path[i]->e == NULL)
233 continue;
234
235 fprintf (dump_file, " (%d, %d) ",
236 path[i]->e->src->index, path[i]->e->dest->index);
237 switch (path[i]->type)
238 {
239 case EDGE_COPY_SRC_JOINER_BLOCK:
240 fprintf (dump_file, "joiner");
241 break;
242 case EDGE_COPY_SRC_BLOCK:
243 fprintf (dump_file, "normal");
244 break;
245 case EDGE_NO_COPY_SRC_BLOCK:
246 fprintf (dump_file, "nocopy");
247 break;
248 default:
249 gcc_unreachable ();
250 }
251 }
252 fprintf (dump_file, "; \n");
253 }
254
255 DEBUG_FUNCTION void
256 debug (const vec<jump_thread_edge *> &path)
257 {
258 dump_jump_thread_path (stderr, path, true);
259 }
260
261 DEBUG_FUNCTION void
262 debug (const vec<jump_thread_edge *> *path)
263 {
264 debug (*path);
265 }
266
267 /* Release the memory associated with PATH, and if dumping is enabled,
268 dump out the reason why the thread was canceled. */
269
270 static void
271 cancel_thread (vec<jump_thread_edge *> *path, const char *reason = NULL)
272 {
273 if (dump_file && (dump_flags & TDF_DETAILS))
274 {
275 if (reason)
276 fprintf (dump_file, "%s:\n", reason);
277
278 dump_jump_thread_path (dump_file, *path, false);
279 fprintf (dump_file, "\n");
280 }
281 path->release ();
282 }
283
284 /* Simple hashing function. For any given incoming edge E, we're going
285 to be most concerned with the final destination of its jump thread
286 path. So hash on the block index of the final edge in the path. */
287
288 inline hashval_t
289 redirection_data::hash (const redirection_data *p)
290 {
291 vec<jump_thread_edge *> *path = p->path;
292 return path->last ()->e->dest->index;
293 }
294
295 /* Given two hash table entries, return true if they have the same
296 jump threading path. */
297 inline int
298 redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
299 {
300 vec<jump_thread_edge *> *path1 = p1->path;
301 vec<jump_thread_edge *> *path2 = p2->path;
302
303 if (path1->length () != path2->length ())
304 return false;
305
306 for (unsigned int i = 1; i < path1->length (); i++)
307 {
308 if ((*path1)[i]->type != (*path2)[i]->type
309 || (*path1)[i]->e != (*path2)[i]->e)
310 return false;
311 }
312
313 return true;
314 }
315
316 /* Data structure of information to pass to hash table traversal routines. */
317 struct ssa_local_info_t
318 {
319 /* The current block we are working on. */
320 basic_block bb;
321
322 /* We only create a template block for the first duplicated block in a
323 jump threading path as we may need many duplicates of that block.
324
325 The second duplicate block in a path is specific to that path. Creating
326 and sharing a template for that block is considerably more difficult. */
327 basic_block template_block;
328
329 /* If we append debug stmts to the template block after creating it,
330 this iterator won't be the last one in the block, and further
331 copies of the template block shouldn't get debug stmts after
332 it. */
333 gimple_stmt_iterator template_last_to_copy;
334
335 /* Blocks duplicated for the thread. */
336 bitmap duplicate_blocks;
337
338 /* TRUE if we thread one or more jumps, FALSE otherwise. */
339 bool jumps_threaded;
340
341 /* When we have multiple paths through a joiner which reach different
342 final destinations, then we may need to correct for potential
343 profile insanities. */
344 bool need_profile_correction;
345
346 // Jump threading statistics.
347 unsigned long num_threaded_edges;
348 };
349
350 /* When we start updating the CFG for threading, data necessary for jump
351 threading is attached to the AUX field for the incoming edge. Use these
352 macros to access the underlying structure attached to the AUX field. */
353 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
354
355 /* Remove the last statement in block BB if it is a control statement
356 Also remove all outgoing edges except the edge which reaches DEST_BB.
357 If DEST_BB is NULL, then remove all outgoing edges. */
358
359 static void
360 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
361 {
362 gimple_stmt_iterator gsi;
363 edge e;
364 edge_iterator ei;
365
366 gsi = gsi_last_bb (bb);
367
368 /* If the duplicate ends with a control statement, then remove it.
369
370 Note that if we are duplicating the template block rather than the
371 original basic block, then the duplicate might not have any real
372 statements in it. */
373 if (!gsi_end_p (gsi)
374 && gsi_stmt (gsi)
375 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
376 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
377 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
378 gsi_remove (&gsi, true);
379
380 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
381 {
382 if (e->dest != dest_bb)
383 {
384 free_dom_edge_info (e);
385 remove_edge (e);
386 }
387 else
388 {
389 e->probability = profile_probability::always ();
390 ei_next (&ei);
391 }
392 }
393
394 /* If the remaining edge is a loop exit, there must have
395 a removed edge that was not a loop exit.
396
397 In that case BB and possibly other blocks were previously
398 in the loop, but are now outside the loop. Thus, we need
399 to update the loop structures. */
400 if (single_succ_p (bb)
401 && loop_outer (bb->loop_father)
402 && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
403 loops_state_set (LOOPS_NEED_FIXUP);
404 }
405
406 /* Create a duplicate of BB. Record the duplicate block in an array
407 indexed by COUNT stored in RD. */
408
409 static void
410 create_block_for_threading (basic_block bb,
411 struct redirection_data *rd,
412 unsigned int count,
413 bitmap *duplicate_blocks)
414 {
415 edge_iterator ei;
416 edge e;
417
418 /* We can use the generic block duplication code and simply remove
419 the stuff we do not need. */
420 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
421
422 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
423 {
424 e->aux = NULL;
425
426 /* If we duplicate a block with an outgoing edge marked as
427 EDGE_IGNORE, we must clear EDGE_IGNORE so that it doesn't
428 leak out of the current pass.
429
430 It would be better to simplify switch statements and remove
431 the edges before we get here, but the sequencing is nontrivial. */
432 e->flags &= ~EDGE_IGNORE;
433 }
434
435 /* Zero out the profile, since the block is unreachable for now. */
436 rd->dup_blocks[count]->count = profile_count::uninitialized ();
437 if (duplicate_blocks)
438 bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
439 }
440
441 /* Given an outgoing edge E lookup and return its entry in our hash table.
442
443 If INSERT is true, then we insert the entry into the hash table if
444 it is not already present. INCOMING_EDGE is added to the list of incoming
445 edges associated with E in the hash table. */
446
447 redirection_data *
448 fwd_jt_path_registry::lookup_redirection_data (edge e, insert_option insert)
449 {
450 struct redirection_data **slot;
451 struct redirection_data *elt;
452 vec<jump_thread_edge *> *path = THREAD_PATH (e);
453
454 /* Build a hash table element so we can see if E is already
455 in the table. */
456 elt = XNEW (struct redirection_data);
457 elt->path = path;
458 elt->dup_blocks[0] = NULL;
459 elt->dup_blocks[1] = NULL;
460 elt->incoming_edges = NULL;
461
462 slot = m_redirection_data->find_slot (elt, insert);
463
464 /* This will only happen if INSERT is false and the entry is not
465 in the hash table. */
466 if (slot == NULL)
467 {
468 free (elt);
469 return NULL;
470 }
471
472 /* This will only happen if E was not in the hash table and
473 INSERT is true. */
474 if (*slot == NULL)
475 {
476 *slot = elt;
477 elt->incoming_edges = XNEW (struct el);
478 elt->incoming_edges->e = e;
479 elt->incoming_edges->next = NULL;
480 return elt;
481 }
482 /* E was in the hash table. */
483 else
484 {
485 /* Free ELT as we do not need it anymore, we will extract the
486 relevant entry from the hash table itself. */
487 free (elt);
488
489 /* Get the entry stored in the hash table. */
490 elt = *slot;
491
492 /* If insertion was requested, then we need to add INCOMING_EDGE
493 to the list of incoming edges associated with E. */
494 if (insert)
495 {
496 struct el *el = XNEW (struct el);
497 el->next = elt->incoming_edges;
498 el->e = e;
499 elt->incoming_edges = el;
500 }
501
502 return elt;
503 }
504 }
505
506 /* Similar to copy_phi_args, except that the PHI arg exists, it just
507 does not have a value associated with it. */
508
509 static void
510 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
511 {
512 int src_idx = src_e->dest_idx;
513 int tgt_idx = tgt_e->dest_idx;
514
515 /* Iterate over each PHI in e->dest. */
516 for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
517 gsi2 = gsi_start_phis (tgt_e->dest);
518 !gsi_end_p (gsi);
519 gsi_next (&gsi), gsi_next (&gsi2))
520 {
521 gphi *src_phi = gsi.phi ();
522 gphi *dest_phi = gsi2.phi ();
523 tree val = gimple_phi_arg_def (src_phi, src_idx);
524 location_t locus = gimple_phi_arg_location (src_phi, src_idx);
525
526 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
527 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
528 }
529 }
530
531 /* Given ssa_name DEF, backtrack jump threading PATH from node IDX
532 to see if it has constant value in a flow sensitive manner. Set
533 LOCUS to location of the constant phi arg and return the value.
534 Return DEF directly if either PATH or idx is ZERO. */
535
536 static tree
537 get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
538 basic_block bb, int idx, location_t *locus)
539 {
540 tree arg;
541 gphi *def_phi;
542 basic_block def_bb;
543
544 if (path == NULL || idx == 0)
545 return def;
546
547 def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
548 if (!def_phi)
549 return def;
550
551 def_bb = gimple_bb (def_phi);
552 /* Don't propagate loop invariants into deeper loops. */
553 if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
554 return def;
555
556 /* Backtrack jump threading path from IDX to see if def has constant
557 value. */
558 for (int j = idx - 1; j >= 0; j--)
559 {
560 edge e = (*path)[j]->e;
561 if (e->dest == def_bb)
562 {
563 arg = gimple_phi_arg_def (def_phi, e->dest_idx);
564 if (is_gimple_min_invariant (arg))
565 {
566 *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
567 return arg;
568 }
569 break;
570 }
571 }
572
573 return def;
574 }
575
576 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
577 Try to backtrack jump threading PATH from node IDX to see if the arg
578 has constant value, copy constant value instead of argument itself
579 if yes. */
580
581 static void
582 copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
583 vec<jump_thread_edge *> *path, int idx)
584 {
585 gphi_iterator gsi;
586 int src_indx = src_e->dest_idx;
587
588 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
589 {
590 gphi *phi = gsi.phi ();
591 tree def = gimple_phi_arg_def (phi, src_indx);
592 location_t locus = gimple_phi_arg_location (phi, src_indx);
593
594 if (TREE_CODE (def) == SSA_NAME
595 && !virtual_operand_p (gimple_phi_result (phi)))
596 def = get_value_locus_in_path (def, path, bb, idx, &locus);
597
598 add_phi_arg (phi, def, tgt_e, locus);
599 }
600 }
601
602 /* We have recently made a copy of ORIG_BB, including its outgoing
603 edges. The copy is NEW_BB. Every PHI node in every direct successor of
604 ORIG_BB has a new argument associated with edge from NEW_BB to the
605 successor. Initialize the PHI argument so that it is equal to the PHI
606 argument associated with the edge from ORIG_BB to the successor.
607 PATH and IDX are used to check if the new PHI argument has constant
608 value in a flow sensitive manner. */
609
610 static void
611 update_destination_phis (basic_block orig_bb, basic_block new_bb,
612 vec<jump_thread_edge *> *path, int idx)
613 {
614 edge_iterator ei;
615 edge e;
616
617 FOR_EACH_EDGE (e, ei, orig_bb->succs)
618 {
619 edge e2 = find_edge (new_bb, e->dest);
620 copy_phi_args (e->dest, e, e2, path, idx);
621 }
622 }
623
624 /* Given a duplicate block and its single destination (both stored
625 in RD). Create an edge between the duplicate and its single
626 destination.
627
628 Add an additional argument to any PHI nodes at the single
629 destination. IDX is the start node in jump threading path
630 we start to check to see if the new PHI argument has constant
631 value along the jump threading path. */
632
633 static void
634 create_edge_and_update_destination_phis (struct redirection_data *rd,
635 basic_block bb, int idx)
636 {
637 edge e = make_single_succ_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
638
639 rescan_loop_exit (e, true, false);
640
641 /* We used to copy the thread path here. That was added in 2007
642 and dutifully updated through the representation changes in 2013.
643
644 In 2013 we added code to thread from an interior node through
645 the backedge to another interior node. That runs after the code
646 to thread through loop headers from outside the loop.
647
648 The latter may delete edges in the CFG, including those
649 which appeared in the jump threading path we copied here. Thus
650 we'd end up using a dangling pointer.
651
652 After reviewing the 2007/2011 code, I can't see how anything
653 depended on copying the AUX field and clearly copying the jump
654 threading path is problematical due to embedded edge pointers.
655 It has been removed. */
656 e->aux = NULL;
657
658 /* If there are any PHI nodes at the destination of the outgoing edge
659 from the duplicate block, then we will need to add a new argument
660 to them. The argument should have the same value as the argument
661 associated with the outgoing edge stored in RD. */
662 copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
663 }
664
665 /* Look through PATH beginning at START and return TRUE if there are
666 any additional blocks that need to be duplicated. Otherwise,
667 return FALSE. */
668 static bool
669 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
670 unsigned int start)
671 {
672 for (unsigned int i = start + 1; i < path->length (); i++)
673 {
674 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
675 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
676 return true;
677 }
678 return false;
679 }
680
681
682 /* Compute the amount of profile count coming into the jump threading
683 path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
684 PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
685 duplicated path, returned in PATH_OUT_COUNT_PTR. LOCAL_INFO is used to
686 identify blocks duplicated for jump threading, which have duplicated
687 edges that need to be ignored in the analysis. Return true if path contains
688 a joiner, false otherwise.
689
690 In the non-joiner case, this is straightforward - all the counts
691 flowing into the jump threading path should flow through the duplicated
692 block and out of the duplicated path.
693
694 In the joiner case, it is very tricky. Some of the counts flowing into
695 the original path go offpath at the joiner. The problem is that while
696 we know how much total count goes off-path in the original control flow,
697 we don't know how many of the counts corresponding to just the jump
698 threading path go offpath at the joiner.
699
700 For example, assume we have the following control flow and identified
701 jump threading paths:
702
703 A B C
704 \ | /
705 Ea \ |Eb / Ec
706 \ | /
707 v v v
708 J <-- Joiner
709 / \
710 Eoff/ \Eon
711 / \
712 v v
713 Soff Son <--- Normal
714 /\
715 Ed/ \ Ee
716 / \
717 v v
718 D E
719
720 Jump threading paths: A -> J -> Son -> D (path 1)
721 C -> J -> Son -> E (path 2)
722
723 Note that the control flow could be more complicated:
724 - Each jump threading path may have more than one incoming edge. I.e. A and
725 Ea could represent multiple incoming blocks/edges that are included in
726 path 1.
727 - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
728 before or after the "normal" copy block). These are not duplicated onto
729 the jump threading path, as they are single-successor.
730 - Any of the blocks along the path may have other incoming edges that
731 are not part of any jump threading path, but add profile counts along
732 the path.
733
734 In the above example, after all jump threading is complete, we will
735 end up with the following control flow:
736
737 A B C
738 | | |
739 Ea| |Eb |Ec
740 | | |
741 v v v
742 Ja J Jc
743 / \ / \Eon' / \
744 Eona/ \ ---/---\-------- \Eonc
745 / \ / / \ \
746 v v v v v
747 Sona Soff Son Sonc
748 \ /\ /
749 \___________ / \ _____/
750 \ / \/
751 vv v
752 D E
753
754 The main issue to notice here is that when we are processing path 1
755 (A->J->Son->D) we need to figure out the outgoing edge weights to
756 the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
757 sum of the incoming weights to D remain Ed. The problem with simply
758 assuming that Ja (and Jc when processing path 2) has the same outgoing
759 probabilities to its successors as the original block J, is that after
760 all paths are processed and other edges/counts removed (e.g. none
761 of Ec will reach D after processing path 2), we may end up with not
762 enough count flowing along duplicated edge Sona->D.
763
764 Therefore, in the case of a joiner, we keep track of all counts
765 coming in along the current path, as well as from predecessors not
766 on any jump threading path (Eb in the above example). While we
767 first assume that the duplicated Eona for Ja->Sona has the same
768 probability as the original, we later compensate for other jump
769 threading paths that may eliminate edges. We do that by keep track
770 of all counts coming into the original path that are not in a jump
771 thread (Eb in the above example, but as noted earlier, there could
772 be other predecessors incoming to the path at various points, such
773 as at Son). Call this cumulative non-path count coming into the path
774 before D as Enonpath. We then ensure that the count from Sona->D is as at
775 least as big as (Ed - Enonpath), but no bigger than the minimum
776 weight along the jump threading path. The probabilities of both the
777 original and duplicated joiner block J and Ja will be adjusted
778 accordingly after the updates. */
779
780 static bool
781 compute_path_counts (struct redirection_data *rd,
782 ssa_local_info_t *local_info,
783 profile_count *path_in_count_ptr,
784 profile_count *path_out_count_ptr)
785 {
786 edge e = rd->incoming_edges->e;
787 vec<jump_thread_edge *> *path = THREAD_PATH (e);
788 edge elast = path->last ()->e;
789 profile_count nonpath_count = profile_count::zero ();
790 bool has_joiner = false;
791 profile_count path_in_count = profile_count::zero ();
792
793 /* Start by accumulating incoming edge counts to the path's first bb
794 into a couple buckets:
795 path_in_count: total count of incoming edges that flow into the
796 current path.
797 nonpath_count: total count of incoming edges that are not
798 flowing along *any* path. These are the counts
799 that will still flow along the original path after
800 all path duplication is done by potentially multiple
801 calls to this routine.
802 (any other incoming edge counts are for a different jump threading
803 path that will be handled by a later call to this routine.)
804 To make this easier, start by recording all incoming edges that flow into
805 the current path in a bitmap. We could add up the path's incoming edge
806 counts here, but we still need to walk all the first bb's incoming edges
807 below to add up the counts of the other edges not included in this jump
808 threading path. */
809 struct el *next, *el;
810 auto_bitmap in_edge_srcs;
811 for (el = rd->incoming_edges; el; el = next)
812 {
813 next = el->next;
814 bitmap_set_bit (in_edge_srcs, el->e->src->index);
815 }
816 edge ein;
817 edge_iterator ei;
818 FOR_EACH_EDGE (ein, ei, e->dest->preds)
819 {
820 vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
821 /* Simply check the incoming edge src against the set captured above. */
822 if (ein_path
823 && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
824 {
825 /* It is necessary but not sufficient that the last path edges
826 are identical. There may be different paths that share the
827 same last path edge in the case where the last edge has a nocopy
828 source block. */
829 gcc_assert (ein_path->last ()->e == elast);
830 path_in_count += ein->count ();
831 }
832 else if (!ein_path)
833 {
834 /* Keep track of the incoming edges that are not on any jump-threading
835 path. These counts will still flow out of original path after all
836 jump threading is complete. */
837 nonpath_count += ein->count ();
838 }
839 }
840
841 /* Now compute the fraction of the total count coming into the first
842 path bb that is from the current threading path. */
843 profile_count total_count = e->dest->count;
844 /* Handle incoming profile insanities. */
845 if (total_count < path_in_count)
846 path_in_count = total_count;
847 profile_probability onpath_scale = path_in_count.probability_in (total_count);
848
849 /* Walk the entire path to do some more computation in order to estimate
850 how much of the path_in_count will flow out of the duplicated threading
851 path. In the non-joiner case this is straightforward (it should be
852 the same as path_in_count, although we will handle incoming profile
853 insanities by setting it equal to the minimum count along the path).
854
855 In the joiner case, we need to estimate how much of the path_in_count
856 will stay on the threading path after the joiner's conditional branch.
857 We don't really know for sure how much of the counts
858 associated with this path go to each successor of the joiner, but we'll
859 estimate based on the fraction of the total count coming into the path
860 bb was from the threading paths (computed above in onpath_scale).
861 Afterwards, we will need to do some fixup to account for other threading
862 paths and possible profile insanities.
863
864 In order to estimate the joiner case's counts we also need to update
865 nonpath_count with any additional counts coming into the path. Other
866 blocks along the path may have additional predecessors from outside
867 the path. */
868 profile_count path_out_count = path_in_count;
869 profile_count min_path_count = path_in_count;
870 for (unsigned int i = 1; i < path->length (); i++)
871 {
872 edge epath = (*path)[i]->e;
873 profile_count cur_count = epath->count ();
874 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
875 {
876 has_joiner = true;
877 cur_count = cur_count.apply_probability (onpath_scale);
878 }
879 /* In the joiner case we need to update nonpath_count for any edges
880 coming into the path that will contribute to the count flowing
881 into the path successor. */
882 if (has_joiner && epath != elast)
883 {
884 /* Look for other incoming edges after joiner. */
885 FOR_EACH_EDGE (ein, ei, epath->dest->preds)
886 {
887 if (ein != epath
888 /* Ignore in edges from blocks we have duplicated for a
889 threading path, which have duplicated edge counts until
890 they are redirected by an invocation of this routine. */
891 && !bitmap_bit_p (local_info->duplicate_blocks,
892 ein->src->index))
893 nonpath_count += ein->count ();
894 }
895 }
896 if (cur_count < path_out_count)
897 path_out_count = cur_count;
898 if (epath->count () < min_path_count)
899 min_path_count = epath->count ();
900 }
901
902 /* We computed path_out_count above assuming that this path targeted
903 the joiner's on-path successor with the same likelihood as it
904 reached the joiner. However, other thread paths through the joiner
905 may take a different path through the normal copy source block
906 (i.e. they have a different elast), meaning that they do not
907 contribute any counts to this path's elast. As a result, it may
908 turn out that this path must have more count flowing to the on-path
909 successor of the joiner. Essentially, all of this path's elast
910 count must be contributed by this path and any nonpath counts
911 (since any path through the joiner with a different elast will not
912 include a copy of this elast in its duplicated path).
913 So ensure that this path's path_out_count is at least the
914 difference between elast->count () and nonpath_count. Otherwise the edge
915 counts after threading will not be sane. */
916 if (local_info->need_profile_correction
917 && has_joiner && path_out_count < elast->count () - nonpath_count)
918 {
919 path_out_count = elast->count () - nonpath_count;
920 /* But neither can we go above the minimum count along the path
921 we are duplicating. This can be an issue due to profile
922 insanities coming in to this pass. */
923 if (path_out_count > min_path_count)
924 path_out_count = min_path_count;
925 }
926
927 *path_in_count_ptr = path_in_count;
928 *path_out_count_ptr = path_out_count;
929 return has_joiner;
930 }
931
932
933 /* Update the counts and frequencies for both an original path
934 edge EPATH and its duplicate EDUP. The duplicate source block
935 will get a count of PATH_IN_COUNT and PATH_IN_FREQ,
936 and the duplicate edge EDUP will have a count of PATH_OUT_COUNT. */
937 static void
938 update_profile (edge epath, edge edup, profile_count path_in_count,
939 profile_count path_out_count)
940 {
941
942 /* First update the duplicated block's count. */
943 if (edup)
944 {
945 basic_block dup_block = edup->src;
946
947 /* Edup's count is reduced by path_out_count. We need to redistribute
948 probabilities to the remaining edges. */
949
950 edge esucc;
951 edge_iterator ei;
952 profile_probability edup_prob
953 = path_out_count.probability_in (path_in_count);
954
955 /* Either scale up or down the remaining edges.
956 probabilities are always in range <0,1> and thus we can't do
957 both by same loop. */
958 if (edup->probability > edup_prob)
959 {
960 profile_probability rev_scale
961 = (profile_probability::always () - edup->probability)
962 / (profile_probability::always () - edup_prob);
963 FOR_EACH_EDGE (esucc, ei, dup_block->succs)
964 if (esucc != edup)
965 esucc->probability /= rev_scale;
966 }
967 else if (edup->probability < edup_prob)
968 {
969 profile_probability scale
970 = (profile_probability::always () - edup_prob)
971 / (profile_probability::always () - edup->probability);
972 FOR_EACH_EDGE (esucc, ei, dup_block->succs)
973 if (esucc != edup)
974 esucc->probability *= scale;
975 }
976 if (edup_prob.initialized_p ())
977 edup->probability = edup_prob;
978
979 gcc_assert (!dup_block->count.initialized_p ());
980 dup_block->count = path_in_count;
981 }
982
983 if (path_in_count == profile_count::zero ())
984 return;
985
986 profile_count final_count = epath->count () - path_out_count;
987
988 /* Now update the original block's count in the
989 opposite manner - remove the counts/freq that will flow
990 into the duplicated block. Handle underflow due to precision/
991 rounding issues. */
992 epath->src->count -= path_in_count;
993
994 /* Next update this path edge's original and duplicated counts. We know
995 that the duplicated path will have path_out_count flowing
996 out of it (in the joiner case this is the count along the duplicated path
997 out of the duplicated joiner). This count can then be removed from the
998 original path edge. */
999
1000 edge esucc;
1001 edge_iterator ei;
1002 profile_probability epath_prob = final_count.probability_in (epath->src->count);
1003
1004 if (epath->probability > epath_prob)
1005 {
1006 profile_probability rev_scale
1007 = (profile_probability::always () - epath->probability)
1008 / (profile_probability::always () - epath_prob);
1009 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1010 if (esucc != epath)
1011 esucc->probability /= rev_scale;
1012 }
1013 else if (epath->probability < epath_prob)
1014 {
1015 profile_probability scale
1016 = (profile_probability::always () - epath_prob)
1017 / (profile_probability::always () - epath->probability);
1018 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1019 if (esucc != epath)
1020 esucc->probability *= scale;
1021 }
1022 if (epath_prob.initialized_p ())
1023 epath->probability = epath_prob;
1024 }
1025
1026 /* Wire up the outgoing edges from the duplicate blocks and
1027 update any PHIs as needed. Also update the profile counts
1028 on the original and duplicate blocks and edges. */
1029 void
1030 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
1031 ssa_local_info_t *local_info)
1032 {
1033 bool multi_incomings = (rd->incoming_edges->next != NULL);
1034 edge e = rd->incoming_edges->e;
1035 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1036 edge elast = path->last ()->e;
1037 profile_count path_in_count = profile_count::zero ();
1038 profile_count path_out_count = profile_count::zero ();
1039
1040 /* First determine how much profile count to move from original
1041 path to the duplicate path. This is tricky in the presence of
1042 a joiner (see comments for compute_path_counts), where some portion
1043 of the path's counts will flow off-path from the joiner. In the
1044 non-joiner case the path_in_count and path_out_count should be the
1045 same. */
1046 bool has_joiner = compute_path_counts (rd, local_info,
1047 &path_in_count, &path_out_count);
1048
1049 for (unsigned int count = 0, i = 1; i < path->length (); i++)
1050 {
1051 edge epath = (*path)[i]->e;
1052
1053 /* If we were threading through an joiner block, then we want
1054 to keep its control statement and redirect an outgoing edge.
1055 Else we want to remove the control statement & edges, then create
1056 a new outgoing edge. In both cases we may need to update PHIs. */
1057 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1058 {
1059 edge victim;
1060 edge e2;
1061
1062 gcc_assert (has_joiner);
1063
1064 /* This updates the PHIs at the destination of the duplicate
1065 block. Pass 0 instead of i if we are threading a path which
1066 has multiple incoming edges. */
1067 update_destination_phis (local_info->bb, rd->dup_blocks[count],
1068 path, multi_incomings ? 0 : i);
1069
1070 /* Find the edge from the duplicate block to the block we're
1071 threading through. That's the edge we want to redirect. */
1072 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
1073
1074 /* If there are no remaining blocks on the path to duplicate,
1075 then redirect VICTIM to the final destination of the jump
1076 threading path. */
1077 if (!any_remaining_duplicated_blocks (path, i))
1078 {
1079 e2 = redirect_edge_and_branch (victim, elast->dest);
1080 /* If we redirected the edge, then we need to copy PHI arguments
1081 at the target. If the edge already existed (e2 != victim
1082 case), then the PHIs in the target already have the correct
1083 arguments. */
1084 if (e2 == victim)
1085 copy_phi_args (e2->dest, elast, e2,
1086 path, multi_incomings ? 0 : i);
1087 }
1088 else
1089 {
1090 /* Redirect VICTIM to the next duplicated block in the path. */
1091 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
1092
1093 /* We need to update the PHIs in the next duplicated block. We
1094 want the new PHI args to have the same value as they had
1095 in the source of the next duplicate block.
1096
1097 Thus, we need to know which edge we traversed into the
1098 source of the duplicate. Furthermore, we may have
1099 traversed many edges to reach the source of the duplicate.
1100
1101 Walk through the path starting at element I until we
1102 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
1103 the edge from the prior element. */
1104 for (unsigned int j = i + 1; j < path->length (); j++)
1105 {
1106 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
1107 {
1108 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
1109 break;
1110 }
1111 }
1112 }
1113
1114 /* Update the counts of both the original block
1115 and path edge, and the duplicates. The path duplicate's
1116 incoming count are the totals for all edges
1117 incoming to this jump threading path computed earlier.
1118 And we know that the duplicated path will have path_out_count
1119 flowing out of it (i.e. along the duplicated path out of the
1120 duplicated joiner). */
1121 update_profile (epath, e2, path_in_count, path_out_count);
1122 }
1123 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1124 {
1125 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
1126 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
1127 multi_incomings ? 0 : i);
1128 if (count == 1)
1129 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
1130
1131 /* Update the counts of both the original block
1132 and path edge, and the duplicates. Since we are now after
1133 any joiner that may have existed on the path, the count
1134 flowing along the duplicated threaded path is path_out_count.
1135 If we didn't have a joiner, then cur_path_freq was the sum
1136 of the total frequencies along all incoming edges to the
1137 thread path (path_in_freq). If we had a joiner, it would have
1138 been updated at the end of that handling to the edge frequency
1139 along the duplicated joiner path edge. */
1140 update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
1141 path_out_count, path_out_count);
1142 }
1143 else
1144 {
1145 /* No copy case. In this case we don't have an equivalent block
1146 on the duplicated thread path to update, but we do need
1147 to remove the portion of the counts/freqs that were moved
1148 to the duplicated path from the counts/freqs flowing through
1149 this block on the original path. Since all the no-copy edges
1150 are after any joiner, the removed count is the same as
1151 path_out_count.
1152
1153 If we didn't have a joiner, then cur_path_freq was the sum
1154 of the total frequencies along all incoming edges to the
1155 thread path (path_in_freq). If we had a joiner, it would have
1156 been updated at the end of that handling to the edge frequency
1157 along the duplicated joiner path edge. */
1158 update_profile (epath, NULL, path_out_count, path_out_count);
1159 }
1160
1161 /* Increment the index into the duplicated path when we processed
1162 a duplicated block. */
1163 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
1164 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1165 {
1166 count++;
1167 }
1168 }
1169 }
1170
1171 /* Hash table traversal callback routine to create duplicate blocks. */
1172
1173 int
1174 ssa_create_duplicates (struct redirection_data **slot,
1175 ssa_local_info_t *local_info)
1176 {
1177 struct redirection_data *rd = *slot;
1178
1179 /* The second duplicated block in a jump threading path is specific
1180 to the path. So it gets stored in RD rather than in LOCAL_DATA.
1181
1182 Each time we're called, we have to look through the path and see
1183 if a second block needs to be duplicated.
1184
1185 Note the search starts with the third edge on the path. The first
1186 edge is the incoming edge, the second edge always has its source
1187 duplicated. Thus we start our search with the third edge. */
1188 vec<jump_thread_edge *> *path = rd->path;
1189 for (unsigned int i = 2; i < path->length (); i++)
1190 {
1191 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1192 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1193 {
1194 create_block_for_threading ((*path)[i]->e->src, rd, 1,
1195 &local_info->duplicate_blocks);
1196 break;
1197 }
1198 }
1199
1200 /* Create a template block if we have not done so already. Otherwise
1201 use the template to create a new block. */
1202 if (local_info->template_block == NULL)
1203 {
1204 create_block_for_threading ((*path)[1]->e->src, rd, 0,
1205 &local_info->duplicate_blocks);
1206 local_info->template_block = rd->dup_blocks[0];
1207 local_info->template_last_to_copy
1208 = gsi_last_bb (local_info->template_block);
1209
1210 /* We do not create any outgoing edges for the template. We will
1211 take care of that in a later traversal. That way we do not
1212 create edges that are going to just be deleted. */
1213 }
1214 else
1215 {
1216 gimple_seq seq = NULL;
1217 if (gsi_stmt (local_info->template_last_to_copy)
1218 != gsi_stmt (gsi_last_bb (local_info->template_block)))
1219 {
1220 if (gsi_end_p (local_info->template_last_to_copy))
1221 {
1222 seq = bb_seq (local_info->template_block);
1223 set_bb_seq (local_info->template_block, NULL);
1224 }
1225 else
1226 seq = gsi_split_seq_after (local_info->template_last_to_copy);
1227 }
1228 create_block_for_threading (local_info->template_block, rd, 0,
1229 &local_info->duplicate_blocks);
1230 if (seq)
1231 {
1232 if (gsi_end_p (local_info->template_last_to_copy))
1233 set_bb_seq (local_info->template_block, seq);
1234 else
1235 gsi_insert_seq_after (&local_info->template_last_to_copy,
1236 seq, GSI_SAME_STMT);
1237 }
1238
1239 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
1240 block. */
1241 ssa_fix_duplicate_block_edges (rd, local_info);
1242 }
1243
1244 if (MAY_HAVE_DEBUG_STMTS)
1245 {
1246 /* Copy debug stmts from each NO_COPY src block to the block
1247 that would have been its predecessor, if we can append to it
1248 (we can't add stmts after a block-ending stmt), or prepending
1249 to the duplicate of the successor, if there is one. If
1250 there's no duplicate successor, we'll mostly drop the blocks
1251 on the floor; propagate_threaded_block_debug_into, called
1252 elsewhere, will consolidate and preserve the effects of the
1253 binds, but none of the markers. */
1254 gimple_stmt_iterator copy_to = gsi_last_bb (rd->dup_blocks[0]);
1255 if (!gsi_end_p (copy_to))
1256 {
1257 if (stmt_ends_bb_p (gsi_stmt (copy_to)))
1258 {
1259 if (rd->dup_blocks[1])
1260 copy_to = gsi_after_labels (rd->dup_blocks[1]);
1261 else
1262 copy_to = gsi_none ();
1263 }
1264 else
1265 gsi_next (&copy_to);
1266 }
1267 for (unsigned int i = 2, j = 0; i < path->length (); i++)
1268 if ((*path)[i]->type == EDGE_NO_COPY_SRC_BLOCK
1269 && gsi_bb (copy_to))
1270 {
1271 for (gimple_stmt_iterator gsi = gsi_start_bb ((*path)[i]->e->src);
1272 !gsi_end_p (gsi); gsi_next (&gsi))
1273 {
1274 if (!is_gimple_debug (gsi_stmt (gsi)))
1275 continue;
1276 gimple *stmt = gsi_stmt (gsi);
1277 gimple *copy = gimple_copy (stmt);
1278 gsi_insert_before (&copy_to, copy, GSI_SAME_STMT);
1279 }
1280 }
1281 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1282 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1283 {
1284 j++;
1285 gcc_assert (j < 2);
1286 copy_to = gsi_last_bb (rd->dup_blocks[j]);
1287 if (!gsi_end_p (copy_to))
1288 {
1289 if (stmt_ends_bb_p (gsi_stmt (copy_to)))
1290 copy_to = gsi_none ();
1291 else
1292 gsi_next (&copy_to);
1293 }
1294 }
1295 }
1296
1297 /* Keep walking the hash table. */
1298 return 1;
1299 }
1300
1301 /* We did not create any outgoing edges for the template block during
1302 block creation. This hash table traversal callback creates the
1303 outgoing edge for the template block. */
1304
1305 inline int
1306 ssa_fixup_template_block (struct redirection_data **slot,
1307 ssa_local_info_t *local_info)
1308 {
1309 struct redirection_data *rd = *slot;
1310
1311 /* If this is the template block halt the traversal after updating
1312 it appropriately.
1313
1314 If we were threading through an joiner block, then we want
1315 to keep its control statement and redirect an outgoing edge.
1316 Else we want to remove the control statement & edges, then create
1317 a new outgoing edge. In both cases we may need to update PHIs. */
1318 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
1319 {
1320 ssa_fix_duplicate_block_edges (rd, local_info);
1321 return 0;
1322 }
1323
1324 return 1;
1325 }
1326
1327 /* Hash table traversal callback to redirect each incoming edge
1328 associated with this hash table element to its new destination. */
1329
1330 static int
1331 ssa_redirect_edges (struct redirection_data **slot,
1332 ssa_local_info_t *local_info)
1333 {
1334 struct redirection_data *rd = *slot;
1335 struct el *next, *el;
1336
1337 /* Walk over all the incoming edges associated with this hash table
1338 entry. */
1339 for (el = rd->incoming_edges; el; el = next)
1340 {
1341 edge e = el->e;
1342 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1343
1344 /* Go ahead and free this element from the list. Doing this now
1345 avoids the need for another list walk when we destroy the hash
1346 table. */
1347 next = el->next;
1348 free (el);
1349
1350 local_info->num_threaded_edges++;
1351
1352 if (rd->dup_blocks[0])
1353 {
1354 edge e2;
1355
1356 if (dump_file && (dump_flags & TDF_DETAILS))
1357 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1358 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
1359
1360 /* Redirect the incoming edge (possibly to the joiner block) to the
1361 appropriate duplicate block. */
1362 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
1363 gcc_assert (e == e2);
1364 flush_pending_stmts (e2);
1365 }
1366
1367 /* Go ahead and clear E->aux. It's not needed anymore and failure
1368 to clear it will cause all kinds of unpleasant problems later. */
1369 path->release ();
1370 e->aux = NULL;
1371
1372 }
1373
1374 /* Indicate that we actually threaded one or more jumps. */
1375 if (rd->incoming_edges)
1376 local_info->jumps_threaded = true;
1377
1378 return 1;
1379 }
1380
1381 /* Return true if this block has no executable statements other than
1382 a simple ctrl flow instruction. When the number of outgoing edges
1383 is one, this is equivalent to a "forwarder" block. */
1384
1385 static bool
1386 redirection_block_p (basic_block bb)
1387 {
1388 gimple_stmt_iterator gsi;
1389
1390 /* Advance to the first executable statement. */
1391 gsi = gsi_start_bb (bb);
1392 while (!gsi_end_p (gsi)
1393 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
1394 || is_gimple_debug (gsi_stmt (gsi))
1395 || gimple_nop_p (gsi_stmt (gsi))
1396 || gimple_clobber_p (gsi_stmt (gsi))))
1397 gsi_next (&gsi);
1398
1399 /* Check if this is an empty block. */
1400 if (gsi_end_p (gsi))
1401 return true;
1402
1403 /* Test that we've reached the terminating control statement. */
1404 return gsi_stmt (gsi)
1405 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1406 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1407 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
1408 }
1409
1410 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1411 is reached via one or more specific incoming edges, we know which
1412 outgoing edge from BB will be traversed.
1413
1414 We want to redirect those incoming edges to the target of the
1415 appropriate outgoing edge. Doing so avoids a conditional branch
1416 and may expose new optimization opportunities. Note that we have
1417 to update dominator tree and SSA graph after such changes.
1418
1419 The key to keeping the SSA graph update manageable is to duplicate
1420 the side effects occurring in BB so that those side effects still
1421 occur on the paths which bypass BB after redirecting edges.
1422
1423 We accomplish this by creating duplicates of BB and arranging for
1424 the duplicates to unconditionally pass control to one specific
1425 successor of BB. We then revector the incoming edges into BB to
1426 the appropriate duplicate of BB.
1427
1428 If NOLOOP_ONLY is true, we only perform the threading as long as it
1429 does not affect the structure of the loops in a nontrivial way.
1430
1431 If JOINERS is true, then thread through joiner blocks as well. */
1432
1433 bool
1434 fwd_jt_path_registry::thread_block_1 (basic_block bb,
1435 bool noloop_only,
1436 bool joiners)
1437 {
1438 /* E is an incoming edge into BB that we may or may not want to
1439 redirect to a duplicate of BB. */
1440 edge e, e2;
1441 edge_iterator ei;
1442 ssa_local_info_t local_info;
1443
1444 local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1445 local_info.need_profile_correction = false;
1446 local_info.num_threaded_edges = 0;
1447
1448 /* To avoid scanning a linear array for the element we need we instead
1449 use a hash table. For normal code there should be no noticeable
1450 difference. However, if we have a block with a large number of
1451 incoming and outgoing edges such linear searches can get expensive. */
1452 m_redirection_data
1453 = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1454
1455 /* Record each unique threaded destination into a hash table for
1456 efficient lookups. */
1457 edge last = NULL;
1458 FOR_EACH_EDGE (e, ei, bb->preds)
1459 {
1460 if (e->aux == NULL)
1461 continue;
1462
1463 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1464
1465 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1466 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1467 continue;
1468
1469 e2 = path->last ()->e;
1470 if (!e2 || noloop_only)
1471 {
1472 /* If NOLOOP_ONLY is true, we only allow threading through the
1473 header of a loop to exit edges. */
1474
1475 /* One case occurs when there was loop header buried in a jump
1476 threading path that crosses loop boundaries. We do not try
1477 and thread this elsewhere, so just cancel the jump threading
1478 request by clearing the AUX field now. */
1479 if (bb->loop_father != e2->src->loop_father
1480 && (!loop_exit_edge_p (e2->src->loop_father, e2)
1481 || flow_loop_nested_p (bb->loop_father,
1482 e2->dest->loop_father)))
1483 {
1484 /* Since this case is not handled by our special code
1485 to thread through a loop header, we must explicitly
1486 cancel the threading request here. */
1487 cancel_thread (path, "Threading through unhandled loop header");
1488 e->aux = NULL;
1489 continue;
1490 }
1491
1492 /* Another case occurs when trying to thread through our
1493 own loop header, possibly from inside the loop. We will
1494 thread these later. */
1495 unsigned int i;
1496 for (i = 1; i < path->length (); i++)
1497 {
1498 if ((*path)[i]->e->src == bb->loop_father->header
1499 && (!loop_exit_edge_p (bb->loop_father, e2)
1500 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1501 break;
1502 }
1503
1504 if (i != path->length ())
1505 continue;
1506
1507 /* Loop parallelization can be confused by the result of
1508 threading through the loop exit test back into the loop.
1509 However, theading those jumps seems to help other codes.
1510
1511 I have been unable to find anything related to the shape of
1512 the CFG, the contents of the affected blocks, etc which would
1513 allow a more sensible test than what we're using below which
1514 merely avoids the optimization when parallelizing loops. */
1515 if (flag_tree_parallelize_loops > 1)
1516 {
1517 for (i = 1; i < path->length (); i++)
1518 if (bb->loop_father == e2->src->loop_father
1519 && loop_exits_from_bb_p (bb->loop_father,
1520 (*path)[i]->e->src)
1521 && !loop_exit_edge_p (bb->loop_father, e2))
1522 break;
1523
1524 if (i != path->length ())
1525 {
1526 cancel_thread (path, "Threading through loop exit");
1527 e->aux = NULL;
1528 continue;
1529 }
1530 }
1531 }
1532
1533 /* Insert the outgoing edge into the hash table if it is not
1534 already in the hash table. */
1535 lookup_redirection_data (e, INSERT);
1536
1537 /* When we have thread paths through a common joiner with different
1538 final destinations, then we may need corrections to deal with
1539 profile insanities. See the big comment before compute_path_counts. */
1540 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1541 {
1542 if (!last)
1543 last = e2;
1544 else if (e2 != last)
1545 local_info.need_profile_correction = true;
1546 }
1547 }
1548
1549 /* We do not update dominance info. */
1550 free_dominance_info (CDI_DOMINATORS);
1551
1552 /* We know we only thread through the loop header to loop exits.
1553 Let the basic block duplication hook know we are not creating
1554 a multiple entry loop. */
1555 if (noloop_only
1556 && bb == bb->loop_father->header)
1557 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1558
1559 /* Now create duplicates of BB.
1560
1561 Note that for a block with a high outgoing degree we can waste
1562 a lot of time and memory creating and destroying useless edges.
1563
1564 So we first duplicate BB and remove the control structure at the
1565 tail of the duplicate as well as all outgoing edges from the
1566 duplicate. We then use that duplicate block as a template for
1567 the rest of the duplicates. */
1568 local_info.template_block = NULL;
1569 local_info.bb = bb;
1570 local_info.jumps_threaded = false;
1571 m_redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1572 (&local_info);
1573
1574 /* The template does not have an outgoing edge. Create that outgoing
1575 edge and update PHI nodes as the edge's target as necessary.
1576
1577 We do this after creating all the duplicates to avoid creating
1578 unnecessary edges. */
1579 m_redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1580 (&local_info);
1581
1582 /* The hash table traversals above created the duplicate blocks (and the
1583 statements within the duplicate blocks). This loop creates PHI nodes for
1584 the duplicated blocks and redirects the incoming edges into BB to reach
1585 the duplicates of BB. */
1586 m_redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1587 (&local_info);
1588
1589 /* Done with this block. Clear REDIRECTION_DATA. */
1590 delete m_redirection_data;
1591 m_redirection_data = NULL;
1592
1593 if (noloop_only
1594 && bb == bb->loop_father->header)
1595 set_loop_copy (bb->loop_father, NULL);
1596
1597 BITMAP_FREE (local_info.duplicate_blocks);
1598 local_info.duplicate_blocks = NULL;
1599
1600 m_num_threaded_edges += local_info.num_threaded_edges;
1601
1602 /* Indicate to our caller whether or not any jumps were threaded. */
1603 return local_info.jumps_threaded;
1604 }
1605
1606 /* Wrapper for thread_block_1 so that we can first handle jump
1607 thread paths which do not involve copying joiner blocks, then
1608 handle jump thread paths which have joiner blocks.
1609
1610 By doing things this way we can be as aggressive as possible and
1611 not worry that copying a joiner block will create a jump threading
1612 opportunity. */
1613
1614 bool
1615 fwd_jt_path_registry::thread_block (basic_block bb, bool noloop_only)
1616 {
1617 bool retval;
1618 retval = thread_block_1 (bb, noloop_only, false);
1619 retval |= thread_block_1 (bb, noloop_only, true);
1620 return retval;
1621 }
1622
1623 /* Callback for dfs_enumerate_from. Returns true if BB is different
1624 from STOP and DBDS_CE_STOP. */
1625
1626 static basic_block dbds_ce_stop;
1627 static bool
1628 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1629 {
1630 return (bb != (const_basic_block) stop
1631 && bb != dbds_ce_stop);
1632 }
1633
1634 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
1635 returns the state. */
1636
1637 enum bb_dom_status
1638 determine_bb_domination_status (class loop *loop, basic_block bb)
1639 {
1640 basic_block *bblocks;
1641 unsigned nblocks, i;
1642 bool bb_reachable = false;
1643 edge_iterator ei;
1644 edge e;
1645
1646 /* This function assumes BB is a successor of LOOP->header.
1647 If that is not the case return DOMST_NONDOMINATING which
1648 is always safe. */
1649 {
1650 bool ok = false;
1651
1652 FOR_EACH_EDGE (e, ei, bb->preds)
1653 {
1654 if (e->src == loop->header)
1655 {
1656 ok = true;
1657 break;
1658 }
1659 }
1660
1661 if (!ok)
1662 return DOMST_NONDOMINATING;
1663 }
1664
1665 if (bb == loop->latch)
1666 return DOMST_DOMINATING;
1667
1668 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1669 from it. */
1670
1671 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1672 dbds_ce_stop = loop->header;
1673 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1674 bblocks, loop->num_nodes, bb);
1675 for (i = 0; i < nblocks; i++)
1676 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1677 {
1678 if (e->src == loop->header)
1679 {
1680 free (bblocks);
1681 return DOMST_NONDOMINATING;
1682 }
1683 if (e->src == bb)
1684 bb_reachable = true;
1685 }
1686
1687 free (bblocks);
1688 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1689 }
1690
1691 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1692 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1693 to the inside of the loop. */
1694
1695 bool
1696 fwd_jt_path_registry::thread_through_loop_header (class loop *loop,
1697 bool may_peel_loop_headers)
1698 {
1699 basic_block header = loop->header;
1700 edge e, tgt_edge, latch = loop_latch_edge (loop);
1701 edge_iterator ei;
1702 basic_block tgt_bb, atgt_bb;
1703 enum bb_dom_status domst;
1704
1705 /* We have already threaded through headers to exits, so all the threading
1706 requests now are to the inside of the loop. We need to avoid creating
1707 irreducible regions (i.e., loops with more than one entry block), and
1708 also loop with several latch edges, or new subloops of the loop (although
1709 there are cases where it might be appropriate, it is difficult to decide,
1710 and doing it wrongly may confuse other optimizers).
1711
1712 We could handle more general cases here. However, the intention is to
1713 preserve some information about the loop, which is impossible if its
1714 structure changes significantly, in a way that is not well understood.
1715 Thus we only handle few important special cases, in which also updating
1716 of the loop-carried information should be feasible:
1717
1718 1) Propagation of latch edge to a block that dominates the latch block
1719 of a loop. This aims to handle the following idiom:
1720
1721 first = 1;
1722 while (1)
1723 {
1724 if (first)
1725 initialize;
1726 first = 0;
1727 body;
1728 }
1729
1730 After threading the latch edge, this becomes
1731
1732 first = 1;
1733 if (first)
1734 initialize;
1735 while (1)
1736 {
1737 first = 0;
1738 body;
1739 }
1740
1741 The original header of the loop is moved out of it, and we may thread
1742 the remaining edges through it without further constraints.
1743
1744 2) All entry edges are propagated to a single basic block that dominates
1745 the latch block of the loop. This aims to handle the following idiom
1746 (normally created for "for" loops):
1747
1748 i = 0;
1749 while (1)
1750 {
1751 if (i >= 100)
1752 break;
1753 body;
1754 i++;
1755 }
1756
1757 This becomes
1758
1759 i = 0;
1760 while (1)
1761 {
1762 body;
1763 i++;
1764 if (i >= 100)
1765 break;
1766 }
1767 */
1768
1769 /* Threading through the header won't improve the code if the header has just
1770 one successor. */
1771 if (single_succ_p (header))
1772 goto fail;
1773
1774 if (!may_peel_loop_headers && !redirection_block_p (loop->header))
1775 goto fail;
1776 else
1777 {
1778 tgt_bb = NULL;
1779 tgt_edge = NULL;
1780 FOR_EACH_EDGE (e, ei, header->preds)
1781 {
1782 if (!e->aux)
1783 {
1784 if (e == latch)
1785 continue;
1786
1787 /* If latch is not threaded, and there is a header
1788 edge that is not threaded, we would create loop
1789 with multiple entries. */
1790 goto fail;
1791 }
1792
1793 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1794
1795 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1796 goto fail;
1797 tgt_edge = (*path)[1]->e;
1798 atgt_bb = tgt_edge->dest;
1799 if (!tgt_bb)
1800 tgt_bb = atgt_bb;
1801 /* Two targets of threading would make us create loop
1802 with multiple entries. */
1803 else if (tgt_bb != atgt_bb)
1804 goto fail;
1805 }
1806
1807 if (!tgt_bb)
1808 {
1809 /* There are no threading requests. */
1810 return false;
1811 }
1812
1813 /* Redirecting to empty loop latch is useless. */
1814 if (tgt_bb == loop->latch
1815 && empty_block_p (loop->latch))
1816 goto fail;
1817 }
1818
1819 /* The target block must dominate the loop latch, otherwise we would be
1820 creating a subloop. */
1821 domst = determine_bb_domination_status (loop, tgt_bb);
1822 if (domst == DOMST_NONDOMINATING)
1823 goto fail;
1824 if (domst == DOMST_LOOP_BROKEN)
1825 {
1826 /* If the loop ceased to exist, mark it as such, and thread through its
1827 original header. */
1828 mark_loop_for_removal (loop);
1829 return thread_block (header, false);
1830 }
1831
1832 if (tgt_bb->loop_father->header == tgt_bb)
1833 {
1834 /* If the target of the threading is a header of a subloop, we need
1835 to create a preheader for it, so that the headers of the two loops
1836 do not merge. */
1837 if (EDGE_COUNT (tgt_bb->preds) > 2)
1838 {
1839 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1840 gcc_assert (tgt_bb != NULL);
1841 }
1842 else
1843 tgt_bb = split_edge (tgt_edge);
1844 }
1845
1846 basic_block new_preheader;
1847
1848 /* Now consider the case entry edges are redirected to the new entry
1849 block. Remember one entry edge, so that we can find the new
1850 preheader (its destination after threading). */
1851 FOR_EACH_EDGE (e, ei, header->preds)
1852 {
1853 if (e->aux)
1854 break;
1855 }
1856
1857 /* The duplicate of the header is the new preheader of the loop. Ensure
1858 that it is placed correctly in the loop hierarchy. */
1859 set_loop_copy (loop, loop_outer (loop));
1860
1861 thread_block (header, false);
1862 set_loop_copy (loop, NULL);
1863 new_preheader = e->dest;
1864
1865 /* Create the new latch block. This is always necessary, as the latch
1866 must have only a single successor, but the original header had at
1867 least two successors. */
1868 loop->latch = NULL;
1869 mfb_kj_edge = single_succ_edge (new_preheader);
1870 loop->header = mfb_kj_edge->dest;
1871 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1872 loop->header = latch->dest;
1873 loop->latch = latch->src;
1874 return true;
1875
1876 fail:
1877 /* We failed to thread anything. Cancel the requests. */
1878 FOR_EACH_EDGE (e, ei, header->preds)
1879 {
1880 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1881
1882 if (path)
1883 {
1884 cancel_thread (path, "Failure in thread_through_loop_header");
1885 e->aux = NULL;
1886 }
1887 }
1888 return false;
1889 }
1890
1891 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1892 PHI arguments associated with those edges are equal or there are no
1893 PHI arguments, otherwise return FALSE. */
1894
1895 static bool
1896 phi_args_equal_on_edges (edge e1, edge e2)
1897 {
1898 gphi_iterator gsi;
1899 int indx1 = e1->dest_idx;
1900 int indx2 = e2->dest_idx;
1901
1902 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1903 {
1904 gphi *phi = gsi.phi ();
1905
1906 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1907 gimple_phi_arg_def (phi, indx2), 0))
1908 return false;
1909 }
1910 return true;
1911 }
1912
1913 /* Return the number of non-debug statements and non-virtual PHIs in a
1914 block. */
1915
1916 static unsigned int
1917 count_stmts_and_phis_in_block (basic_block bb)
1918 {
1919 unsigned int num_stmts = 0;
1920
1921 gphi_iterator gpi;
1922 for (gpi = gsi_start_phis (bb); !gsi_end_p (gpi); gsi_next (&gpi))
1923 if (!virtual_operand_p (PHI_RESULT (gpi.phi ())))
1924 num_stmts++;
1925
1926 gimple_stmt_iterator gsi;
1927 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1928 {
1929 gimple *stmt = gsi_stmt (gsi);
1930 if (!is_gimple_debug (stmt))
1931 num_stmts++;
1932 }
1933
1934 return num_stmts;
1935 }
1936
1937
1938 /* Walk through the registered jump threads and convert them into a
1939 form convenient for this pass.
1940
1941 Any block which has incoming edges threaded to outgoing edges
1942 will have its entry in THREADED_BLOCK set.
1943
1944 Any threaded edge will have its new outgoing edge stored in the
1945 original edge's AUX field.
1946
1947 This form avoids the need to walk all the edges in the CFG to
1948 discover blocks which need processing and avoids unnecessary
1949 hash table lookups to map from threaded edge to new target. */
1950
1951 void
1952 fwd_jt_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
1953 {
1954 unsigned int i;
1955 bitmap_iterator bi;
1956 auto_bitmap tmp;
1957 basic_block bb;
1958 edge e;
1959 edge_iterator ei;
1960
1961 /* It is possible to have jump threads in which one is a subpath
1962 of the other. ie, (A, B), (B, C), (C, D) where B is a joiner
1963 block and (B, C), (C, D) where no joiner block exists.
1964
1965 When this occurs ignore the jump thread request with the joiner
1966 block. It's totally subsumed by the simpler jump thread request.
1967
1968 This results in less block copying, simpler CFGs. More importantly,
1969 when we duplicate the joiner block, B, in this case we will create
1970 a new threading opportunity that we wouldn't be able to optimize
1971 until the next jump threading iteration.
1972
1973 So first convert the jump thread requests which do not require a
1974 joiner block. */
1975 for (i = 0; i < m_paths.length (); i++)
1976 {
1977 vec<jump_thread_edge *> *path = m_paths[i];
1978
1979 if (path->length () > 1
1980 && (*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1981 {
1982 edge e = (*path)[0]->e;
1983 e->aux = (void *)path;
1984 bitmap_set_bit (tmp, e->dest->index);
1985 }
1986 }
1987
1988 /* Now iterate again, converting cases where we want to thread
1989 through a joiner block, but only if no other edge on the path
1990 already has a jump thread attached to it. We do this in two passes,
1991 to avoid situations where the order in the paths vec can hide overlapping
1992 threads (the path is recorded on the incoming edge, so we would miss
1993 cases where the second path starts at a downstream edge on the same
1994 path). First record all joiner paths, deleting any in the unexpected
1995 case where there is already a path for that incoming edge. */
1996 for (i = 0; i < m_paths.length ();)
1997 {
1998 vec<jump_thread_edge *> *path = m_paths[i];
1999
2000 if (path->length () > 1
2001 && (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
2002 {
2003 /* Attach the path to the starting edge if none is yet recorded. */
2004 if ((*path)[0]->e->aux == NULL)
2005 {
2006 (*path)[0]->e->aux = path;
2007 i++;
2008 }
2009 else
2010 {
2011 m_paths.unordered_remove (i);
2012 cancel_thread (path);
2013 }
2014 }
2015 else
2016 {
2017 i++;
2018 }
2019 }
2020
2021 /* Second, look for paths that have any other jump thread attached to
2022 them, and either finish converting them or cancel them. */
2023 for (i = 0; i < m_paths.length ();)
2024 {
2025 vec<jump_thread_edge *> *path = m_paths[i];
2026 edge e = (*path)[0]->e;
2027
2028 if (path->length () > 1
2029 && (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
2030 {
2031 unsigned int j;
2032 for (j = 1; j < path->length (); j++)
2033 if ((*path)[j]->e->aux != NULL)
2034 break;
2035
2036 /* If we iterated through the entire path without exiting the loop,
2037 then we are good to go, record it. */
2038 if (j == path->length ())
2039 {
2040 bitmap_set_bit (tmp, e->dest->index);
2041 i++;
2042 }
2043 else
2044 {
2045 e->aux = NULL;
2046 m_paths.unordered_remove (i);
2047 cancel_thread (path);
2048 }
2049 }
2050 else
2051 {
2052 i++;
2053 }
2054 }
2055
2056 /* When optimizing for size, prune all thread paths where statement
2057 duplication is necessary.
2058
2059 We walk the jump thread path looking for copied blocks. There's
2060 two types of copied blocks.
2061
2062 EDGE_COPY_SRC_JOINER_BLOCK is always copied and thus we will
2063 cancel the jump threading request when optimizing for size.
2064
2065 EDGE_COPY_SRC_BLOCK which is copied, but some of its statements
2066 will be killed by threading. If threading does not kill all of
2067 its statements, then we should cancel the jump threading request
2068 when optimizing for size. */
2069 if (optimize_function_for_size_p (cfun))
2070 {
2071 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2072 {
2073 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, i)->preds)
2074 if (e->aux)
2075 {
2076 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2077
2078 unsigned int j;
2079 for (j = 1; j < path->length (); j++)
2080 {
2081 bb = (*path)[j]->e->src;
2082 if (redirection_block_p (bb))
2083 ;
2084 else if ((*path)[j]->type == EDGE_COPY_SRC_JOINER_BLOCK
2085 || ((*path)[j]->type == EDGE_COPY_SRC_BLOCK
2086 && (count_stmts_and_phis_in_block (bb)
2087 != estimate_threading_killed_stmts (bb))))
2088 break;
2089 }
2090
2091 if (j != path->length ())
2092 {
2093 cancel_thread (path);
2094 e->aux = NULL;
2095 }
2096 else
2097 bitmap_set_bit (threaded_blocks, i);
2098 }
2099 }
2100 }
2101 else
2102 bitmap_copy (threaded_blocks, tmp);
2103
2104 /* If we have a joiner block (J) which has two successors S1 and S2 and
2105 we are threading though S1 and the final destination of the thread
2106 is S2, then we must verify that any PHI nodes in S2 have the same
2107 PHI arguments for the edge J->S2 and J->S1->...->S2.
2108
2109 We used to detect this prior to registering the jump thread, but
2110 that prohibits propagation of edge equivalences into non-dominated
2111 PHI nodes as the equivalency test might occur before propagation.
2112
2113 This must also occur after we truncate any jump threading paths
2114 as this scenario may only show up after truncation.
2115
2116 This works for now, but will need improvement as part of the FSA
2117 optimization.
2118
2119 Note since we've moved the thread request data to the edges,
2120 we have to iterate on those rather than the threaded_edges vector. */
2121 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2122 {
2123 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2124 FOR_EACH_EDGE (e, ei, bb->preds)
2125 {
2126 if (e->aux)
2127 {
2128 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2129 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2130
2131 if (have_joiner)
2132 {
2133 basic_block joiner = e->dest;
2134 edge final_edge = path->last ()->e;
2135 basic_block final_dest = final_edge->dest;
2136 edge e2 = find_edge (joiner, final_dest);
2137
2138 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2139 {
2140 cancel_thread (path);
2141 e->aux = NULL;
2142 }
2143 }
2144 }
2145 }
2146 }
2147
2148 /* Look for jump threading paths which cross multiple loop headers.
2149
2150 The code to thread through loop headers will change the CFG in ways
2151 that invalidate the cached loop iteration information. So we must
2152 detect that case and wipe the cached information. */
2153 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2154 {
2155 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2156 FOR_EACH_EDGE (e, ei, bb->preds)
2157 {
2158 if (e->aux)
2159 {
2160 gcc_assert (loops_state_satisfies_p
2161 (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS));
2162 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2163
2164 for (unsigned int i = 0, crossed_headers = 0;
2165 i < path->length ();
2166 i++)
2167 {
2168 basic_block dest = (*path)[i]->e->dest;
2169 basic_block src = (*path)[i]->e->src;
2170 /* If we enter a loop. */
2171 if (flow_loop_nested_p (src->loop_father, dest->loop_father))
2172 ++crossed_headers;
2173 /* If we step from a block outside an irreducible region
2174 to a block inside an irreducible region, then we have
2175 crossed into a loop. */
2176 else if (! (src->flags & BB_IRREDUCIBLE_LOOP)
2177 && (dest->flags & BB_IRREDUCIBLE_LOOP))
2178 ++crossed_headers;
2179 if (crossed_headers > 1)
2180 {
2181 vect_free_loop_info_assumptions
2182 ((*path)[path->length () - 1]->e->dest->loop_father);
2183 break;
2184 }
2185 }
2186 }
2187 }
2188 }
2189 }
2190
2191
2192 /* Verify that the REGION is a valid jump thread. A jump thread is a special
2193 case of SEME Single Entry Multiple Exits region in which all nodes in the
2194 REGION have exactly one incoming edge. The only exception is the first block
2195 that may not have been connected to the rest of the cfg yet. */
2196
2197 DEBUG_FUNCTION void
2198 verify_jump_thread (basic_block *region, unsigned n_region)
2199 {
2200 for (unsigned i = 0; i < n_region; i++)
2201 gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2202 }
2203
2204 /* Return true when BB is one of the first N items in BBS. */
2205
2206 static inline bool
2207 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2208 {
2209 for (int i = 0; i < n; i++)
2210 if (bb == bbs[i])
2211 return true;
2212
2213 return false;
2214 }
2215
2216 void
2217 jt_path_registry::debug_path (FILE *dump_file, int pathno)
2218 {
2219 vec<jump_thread_edge *> *p = m_paths[pathno];
2220 fprintf (dump_file, "path: ");
2221 for (unsigned i = 0; i < p->length (); ++i)
2222 fprintf (dump_file, "%d -> %d, ",
2223 (*p)[i]->e->src->index, (*p)[i]->e->dest->index);
2224 fprintf (dump_file, "\n");
2225 }
2226
2227 void
2228 jt_path_registry::debug ()
2229 {
2230 for (unsigned i = 0; i < m_paths.length (); ++i)
2231 debug_path (stderr, i);
2232 }
2233
2234 /* Rewire a jump_thread_edge so that the source block is now a
2235 threaded source block.
2236
2237 PATH_NUM is an index into the global path table PATHS.
2238 EDGE_NUM is the jump thread edge number into said path.
2239
2240 Returns TRUE if we were able to successfully rewire the edge. */
2241
2242 bool
2243 back_jt_path_registry::rewire_first_differing_edge (unsigned path_num,
2244 unsigned edge_num)
2245 {
2246 vec<jump_thread_edge *> *path = m_paths[path_num];
2247 edge &e = (*path)[edge_num]->e;
2248 if (dump_file && (dump_flags & TDF_DETAILS))
2249 fprintf (dump_file, "rewiring edge candidate: %d -> %d\n",
2250 e->src->index, e->dest->index);
2251 basic_block src_copy = get_bb_copy (e->src);
2252 if (src_copy == NULL)
2253 {
2254 if (dump_file && (dump_flags & TDF_DETAILS))
2255 fprintf (dump_file, "ignoring candidate: there is no src COPY\n");
2256 return false;
2257 }
2258 edge new_edge = find_edge (src_copy, e->dest);
2259 /* If the previously threaded paths created a flow graph where we
2260 can no longer figure out where to go, give up. */
2261 if (new_edge == NULL)
2262 {
2263 if (dump_file && (dump_flags & TDF_DETAILS))
2264 fprintf (dump_file, "ignoring candidate: we lost our way\n");
2265 return false;
2266 }
2267 e = new_edge;
2268 return true;
2269 }
2270
2271 /* After a path has been jump threaded, adjust the remaining paths
2272 that are subsets of this path, so these paths can be safely
2273 threaded within the context of the new threaded path.
2274
2275 For example, suppose we have just threaded:
2276
2277 5 -> 6 -> 7 -> 8 -> 12 => 5 -> 6' -> 7' -> 8' -> 12'
2278
2279 And we have an upcoming threading candidate:
2280 5 -> 6 -> 7 -> 8 -> 15 -> 20
2281
2282 This function adjusts the upcoming path into:
2283 8' -> 15 -> 20
2284
2285 CURR_PATH_NUM is an index into the global paths table. It
2286 specifies the path that was just threaded. */
2287
2288 void
2289 back_jt_path_registry::adjust_paths_after_duplication (unsigned curr_path_num)
2290 {
2291 vec<jump_thread_edge *> *curr_path = m_paths[curr_path_num];
2292
2293 if (dump_file && (dump_flags & TDF_DETAILS))
2294 {
2295 fprintf (dump_file, "just threaded: ");
2296 debug_path (dump_file, curr_path_num);
2297 }
2298
2299 /* Iterate through all the other paths and adjust them. */
2300 for (unsigned cand_path_num = 0; cand_path_num < m_paths.length (); )
2301 {
2302 if (cand_path_num == curr_path_num)
2303 {
2304 ++cand_path_num;
2305 continue;
2306 }
2307 /* Make sure the candidate to adjust starts with the same path
2308 as the recently threaded path. */
2309 vec<jump_thread_edge *> *cand_path = m_paths[cand_path_num];
2310 if ((*cand_path)[0]->e != (*curr_path)[0]->e)
2311 {
2312 ++cand_path_num;
2313 continue;
2314 }
2315 if (dump_file && (dump_flags & TDF_DETAILS))
2316 {
2317 fprintf (dump_file, "adjusting candidate: ");
2318 debug_path (dump_file, cand_path_num);
2319 }
2320
2321 /* Chop off from the candidate path any prefix it shares with
2322 the recently threaded path. */
2323 unsigned minlength = MIN (curr_path->length (), cand_path->length ());
2324 unsigned j;
2325 for (j = 0; j < minlength; ++j)
2326 {
2327 edge cand_edge = (*cand_path)[j]->e;
2328 edge curr_edge = (*curr_path)[j]->e;
2329
2330 /* Once the prefix no longer matches, adjust the first
2331 non-matching edge to point from an adjusted edge to
2332 wherever it was going. */
2333 if (cand_edge != curr_edge)
2334 {
2335 gcc_assert (cand_edge->src == curr_edge->src);
2336 if (!rewire_first_differing_edge (cand_path_num, j))
2337 goto remove_candidate_from_list;
2338 break;
2339 }
2340 }
2341 if (j == minlength)
2342 {
2343 /* If we consumed the max subgraph we could look at, and
2344 still didn't find any different edges, it's the
2345 last edge after MINLENGTH. */
2346 if (cand_path->length () > minlength)
2347 {
2348 if (!rewire_first_differing_edge (cand_path_num, j))
2349 goto remove_candidate_from_list;
2350 }
2351 else if (dump_file && (dump_flags & TDF_DETAILS))
2352 fprintf (dump_file, "adjusting first edge after MINLENGTH.\n");
2353 }
2354 if (j > 0)
2355 {
2356 /* If we are removing everything, delete the entire candidate. */
2357 if (j == cand_path->length ())
2358 {
2359 remove_candidate_from_list:
2360 cancel_thread (cand_path, "Adjusted candidate is EMPTY");
2361 m_paths.unordered_remove (cand_path_num);
2362 continue;
2363 }
2364 /* Otherwise, just remove the redundant sub-path. */
2365 if (cand_path->length () - j > 1)
2366 cand_path->block_remove (0, j);
2367 else if (dump_file && (dump_flags & TDF_DETAILS))
2368 fprintf (dump_file, "Dropping illformed candidate.\n");
2369 }
2370 if (dump_file && (dump_flags & TDF_DETAILS))
2371 {
2372 fprintf (dump_file, "adjusted candidate: ");
2373 debug_path (dump_file, cand_path_num);
2374 }
2375 ++cand_path_num;
2376 }
2377 }
2378
2379 /* Duplicates a jump-thread path of N_REGION basic blocks.
2380 The ENTRY edge is redirected to the duplicate of the region.
2381
2382 Remove the last conditional statement in the last basic block in the REGION,
2383 and create a single fallthru edge pointing to the same destination as the
2384 EXIT edge.
2385
2386 CURRENT_PATH_NO is an index into the global paths[] table
2387 specifying the jump-thread path.
2388
2389 Returns false if it is unable to copy the region, true otherwise. */
2390
2391 bool
2392 back_jt_path_registry::duplicate_thread_path (edge entry,
2393 edge exit,
2394 basic_block *region,
2395 unsigned n_region,
2396 unsigned current_path_no)
2397 {
2398 unsigned i;
2399 class loop *loop = entry->dest->loop_father;
2400 edge exit_copy;
2401 edge redirected;
2402 profile_count curr_count;
2403
2404 if (!can_copy_bbs_p (region, n_region))
2405 return false;
2406
2407 if (dump_file && (dump_flags & TDF_DETAILS))
2408 {
2409 fprintf (dump_file, "\nabout to thread: ");
2410 debug_path (dump_file, current_path_no);
2411 }
2412
2413 /* Some sanity checking. Note that we do not check for all possible
2414 missuses of the functions. I.e. if you ask to copy something weird,
2415 it will work, but the state of structures probably will not be
2416 correct. */
2417 for (i = 0; i < n_region; i++)
2418 {
2419 /* We do not handle subloops, i.e. all the blocks must belong to the
2420 same loop. */
2421 if (region[i]->loop_father != loop)
2422 return false;
2423 }
2424
2425 initialize_original_copy_tables ();
2426
2427 set_loop_copy (loop, loop);
2428
2429 basic_block *region_copy = XNEWVEC (basic_block, n_region);
2430 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2431 split_edge_bb_loc (entry), false);
2432
2433 /* Fix up: copy_bbs redirects all edges pointing to copied blocks. The
2434 following code ensures that all the edges exiting the jump-thread path are
2435 redirected back to the original code: these edges are exceptions
2436 invalidating the property that is propagated by executing all the blocks of
2437 the jump-thread path in order. */
2438
2439 curr_count = entry->count ();
2440
2441 for (i = 0; i < n_region; i++)
2442 {
2443 edge e;
2444 edge_iterator ei;
2445 basic_block bb = region_copy[i];
2446
2447 /* Watch inconsistent profile. */
2448 if (curr_count > region[i]->count)
2449 curr_count = region[i]->count;
2450 /* Scale current BB. */
2451 if (region[i]->count.nonzero_p () && curr_count.initialized_p ())
2452 {
2453 /* In the middle of the path we only scale the frequencies.
2454 In last BB we need to update probabilities of outgoing edges
2455 because we know which one is taken at the threaded path. */
2456 if (i + 1 != n_region)
2457 scale_bbs_frequencies_profile_count (region + i, 1,
2458 region[i]->count - curr_count,
2459 region[i]->count);
2460 else
2461 update_bb_profile_for_threading (region[i],
2462 curr_count,
2463 exit);
2464 scale_bbs_frequencies_profile_count (region_copy + i, 1, curr_count,
2465 region_copy[i]->count);
2466 }
2467
2468 if (single_succ_p (bb))
2469 {
2470 /* Make sure the successor is the next node in the path. */
2471 gcc_assert (i + 1 == n_region
2472 || region_copy[i + 1] == single_succ_edge (bb)->dest);
2473 if (i + 1 != n_region)
2474 {
2475 curr_count = single_succ_edge (bb)->count ();
2476 }
2477 continue;
2478 }
2479
2480 /* Special case the last block on the path: make sure that it does not
2481 jump back on the copied path, including back to itself. */
2482 if (i + 1 == n_region)
2483 {
2484 FOR_EACH_EDGE (e, ei, bb->succs)
2485 if (bb_in_bbs (e->dest, region_copy, n_region))
2486 {
2487 basic_block orig = get_bb_original (e->dest);
2488 if (orig)
2489 redirect_edge_and_branch_force (e, orig);
2490 }
2491 continue;
2492 }
2493
2494 /* Redirect all other edges jumping to non-adjacent blocks back to the
2495 original code. */
2496 FOR_EACH_EDGE (e, ei, bb->succs)
2497 if (region_copy[i + 1] != e->dest)
2498 {
2499 basic_block orig = get_bb_original (e->dest);
2500 if (orig)
2501 redirect_edge_and_branch_force (e, orig);
2502 }
2503 else
2504 {
2505 curr_count = e->count ();
2506 }
2507 }
2508
2509
2510 if (flag_checking)
2511 verify_jump_thread (region_copy, n_region);
2512
2513 /* Remove the last branch in the jump thread path. */
2514 remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2515
2516 /* And fixup the flags on the single remaining edge. */
2517 edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2518 fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2519 fix_e->flags |= EDGE_FALLTHRU;
2520
2521 edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2522
2523 if (e)
2524 {
2525 rescan_loop_exit (e, true, false);
2526 e->probability = profile_probability::always ();
2527 }
2528
2529 /* Redirect the entry and add the phi node arguments. */
2530 if (entry->dest == loop->header)
2531 mark_loop_for_removal (loop);
2532 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2533 gcc_assert (redirected != NULL);
2534 flush_pending_stmts (entry);
2535
2536 /* Add the other PHI node arguments. */
2537 add_phi_args_after_copy (region_copy, n_region, NULL);
2538
2539 free (region_copy);
2540
2541 adjust_paths_after_duplication (current_path_no);
2542
2543 free_original_copy_tables ();
2544 return true;
2545 }
2546
2547 /* Return true when PATH is a valid jump-thread path. */
2548
2549 static bool
2550 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2551 {
2552 unsigned len = path->length ();
2553
2554 /* Check that the path is connected. */
2555 for (unsigned int j = 0; j < len - 1; j++)
2556 {
2557 edge e = (*path)[j]->e;
2558 if (e->dest != (*path)[j+1]->e->src)
2559 return false;
2560 }
2561 return true;
2562 }
2563
2564 /* Remove any queued jump threads that include edge E.
2565
2566 We don't actually remove them here, just record the edges into ax
2567 hash table. That way we can do the search once per iteration of
2568 DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR. */
2569
2570 void
2571 fwd_jt_path_registry::remove_jump_threads_including (edge_def *e)
2572 {
2573 if (!m_paths.exists ())
2574 return;
2575
2576 edge *slot = m_removed_edges->find_slot (e, INSERT);
2577 *slot = e;
2578 }
2579
2580 /* Thread all paths that have been queued for jump threading, and
2581 update the CFG accordingly.
2582
2583 It is the caller's responsibility to fix the dominance information
2584 and rewrite duplicated SSA_NAMEs back into SSA form.
2585
2586 If PEEL_LOOP_HEADERS is false, avoid threading edges through loop
2587 headers if it does not simplify the loop.
2588
2589 Returns true if one or more edges were threaded. */
2590
2591 bool
2592 jt_path_registry::thread_through_all_blocks (bool peel_loop_headers)
2593 {
2594 if (m_paths.length () == 0)
2595 return false;
2596
2597 m_num_threaded_edges = 0;
2598
2599 bool retval = update_cfg (peel_loop_headers);
2600
2601 statistics_counter_event (cfun, "Jumps threaded", m_num_threaded_edges);
2602
2603 if (retval)
2604 {
2605 loops_state_set (LOOPS_NEED_FIXUP);
2606 return true;
2607 }
2608 return false;
2609 }
2610
2611 /* This is the backward threader version of thread_through_all_blocks
2612 using a generic BB copier. */
2613
2614 bool
2615 back_jt_path_registry::update_cfg (bool /*peel_loop_headers*/)
2616 {
2617 bool retval = false;
2618 hash_set<edge> visited_starting_edges;
2619
2620 while (m_paths.length ())
2621 {
2622 vec<jump_thread_edge *> *path = m_paths[0];
2623 edge entry = (*path)[0]->e;
2624
2625 /* Do not jump-thread twice from the same starting edge.
2626
2627 Previously we only checked that we weren't threading twice
2628 from the same BB, but that was too restrictive. Imagine a
2629 path that starts from GIMPLE_COND(x_123 == 0,...), where both
2630 edges out of this conditional yield paths that can be
2631 threaded (for example, both lead to an x_123==0 or x_123!=0
2632 conditional further down the line. */
2633 if (visited_starting_edges.contains (entry)
2634 /* We may not want to realize this jump thread path for
2635 various reasons. So check it first. */
2636 || !valid_jump_thread_path (path))
2637 {
2638 /* Remove invalid jump-thread paths. */
2639 cancel_thread (path, "Avoiding threading twice from same edge");
2640 m_paths.unordered_remove (0);
2641 continue;
2642 }
2643
2644 unsigned len = path->length ();
2645 edge exit = (*path)[len - 1]->e;
2646 basic_block *region = XNEWVEC (basic_block, len - 1);
2647
2648 for (unsigned int j = 0; j < len - 1; j++)
2649 region[j] = (*path)[j]->e->dest;
2650
2651 if (duplicate_thread_path (entry, exit, region, len - 1, 0))
2652 {
2653 /* We do not update dominance info. */
2654 free_dominance_info (CDI_DOMINATORS);
2655 visited_starting_edges.add (entry);
2656 retval = true;
2657 m_num_threaded_edges++;
2658 }
2659
2660 path->release ();
2661 m_paths.unordered_remove (0);
2662 free (region);
2663 }
2664 return retval;
2665 }
2666
2667 /* This is the forward threader version of thread_through_all_blocks,
2668 using a custom BB copier. */
2669
2670 bool
2671 fwd_jt_path_registry::update_cfg (bool may_peel_loop_headers)
2672 {
2673 bool retval = false;
2674
2675 /* Remove any paths that referenced removed edges. */
2676 if (m_removed_edges)
2677 for (unsigned i = 0; i < m_paths.length (); )
2678 {
2679 unsigned int j;
2680 vec<jump_thread_edge *> *path = m_paths[i];
2681
2682 for (j = 0; j < path->length (); j++)
2683 {
2684 edge e = (*path)[j]->e;
2685 if (m_removed_edges->find_slot (e, NO_INSERT))
2686 break;
2687 }
2688
2689 if (j != path->length ())
2690 {
2691 cancel_thread (path, "Thread references removed edge");
2692 m_paths.unordered_remove (i);
2693 continue;
2694 }
2695 i++;
2696 }
2697
2698 auto_bitmap threaded_blocks;
2699 mark_threaded_blocks (threaded_blocks);
2700
2701 initialize_original_copy_tables ();
2702
2703 /* The order in which we process jump threads can be important.
2704
2705 Consider if we have two jump threading paths A and B. If the
2706 target edge of A is the starting edge of B and we thread path A
2707 first, then we create an additional incoming edge into B->dest that
2708 we cannot discover as a jump threading path on this iteration.
2709
2710 If we instead thread B first, then the edge into B->dest will have
2711 already been redirected before we process path A and path A will
2712 natually, with no further work, target the redirected path for B.
2713
2714 An post-order is sufficient here. Compute the ordering first, then
2715 process the blocks. */
2716 if (!bitmap_empty_p (threaded_blocks))
2717 {
2718 int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2719 unsigned int postorder_num = post_order_compute (postorder, false, false);
2720 for (unsigned int i = 0; i < postorder_num; i++)
2721 {
2722 unsigned int indx = postorder[i];
2723 if (bitmap_bit_p (threaded_blocks, indx))
2724 {
2725 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, indx);
2726 retval |= thread_block (bb, true);
2727 }
2728 }
2729 free (postorder);
2730 }
2731
2732 /* Then perform the threading through loop headers. We start with the
2733 innermost loop, so that the changes in cfg we perform won't affect
2734 further threading. */
2735 for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
2736 {
2737 if (!loop->header
2738 || !bitmap_bit_p (threaded_blocks, loop->header->index))
2739 continue;
2740
2741 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2742 }
2743
2744 /* All jump threading paths should have been resolved at this
2745 point. Verify that is the case. */
2746 basic_block bb;
2747 FOR_EACH_BB_FN (bb, cfun)
2748 {
2749 edge_iterator ei;
2750 edge e;
2751 FOR_EACH_EDGE (e, ei, bb->preds)
2752 gcc_assert (e->aux == NULL);
2753 }
2754
2755 free_original_copy_tables ();
2756
2757 return retval;
2758 }
2759
2760 /* Register a jump threading opportunity. We queue up all the jump
2761 threading opportunities discovered by a pass and update the CFG
2762 and SSA form all at once.
2763
2764 E is the edge we can thread, E2 is the new target edge, i.e., we
2765 are effectively recording that E->dest can be changed to E2->dest
2766 after fixing the SSA graph.
2767
2768 Return TRUE if PATH was successfully threaded. */
2769
2770 bool
2771 jt_path_registry::register_jump_thread (vec<jump_thread_edge *> *path)
2772 {
2773 if (!dbg_cnt (registered_jump_thread))
2774 {
2775 path->release ();
2776 return false;
2777 }
2778
2779 /* First make sure there are no NULL outgoing edges on the jump threading
2780 path. That can happen for jumping to a constant address. */
2781 for (unsigned int i = 0; i < path->length (); i++)
2782 {
2783 if ((*path)[i]->e == NULL)
2784 {
2785 cancel_thread (path, "Found NULL edge in jump threading path");
2786 return false;
2787 }
2788
2789 if (flag_checking && !m_backedge_threads)
2790 gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
2791 }
2792
2793 if (dump_file && (dump_flags & TDF_DETAILS))
2794 dump_jump_thread_path (dump_file, *path, true);
2795
2796 m_paths.safe_push (path);
2797 return true;
2798 }
2799
2800 /* Return how many uses of T there are within BB, as long as there
2801 aren't any uses outside BB. If there are any uses outside BB,
2802 return -1 if there's at most one use within BB, or -2 if there is
2803 more than one use within BB. */
2804
2805 static int
2806 uses_in_bb (tree t, basic_block bb)
2807 {
2808 int uses = 0;
2809 bool outside_bb = false;
2810
2811 imm_use_iterator iter;
2812 use_operand_p use_p;
2813 FOR_EACH_IMM_USE_FAST (use_p, iter, t)
2814 {
2815 if (is_gimple_debug (USE_STMT (use_p)))
2816 continue;
2817
2818 if (gimple_bb (USE_STMT (use_p)) != bb)
2819 outside_bb = true;
2820 else
2821 uses++;
2822
2823 if (outside_bb && uses > 1)
2824 return -2;
2825 }
2826
2827 if (outside_bb)
2828 return -1;
2829
2830 return uses;
2831 }
2832
2833 /* Starting from the final control flow stmt in BB, assuming it will
2834 be removed, follow uses in to-be-removed stmts back to their defs
2835 and count how many defs are to become dead and be removed as
2836 well. */
2837
2838 unsigned int
2839 estimate_threading_killed_stmts (basic_block bb)
2840 {
2841 int killed_stmts = 0;
2842 hash_map<tree, int> ssa_remaining_uses;
2843 auto_vec<gimple *, 4> dead_worklist;
2844
2845 /* If the block has only two predecessors, threading will turn phi
2846 dsts into either src, so count them as dead stmts. */
2847 bool drop_all_phis = EDGE_COUNT (bb->preds) == 2;
2848
2849 if (drop_all_phis)
2850 for (gphi_iterator gsi = gsi_start_phis (bb);
2851 !gsi_end_p (gsi); gsi_next (&gsi))
2852 {
2853 gphi *phi = gsi.phi ();
2854 tree dst = gimple_phi_result (phi);
2855
2856 /* We don't count virtual PHIs as stmts in
2857 record_temporary_equivalences_from_phis. */
2858 if (virtual_operand_p (dst))
2859 continue;
2860
2861 killed_stmts++;
2862 }
2863
2864 if (gsi_end_p (gsi_last_bb (bb)))
2865 return killed_stmts;
2866
2867 gimple *stmt = gsi_stmt (gsi_last_bb (bb));
2868 if (gimple_code (stmt) != GIMPLE_COND
2869 && gimple_code (stmt) != GIMPLE_GOTO
2870 && gimple_code (stmt) != GIMPLE_SWITCH)
2871 return killed_stmts;
2872
2873 /* The control statement is always dead. */
2874 killed_stmts++;
2875 dead_worklist.quick_push (stmt);
2876 while (!dead_worklist.is_empty ())
2877 {
2878 stmt = dead_worklist.pop ();
2879
2880 ssa_op_iter iter;
2881 use_operand_p use_p;
2882 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
2883 {
2884 tree t = USE_FROM_PTR (use_p);
2885 gimple *def = SSA_NAME_DEF_STMT (t);
2886
2887 if (gimple_bb (def) == bb
2888 && (gimple_code (def) != GIMPLE_PHI
2889 || !drop_all_phis)
2890 && !gimple_has_side_effects (def))
2891 {
2892 int *usesp = ssa_remaining_uses.get (t);
2893 int uses;
2894
2895 if (usesp)
2896 uses = *usesp;
2897 else
2898 uses = uses_in_bb (t, bb);
2899
2900 gcc_assert (uses);
2901
2902 /* Don't bother recording the expected use count if we
2903 won't find any further uses within BB. */
2904 if (!usesp && (uses < -1 || uses > 1))
2905 {
2906 usesp = &ssa_remaining_uses.get_or_insert (t);
2907 *usesp = uses;
2908 }
2909
2910 if (uses < 0)
2911 continue;
2912
2913 --uses;
2914 if (usesp)
2915 *usesp = uses;
2916
2917 if (!uses)
2918 {
2919 killed_stmts++;
2920 if (usesp)
2921 ssa_remaining_uses.remove (t);
2922 if (gimple_code (def) != GIMPLE_PHI)
2923 dead_worklist.safe_push (def);
2924 }
2925 }
2926 }
2927 }
2928
2929 if (dump_file)
2930 fprintf (dump_file, "threading bb %i kills %i stmts\n",
2931 bb->index, killed_stmts);
2932
2933 return killed_stmts;
2934 }