a8046f60 1/* Thread edges through blocks and update the control flow and SSA graphs.
aad93da1 2 Copyright (C) 2004-2017 Free Software Foundation, Inc.
a8046f60 3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8c4c00c1 8the Free Software Foundation; either version 3, or (at your option)
a8046f60 9any later version.
10
11GCC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
8c4c00c1 17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
a8046f60 19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
9ef16211 23#include "backend.h"
a8046f60 24#include "tree.h"
9ef16211 25#include "gimple.h"
7c29e30e 26#include "cfghooks.h"
27#include "tree-pass.h"
9ef16211 28#include "ssa.h"
b20a8bb4 29#include "fold-const.h"
94ea8568 30#include "cfganal.h"
dcf1a1ec 31#include "gimple-iterator.h"
69ee5dbb 32#include "tree-ssa.h"
0c5b289a 33#include "tree-ssa-threadupdate.h"
388d1fc1 34#include "cfgloop.h"
a3724f9d 35#include "dbgcnt.h"
ab596744 36#include "tree-cfg.h"
325162f2 37#include "tree-vectorizer.h"
a8046f60 38
39/* Given a block B, update the CFG and SSA graph to reflect redirecting
40 one or more in-edges to B to instead reach the destination of an
41 out-edge from B while preserving any side effects in B.
42
0c6d8c36 43 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
a8046f60 44 side effects of executing B.
45
46 1. Make a copy of B (including its outgoing edges and statements). Call
47 the copy B'. Note B' has no incoming edges or PHIs at this time.
48
49 2. Remove the control statement at the end of B' and all outgoing edges
50 except B'->C.
51
52 3. Add a new argument to each PHI in C with the same value as the existing
53 argument associated with edge B->C. Associate the new PHI arguments
54 with the edge B'->C.
55
56 4. For each PHI in B, find or create a PHI in B' with an identical
7a635e9c 57 PHI_RESULT. Add an argument to the PHI in B' which has the same
a8046f60 58 value as the PHI in B associated with the edge A->B. Associate
59 the new argument in the PHI in B' with the edge A->B.
60
61 5. Change the edge A->B to A->B'.
62
63 5a. This automatically deletes any PHI arguments associated with the
64 edge A->B in B.
65
66 5b. This automatically associates each new argument added in step 4
67 with the edge A->B'.
68
69 6. Repeat for other incoming edges into B.
70
71 7. Put the duplicated resources in B and all the B' blocks into SSA form.
72
73 Note that block duplication can be minimized by first collecting the
f0b5f617 74 set of unique destination blocks that the incoming edges should
255a8494 75 be threaded to.
76
afe75331 77 We reduce the number of edges and statements we create by not copying all
78 the outgoing edges and the control statement in step #1. We instead create
79 a template block without the outgoing edges and duplicate the template.
a8046f60 80
afe75331 81 Another case this code handles is threading through a "joiner" block. In
82 this case, we do not know the destination of the joiner block, but one
83 of the outgoing edges from the joiner block leads to a threadable path. This
84 case largely works as outlined above, except the duplicate of the joiner
85 block still contains a full set of outgoing edges and its control statement.
86 We just redirect one of its outgoing edges to our jump threading path. */
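
/* As an illustrative sketch (hand-written for exposition, not code or an
   example taken from elsewhere in GCC), consider source such as:

     if (a)
       x = 0;
     else
       x = 1;
     if (x)          <- block B; its outcome is known on each incoming edge
       foo ();
     else
       bar ();

   On the path where X was set to 0, the second condition is known to be
   false, so that incoming edge can be redirected to a duplicate B' whose
   only successor is the bar () arm.  The conditional branch in B is not
   executed on that path, while any other side effects of B are preserved
   in B'.  */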
778182c1 87
88
89/* Steps #5 and #6 of the above algorithm are best implemented by walking
90 all the incoming edges which thread to the same destination edge at
91 the same time. That avoids lots of table lookups to get information
92 for the destination edge.
93
94 To realize that implementation we create a list of incoming edges
95 which thread to the same outgoing edge. Thus to implement steps
96 #5 and #6 we traverse our hash table of outgoing edge information.
97 For each entry we walk the list of incoming edges which thread to
98 the current outgoing edge. */
99
100struct el
101{
102 edge e;
103 struct el *next;
104};
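
/* For example, the later traversal in ssa_redirect_edges walks such a
   list roughly as follows (a sketch only; the real code also frees the
   elements as it goes):

     for (struct el *el = rd->incoming_edges; el; el = el->next)
       redirect_edge_and_branch (el->e, ...);
*/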
a8046f60 105
106/* Main data structure recording information regarding B's duplicate
107 blocks. */
108
778182c1 109/* We need to efficiently record the unique thread destinations of this
110 block and specific information associated with those destinations. We
111 may have many incoming edges threaded to the same outgoing edge. This
c5d4a10b 112 can be naturally implemented with a hash table. */
778182c1 113
298e7f9a 114struct redirection_data : free_ptr_hash<redirection_data>
a8046f60 115{
11af02d8 116 /* We support wiring up two block duplicates in a jump threading path.
117
118 One is a normal block copy where we remove the control statement
119 and wire up its single remaining outgoing edge to the thread path.
120
121 The other is a joiner block where we leave the control statement
1b83778e 122 in place, but wire one of the outgoing edges to a thread path.
11af02d8 123
124 In theory we could have multiple block duplicates in a jump
125 threading path, but I haven't tried that.
126
127 The duplicate blocks appear in this array in the same order in
128 which they appear in the jump thread path. */
129 basic_block dup_blocks[2];
a8046f60 130
5fe6149c 131 /* The jump threading path. */
132 vec<jump_thread_edge *> *path;
778182c1 133
5fe6149c 134 /* A list of incoming edges which we want to thread to the
135 same path. */
778182c1 136 struct el *incoming_edges;
494bbaae 137
138 /* hash_table support. */
9969c043 139 static inline hashval_t hash (const redirection_data *);
140 static inline int equal (const redirection_data *, const redirection_data *);
a8046f60 141};
142
b93ba654 143/* Dump a jump threading path, including annotations about each
144 edge in the path. */
145
146static void
147dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
148 bool registering)
149{
150 fprintf (dump_file,
ded1c768 151 " %s%s jump thread: (%d, %d) incoming edge; ",
b93ba654 152 (registering ? "Registering" : "Cancelling"),
ded1c768 153 (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
b93ba654 154 path[0]->e->src->index, path[0]->e->dest->index);
155
156 for (unsigned int i = 1; i < path.length (); i++)
157 {
158 /* We can get paths with a NULL edge when the final destination
159 of a jump thread turns out to be a constant address. We dump
160 those paths when debugging, so we have to be prepared for that
161 possibility here. */
162 if (path[i]->e == NULL)
163 continue;
164
165 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
166 fprintf (dump_file, " (%d, %d) joiner; ",
167 path[i]->e->src->index, path[i]->e->dest->index);
168 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
169 fprintf (dump_file, " (%d, %d) normal;",
170 path[i]->e->src->index, path[i]->e->dest->index);
171 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
172 fprintf (dump_file, " (%d, %d) nocopy;",
173 path[i]->e->src->index, path[i]->e->dest->index);
c5baf1e1 174 if (path[0]->type == EDGE_FSM_THREAD)
175 fprintf (dump_file, " (%d, %d) ",
176 path[i]->e->src->index, path[i]->e->dest->index);
b93ba654 177 }
178 fputc ('\n', dump_file);
179}
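
/* With made-up block numbers, the output of the function above looks
   roughly like:

     Registering jump thread: (3, 5) incoming edge;  (5, 7) joiner;  (7, 9) normal;

   i.e. one (source, destination) index pair per edge on the path, each
   annotated with how its source block will be handled.  */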
180
5fe6149c 181/* Simple hashing function. For any given incoming edge E, we're going
182 to be most concerned with the final destination of its jump thread
183 path. So hash on the block index of the final edge in the path. */
184
494bbaae 185inline hashval_t
9969c043 186redirection_data::hash (const redirection_data *p)
494bbaae 187{
5fe6149c 188 vec<jump_thread_edge *> *path = p->path;
189 return path->last ()->e->dest->index;
494bbaae 190}
191
5fe6149c 192/* Given two hash table entries, return true if they have the same
193 jump threading path. */
494bbaae 194inline int
9969c043 195redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
494bbaae 196{
5fe6149c 197 vec<jump_thread_edge *> *path1 = p1->path;
198 vec<jump_thread_edge *> *path2 = p2->path;
199
200 if (path1->length () != path2->length ())
201 return false;
202
203 for (unsigned int i = 1; i < path1->length (); i++)
204 {
205 if ((*path1)[i]->type != (*path2)[i]->type
206 || (*path1)[i]->e != (*path2)[i]->e)
207 return false;
208 }
209
210 return true;
494bbaae 211}
212
a27d141e 213/* Rather than search all the edges in jump thread paths each time
 214 DOM is able to simplify a control statement, we build a hash table
215 with the deleted edges. We only care about the address of the edge,
216 not its contents. */
217struct removed_edges : nofree_ptr_hash<edge_def>
218{
219 static hashval_t hash (edge e) { return htab_hash_pointer (e); }
220 static bool equal (edge e1, edge e2) { return e1 == e2; }
221};
222
223static hash_table<removed_edges> *removed_edges;
224
778182c1 225/* Data structure of information to pass to hash table traversal routines. */
2b15d2ba 226struct ssa_local_info_t
778182c1 227{
228 /* The current block we are working on. */
229 basic_block bb;
230
11af02d8 231 /* We only create a template block for the first duplicated block in a
232 jump threading path as we may need many duplicates of that block.
233
234 The second duplicate block in a path is specific to that path. Creating
235 and sharing a template for that block is considerably more difficult. */
778182c1 236 basic_block template_block;
388d1fc1 237
30e432bb 238 /* Blocks duplicated for the thread. */
239 bitmap duplicate_blocks;
d07cbccc 240
487798e2 241 /* TRUE if we thread one or more jumps, FALSE otherwise. */
242 bool jumps_threaded;
243
d07cbccc 244 /* When we have multiple paths through a joiner which reach different
245 final destinations, then we may need to correct for potential
246 profile insanities. */
247 bool need_profile_correction;
778182c1 248};
a3d0fd80 249
3cebc9d2 250/* Passes which use the jump threading code register jump threading
251 opportunities as they are discovered. We keep the registered
 252 jump threading opportunities in this vector. Each element is a
 253 jump threading path, i.e. a vector of jump_thread_edge records. */
f2981b08 254static vec<vec<jump_thread_edge *> *> paths;
3cebc9d2 255
eb31063a 256/* When we start updating the CFG for threading, data necessary for jump
257 threading is attached to the AUX field for the incoming edge. Use these
258 macros to access the underlying structure attached to the AUX field. */
f2981b08 259#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
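
/* A minimal sketch of how a client pass is expected to build and register
   a path, assuming the interface declared in tree-ssa-threadupdate.h (the
   edges E1 and E2 are hypothetical placeholders):

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e1, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   When the CFG update runs, each registered path is attached to the AUX
   field of its first edge and retrieved with THREAD_PATH.  */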
3cebc9d2 260
5236b8bb 261/* Jump threading statistics. */
262
263struct thread_stats_d
264{
265 unsigned long num_threaded_edges;
266};
267
268struct thread_stats_d thread_stats;
269
270
f582bb6c 271/* Remove the last statement in block BB if it is a control statement.
272 Also remove all outgoing edges except the edge which reaches DEST_BB.
273 If DEST_BB is NULL, then remove all outgoing edges. */
a8046f60 274
f1344f45 275void
f582bb6c 276remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
a8046f60 277{
75a70cf9 278 gimple_stmt_iterator gsi;
cd665a06 279 edge e;
280 edge_iterator ei;
a8046f60 281
75a70cf9 282 gsi = gsi_last_bb (bb);
a8046f60 283
f582bb6c 284 /* If the duplicate ends with a control statement, then remove it.
a8046f60 285
f582bb6c 286 Note that if we are duplicating the template block rather than the
287 original basic block, then the duplicate might not have any real
288 statements in it. */
75a70cf9 289 if (!gsi_end_p (gsi)
290 && gsi_stmt (gsi)
291 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
292 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
293 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
294 gsi_remove (&gsi, true);
a8046f60 295
cd665a06 296 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
a8046f60 297 {
a8046f60 298 if (e->dest != dest_bb)
b7cbf36d 299 {
300 free_dom_edge_info (e);
301 remove_edge (e);
302 }
cd665a06 303 else
acb9fddd 304 {
720cfc43 305 e->probability = profile_probability::always ();
acb9fddd 306 e->count = bb->count;
307 ei_next (&ei);
308 }
a8046f60 309 }
5b4ada2a 310
 311 /* If the remaining edge is a loop exit, there must have been
312 a removed edge that was not a loop exit.
313
314 In that case BB and possibly other blocks were previously
315 in the loop, but are now outside the loop. Thus, we need
316 to update the loop structures. */
317 if (single_succ_p (bb)
318 && loop_outer (bb->loop_father)
319 && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
320 loops_state_set (LOOPS_NEED_FIXUP);
a8046f60 321}
322
11af02d8 323/* Create a duplicate of BB. Record the duplicate block at index COUNT
a7ee7309 324 in RD's array of duplicate blocks. */
a8046f60 325
326static void
11af02d8 327create_block_for_threading (basic_block bb,
328 struct redirection_data *rd,
30e432bb 329 unsigned int count,
a7ee7309 330 bitmap *duplicate_blocks)
a8046f60 331{
eb31063a 332 edge_iterator ei;
333 edge e;
334
a8046f60 335 /* We can use the generic block duplication code and simply remove
336 the stuff we do not need. */
11af02d8 337 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
a8046f60 338
11af02d8 339 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
eb31063a 340 e->aux = NULL;
341
615dd397 342 /* Zero out the profile, since the block is unreachable for now. */
11af02d8 343 rd->dup_blocks[count]->frequency = 0;
db9cef39 344 rd->dup_blocks[count]->count = profile_count::uninitialized ();
30e432bb 345 if (duplicate_blocks)
346 bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
a8046f60 347}
348
2b15d2ba 349/* Main data structure to hold information for duplicates of BB. */
350
c1f445d2 351static hash_table<redirection_data> *redirection_data;
2b15d2ba 352
778182c1 353/* Given an outgoing edge E, look up and return its entry in our hash table.
354
355 If INSERT is true, then we insert the entry into the hash table if
356 it is not already present. INCOMING_EDGE is added to the list of incoming
357 edges associated with E in the hash table. */
358
359static struct redirection_data *
da81e0c5 360lookup_redirection_data (edge e, enum insert_option insert)
778182c1 361{
2b15d2ba 362 struct redirection_data **slot;
778182c1 363 struct redirection_data *elt;
f2981b08 364 vec<jump_thread_edge *> *path = THREAD_PATH (e);
778182c1 365
31e5d72d 366 /* Build a hash table element so we can see if E is already
778182c1 367 in the table. */
4c36ffe6 368 elt = XNEW (struct redirection_data);
5fe6149c 369 elt->path = path;
11af02d8 370 elt->dup_blocks[0] = NULL;
371 elt->dup_blocks[1] = NULL;
778182c1 372 elt->incoming_edges = NULL;
373
c1f445d2 374 slot = redirection_data->find_slot (elt, insert);
778182c1 375
376 /* This will only happen if INSERT is false and the entry is not
377 in the hash table. */
378 if (slot == NULL)
379 {
380 free (elt);
381 return NULL;
382 }
383
384 /* This will only happen if E was not in the hash table and
385 INSERT is true. */
386 if (*slot == NULL)
387 {
2b15d2ba 388 *slot = elt;
4c36ffe6 389 elt->incoming_edges = XNEW (struct el);
da81e0c5 390 elt->incoming_edges->e = e;
778182c1 391 elt->incoming_edges->next = NULL;
392 return elt;
393 }
394 /* E was in the hash table. */
395 else
396 {
397 /* Free ELT as we do not need it anymore, we will extract the
398 relevant entry from the hash table itself. */
399 free (elt);
400
401 /* Get the entry stored in the hash table. */
2b15d2ba 402 elt = *slot;
778182c1 403
404 /* If insertion was requested, then we need to add INCOMING_EDGE
405 to the list of incoming edges associated with E. */
406 if (insert)
407 {
559685be 408 struct el *el = XNEW (struct el);
778182c1 409 el->next = elt->incoming_edges;
da81e0c5 410 el->e = e;
778182c1 411 elt->incoming_edges = el;
412 }
413
414 return elt;
415 }
416}
417
fc54aba7 418/* Similar to copy_phi_args, except that the PHI arg already exists; it just
419 does not have a value associated with it. */
420
421static void
422copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
423{
424 int src_idx = src_e->dest_idx;
425 int tgt_idx = tgt_e->dest_idx;
426
427 /* Iterate over each PHI in e->dest. */
1a91d914 428 for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
429 gsi2 = gsi_start_phis (tgt_e->dest);
fc54aba7 430 !gsi_end_p (gsi);
431 gsi_next (&gsi), gsi_next (&gsi2))
432 {
1a91d914 433 gphi *src_phi = gsi.phi ();
434 gphi *dest_phi = gsi2.phi ();
fc54aba7 435 tree val = gimple_phi_arg_def (src_phi, src_idx);
436 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
437
438 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
439 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
440 }
441}
442
1b83c31b 443/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
 444 to see if it has a constant value in a flow-sensitive manner. Set
 445 LOCUS to the location of the constant PHI arg and return the value.
 446 Return DEF directly if PATH is NULL or IDX is zero. */
447
448static tree
449get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
450 basic_block bb, int idx, source_location *locus)
451{
452 tree arg;
1a91d914 453 gphi *def_phi;
1b83c31b 454 basic_block def_bb;
455
456 if (path == NULL || idx == 0)
457 return def;
458
1a91d914 459 def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
460 if (!def_phi)
1b83c31b 461 return def;
462
463 def_bb = gimple_bb (def_phi);
464 /* Don't propagate loop invariants into deeper loops. */
465 if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
466 return def;
467
468 /* Backtrack jump threading path from IDX to see if def has constant
469 value. */
470 for (int j = idx - 1; j >= 0; j--)
471 {
472 edge e = (*path)[j]->e;
473 if (e->dest == def_bb)
474 {
475 arg = gimple_phi_arg_def (def_phi, e->dest_idx);
476 if (is_gimple_min_invariant (arg))
477 {
478 *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
479 return arg;
480 }
481 break;
482 }
483 }
484
485 return def;
486}
487
488/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
489 Try to backtrack jump threading PATH from node IDX to see if the arg
 490 has a constant value; if so, copy the constant value instead of the
 491 argument itself. */
da81e0c5 492
493static void
1b83c31b 494copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
495 vec<jump_thread_edge *> *path, int idx)
da81e0c5 496{
1a91d914 497 gphi_iterator gsi;
da81e0c5 498 int src_indx = src_e->dest_idx;
499
500 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
501 {
1a91d914 502 gphi *phi = gsi.phi ();
1b83c31b 503 tree def = gimple_phi_arg_def (phi, src_indx);
da81e0c5 504 source_location locus = gimple_phi_arg_location (phi, src_indx);
1b83c31b 505
506 if (TREE_CODE (def) == SSA_NAME
507 && !virtual_operand_p (gimple_phi_result (phi)))
508 def = get_value_locus_in_path (def, path, bb, idx, &locus);
509
510 add_phi_arg (phi, def, tgt_e, locus);
da81e0c5 511 }
512}
513
514/* We have recently made a copy of ORIG_BB, including its outgoing
515 edges. The copy is NEW_BB. Every PHI node in every direct successor of
516 ORIG_BB has a new argument associated with edge from NEW_BB to the
517 successor. Initialize the PHI argument so that it is equal to the PHI
1b83c31b 518 argument associated with the edge from ORIG_BB to the successor.
519 PATH and IDX are used to check if the new PHI argument has constant
520 value in a flow sensitive manner. */
da81e0c5 521
522static void
1b83c31b 523update_destination_phis (basic_block orig_bb, basic_block new_bb,
524 vec<jump_thread_edge *> *path, int idx)
da81e0c5 525{
526 edge_iterator ei;
527 edge e;
528
529 FOR_EACH_EDGE (e, ei, orig_bb->succs)
530 {
531 edge e2 = find_edge (new_bb, e->dest);
1b83c31b 532 copy_phi_args (e->dest, e, e2, path, idx);
da81e0c5 533 }
534}
535
778182c1 536/* Given a duplicate block and its single destination (both stored
 537 in RD), create an edge between the duplicate and its single
538 destination.
539
540 Add an additional argument to any PHI nodes at the single
1b83c31b 541 destination. IDX is the node in the jump threading path at which
 542 we start checking whether the new PHI argument has a constant
 543 value along the jump threading path. */
778182c1 544
545static void
42b013bc 546create_edge_and_update_destination_phis (struct redirection_data *rd,
1b83c31b 547 basic_block bb, int idx)
778182c1 548{
720cfc43 549 edge e = make_single_succ_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
778182c1 550
f9614b84 551 rescan_loop_exit (e, true, false);
eb31063a 552
e63988cc 553 /* We used to copy the thread path here. That was added in 2007
554 and dutifully updated through the representation changes in 2013.
555
556 In 2013 we added code to thread from an interior node through
557 the backedge to another interior node. That runs after the code
558 to thread through loop headers from outside the loop.
559
560 The latter may delete edges in the CFG, including those
561 which appeared in the jump threading path we copied here. Thus
562 we'd end up using a dangling pointer.
563
564 After reviewing the 2007/2011 code, I can't see how anything
565 depended on copying the AUX field and clearly copying the jump
566 threading path is problematical due to embedded edge pointers.
567 It has been removed. */
568 e->aux = NULL;
421e19dd 569
778182c1 570 /* If there are any PHI nodes at the destination of the outgoing edge
571 from the duplicate block, then we will need to add a new argument
572 to them. The argument should have the same value as the argument
573 associated with the outgoing edge stored in RD. */
1b83c31b 574 copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
da81e0c5 575}
576
fc54aba7 577/* Look through PATH beginning at START and return TRUE if there are
578 any additional blocks that need to be duplicated. Otherwise,
579 return FALSE. */
580static bool
581any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
582 unsigned int start)
583{
584 for (unsigned int i = start + 1; i < path->length (); i++)
585 {
586 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
587 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
588 return true;
589 }
590 return false;
591}
592
30e432bb 593
594/* Compute the amount of profile count/frequency coming into the jump threading
595 path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
596 PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
597 duplicated path, returned in PATH_OUT_COUNT_PTR. LOCAL_INFO is used to
598 identify blocks duplicated for jump threading, which have duplicated
599 edges that need to be ignored in the analysis. Return true if path contains
600 a joiner, false otherwise.
601
602 In the non-joiner case, this is straightforward - all the counts/frequency
603 flowing into the jump threading path should flow through the duplicated
604 block and out of the duplicated path.
605
606 In the joiner case, it is very tricky. Some of the counts flowing into
607 the original path go offpath at the joiner. The problem is that while
608 we know how much total count goes off-path in the original control flow,
609 we don't know how many of the counts corresponding to just the jump
610 threading path go offpath at the joiner.
611
612 For example, assume we have the following control flow and identified
613 jump threading paths:
614
9943b198 615      A       B       C
 616               \      |      /
 617             Ea \     |Eb   / Ec
 618                 \    |    /
 619                  v   v   v
 620                      J       <-- Joiner
 621                     / \
 622                Eoff/   \Eon
 623                   /     \
 624                  v       v
 625                Soff     Son  <--- Normal
 626                         /\
 627                      Ed/  \Ee
 628                       /    \
 629                      v      v
 630                      D      E
 631
 632      Jump threading paths: A -> J -> Son -> D  (path 1)
 633                            C -> J -> Son -> E  (path 2)
30e432bb 634
635 Note that the control flow could be more complicated:
636 - Each jump threading path may have more than one incoming edge. I.e. A and
637 Ea could represent multiple incoming blocks/edges that are included in
638 path 1.
639 - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
640 before or after the "normal" copy block). These are not duplicated onto
641 the jump threading path, as they are single-successor.
642 - Any of the blocks along the path may have other incoming edges that
643 are not part of any jump threading path, but add profile counts along
644 the path.
645
31e5d72d 646 In the above example, after all jump threading is complete, we will
30e432bb 647 end up with the following control flow:
648
31e5d72d 649       A          B         C
 650               |          |         |
 651             Ea|          |Eb       |Ec
 652               |          |         |
 653               v          v         v
 654              Ja          J         Jc
 655              / \        / \Eon'    / \
9943b198 656 Eona/   \   ---/---\------   \Eonc
31e5d72d 657    /     \ /  /     \         \
9943b198 658   v       v  v       v         v
 659           Sona     Soff      Son       Sonc
31e5d72d 660     \               / \       /
9943b198 661      \__________   /   \ ____/
 662                         \ /     \ /
 663                          vv      v
 664                           D      E
30e432bb 665
666 The main issue to notice here is that when we are processing path 1
667 (A->J->Son->D) we need to figure out the outgoing edge weights to
668 the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
 669 sum of the incoming weights to D remains Ed. The problem with simply
670 assuming that Ja (and Jc when processing path 2) has the same outgoing
671 probabilities to its successors as the original block J, is that after
672 all paths are processed and other edges/counts removed (e.g. none
673 of Ec will reach D after processing path 2), we may end up with not
674 enough count flowing along duplicated edge Sona->D.
675
676 Therefore, in the case of a joiner, we keep track of all counts
677 coming in along the current path, as well as from predecessors not
678 on any jump threading path (Eb in the above example). While we
679 first assume that the duplicated Eona for Ja->Sona has the same
680 probability as the original, we later compensate for other jump
 681 threading paths that may eliminate edges. We do that by keeping track
682 of all counts coming into the original path that are not in a jump
683 thread (Eb in the above example, but as noted earlier, there could
684 be other predecessors incoming to the path at various points, such
685 as at Son). Call this cumulative non-path count coming into the path
 686 before D as Enonpath. We then ensure that the count from Sona->D is at
687 least as big as (Ed - Enonpath), but no bigger than the minimum
688 weight along the jump threading path. The probabilities of both the
689 original and duplicated joiner block J and Ja will be adjusted
690 accordingly after the updates. */
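
/* A worked numeric sketch of the above, with made-up counts: suppose that
   for path 1 Ea carries 600, Eb carries 100 and Ec carries 300, so 1000
   flows into J.  Then path_in_count is 600, the non-path count (Eb) is
   100, and the on-path scale is 60%.  If Eon carries 800, the first
   estimate of the count leaving the duplicated path is 800 * 60% = 480.
   If Ed carries 600, then because path 2 ends at E and contributes
   nothing to Ed, at most the non-path 100 of Ed can come from elsewhere,
   so the outgoing count is raised to 600 - 100 = 500 (but never above the
   minimum count seen along the path, 600 here).  */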
691
692static bool
693compute_path_counts (struct redirection_data *rd,
9943b198 694 ssa_local_info_t *local_info,
db9cef39 695 profile_count *path_in_count_ptr,
696 profile_count *path_out_count_ptr,
9943b198 697 int *path_in_freq_ptr)
30e432bb 698{
699 edge e = rd->incoming_edges->e;
700 vec<jump_thread_edge *> *path = THREAD_PATH (e);
701 edge elast = path->last ()->e;
db9cef39 702 profile_count nonpath_count = profile_count::zero ();
30e432bb 703 bool has_joiner = false;
db9cef39 704 profile_count path_in_count = profile_count::zero ();
30e432bb 705 int path_in_freq = 0;
706
707 /* Start by accumulating incoming edge counts to the path's first bb
708 into a couple buckets:
9943b198 709 path_in_count: total count of incoming edges that flow into the
710 current path.
711 nonpath_count: total count of incoming edges that are not
712 flowing along *any* path. These are the counts
713 that will still flow along the original path after
714 all path duplication is done by potentially multiple
715 calls to this routine.
30e432bb 716 (any other incoming edge counts are for a different jump threading
717 path that will be handled by a later call to this routine.)
718 To make this easier, start by recording all incoming edges that flow into
719 the current path in a bitmap. We could add up the path's incoming edge
720 counts here, but we still need to walk all the first bb's incoming edges
721 below to add up the counts of the other edges not included in this jump
722 threading path. */
723 struct el *next, *el;
035def86 724 auto_bitmap in_edge_srcs;
30e432bb 725 for (el = rd->incoming_edges; el; el = next)
726 {
727 next = el->next;
728 bitmap_set_bit (in_edge_srcs, el->e->src->index);
729 }
730 edge ein;
731 edge_iterator ei;
732 FOR_EACH_EDGE (ein, ei, e->dest->preds)
733 {
734 vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
735 /* Simply check the incoming edge src against the set captured above. */
736 if (ein_path
9943b198 737 && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
738 {
739 /* It is necessary but not sufficient that the last path edges
740 are identical. There may be different paths that share the
741 same last path edge in the case where the last edge has a nocopy
742 source block. */
743 gcc_assert (ein_path->last ()->e == elast);
744 path_in_count += ein->count;
745 path_in_freq += EDGE_FREQUENCY (ein);
746 }
30e432bb 747 else if (!ein_path)
9943b198 748 {
749 /* Keep track of the incoming edges that are not on any jump-threading
750 path. These counts will still flow out of original path after all
751 jump threading is complete. */
752 nonpath_count += ein->count;
753 }
30e432bb 754 }
664dd751 755
756 /* This is needed due to insane incoming frequencies. */
757 if (path_in_freq > BB_FREQ_MAX)
758 path_in_freq = BB_FREQ_MAX;
759
30e432bb 760 /* Now compute the fraction of the total count coming into the first
761 path bb that is from the current threading path. */
db9cef39 762 profile_count total_count = e->dest->count;
30e432bb 763 /* Handle incoming profile insanities. */
764 if (total_count < path_in_count)
765 path_in_count = total_count;
720cfc43 766 int onpath_scale
767 = path_in_count.probability_in (total_count).to_reg_br_prob_base ();
30e432bb 768
769 /* Walk the entire path to do some more computation in order to estimate
770 how much of the path_in_count will flow out of the duplicated threading
771 path. In the non-joiner case this is straightforward (it should be
772 the same as path_in_count, although we will handle incoming profile
773 insanities by setting it equal to the minimum count along the path).
774
775 In the joiner case, we need to estimate how much of the path_in_count
776 will stay on the threading path after the joiner's conditional branch.
777 We don't really know for sure how much of the counts
778 associated with this path go to each successor of the joiner, but we'll
779 estimate based on the fraction of the total count coming into the path
780 bb was from the threading paths (computed above in onpath_scale).
781 Afterwards, we will need to do some fixup to account for other threading
782 paths and possible profile insanities.
783
784 In order to estimate the joiner case's counts we also need to update
785 nonpath_count with any additional counts coming into the path. Other
786 blocks along the path may have additional predecessors from outside
787 the path. */
db9cef39 788 profile_count path_out_count = path_in_count;
789 profile_count min_path_count = path_in_count;
30e432bb 790 for (unsigned int i = 1; i < path->length (); i++)
791 {
792 edge epath = (*path)[i]->e;
db9cef39 793 profile_count cur_count = epath->count;
30e432bb 794 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
9943b198 795 {
796 has_joiner = true;
db9cef39 797 cur_count = cur_count.apply_probability (onpath_scale);
9943b198 798 }
30e432bb 799 /* In the joiner case we need to update nonpath_count for any edges
9943b198 800 coming into the path that will contribute to the count flowing
801 into the path successor. */
30e432bb 802 if (has_joiner && epath != elast)
31e5d72d 803 {
804 /* Look for other incoming edges after joiner. */
805 FOR_EACH_EDGE (ein, ei, epath->dest->preds)
806 {
807 if (ein != epath
808 /* Ignore in edges from blocks we have duplicated for a
809 threading path, which have duplicated edge counts until
810 they are redirected by an invocation of this routine. */
811 && !bitmap_bit_p (local_info->duplicate_blocks,
812 ein->src->index))
813 nonpath_count += ein->count;
814 }
815 }
30e432bb 816 if (cur_count < path_out_count)
9943b198 817 path_out_count = cur_count;
30e432bb 818 if (epath->count < min_path_count)
9943b198 819 min_path_count = epath->count;
30e432bb 820 }
821
822 /* We computed path_out_count above assuming that this path targeted
823 the joiner's on-path successor with the same likelihood as it
824 reached the joiner. However, other thread paths through the joiner
825 may take a different path through the normal copy source block
826 (i.e. they have a different elast), meaning that they do not
827 contribute any counts to this path's elast. As a result, it may
828 turn out that this path must have more count flowing to the on-path
829 successor of the joiner. Essentially, all of this path's elast
830 count must be contributed by this path and any nonpath counts
831 (since any path through the joiner with a different elast will not
832 include a copy of this elast in its duplicated path).
833 So ensure that this path's path_out_count is at least the
834 difference between elast->count and nonpath_count. Otherwise the edge
835 counts after threading will not be sane. */
d07cbccc 836 if (local_info->need_profile_correction
837 && has_joiner && path_out_count < elast->count - nonpath_count)
31e5d72d 838 {
839 path_out_count = elast->count - nonpath_count;
840 /* But neither can we go above the minimum count along the path
841 we are duplicating. This can be an issue due to profile
842 insanities coming in to this pass. */
843 if (path_out_count > min_path_count)
844 path_out_count = min_path_count;
845 }
30e432bb 846
847 *path_in_count_ptr = path_in_count;
848 *path_out_count_ptr = path_out_count;
849 *path_in_freq_ptr = path_in_freq;
850 return has_joiner;
851}
852
853
854/* Update the counts and frequencies for both an original path
855 edge EPATH and its duplicate EDUP. The duplicate source block
856 will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
857 and the duplicate edge EDUP will have a count of PATH_OUT_COUNT. */
858static void
db9cef39 859update_profile (edge epath, edge edup, profile_count path_in_count,
860 profile_count path_out_count, int path_in_freq)
30e432bb 861{
862
863 /* First update the duplicated block's count / frequency. */
864 if (edup)
865 {
866 basic_block dup_block = edup->src;
db9cef39 867 gcc_assert (!dup_block->count.initialized_p ());
30e432bb 868 gcc_assert (dup_block->frequency == 0);
869 dup_block->count = path_in_count;
870 dup_block->frequency = path_in_freq;
871 }
872
873 /* Now update the original block's count and frequency in the
874 opposite manner - remove the counts/freq that will flow
875 into the duplicated block. Handle underflow due to precision/
876 rounding issues. */
877 epath->src->count -= path_in_count;
30e432bb 878 epath->src->frequency -= path_in_freq;
879 if (epath->src->frequency < 0)
880 epath->src->frequency = 0;
881
882 /* Next update this path edge's original and duplicated counts. We know
883 that the duplicated path will have path_out_count flowing
884 out of it (in the joiner case this is the count along the duplicated path
885 out of the duplicated joiner). This count can then be removed from the
886 original path edge. */
887 if (edup)
888 edup->count = path_out_count;
889 epath->count -= path_out_count;
db9cef39 890 /* FIXME: can epath->count be legally uninitialized here? */
30e432bb 891}
892
893
894/* The duplicate and original joiner blocks may end up with different
895 probabilities (different from both the original and from each other).
896 Recompute the probabilities here once we have updated the edge
897 counts and frequencies. */
898
899static void
900recompute_probabilities (basic_block bb)
901{
902 edge esucc;
903 edge_iterator ei;
904 FOR_EACH_EDGE (esucc, ei, bb->succs)
905 {
db9cef39 906 if (!(bb->count > 0))
9943b198 907 continue;
bdd367a0 908
909 /* Prevent overflow computation due to insane profiles. */
910 if (esucc->count < bb->count)
4bb697cd 911 esucc->probability = esucc->count.probability_in (bb->count).guessed ();
bdd367a0 912 else
9943b198 913 /* Can happen with missing/guessed probabilities, since we
914 may determine that more is flowing along duplicated
915 path than joiner succ probabilities allowed.
916 Counts and freqs will be insane after jump threading,
917 at least make sure probability is sane or we will
918 get a flow verification error.
919 Not much we can do to make counts/freqs sane without
920 redoing the profile estimation. */
720cfc43 921 esucc->probability = profile_probability::guessed_always ();
30e432bb 922 }
923}
924
925
926/* Update the counts of the original and duplicated edges from a joiner
927 that go off path, given that we have already determined that the
928 duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
929 outgoing count along the path PATH_OUT_COUNT. The original (on-)path
930 edge from joiner is EPATH. */
931
932static void
933update_joiner_offpath_counts (edge epath, basic_block dup_bb,
db9cef39 934 profile_count path_in_count,
935 profile_count path_out_count)
30e432bb 936{
937 /* Compute the count that currently flows off path from the joiner.
938 In other words, the total count of joiner's out edges other than
939 epath. Compute this by walking the successors instead of
940 subtracting epath's count from the joiner bb count, since there
941 are sometimes slight insanities where the total out edge count is
942 larger than the bb count (possibly due to rounding/truncation
943 errors). */
db9cef39 944 profile_count total_orig_off_path_count = profile_count::zero ();
30e432bb 945 edge enonpath;
946 edge_iterator ei;
947 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
948 {
949 if (enonpath == epath)
9943b198 950 continue;
30e432bb 951 total_orig_off_path_count += enonpath->count;
952 }
953
954 /* For the path that we are duplicating, the amount that will flow
955 off path from the duplicated joiner is the delta between the
956 path's cumulative in count and the portion of that count we
957 estimated above as flowing from the joiner along the duplicated
958 path. */
db9cef39 959 profile_count total_dup_off_path_count = path_in_count - path_out_count;
30e432bb 960
961 /* Now do the actual updates of the off-path edges. */
962 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
963 {
964 /* Look for edges going off of the threading path. */
965 if (enonpath == epath)
9943b198 966 continue;
30e432bb 967
968 /* Find the corresponding edge out of the duplicated joiner. */
969 edge enonpathdup = find_edge (dup_bb, enonpath->dest);
970 gcc_assert (enonpathdup);
971
972 /* We can't use the original probability of the joiner's out
9943b198 973 edges, since the probabilities of the original branch
974 and the duplicated branches may vary after all threading is
975 complete. But apportion the duplicated joiner's off-path
976 total edge count computed earlier (total_dup_off_path_count)
977 among the duplicated off-path edges based on their original
978 ratio to the full off-path count (total_orig_off_path_count).
979 */
720cfc43 980 int scale = enonpath->count.probability_in (total_orig_off_path_count)
981 .to_reg_br_prob_base ();
30e432bb 982 /* Give the duplicated offpath edge a portion of the duplicated
9943b198 983 total. */
db9cef39 984 enonpathdup->count = total_dup_off_path_count.apply_probability (scale);
30e432bb 985 /* Now update the original offpath edge count, handling underflow
9943b198 986 due to rounding errors. */
30e432bb 987 enonpath->count -= enonpathdup->count;
30e432bb 988 }
989}
990
991
f1ce4e72 992/* Check if the paths through RD all have estimated frequencies but zero
993 profile counts. This is more accurate than checking the entry block
994 for a zero profile count, since profile insanities sometimes creep in. */
995
996static bool
997estimated_freqs_path (struct redirection_data *rd)
998{
999 edge e = rd->incoming_edges->e;
1000 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1001 edge ein;
1002 edge_iterator ei;
1003 bool non_zero_freq = false;
1004 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1005 {
db9cef39 1006 if (ein->count > 0)
9943b198 1007 return false;
f1ce4e72 1008 non_zero_freq |= ein->src->frequency != 0;
1009 }
1010
1011 for (unsigned int i = 1; i < path->length (); i++)
1012 {
1013 edge epath = (*path)[i]->e;
db9cef39 1014 if (epath->src->count > 0)
9943b198 1015 return false;
f1ce4e72 1016 non_zero_freq |= epath->src->frequency != 0;
1017 edge esucc;
1018 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
9943b198 1019 {
db9cef39 1020 if (esucc->count > 0)
9943b198 1021 return false;
1022 non_zero_freq |= esucc->src->frequency != 0;
1023 }
f1ce4e72 1024 }
1025 return non_zero_freq;
1026}
1027
1028
30e432bb 1029/* Invoked for routines that have guessed frequencies and no profile
1030 counts to record the block and edge frequencies for paths through RD
1031 in the profile count fields of those blocks and edges. This is because
1032 ssa_fix_duplicate_block_edges incrementally updates the block and
1033 edge counts as edges are redirected, and it is difficult to do that
1034 for edge frequencies which are computed on the fly from the source
1035 block frequency and probability. When a block frequency is updated
1036 its outgoing edge frequencies are affected and become difficult to
1037 adjust. */
1038
1039static void
1040freqs_to_counts_path (struct redirection_data *rd)
1041{
1042 edge e = rd->incoming_edges->e;
1043 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1044 edge ein;
1045 edge_iterator ei;
1046 FOR_EACH_EDGE (ein, ei, e->dest->preds)
e8038c32 1047 {
1048 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
9943b198 1049 errors applying the probability when the frequencies are very
1050 small. */
720cfc43 1051 if (ein->probability.initialized_p ())
1052 ein->count = profile_count::from_gcov_type
1053 (apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
4bb697cd 1054 ein->probability
1055 .to_reg_br_prob_base ())).guessed ();
720cfc43 1056 else
1057 /* FIXME: this is hack; we should track uninitialized values. */
1058 ein->count = profile_count::zero ();
e8038c32 1059 }
30e432bb 1060
1061 for (unsigned int i = 1; i < path->length (); i++)
1062 {
1063 edge epath = (*path)[i]->e;
30e432bb 1064 edge esucc;
e8038c32 1065 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
9943b198 1066 errors applying the edge probability when the frequencies are very
1067 small. */
db9cef39 1068 epath->src->count =
1069 profile_count::from_gcov_type
1070 (epath->src->frequency * REG_BR_PROB_BASE);
30e432bb 1071 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
db9cef39 1072 esucc->count =
1073 esucc->src->count.apply_probability (esucc->probability);
30e432bb 1074 }
1075}
1076
1077
1078/* For routines that have guessed frequencies and no profile counts, where we
1079 used freqs_to_counts_path to record block and edge frequencies for paths
1080 through RD, we clear the counts after completing all updates for RD.
1081 The updates in ssa_fix_duplicate_block_edges are based off the count fields,
1082 but the block frequencies and edge probabilities were updated as well,
1083 so we can simply clear the count fields. */
1084
1085static void
1086clear_counts_path (struct redirection_data *rd)
1087{
1088 edge e = rd->incoming_edges->e;
1089 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1090 edge ein, esucc;
1091 edge_iterator ei;
25d2128b 1092 profile_count val = profile_count::uninitialized ();
1093 if (profile_status_for_fn (cfun) == PROFILE_READ)
1094 val = profile_count::zero ();
1095
30e432bb 1096 FOR_EACH_EDGE (ein, ei, e->dest->preds)
25d2128b 1097 ein->count = val;
30e432bb 1098
1099 /* First clear counts along original path. */
1100 for (unsigned int i = 1; i < path->length (); i++)
1101 {
1102 edge epath = (*path)[i]->e;
1103 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
25d2128b 1104 esucc->count = val;
1105 epath->src->count = val;
30e432bb 1106 }
1107 /* Also need to clear the counts along duplicated path. */
1108 for (unsigned int i = 0; i < 2; i++)
1109 {
1110 basic_block dup = rd->dup_blocks[i];
1111 if (!dup)
9943b198 1112 continue;
30e432bb 1113 FOR_EACH_EDGE (esucc, ei, dup->succs)
25d2128b 1114 esucc->count = val;
1115 dup->count = val;
30e432bb 1116 }
1117}
1118
fc54aba7 1119/* Wire up the outgoing edges from the duplicate blocks and
30e432bb 1120 update any PHIs as needed. Also update the profile counts
1121 on the original and duplicate blocks and edges. */
2b15d2ba 1122void
1123ssa_fix_duplicate_block_edges (struct redirection_data *rd,
1124 ssa_local_info_t *local_info)
da81e0c5 1125{
1b83c31b 1126 bool multi_incomings = (rd->incoming_edges->next != NULL);
f2981b08 1127 edge e = rd->incoming_edges->e;
1128 vec<jump_thread_edge *> *path = THREAD_PATH (e);
30e432bb 1129 edge elast = path->last ()->e;
db9cef39 1130 profile_count path_in_count = profile_count::zero ();
1131 profile_count path_out_count = profile_count::zero ();
30e432bb 1132 int path_in_freq = 0;
1133
1134 /* This routine updates profile counts, frequencies, and probabilities
1135 incrementally. Since it is difficult to do the incremental updates
1136 using frequencies/probabilities alone, for routines without profile
1137 data we first take a snapshot of the existing block and edge frequencies
1138 by copying them into the empty profile count fields. These counts are
1139 then used to do the incremental updates, and cleared at the end of this
f1ce4e72 1140 routine. If the function is marked as having a profile, we still check
1141 to see if the paths through RD are using estimated frequencies because
1142 the routine had zero profile counts. */
30e432bb 1143 bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
9943b198 1144 || estimated_freqs_path (rd));
30e432bb 1145 if (do_freqs_to_counts)
1146 freqs_to_counts_path (rd);
1147
1148 /* First determine how much profile count to move from original
1149 path to the duplicate path. This is tricky in the presence of
1150 a joiner (see comments for compute_path_counts), where some portion
1151 of the path's counts will flow off-path from the joiner. In the
1152 non-joiner case the path_in_count and path_out_count should be the
1153 same. */
1154 bool has_joiner = compute_path_counts (rd, local_info,
9943b198 1155 &path_in_count, &path_out_count,
1156 &path_in_freq);
30e432bb 1157
1158 int cur_path_freq = path_in_freq;
fc54aba7 1159 for (unsigned int count = 0, i = 1; i < path->length (); i++)
1b83778e 1160 {
30e432bb 1161 edge epath = (*path)[i]->e;
1162
fc54aba7 1163 /* If we were threading through a joiner block, then we want
1164 to keep its control statement and redirect an outgoing edge.
1165 Else we want to remove the control statement & edges, then create
1166 a new outgoing edge. In both cases we may need to update PHIs. */
1167 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1168 {
1169 edge victim;
1170 edge e2;
1171
9943b198 1172 gcc_assert (has_joiner);
30e432bb 1173
fc54aba7 1174 /* This updates the PHIs at the destination of the duplicate
1b83c31b 1175 block. Pass 0 instead of i if we are threading a path which
1176 has multiple incoming edges. */
1177 update_destination_phis (local_info->bb, rd->dup_blocks[count],
1178 path, multi_incomings ? 0 : i);
fc54aba7 1179
1180 /* Find the edge from the duplicate block to the block we're
1181 threading through. That's the edge we want to redirect. */
1182 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
1183
1184 /* If there are no remaining blocks on the path to duplicate,
1185 then redirect VICTIM to the final destination of the jump
1186 threading path. */
1187 if (!any_remaining_duplicated_blocks (path, i))
1188 {
30e432bb 1189 e2 = redirect_edge_and_branch (victim, elast->dest);
fc54aba7 1190 /* If we redirected the edge, then we need to copy PHI arguments
559685be 1191 at the target. If the edge already existed (e2 != victim
fc54aba7 1192 case), then the PHIs in the target already have the correct
1193 arguments. */
1194 if (e2 == victim)
30e432bb 1195 copy_phi_args (e2->dest, elast, e2,
1b83c31b 1196 path, multi_incomings ? 0 : i);
fc54aba7 1197 }
1198 else
1199 {
1200 /* Redirect VICTIM to the next duplicated block in the path. */
1201 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
1202
1203 /* We need to update the PHIs in the next duplicated block. We
1204 want the new PHI args to have the same value as they had
1205 in the source of the next duplicate block.
1206
1207 Thus, we need to know which edge we traversed into the
1208 source of the duplicate. Furthermore, we may have
1209 traversed many edges to reach the source of the duplicate.
1210
1211 Walk through the path starting at element I until we
1212 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
1213 the edge from the prior element. */
1214 for (unsigned int j = i + 1; j < path->length (); j++)
1215 {
1216 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
1217 {
1218 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
1219 break;
1220 }
1221 }
1222 }
30e432bb 1223
1224 /* Update the counts and frequency of both the original block
1225 and path edge, and the duplicates. The path duplicate's
1226 incoming count and frequency are the totals for all edges
1227 incoming to this jump threading path computed earlier.
1228 And we know that the duplicated path will have path_out_count
1229 flowing out of it (i.e. along the duplicated path out of the
1230 duplicated joiner). */
1231 update_profile (epath, e2, path_in_count, path_out_count,
1232 path_in_freq);
1233
1234 /* Next we need to update the counts of the original and duplicated
1235 edges from the joiner that go off path. */
1236 update_joiner_offpath_counts (epath, e2->src, path_in_count,
9943b198 1237 path_out_count);
30e432bb 1238
1239 /* Finally, we need to set the probabilities on the duplicated
1240 edges out of the duplicated joiner (e2->src). The probabilities
1241 along the original path will all be updated below after we finish
1242 processing the whole path. */
1243 recompute_probabilities (e2->src);
1244
1245 /* Record the frequency flowing to the downstream duplicated
1246 path blocks. */
1247 cur_path_freq = EDGE_FREQUENCY (e2);
fc54aba7 1248 }
1249 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1250 {
1251 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
1b83c31b 1252 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
1253 multi_incomings ? 0 : i);
fc54aba7 1254 if (count == 1)
1255 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
30e432bb 1256
1257 /* Update the counts and frequency of both the original block
1258 and path edge, and the duplicates. Since we are now after
1259 any joiner that may have existed on the path, the count
1260 flowing along the duplicated threaded path is path_out_count.
1261 If we didn't have a joiner, then cur_path_freq was the sum
1262 of the total frequencies along all incoming edges to the
1263 thread path (path_in_freq). If we had a joiner, it would have
1264 been updated at the end of that handling to the edge frequency
1265 along the duplicated joiner path edge. */
1266 update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
1267 path_out_count, path_out_count,
1268 cur_path_freq);
fc54aba7 1269 }
30e432bb 1270 else
9943b198 1271 {
30e432bb 1272 /* No copy case. In this case we don't have an equivalent block
1273 on the duplicated thread path to update, but we do need
1274 to remove the portion of the counts/freqs that were moved
1275 to the duplicated path from the counts/freqs flowing through
1276 this block on the original path. Since all the no-copy edges
1277 are after any joiner, the removed count is the same as
1278 path_out_count.
1279
1280 If we didn't have a joiner, then cur_path_freq was the sum
1281 of the total frequencies along all incoming edges to the
1282 thread path (path_in_freq). If we had a joiner, it would have
1283 been updated at the end of that handling to the edge frequency
1284 along the duplicated joiner path edge. */
31e5d72d 1285 update_profile (epath, NULL, path_out_count, path_out_count,
1286 cur_path_freq);
30e432bb 1287 }
1288
1289 /* Increment the index into the duplicated path when we processed
9943b198 1290 a duplicated block. */
30e432bb 1291 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
9943b198 1292 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
31e5d72d 1293 {
30e432bb 1294 count++;
31e5d72d 1295 }
30e432bb 1296 }
1297
1298 /* Now walk orig blocks and update their probabilities, since the
 1299 counts and freqs should be updated properly by the above loop. */
1300 for (unsigned int i = 1; i < path->length (); i++)
1301 {
1302 edge epath = (*path)[i]->e;
1303 recompute_probabilities (epath->src);
778182c1 1304 }
30e432bb 1305
1306 /* Done with all profile and frequency updates, clear counts if they
1307 were copied. */
1308 if (do_freqs_to_counts)
1309 clear_counts_path (rd);
778182c1 1310}
fc54aba7 1311
778182c1 1312/* Hash table traversal callback routine to create duplicate blocks. */
1313
2b15d2ba 1314int
1315ssa_create_duplicates (struct redirection_data **slot,
1316 ssa_local_info_t *local_info)
778182c1 1317{
2b15d2ba 1318 struct redirection_data *rd = *slot;
778182c1 1319
11af02d8 1320 /* The second duplicated block in a jump threading path is specific
1b83778e 1321 to the path. So it gets stored in RD rather than in LOCAL_DATA.
559685be 1322
11af02d8 1323 Each time we're called, we have to look through the path and see
1b83778e 1324 if a second block needs to be duplicated.
11af02d8 1325
1326 Note the search starts with the third edge on the path. The first
1327 edge is the incoming edge, the second edge always has its source
1328 duplicated. Thus we start our search with the third edge. */
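
  /* For instance (purely illustrative), a path laid out as

       [0] incoming edge A->B
       [1] B->C  EDGE_COPY_SRC_JOINER_BLOCK  (source B becomes dup_blocks[0])
       [2] C->D  EDGE_COPY_SRC_BLOCK         (source C becomes dup_blocks[1])
       [3] D->E  EDGE_NO_COPY_SRC_BLOCK

     is why the scan below begins at index 2.  */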
a7ee7309 1329 vec<jump_thread_edge *> *path = rd->path;
11af02d8 1330 for (unsigned int i = 2; i < path->length (); i++)
1331 {
1332 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1333 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1334 {
30e432bb 1335 create_block_for_threading ((*path)[i]->e->src, rd, 1,
a7ee7309 1336 &local_info->duplicate_blocks);
11af02d8 1337 break;
1338 }
1339 }
1b83778e 1340
778182c1 1341 /* Create a template block if we have not done so already. Otherwise
1342 use the template to create a new block. */
1343 if (local_info->template_block == NULL)
1344 {
30e432bb 1345 create_block_for_threading ((*path)[1]->e->src, rd, 0,
a7ee7309 1346 &local_info->duplicate_blocks);
11af02d8 1347 local_info->template_block = rd->dup_blocks[0];
778182c1 1348
1349 /* We do not create any outgoing edges for the template. We will
1350 take care of that in a later traversal. That way we do not
1351 create edges that are going to just be deleted. */
1352 }
1353 else
1354 {
30e432bb 1355 create_block_for_threading (local_info->template_block, rd, 0,
a7ee7309 1356 &local_info->duplicate_blocks);
778182c1 1357
1358 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
da81e0c5 1359 block. */
2b15d2ba 1360 ssa_fix_duplicate_block_edges (rd, local_info);
778182c1 1361 }
1362
1363 /* Keep walking the hash table. */
1364 return 1;
1365}
1366
1367/* We did not create any outgoing edges for the template block during
1368 block creation. This hash table traversal callback creates the
1369 outgoing edge for the template block. */
1370
2b15d2ba 1371inline int
1372ssa_fixup_template_block (struct redirection_data **slot,
1373 ssa_local_info_t *local_info)
778182c1 1374{
2b15d2ba 1375 struct redirection_data *rd = *slot;
778182c1 1376
da81e0c5 1377 /* If this is the template block, halt the traversal after updating
1378 it appropriately.
1379
 1380 If we were threading through a joiner block, then we want
1381 to keep its control statement and redirect an outgoing edge.
1382 Else we want to remove the control statement & edges, then create
1383 a new outgoing edge. In both cases we may need to update PHIs. */
11af02d8 1384 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
778182c1 1385 {
2b15d2ba 1386 ssa_fix_duplicate_block_edges (rd, local_info);
778182c1 1387 return 0;
1388 }
1389
1390 return 1;
1391}
1392
1393/* Hash table traversal callback to redirect each incoming edge
1394 associated with this hash table element to its new destination. */
1395
2b15d2ba 1396int
1397ssa_redirect_edges (struct redirection_data **slot,
1398 ssa_local_info_t *local_info)
778182c1 1399{
2b15d2ba 1400 struct redirection_data *rd = *slot;
778182c1 1401 struct el *next, *el;
1402
47ae02b7 1403 /* Walk over all the incoming edges associated with this hash table
1404 entry. */
778182c1 1405 for (el = rd->incoming_edges; el; el = next)
1406 {
1407 edge e = el->e;
f2981b08 1408 vec<jump_thread_edge *> *path = THREAD_PATH (e);
778182c1 1409
1410 /* Go ahead and free this element from the list. Doing this now
1411 avoids the need for another list walk when we destroy the hash
1412 table. */
1413 next = el->next;
1414 free (el);
1415
5236b8bb 1416 thread_stats.num_threaded_edges++;
1417
11af02d8 1418 if (rd->dup_blocks[0])
778182c1 1419 {
1420 edge e2;
1421
1422 if (dump_file && (dump_flags & TDF_DETAILS))
1423 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
11af02d8 1424 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
778182c1 1425
353f9f16 1426 /* Redirect the incoming edge (possibly to the joiner block) to the
1427 appropriate duplicate block. */
11af02d8 1428 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
7e0311ae 1429 gcc_assert (e == e2);
778182c1 1430 flush_pending_stmts (e2);
778182c1 1431 }
eb31063a 1432
1433 /* Go ahead and clear E->aux. It's not needed anymore and failure
559685be 1434 to clear it will cause all kinds of unpleasant problems later. */
6d1fdbf9 1435 delete_jump_thread_path (path);
eb31063a 1436 e->aux = NULL;
1437
778182c1 1438 }
388d1fc1 1439
1440 /* Indicate that we actually threaded one or more jumps. */
1441 if (rd->incoming_edges)
1442 local_info->jumps_threaded = true;
1443
778182c1 1444 return 1;
1445}
1446
aed95130 1447/* Return true if this block has no executable statements other than
 1448 a simple control flow instruction. When the number of outgoing edges
1449 is one, this is equivalent to a "forwarder" block. */
1450
1451static bool
47aaf6e6 1452redirection_block_p (basic_block bb)
aed95130 1453{
75a70cf9 1454 gimple_stmt_iterator gsi;
aed95130 1455
1456 /* Advance to the first executable statement. */
75a70cf9 1457 gsi = gsi_start_bb (bb);
1458 while (!gsi_end_p (gsi)
559685be 1459 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
9845d120 1460 || is_gimple_debug (gsi_stmt (gsi))
c7566ddf 1461 || gimple_nop_p (gsi_stmt (gsi))
581c1a3c 1462 || gimple_clobber_p (gsi_stmt (gsi))))
75a70cf9 1463 gsi_next (&gsi);
48e1416a 1464
aed95130 1465 /* Check if this is an empty block. */
75a70cf9 1466 if (gsi_end_p (gsi))
aed95130 1467 return true;
1468
1469 /* Test that we've reached the terminating control statement. */
75a70cf9 1470 return gsi_stmt (gsi)
559685be 1471 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1472 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1473 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
aed95130 1474}
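/* For illustration only (not from the sources): a block containing nothing
   but a label, debug statements or clobbers ahead of its final conditional,
   e.g.

       <bb 7>:
       L2:
       if (x_1 > 0)
         goto <bb 8>;
       else
         goto <bb 9>;

   satisfies redirection_block_p, whereas a block that also computes x_1
   does not, since that computation is an executable statement.  */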
1475
a8046f60 1476/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1477 is reached via one or more specific incoming edges, we know which
1478 outgoing edge from BB will be traversed.
1479
778182c1 1480 We want to redirect those incoming edges to the target of the
a8046f60 1481 appropriate outgoing edge. Doing so avoids a conditional branch
1482 and may expose new optimization opportunities. Note that we have
1483 to update dominator tree and SSA graph after such changes.
1484
597ff315 1485 The key to keeping the SSA graph update manageable is to duplicate
91275768 1486 the side effects occurring in BB so that those side effects still
a8046f60 1487 occur on the paths which bypass BB after redirecting edges.
1488
1489 We accomplish this by creating duplicates of BB and arranging for
1490 the duplicates to unconditionally pass control to one specific
1491 successor of BB. We then revector the incoming edges into BB to
1492 the appropriate duplicate of BB.
1493
7e0311ae 1494 If NOLOOP_ONLY is true, we only perform the threading as long as it
1b83778e 1495 does not affect the structure of the loops in a nontrivial way.
ed4feca1 1496
1497 If JOINERS is true, then thread through joiner blocks as well. */
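   As a purely illustrative, source-level sketch (hypothetical code, not taken
   from this file or its testsuite): once the first test has executed, the
   outcome of the identical second test is known along the edge leaving its
   then-arm, so that edge can be redirected to a duplicate of the second
   test's block whose control statement has been removed.

   int
   example (int a)	/* hypothetical function */
   {
     int r = 0;
     if (a > 0)		/* block A */
       r = 1;		/* leaving this arm, a > 0 is known to be true */
     if (a > 0)		/* block B: duplicated; the incoming edge from the
			   arm above is redirected past the redundant test */
       r += 2;		/* block C */
     return r;
   }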
a8046f60 1498
388d1fc1 1499static bool
ed4feca1 1500thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
a8046f60 1501{
1502 /* E is an incoming edge into BB that we may or may not want to
1503 redirect to a duplicate of BB. */
7e0311ae 1504 edge e, e2;
cd665a06 1505 edge_iterator ei;
2b15d2ba 1506 ssa_local_info_t local_info;
388d1fc1 1507
30e432bb 1508 local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
d07cbccc 1509 local_info.need_profile_correction = false;
30e432bb 1510
778182c1 1511 /* To avoid scanning a linear array for the element we need we instead
c5d4a10b 1512 use a hash table. For normal code there should be no noticeable
778182c1 1513 difference. However, if we have a block with a large number of
1514 incoming and outgoing edges such linear searches can get expensive. */
c1f445d2 1515 redirection_data
1516 = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
778182c1 1517
1518 /* Record each unique threaded destination into a hash table for
1519 efficient lookups. */
d07cbccc 1520 edge last = NULL;
cd665a06 1521 FOR_EACH_EDGE (e, ei, bb->preds)
a8046f60 1522 {
eb31063a 1523 if (e->aux == NULL)
1524 continue;
1525
f2981b08 1526 vec<jump_thread_edge *> *path = THREAD_PATH (e);
ed4feca1 1527
1528 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1529 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1530 continue;
1531
f2981b08 1532 e2 = path->last ()->e;
e2b72d6c 1533 if (!e2 || noloop_only)
1534 {
7e0311ae 1535 /* If NOLOOP_ONLY is true, we only allow threading through the
559685be 1536 header of a loop to exit edges. */
e2b72d6c 1537
559685be 1538 /* One case occurs when there was a loop header buried in a jump
 1539 threading path that crosses loop boundaries. We do not try
 1540 to thread this elsewhere, so just cancel the jump threading
1541 request by clearing the AUX field now. */
9f3abcb8 1542 if (bb->loop_father != e2->src->loop_father
1543 && !loop_exit_edge_p (e2->src->loop_father, e2))
e2b72d6c 1544 {
1545 /* Since this case is not handled by our special code
1546 to thread through a loop header, we must explicitly
1547 cancel the threading request here. */
6d1fdbf9 1548 delete_jump_thread_path (path);
e2b72d6c 1549 e->aux = NULL;
1550 continue;
1551 }
559685be 1552
1553 /* Another case occurs when trying to thread through our
ab596744 1554 own loop header, possibly from inside the loop. We will
1555 thread these later. */
559685be 1556 unsigned int i;
1557 for (i = 1; i < path->length (); i++)
1558 {
1559 if ((*path)[i]->e->src == bb->loop_father->header
1560 && (!loop_exit_edge_p (bb->loop_father, e2)
1561 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
ab596744 1562 break;
559685be 1563 }
1564
1565 if (i != path->length ())
1566 continue;
e2b72d6c 1567 }
778182c1 1568
7e0311ae 1569 /* Insert the outgoing edge into the hash table if it is not
1570 already in the hash table. */
da81e0c5 1571 lookup_redirection_data (e, INSERT);
d07cbccc 1572
1573 /* When we have thread paths through a common joiner with different
1574 final destinations, then we may need corrections to deal with
1575 profile insanities. See the big comment before compute_path_counts. */
1576 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1577 {
1578 if (!last)
1579 last = e2;
1580 else if (e2 != last)
1581 local_info.need_profile_correction = true;
1582 }
a8046f60 1583 }
1584
3f9439d7 1585 /* We do not update dominance info. */
1586 free_dominance_info (CDI_DOMINATORS);
1587
d906930c 1588 /* We know we only thread through the loop header to loop exits.
1589 Let the basic block duplication hook know we are not creating
1590 a multiple entry loop. */
1591 if (noloop_only
1592 && bb == bb->loop_father->header)
1593 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1594
778182c1 1595 /* Now create duplicates of BB.
f582bb6c 1596
1597 Note that for a block with a high outgoing degree we can waste
1598 a lot of time and memory creating and destroying useless edges.
1599
1600 So we first duplicate BB and remove the control structure at the
1601 tail of the duplicate as well as all outgoing edges from the
1602 duplicate. We then use that duplicate block as a template for
1603 the rest of the duplicates. */
778182c1 1604 local_info.template_block = NULL;
1605 local_info.bb = bb;
388d1fc1 1606 local_info.jumps_threaded = false;
c1f445d2 1607 redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
2b15d2ba 1608 (&local_info);
f582bb6c 1609
778182c1 1610 /* The template does not have an outgoing edge. Create that outgoing
 1611 edge and update PHI nodes at the edge's target as necessary.
f582bb6c 1612
778182c1 1613 We do this after creating all the duplicates to avoid creating
1614 unnecessary edges. */
c1f445d2 1615 redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
2b15d2ba 1616 (&local_info);
f582bb6c 1617
778182c1 1618 /* The hash table traversals above created the duplicate blocks (and the
1619 statements within the duplicate blocks). This loop creates PHI nodes for
1620 the duplicated blocks and redirects the incoming edges into BB to reach
1621 the duplicates of BB. */
c1f445d2 1622 redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
2b15d2ba 1623 (&local_info);
a8046f60 1624
a3d0fd80 1625 /* Done with this block. Clear REDIRECTION_DATA. */
c1f445d2 1626 delete redirection_data;
1627 redirection_data = NULL;
388d1fc1 1628
d906930c 1629 if (noloop_only
1630 && bb == bb->loop_father->header)
1631 set_loop_copy (bb->loop_father, NULL);
1632
30e432bb 1633 BITMAP_FREE (local_info.duplicate_blocks);
1634 local_info.duplicate_blocks = NULL;
1635
388d1fc1 1636 /* Indicate to our caller whether or not any jumps were threaded. */
1637 return local_info.jumps_threaded;
a8046f60 1638}
1639
ed4feca1 1640/* Wrapper for thread_block_1 so that we can first handle jump
1641 thread paths which do not involve copying joiner blocks, then
1642 handle jump thread paths which have joiner blocks.
1643
1644 By doing things this way we can be as aggressive as possible and
1645 not worry that copying a joiner block will create a jump threading
1646 opportunity. */
1b83778e 1647
ed4feca1 1648static bool
1649thread_block (basic_block bb, bool noloop_only)
1650{
1651 bool retval;
1652 retval = thread_block_1 (bb, noloop_only, false);
1653 retval |= thread_block_1 (bb, noloop_only, true);
1654 return retval;
1655}
1656
7e0311ae 1657/* Callback for dfs_enumerate_from. Returns true if BB is different
1658 from STOP and DBDS_CE_STOP. */
1659
1660static basic_block dbds_ce_stop;
1661static bool
7ecb5bb2 1662dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
7e0311ae 1663{
7ecb5bb2 1664 return (bb != (const_basic_block) stop
7e0311ae 1665 && bb != dbds_ce_stop);
1666}
1667
 1668/* Evaluates the dominance relationship between the latch of LOOP and BB, and
1669 returns the state. */
1670
1671enum bb_dom_status
7e0311ae 1672determine_bb_domination_status (struct loop *loop, basic_block bb)
1673{
1674 basic_block *bblocks;
1675 unsigned nblocks, i;
1676 bool bb_reachable = false;
1677 edge_iterator ei;
1678 edge e;
1679
42b013bc 1680 /* This function assumes BB is a successor of LOOP->header.
1681 If that is not the case return DOMST_NONDOMINATING which
1682 is always safe. */
7e0311ae 1683 {
1684 bool ok = false;
1685
1686 FOR_EACH_EDGE (e, ei, bb->preds)
1687 {
1688 if (e->src == loop->header)
1689 {
1690 ok = true;
1691 break;
1692 }
1693 }
1694
42b013bc 1695 if (!ok)
1696 return DOMST_NONDOMINATING;
7e0311ae 1697 }
7e0311ae 1698
1699 if (bb == loop->latch)
1700 return DOMST_DOMINATING;
1701
1702 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1703 from it. */
1704
1705 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1706 dbds_ce_stop = loop->header;
1707 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1708 bblocks, loop->num_nodes, bb);
1709 for (i = 0; i < nblocks; i++)
1710 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1711 {
1712 if (e->src == loop->header)
1713 {
1714 free (bblocks);
1715 return DOMST_NONDOMINATING;
1716 }
1717 if (e->src == bb)
1718 bb_reachable = true;
1719 }
1720
1721 free (bblocks);
1722 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1723}
1724
1725/* Thread jumps through the header of LOOP. Returns true if cfg changes.
1726 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1727 to the inside of the loop. */
1728
1729static bool
1730thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1731{
1732 basic_block header = loop->header;
1733 edge e, tgt_edge, latch = loop_latch_edge (loop);
1734 edge_iterator ei;
1735 basic_block tgt_bb, atgt_bb;
1736 enum bb_dom_status domst;
1737
1738 /* We have already threaded through headers to exits, so all the threading
1739 requests now are to the inside of the loop. We need to avoid creating
1740 irreducible regions (i.e., loops with more than one entry block), and
 1741 also loops with several latch edges, or new subloops of the loop (although
1742 there are cases where it might be appropriate, it is difficult to decide,
1743 and doing it wrongly may confuse other optimizers).
1744
1745 We could handle more general cases here. However, the intention is to
1746 preserve some information about the loop, which is impossible if its
1747 structure changes significantly, in a way that is not well understood.
 1748 Thus we only handle a few important special cases, in which updating
 1749 the loop-carried information should also be feasible:
1750
 1751 1) Propagation of the latch edge to a block that dominates the latch block
1752 of a loop. This aims to handle the following idiom:
1753
1754 first = 1;
1755 while (1)
1756 {
1757 if (first)
1758 initialize;
1759 first = 0;
1760 body;
1761 }
1762
1763 After threading the latch edge, this becomes
1764
1765 first = 1;
1766 if (first)
1767 initialize;
1768 while (1)
1769 {
1770 first = 0;
1771 body;
1772 }
1773
1774 The original header of the loop is moved out of it, and we may thread
1775 the remaining edges through it without further constraints.
1776
1777 2) All entry edges are propagated to a single basic block that dominates
1778 the latch block of the loop. This aims to handle the following idiom
1779 (normally created for "for" loops):
1780
1781 i = 0;
1782 while (1)
1783 {
1784 if (i >= 100)
1785 break;
1786 body;
1787 i++;
1788 }
1789
1790 This becomes
1791
1792 i = 0;
1793 while (1)
1794 {
1795 body;
1796 i++;
1797 if (i >= 100)
1798 break;
1799 }
1800 */
1801
1802 /* Threading through the header won't improve the code if the header has just
1803 one successor. */
1804 if (single_succ_p (header))
1805 goto fail;
1806
d29932c9 1807 if (!may_peel_loop_headers && !redirection_block_p (loop->header))
7e0311ae 1808 goto fail;
1809 else
1810 {
1811 tgt_bb = NULL;
1812 tgt_edge = NULL;
1813 FOR_EACH_EDGE (e, ei, header->preds)
1814 {
1815 if (!e->aux)
1816 {
1817 if (e == latch)
1818 continue;
1819
 1820 /* If the latch is not threaded, and there is a header
 1821 edge that is not threaded, we would create a loop
 1822 with multiple entries. */
1823 goto fail;
1824 }
1825
f2981b08 1826 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1827
1828 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
da81e0c5 1829 goto fail;
f2981b08 1830 tgt_edge = (*path)[1]->e;
7e0311ae 1831 atgt_bb = tgt_edge->dest;
1832 if (!tgt_bb)
1833 tgt_bb = atgt_bb;
 1834 /* Two targets of threading would make us create a loop
1835 with multiple entries. */
1836 else if (tgt_bb != atgt_bb)
1837 goto fail;
1838 }
1839
1840 if (!tgt_bb)
1841 {
1842 /* There are no threading requests. */
1843 return false;
1844 }
1845
1846 /* Redirecting to empty loop latch is useless. */
1847 if (tgt_bb == loop->latch
1848 && empty_block_p (loop->latch))
1849 goto fail;
1850 }
1851
1852 /* The target block must dominate the loop latch, otherwise we would be
1853 creating a subloop. */
1854 domst = determine_bb_domination_status (loop, tgt_bb);
1855 if (domst == DOMST_NONDOMINATING)
1856 goto fail;
1857 if (domst == DOMST_LOOP_BROKEN)
1858 {
1859 /* If the loop ceased to exist, mark it as such, and thread through its
1860 original header. */
d25159cc 1861 mark_loop_for_removal (loop);
7e0311ae 1862 return thread_block (header, false);
1863 }
1864
1865 if (tgt_bb->loop_father->header == tgt_bb)
1866 {
1867 /* If the target of the threading is a header of a subloop, we need
1868 to create a preheader for it, so that the headers of the two loops
1869 do not merge. */
1870 if (EDGE_COUNT (tgt_bb->preds) > 2)
1871 {
1872 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1873 gcc_assert (tgt_bb != NULL);
1874 }
1875 else
1876 tgt_bb = split_edge (tgt_edge);
1877 }
48e1416a 1878
d29932c9 1879 basic_block new_preheader;
6eb99d8a 1880
d29932c9 1881 /* Now consider the case where entry edges are redirected to the new entry
1882 block. Remember one entry edge, so that we can find the new
1883 preheader (its destination after threading). */
1884 FOR_EACH_EDGE (e, ei, header->preds)
7e0311ae 1885 {
d29932c9 1886 if (e->aux)
1887 break;
1888 }
7e0311ae 1889
d29932c9 1890 /* The duplicate of the header is the new preheader of the loop. Ensure
1891 that it is placed correctly in the loop hierarchy. */
1892 set_loop_copy (loop, loop_outer (loop));
7e0311ae 1893
d29932c9 1894 thread_block (header, false);
1895 set_loop_copy (loop, NULL);
1896 new_preheader = e->dest;
48e1416a 1897
d29932c9 1898 /* Create the new latch block. This is always necessary, as the latch
1899 must have only a single successor, but the original header had at
1900 least two successors. */
1901 loop->latch = NULL;
1902 mfb_kj_edge = single_succ_edge (new_preheader);
1903 loop->header = mfb_kj_edge->dest;
1904 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1905 loop->header = latch->dest;
1906 loop->latch = latch->src;
7e0311ae 1907 return true;
1908
1909fail:
1910 /* We failed to thread anything. Cancel the requests. */
1911 FOR_EACH_EDGE (e, ei, header->preds)
1912 {
f2981b08 1913 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1914
1915 if (path)
1916 {
6d1fdbf9 1917 delete_jump_thread_path (path);
f2981b08 1918 e->aux = NULL;
1919 }
7e0311ae 1920 }
1921 return false;
1922}
1923
b99a7d6d 1924/* E1 and E2 are edges into the same basic block. Return TRUE if the
1925 PHI arguments associated with those edges are equal or there are no
1926 PHI arguments, otherwise return FALSE. */
1927
1928static bool
1929phi_args_equal_on_edges (edge e1, edge e2)
1930{
1a91d914 1931 gphi_iterator gsi;
b99a7d6d 1932 int indx1 = e1->dest_idx;
1933 int indx2 = e2->dest_idx;
1934
1935 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1936 {
1a91d914 1937 gphi *phi = gsi.phi ();
b99a7d6d 1938
1939 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1940 gimple_phi_arg_def (phi, indx2), 0))
1941 return false;
1942 }
1943 return true;
1944}
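/* A purely illustrative example of what the predicate above compares
   (hypothetical SSA names): for a destination block whose PHI is

       x_2 = PHI <0(E1), b_1(E2)>

   phi_args_equal_on_edges (E1, E2) returns false because the arguments
   0 and b_1 differ; if both edges carried the same argument, or the
   block had no PHIs at all, it would return true.  */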
1945
3cebc9d2 1946/* Walk through the registered jump threads and convert them into a
334ec2d8 1947 form convenient for this pass.
3cebc9d2 1948
1949 Any block which has incoming edges threaded to outgoing edges
1950 will have its entry in THREADED_BLOCK set.
a8046f60 1951
3cebc9d2 1952 Any threaded edge will have its new outgoing edge stored in the
1953 original edge's AUX field.
a8046f60 1954
3cebc9d2 1955 This form avoids the need to walk all the edges in the CFG to
1956 discover blocks which need processing and avoids unnecessary
1957 hash table lookups to map from threaded edge to new target. */
a8046f60 1958
3cebc9d2 1959static void
1960mark_threaded_blocks (bitmap threaded_blocks)
1961{
1962 unsigned int i;
7e0311ae 1963 bitmap_iterator bi;
035def86 1964 auto_bitmap tmp;
7e0311ae 1965 basic_block bb;
1966 edge e;
1967 edge_iterator ei;
3cebc9d2 1968
b93ba654 1969 /* It is possible to have jump threads in which one is a subpath
 1970 of the other. For example, (A, B), (B, C), (C, D) where B is a joiner
1971 block and (B, C), (C, D) where no joiner block exists.
1972
1973 When this occurs ignore the jump thread request with the joiner
1974 block. It's totally subsumed by the simpler jump thread request.
1975
 1976 This results in less block copying and simpler CFGs. More importantly,
 1977 when we duplicate the joiner block B in this case, we will create
 1978 a new threading opportunity that we would not be able to optimize
 1979 until the next jump threading iteration.
1980
1981 So first convert the jump thread requests which do not require a
1982 joiner block. */
f2981b08 1983 for (i = 0; i < paths.length (); i++)
3cebc9d2 1984 {
f2981b08 1985 vec<jump_thread_edge *> *path = paths[i];
b93ba654 1986
1987 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1988 {
1989 edge e = (*path)[0]->e;
1990 e->aux = (void *)path;
1991 bitmap_set_bit (tmp, e->dest->index);
1992 }
1f3976e7 1993 }
1994
b93ba654 1995 /* Now iterate again, converting cases where we want to thread
1996 through a joiner block, but only if no other edge on the path
f1ce4e72 1997 already has a jump thread attached to it. We do this in two passes,
1998 to avoid situations where the order in the paths vec can hide overlapping
1999 threads (the path is recorded on the incoming edge, so we would miss
2000 cases where the second path starts at a downstream edge on the same
2001 path). First record all joiner paths, deleting any in the unexpected
2002 case where there is already a path for that incoming edge. */
51ea8bc6 2003 for (i = 0; i < paths.length ();)
b93ba654 2004 {
2005 vec<jump_thread_edge *> *path = paths[i];
2006
2007 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
9943b198 2008 {
f1ce4e72 2009 /* Attach the path to the starting edge if none is yet recorded. */
9943b198 2010 if ((*path)[0]->e->aux == NULL)
5d293e79 2011 {
9943b198 2012 (*path)[0]->e->aux = path;
51ea8bc6 2013 i++;
5d293e79 2014 }
2015 else
2016 {
2017 paths.unordered_remove (i);
2018 if (dump_file && (dump_flags & TDF_DETAILS))
9943b198 2019 dump_jump_thread_path (dump_file, *path, false);
5d293e79 2020 delete_jump_thread_path (path);
2021 }
9943b198 2022 }
51ea8bc6 2023 else
2024 {
2025 i++;
2026 }
f1ce4e72 2027 }
51ea8bc6 2028
f1ce4e72 2029 /* Second, look for paths that have any other jump thread attached to
2030 them, and either finish converting them or cancel them. */
51ea8bc6 2031 for (i = 0; i < paths.length ();)
f1ce4e72 2032 {
2033 vec<jump_thread_edge *> *path = paths[i];
2034 edge e = (*path)[0]->e;
2035
2036 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
b93ba654 2037 {
2038 unsigned int j;
f1ce4e72 2039 for (j = 1; j < path->length (); j++)
b93ba654 2040 if ((*path)[j]->e->aux != NULL)
2041 break;
2042
2043 /* If we iterated through the entire path without exiting the loop,
f1ce4e72 2044 then we are good to go, record it. */
b93ba654 2045 if (j == path->length ())
51ea8bc6 2046 {
2047 bitmap_set_bit (tmp, e->dest->index);
2048 i++;
2049 }
f1ce4e72 2050 else
b93ba654 2051 {
f1ce4e72 2052 e->aux = NULL;
5d293e79 2053 paths.unordered_remove (i);
f1ce4e72 2054 if (dump_file && (dump_flags & TDF_DETAILS))
9943b198 2055 dump_jump_thread_path (dump_file, *path, false);
5d293e79 2056 delete_jump_thread_path (path);
b93ba654 2057 }
2058 }
51ea8bc6 2059 else
2060 {
2061 i++;
2062 }
b93ba654 2063 }
b99a7d6d 2064
7e0311ae 2065 /* If optimizing for size, only thread through a block if we don't have
2066 to duplicate it or it's an otherwise empty redirection block. */
0bfd8d5c 2067 if (optimize_function_for_size_p (cfun))
7e0311ae 2068 {
2069 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2070 {
f5a6b05f 2071 bb = BASIC_BLOCK_FOR_FN (cfun, i);
7e0311ae 2072 if (EDGE_COUNT (bb->preds) > 1
2073 && !redirection_block_p (bb))
2074 {
2075 FOR_EACH_EDGE (e, ei, bb->preds)
eb31063a 2076 {
f2981b08 2077 if (e->aux)
2078 {
2079 vec<jump_thread_edge *> *path = THREAD_PATH (e);
6d1fdbf9 2080 delete_jump_thread_path (path);
f2981b08 2081 e->aux = NULL;
2082 }
eb31063a 2083 }
7e0311ae 2084 }
2085 else
2086 bitmap_set_bit (threaded_blocks, i);
2087 }
3cebc9d2 2088 }
7e0311ae 2089 else
2090 bitmap_copy (threaded_blocks, tmp);
2091
af6b6631 2092 /* If we have a joiner block (J) which has two successors S1 and S2 and
 2093 we are threading through S1 and the final destination of the thread
 2094 is S2, then we must verify that any PHI nodes in S2 have the same
 2095 PHI arguments for the edges J->S2 and J->S1->...->S2.
2096
2097 We used to detect this prior to registering the jump thread, but
2098 that prohibits propagation of edge equivalences into non-dominated
2099 PHI nodes as the equivalency test might occur before propagation.
2100
2101 This must also occur after we truncate any jump threading paths
2102 as this scenario may only show up after truncation.
2103
2104 This works for now, but will need improvement as part of the FSA
2105 optimization.
2106
2107 Note since we've moved the thread request data to the edges,
2108 we have to iterate on those rather than the threaded_edges vector. */
2109 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2110 {
f5a6b05f 2111 bb = BASIC_BLOCK_FOR_FN (cfun, i);
af6b6631 2112 FOR_EACH_EDGE (e, ei, bb->preds)
2113 {
2114 if (e->aux)
2115 {
2116 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2117 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2118
2119 if (have_joiner)
2120 {
2121 basic_block joiner = e->dest;
2122 edge final_edge = path->last ()->e;
2123 basic_block final_dest = final_edge->dest;
2124 edge e2 = find_edge (joiner, final_dest);
2125
2126 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2127 {
2128 delete_jump_thread_path (path);
2129 e->aux = NULL;
2130 }
2131 }
2132 }
2133 }
2134 }
2135
0afc9b47 2136 /* Look for jump threading paths which cross multiple loop headers.
2137
2138 The code to thread through loop headers will change the CFG in ways
2139 that invalidate the cached loop iteration information. So we must
2140 detect that case and wipe the cached information. */
2141 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2142 {
2143 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2144 FOR_EACH_EDGE (e, ei, bb->preds)
2145 {
2146 if (e->aux)
2147 {
2148 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2149
2150 for (unsigned int i = 0, crossed_headers = 0;
2151 i < path->length ();
2152 i++)
2153 {
2154 basic_block dest = (*path)[i]->e->dest;
2155 basic_block src = (*path)[i]->e->src;
2156 /* If we enter a loop. */
2157 if (flow_loop_nested_p (src->loop_father, dest->loop_father))
2158 ++crossed_headers;
2159 /* If we step from a block outside an irreducible region
2160 to a block inside an irreducible region, then we have
2161 crossed into a loop. */
2162 else if (! (src->flags & BB_IRREDUCIBLE_LOOP)
2163 && (dest->flags & BB_IRREDUCIBLE_LOOP))
2164 ++crossed_headers;
2165 if (crossed_headers > 1)
2166 {
2167 vect_free_loop_info_assumptions
2168 ((*path)[path->length () - 1]->e->dest->loop_father);
2169 break;
2170 }
2171 }
2172 }
2173 }
2174 }
3cebc9d2 2175}
2176
2177
9e0d85a7 2178/* Verify that the REGION is a valid jump thread. A jump thread is a special
 2179 case of a SEME (Single Entry Multiple Exits) region in which all nodes in the
 2180 REGION have exactly one incoming edge. The only exception is the first block,
 2181 which may not have been connected to the rest of the cfg yet. */
ded1c768 2182
2183DEBUG_FUNCTION void
9e0d85a7 2184verify_jump_thread (basic_block *region, unsigned n_region)
ded1c768 2185{
ded1c768 2186 for (unsigned i = 0; i < n_region; i++)
9e0d85a7 2187 gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2188}
ded1c768 2189
9e0d85a7 2190/* Return true when BB is one of the first N items in BBS. */
ded1c768 2191
9e0d85a7 2192static inline bool
2193bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2194{
2195 for (int i = 0; i < n; i++)
2196 if (bb == bbs[i])
2197 return true;
ded1c768 2198
9e0d85a7 2199 return false;
ded1c768 2200}
2201
9e0d85a7 2202/* Duplicates a jump-thread path of N_REGION basic blocks.
2203 The ENTRY edge is redirected to the duplicate of the region.
ded1c768 2204
2205 Remove the last conditional statement in the last basic block in the REGION,
2206 and create a single fallthru edge pointing to the same destination as the
2207 EXIT edge.
2208
2209 The new basic blocks are stored to REGION_COPY in the same order as they had
2210 in REGION, provided that REGION_COPY is not NULL.
2211
2212 Returns false if it is unable to copy the region, true otherwise. */
2213
2214static bool
9e0d85a7 2215duplicate_thread_path (edge entry, edge exit,
ded1c768 2216 basic_block *region, unsigned n_region,
2217 basic_block *region_copy)
2218{
2219 unsigned i;
af5f6a93 2220 bool free_region_copy = false;
ded1c768 2221 struct loop *loop = entry->dest->loop_father;
2222 edge exit_copy;
2223 edge redirected;
acb9fddd 2224 int curr_freq;
db9cef39 2225 profile_count curr_count;
ded1c768 2226
2227 if (!can_copy_bbs_p (region, n_region))
2228 return false;
2229
2230 /* Some sanity checking. Note that we do not check for all possible
 2231 misuses of the functions. I.e. if you ask to copy something weird,
2232 it will work, but the state of structures probably will not be
2233 correct. */
2234 for (i = 0; i < n_region; i++)
2235 {
2236 /* We do not handle subloops, i.e. all the blocks must belong to the
2237 same loop. */
2238 if (region[i]->loop_father != loop)
2239 return false;
2240 }
2241
2242 initialize_original_copy_tables ();
2243
af5f6a93 2244 set_loop_copy (loop, loop);
ded1c768 2245
2246 if (!region_copy)
2247 {
2248 region_copy = XNEWVEC (basic_block, n_region);
2249 free_region_copy = true;
2250 }
2251
ded1c768 2252 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
9e0d85a7 2253 split_edge_bb_loc (entry), false);
2254
2255 /* Fix up: copy_bbs redirects all edges pointing to copied blocks. The
2256 following code ensures that all the edges exiting the jump-thread path are
2257 redirected back to the original code: these edges are exceptions
2258 invalidating the property that is propagated by executing all the blocks of
2259 the jump-thread path in order. */
2260
acb9fddd 2261 curr_count = entry->count;
2262 curr_freq = EDGE_FREQUENCY (entry);
2263
9e0d85a7 2264 for (i = 0; i < n_region; i++)
2265 {
2266 edge e;
2267 edge_iterator ei;
2268 basic_block bb = region_copy[i];
2269
acb9fddd 2270 /* Watch for an inconsistent profile. */
2271 if (curr_count > region[i]->count)
2272 curr_count = region[i]->count;
2273 if (curr_freq > region[i]->frequency)
2274 curr_freq = region[i]->frequency;
2275 /* Scale current BB. */
db9cef39 2276 if (region[i]->count > 0 && curr_count.initialized_p ())
acb9fddd 2277 {
2278 /* In the middle of the path we only scale the frequencies.
 2279 In the last BB we need to update the probabilities of outgoing edges
 2280 because we know which one is taken on the threaded path. */
2281 if (i + 1 != n_region)
db9cef39 2282 scale_bbs_frequencies_profile_count (region + i, 1,
2283 region[i]->count - curr_count,
2284 region[i]->count);
acb9fddd 2285 else
2286 update_bb_profile_for_threading (region[i],
2287 curr_freq, curr_count,
2288 exit);
db9cef39 2289 scale_bbs_frequencies_profile_count (region_copy + i, 1, curr_count,
2290 region_copy[i]->count);
acb9fddd 2291 }
2292 else if (region[i]->frequency)
2293 {
2294 if (i + 1 != n_region)
2295 scale_bbs_frequencies_int (region + i, 1,
2296 region[i]->frequency - curr_freq,
2297 region[i]->frequency);
2298 else
2299 update_bb_profile_for_threading (region[i],
2300 curr_freq, curr_count,
2301 exit);
2302 scale_bbs_frequencies_int (region_copy + i, 1, curr_freq,
2303 region_copy[i]->frequency);
2304 }
2305
9e0d85a7 2306 if (single_succ_p (bb))
2307 {
2308 /* Make sure the successor is the next node in the path. */
2309 gcc_assert (i + 1 == n_region
2310 || region_copy[i + 1] == single_succ_edge (bb)->dest);
acb9fddd 2311 if (i + 1 != n_region)
2312 {
2313 curr_freq = EDGE_FREQUENCY (single_succ_edge (bb));
2314 curr_count = single_succ_edge (bb)->count;
2315 }
9e0d85a7 2316 continue;
2317 }
2318
2319 /* Special case the last block on the path: make sure that it does not
5a653985 2320 jump back on the copied path, including back to itself. */
9e0d85a7 2321 if (i + 1 == n_region)
2322 {
2323 FOR_EACH_EDGE (e, ei, bb->succs)
5a653985 2324 if (bb_in_bbs (e->dest, region_copy, n_region))
9e0d85a7 2325 {
2326 basic_block orig = get_bb_original (e->dest);
2327 if (orig)
2328 redirect_edge_and_branch_force (e, orig);
2329 }
2330 continue;
2331 }
2332
2333 /* Redirect all other edges jumping to non-adjacent blocks back to the
2334 original code. */
2335 FOR_EACH_EDGE (e, ei, bb->succs)
2336 if (region_copy[i + 1] != e->dest)
2337 {
2338 basic_block orig = get_bb_original (e->dest);
2339 if (orig)
2340 redirect_edge_and_branch_force (e, orig);
2341 }
acb9fddd 2342 else
2343 {
2344 curr_freq = EDGE_FREQUENCY (e);
2345 curr_count = e->count;
2346 }
9e0d85a7 2347 }
2348
ded1c768 2349
382ecba7 2350 if (flag_checking)
2351 verify_jump_thread (region_copy, n_region);
ded1c768 2352
2353 /* Remove the last branch in the jump thread path. */
2354 remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
7729459f 2355
2356 /* And fixup the flags on the single remaining edge. */
2357 edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2358 fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2359 fix_e->flags |= EDGE_FALLTHRU;
2360
ded1c768 2361 edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2362
acb9fddd 2363 if (e)
2364 {
2365 rescan_loop_exit (e, true, false);
720cfc43 2366 e->probability = profile_probability::always ();
acb9fddd 2367 e->count = region_copy[n_region - 1]->count;
2368 }
ded1c768 2369
2370 /* Redirect the entry and add the phi node arguments. */
af5f6a93 2371 if (entry->dest == loop->header)
2372 mark_loop_for_removal (loop);
ded1c768 2373 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2374 gcc_assert (redirected != NULL);
2375 flush_pending_stmts (entry);
2376
2377 /* Add the other PHI node arguments. */
2378 add_phi_args_after_copy (region_copy, n_region, NULL);
2379
2380 if (free_region_copy)
2381 free (region_copy);
2382
2383 free_original_copy_tables ();
2384 return true;
2385}
2386
b9903eb3 2387/* Return true when PATH is a valid jump-thread path. */
2388
2389static bool
2390valid_jump_thread_path (vec<jump_thread_edge *> *path)
2391{
2392 unsigned len = path->length ();
2393
105cb8d7 2394 /* Check that the path is connected. */
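  /* E.g., a path (A->B), (B->C), (C->D) is connected, while (A->B), (C->D)
     is rejected because the first edge ends in B but the next one starts
     in C.  */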
b9903eb3 2395 for (unsigned int j = 0; j < len - 1; j++)
8587f7ea 2396 {
f497d674 2397 edge e = (*path)[j]->e;
f497d674 2398 if (e->dest != (*path)[j+1]->e->src)
31e5d72d 2399 return false;
8587f7ea 2400 }
b9903eb3 2401 return true;
2402}
2403
a27d141e 2404/* Remove any queued jump threads that include edge E.
2405
 2406 We don't actually remove them here, just record the edges into a
2407 hash table. That way we can do the search once per iteration of
2408 DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR. */
f1344f45 2409
2410void
a27d141e 2411remove_jump_threads_including (edge_def *e)
f1344f45 2412{
2413 if (!paths.exists ())
2414 return;
2415
a27d141e 2416 if (!removed_edges)
2417 removed_edges = new hash_table<struct removed_edges> (17);
f1344f45 2418
a27d141e 2419 edge *slot = removed_edges->find_slot (e, INSERT);
2420 *slot = e;
f1344f45 2421}
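A hedged usage sketch (the helper below is hypothetical and not part of this
file): a client that is about to delete an edge records it here first, so the
queued paths running through it are discarded when thread_through_all_blocks
next runs.

static void
delete_edge_and_cancel_threads (edge e)	/* hypothetical helper */
{
  remove_jump_threads_including (e);
  remove_edge (e);			/* generic CFG hook */
}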
2422
3cebc9d2 2423/* Walk through all blocks and thread incoming edges to the appropriate
2424 outgoing edge for each edge pair recorded in THREADED_EDGES.
a8046f60 2425
2426 It is the caller's responsibility to fix the dominance information
2427 and rewrite duplicated SSA_NAMEs back into SSA form.
2428
7e0311ae 2429 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2430 loop headers if it does not simplify the loop.
2431
dac49aa5 2432 Returns true if one or more edges were threaded, false otherwise. */
a8046f60 2433
2434bool
7e0311ae 2435thread_through_all_blocks (bool may_peel_loop_headers)
a8046f60 2436{
a8046f60 2437 bool retval = false;
7ea47fbd 2438 unsigned int i;
2439 bitmap_iterator bi;
7e0311ae 2440 struct loop *loop;
035def86 2441 auto_bitmap threaded_blocks;
3cebc9d2 2442
f2981b08 2443 if (!paths.exists ())
a27d141e 2444 {
2445 retval = false;
2446 goto out;
2447 }
a8046f60 2448
5236b8bb 2449 memset (&thread_stats, 0, sizeof (thread_stats));
388d1fc1 2450
a27d141e 2451 /* Remove any paths that referenced removed edges. */
2452 if (removed_edges)
2453 for (i = 0; i < paths.length (); )
2454 {
2455 unsigned int j;
2456 vec<jump_thread_edge *> *path = paths[i];
2457
2458 for (j = 0; j < path->length (); j++)
2459 {
2460 edge e = (*path)[j]->e;
2461 if (removed_edges->find_slot (e, NO_INSERT))
2462 break;
2463 }
2464
2465 if (j != path->length ())
2466 {
2467 delete_jump_thread_path (path);
2468 paths.unordered_remove (i);
2469 continue;
2470 }
2471 i++;
2472 }
2473
ded1c768 2474 /* Jump-thread all FSM threads before other jump-threads. */
2475 for (i = 0; i < paths.length ();)
2476 {
2477 vec<jump_thread_edge *> *path = paths[i];
2478 edge entry = (*path)[0]->e;
2479
b9903eb3 2480 /* Only code-generate FSM jump-threads in this loop. */
2481 if ((*path)[0]->type != EDGE_FSM_THREAD)
2482 {
2483 i++;
2484 continue;
2485 }
2486
2487 /* Do not jump-thread twice from the same block. */
2488 if (bitmap_bit_p (threaded_blocks, entry->src->index)
545d4a44 2489 /* We may not want to realize this jump thread path
2490 for various reasons. So check it first. */
b9903eb3 2491 || !valid_jump_thread_path (path))
2492 {
2493 /* Remove invalid FSM jump-thread paths. */
2494 delete_jump_thread_path (path);
2495 paths.unordered_remove (i);
2496 continue;
2497 }
ded1c768 2498
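      /* The copied region consists of the destination block of every edge on
	 the path except the last one: region[0] is ENTRY->dest and
	 region[len - 2] is the source block of EXIT.  */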
2499 unsigned len = path->length ();
2500 edge exit = (*path)[len - 1]->e;
2501 basic_block *region = XNEWVEC (basic_block, len - 1);
2502
2503 for (unsigned int j = 0; j < len - 1; j++)
2504 region[j] = (*path)[j]->e->dest;
2505
9e0d85a7 2506 if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
ded1c768 2507 {
2508 /* We do not update dominance info. */
2509 free_dominance_info (CDI_DOMINATORS);
2510 bitmap_set_bit (threaded_blocks, entry->src->index);
2511 retval = true;
7998c0b5 2512 thread_stats.num_threaded_edges++;
ded1c768 2513 }
2514
2515 delete_jump_thread_path (path);
2516 paths.unordered_remove (i);
b7cbf36d 2517 free (region);
ded1c768 2518 }
2519
2520 /* Remove from PATHS all the jump-threads starting with an edge already
2521 jump-threaded. */
2522 for (i = 0; i < paths.length ();)
2523 {
2524 vec<jump_thread_edge *> *path = paths[i];
2525 edge entry = (*path)[0]->e;
2526
2527 /* Do not jump-thread twice from the same block. */
2528 if (bitmap_bit_p (threaded_blocks, entry->src->index))
2529 {
2530 delete_jump_thread_path (path);
2531 paths.unordered_remove (i);
2532 }
2533 else
2534 i++;
2535 }
2536
2537 bitmap_clear (threaded_blocks);
2538
3cebc9d2 2539 mark_threaded_blocks (threaded_blocks);
2540
96c90e5e 2541 initialize_original_copy_tables ();
7e0311ae 2542
2543 /* First perform the threading requests that do not affect
2544 loop structure. */
7ea47fbd 2545 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
a8046f60 2546 {
f5a6b05f 2547 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
7ea47fbd 2548
2549 if (EDGE_COUNT (bb->preds) > 0)
7e0311ae 2550 retval |= thread_block (bb, true);
2551 }
2552
2553 /* Then perform the threading through loop headers. We start with the
2554 innermost loop, so that the changes in cfg we perform won't affect
2555 further threading. */
f21d4d00 2556 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
7e0311ae 2557 {
7a3bf727 2558 if (!loop->header
2559 || !bitmap_bit_p (threaded_blocks, loop->header->index))
2560 continue;
7e0311ae 2561
7a3bf727 2562 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
a8046f60 2563 }
388d1fc1 2564
c3dbcc09 2565 /* All jump threading paths should have been resolved at this
2566 point. Verify that is the case. */
ed4feca1 2567 basic_block bb;
fc00614f 2568 FOR_EACH_BB_FN (bb, cfun)
ed4feca1 2569 {
c3dbcc09 2570 edge_iterator ei;
2571 edge e;
2572 FOR_EACH_EDGE (e, ei, bb->preds)
2573 gcc_assert (e->aux == NULL);
ed4feca1 2574 }
2575
581f8050 2576 statistics_counter_event (cfun, "Jumps threaded",
2577 thread_stats.num_threaded_edges);
5236b8bb 2578
96c90e5e 2579 free_original_copy_tables ();
2580
f2981b08 2581 paths.release ();
7e0311ae 2582
396c773e 2583 if (retval)
f24ec26f 2584 loops_state_set (LOOPS_NEED_FIXUP);
eb2a640e 2585
a27d141e 2586 out:
2587 delete removed_edges;
2588 removed_edges = NULL;
a8046f60 2589 return retval;
2590}
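A hedged sketch of a hypothetical client driving the function above (real
callers such as DOM and VRP differ in detail); it illustrates the documented
obligation to rewrite the duplicated SSA_NAMEs afterwards.

static void
example_realize_threads (void)	/* hypothetical driver */
{
  /* ... the analysis phase has queued paths via register_jump_thread ...  */
  if (thread_through_all_blocks (/*may_peel_loop_headers=*/false))
    /* Names created for the duplicated blocks still need to be rewritten
       into SSA form.  */
    update_ssa (TODO_update_ssa);
}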
3cebc9d2 2591
6d1fdbf9 2592/* Delete the jump threading path PATH. We have to explicitly delete
2593 each entry in the vector, then the container. */
2594
2595void
2596delete_jump_thread_path (vec<jump_thread_edge *> *path)
2597{
2598 for (unsigned int i = 0; i < path->length (); i++)
2599 delete (*path)[i];
 2600 path->release ();
9b5a88db 2601 delete path;
6d1fdbf9 2602}
2603
3cebc9d2 2604/* Register a jump threading opportunity. We queue up all the jump
2605 threading opportunities discovered by a pass and update the CFG
2606 and SSA form all at once.
2607
f0b5f617 2608 E is the edge we can thread, E2 is the new target edge, i.e., we
3cebc9d2 2609 are effectively recording that E->dest can be changed to E2->dest
2610 after fixing the SSA graph. */
2611
2612void
f2981b08 2613register_jump_thread (vec<jump_thread_edge *> *path)
3cebc9d2 2614{
a3724f9d 2615 if (!dbg_cnt (registered_jump_thread))
2616 {
6d1fdbf9 2617 delete_jump_thread_path (path);
a3724f9d 2618 return;
2619 }
2620
0c5b289a 2621 /* First make sure there are no NULL outgoing edges on the jump threading
2622 path. That can happen for jumping to a constant address. */
f2981b08 2623 for (unsigned int i = 0; i < path->length (); i++)
d29932c9 2624 {
2625 if ((*path)[i]->e == NULL)
31e5d72d 2626 {
d29932c9 2627 if (dump_file && (dump_flags & TDF_DETAILS))
2628 {
2629 fprintf (dump_file,
2630 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
2631 dump_jump_thread_path (dump_file, *path, false);
2632 }
f2981b08 2633
d29932c9 2634 delete_jump_thread_path (path);
2635 return;
31e5d72d 2636 }
d29932c9 2637
2638 /* Only the FSM threader is allowed to thread across
2639 backedges in the CFG. */
2640 if (flag_checking
2641 && (*path)[0]->type != EDGE_FSM_THREAD)
2642 gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
2643 }
5411af4e 2644
631d940c 2645 if (dump_file && (dump_flags & TDF_DETAILS))
b93ba654 2646 dump_jump_thread_path (dump_file, *path, true);
631d940c 2647
f2981b08 2648 if (!paths.exists ())
2649 paths.create (5);
631d940c 2650
f2981b08 2651 paths.safe_push (path);
3cebc9d2 2652}
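A hedged sketch of how a client builds and registers a path (the edges E and
E2 and the two-edge shape are hypothetical; the real clients live in
tree-ssa-threadedge.c and construct longer paths):

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  /* The first entry marks where the thread starts...  */
  path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
  /* ...and each later entry records whether its source block is copied.  */
  path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
  register_jump_thread (path);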