1 /* SSA Jump Threading
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "hard-reg-set.h"
29 #include "ssa.h"
30 #include "alias.h"
31 #include "fold-const.h"
32 #include "flags.h"
33 #include "tm_p.h"
34 #include "cfgloop.h"
35 #include "timevar.h"
36 #include "dumpfile.h"
37 #include "internal-fn.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-ssa-threadupdate.h"
42 #include "langhooks.h"
43 #include "params.h"
44 #include "tree-ssa-scopedtables.h"
45 #include "tree-ssa-threadedge.h"
46 #include "tree-ssa-loop.h"
47 #include "builtins.h"
48 #include "cfganal.h"
49
50 /* To avoid code explosion due to jump threading, we limit the
51 number of statements we are going to copy. This variable
52 holds the number of statements currently seen that we'll have
53 to copy as part of the jump threading process. */
54 static int stmt_count;
55
56 /* Array to record value-handles per SSA_NAME. */
57 vec<tree> ssa_name_values;
58
59 typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);
60
61 /* Set the value for the SSA name NAME to VALUE. */
62
63 void
64 set_ssa_name_value (tree name, tree value)
65 {
66 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
67 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
68 if (value && TREE_OVERFLOW_P (value))
69 value = drop_tree_overflow (value);
70 ssa_name_values[SSA_NAME_VERSION (name)] = value;
71 }
72
  73 /* Initialize the per SSA_NAME value-handles array.  */
74 void
75 threadedge_initialize_values (void)
76 {
77 gcc_assert (!ssa_name_values.exists ());
78 ssa_name_values.create (num_ssa_names);
79 }
80
81 /* Free the per SSA_NAME value-handle array. */
82 void
83 threadedge_finalize_values (void)
84 {
85 ssa_name_values.release ();
86 }
87
88 /* Return TRUE if we may be able to thread an incoming edge into
89 BB to an outgoing edge from BB. Return FALSE otherwise. */
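/* For example (an illustrative sketch, not taken from a real dump), a block
   with two or more predecessors that ends in

     if (a_2 < b_3)

   is potentially threadable: one of its incoming edges may carry enough
   information to resolve the conditional.  A block with a single predecessor
   or a single successor offers nothing to resolve and is rejected, aside from
   the PHI-only forwarder special case handled in the body below.  */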
90
91 bool
92 potentially_threadable_block (basic_block bb)
93 {
94 gimple_stmt_iterator gsi;
95
96 /* Special case. We can get blocks that are forwarders, but are
97 not optimized away because they forward from outside a loop
98 to the loop header. We want to thread through them as we can
99 sometimes thread to the loop exit, which is obviously profitable.
 100      The interesting case here is when the block has PHIs.  */
101 if (gsi_end_p (gsi_start_nondebug_bb (bb))
102 && !gsi_end_p (gsi_start_phis (bb)))
103 return true;
104
105 /* If BB has a single successor or a single predecessor, then
106 there is no threading opportunity. */
107 if (single_succ_p (bb) || single_pred_p (bb))
108 return false;
109
110 /* If BB does not end with a conditional, switch or computed goto,
111 then there is no threading opportunity. */
112 gsi = gsi_last_bb (bb);
113 if (gsi_end_p (gsi)
114 || ! gsi_stmt (gsi)
115 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
116 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
117 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
118 return false;
119
120 return true;
121 }
122
123 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
124 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
125 BB. If no such ASSERT_EXPR is found, return OP. */
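/* As an illustration (hand-written, not from a real dump), VRP may have
   created a dominating assertion such as

     x_7 = ASSERT_EXPR <x_3, x_3 != 0>

   If the block holding that statement dominates BB, a use of x_3 in the
   condition at the end of BB can be rewritten in terms of x_7, which carries
   the asserted range/equivalence information.  */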
126
127 static tree
128 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
129 {
130 imm_use_iterator imm_iter;
131 gimple *use_stmt;
132 use_operand_p use_p;
133
134 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
135 {
136 use_stmt = USE_STMT (use_p);
137 if (use_stmt != stmt
138 && gimple_assign_single_p (use_stmt)
139 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
140 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
141 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
142 {
143 return gimple_assign_lhs (use_stmt);
144 }
145 }
146 return op;
147 }
148
 149 /* Record temporary equivalences created by PHIs at the target of the
 150    edge E.  Record unwind information for the equivalences into
 151    CONST_AND_COPIES.
 152
 153    If a PHI which prevents threading is encountered, then return FALSE
 154    indicating we should not thread this edge, else return TRUE.  */
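/* For instance (illustrative only), when traversing the edge from block 3
   into a destination block containing

     x_5 = PHI <x_2(2), 7(3)>

   we temporarily record the equivalence x_5 = 7.  That equivalence is only
   valid on paths through that edge and is unwound by the caller once E has
   been processed.  */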
158
159 static bool
160 record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
161 {
162 gphi_iterator gsi;
163
164 /* Each PHI creates a temporary equivalence, record them.
165 These are context sensitive equivalences and will be removed
166 later. */
167 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
168 {
169 gphi *phi = gsi.phi ();
170 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
171 tree dst = gimple_phi_result (phi);
172
173 /* If the desired argument is not the same as this PHI's result
174 and it is set by a PHI in E->dest, then we can not thread
175 through E->dest. */
176 if (src != dst
177 && TREE_CODE (src) == SSA_NAME
178 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
179 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
180 return false;
181
182 /* We consider any non-virtual PHI as a statement since it
 183          could result in a constant assignment or copy operation.  */
184 if (!virtual_operand_p (dst))
185 stmt_count++;
186
187 const_and_copies->record_const_or_copy (dst, src);
188 }
189 return true;
190 }
191
192 /* Fold the RHS of an assignment statement and return it as a tree.
193 May return NULL_TREE if no simplification is possible. */
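/* A minimal illustration: after the caller has temporarily copy propagated
   known values into the operands, a GIMPLE_BINARY_RHS such as x_1 = 3 + 4 is
   handed to fold_binary, which returns the constant 7; when nothing folds,
   NULL_TREE is returned instead.  */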
194
195 static tree
196 fold_assignment_stmt (gimple *stmt)
197 {
198 enum tree_code subcode = gimple_assign_rhs_code (stmt);
199
200 switch (get_gimple_rhs_class (subcode))
201 {
202 case GIMPLE_SINGLE_RHS:
203 return fold (gimple_assign_rhs1 (stmt));
204
205 case GIMPLE_UNARY_RHS:
206 {
207 tree lhs = gimple_assign_lhs (stmt);
208 tree op0 = gimple_assign_rhs1 (stmt);
209 return fold_unary (subcode, TREE_TYPE (lhs), op0);
210 }
211
212 case GIMPLE_BINARY_RHS:
213 {
214 tree lhs = gimple_assign_lhs (stmt);
215 tree op0 = gimple_assign_rhs1 (stmt);
216 tree op1 = gimple_assign_rhs2 (stmt);
217 return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
218 }
219
220 case GIMPLE_TERNARY_RHS:
221 {
222 tree lhs = gimple_assign_lhs (stmt);
223 tree op0 = gimple_assign_rhs1 (stmt);
224 tree op1 = gimple_assign_rhs2 (stmt);
225 tree op2 = gimple_assign_rhs3 (stmt);
226
227 /* Sadly, we have to handle conditional assignments specially
228 here, because fold expects all the operands of an expression
229 to be folded before the expression itself is folded, but we
230 can't just substitute the folded condition here. */
231 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
232 op0 = fold (op0);
233
234 return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
235 }
236
237 default:
238 gcc_unreachable ();
239 }
240 }
241
242 /* Try to simplify each statement in E->dest, ultimately leading to
243 a simplification of the COND_EXPR at the end of E->dest.
244
 245    Record unwind information for temporary equivalences into CONST_AND_COPIES.
246
247 Use SIMPLIFY (a pointer to a callback function) to further simplify
248 statements using pass specific information.
249
250 We might consider marking just those statements which ultimately
251 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
252 would be recovered by trying to simplify fewer statements.
253
254 If we are able to simplify a statement into the form
255 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
256 a context sensitive equivalence which may help us simplify
257 later statements in E->dest. */
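/* As a sketch of the mechanism (illustrative GIMPLE, not from a real dump):
   if traversing E recorded a_1 = 5 from a PHI and E->dest contains

     b_2 = a_1 + 1;
     if (b_2 > 10) ...

   then temporarily substituting 5 for a_1 lets b_2 fold to 6, and the
   context sensitive equivalence b_2 = 6 may in turn allow the conditional at
   the end of the block to be resolved.  */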
258
259 static gimple *
260 record_temporary_equivalences_from_stmts_at_dest (edge e,
261 const_and_copies *const_and_copies,
262 avail_exprs_stack *avail_exprs_stack,
263 pfn_simplify simplify,
264 bool backedge_seen)
265 {
266 gimple *stmt = NULL;
267 gimple_stmt_iterator gsi;
268 int max_stmt_count;
269
270 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
271
272 /* Walk through each statement in the block recording equivalences
273 we discover. Note any equivalences we discover are context
274 sensitive (ie, are dependent on traversing E) and must be unwound
275 when we're finished processing E. */
276 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
277 {
278 tree cached_lhs = NULL;
279
280 stmt = gsi_stmt (gsi);
281
282 /* Ignore empty statements and labels. */
283 if (gimple_code (stmt) == GIMPLE_NOP
284 || gimple_code (stmt) == GIMPLE_LABEL
285 || is_gimple_debug (stmt))
286 continue;
287
288 /* If the statement has volatile operands, then we assume we
289 can not thread through this block. This is overly
290 conservative in some ways. */
291 if (gimple_code (stmt) == GIMPLE_ASM
292 && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
293 return NULL;
294
295 /* If duplicating this block is going to cause too much code
296 expansion, then do not thread through this block. */
297 stmt_count++;
298 if (stmt_count > max_stmt_count)
299 return NULL;
300
301 /* If this is not a statement that sets an SSA_NAME to a new
302 value, then do not try to simplify this statement as it will
303 not simplify in any way that is helpful for jump threading. */
304 if ((gimple_code (stmt) != GIMPLE_ASSIGN
305 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
306 && (gimple_code (stmt) != GIMPLE_CALL
307 || gimple_call_lhs (stmt) == NULL_TREE
308 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
309 {
310 /* STMT might still have DEFS and we need to invalidate any known
311 equivalences for them.
312
313 Consider if STMT is a GIMPLE_ASM with one or more outputs that
314 feeds a conditional inside a loop. We might derive an equivalence
315 due to the conditional. */
316 tree op;
317 ssa_op_iter iter;
318
319 if (backedge_seen)
320 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
321 const_and_copies->invalidate (op);
322
323 continue;
324 }
325
326 /* The result of __builtin_object_size depends on all the arguments
327 of a phi node. Temporarily using only one edge produces invalid
328 results. For example
329
330 if (x < 6)
331 goto l;
332 else
333 goto l;
334
335 l:
336 r = PHI <&w[2].a[1](2), &a.a[6](3)>
337 __builtin_object_size (r, 0)
338
339 The result of __builtin_object_size is defined to be the maximum of
340 remaining bytes. If we use only one edge on the phi, the result will
341 change to be the remaining bytes for the corresponding phi argument.
342
343 Similarly for __builtin_constant_p:
344
345 r = PHI <1(2), 2(3)>
346 __builtin_constant_p (r)
347
348 Both PHI arguments are constant, but x ? 1 : 2 is still not
349 constant. */
350
351 if (is_gimple_call (stmt))
352 {
353 tree fndecl = gimple_call_fndecl (stmt);
354 if (fndecl
355 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
356 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
357 {
358 if (backedge_seen)
359 {
360 tree lhs = gimple_get_lhs (stmt);
361 const_and_copies->invalidate (lhs);
362 }
363 continue;
364 }
365 }
366
367 /* At this point we have a statement which assigns an RHS to an
368 SSA_VAR on the LHS. We want to try and simplify this statement
369 to expose more context sensitive equivalences which in turn may
 370          allow us to simplify the condition at the end of the block.
371
372 Handle simple copy operations as well as implied copies from
373 ASSERT_EXPRs. */
374 if (gimple_assign_single_p (stmt)
375 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
376 cached_lhs = gimple_assign_rhs1 (stmt);
377 else if (gimple_assign_single_p (stmt)
378 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
379 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
380 else
381 {
382 /* A statement that is not a trivial copy or ASSERT_EXPR.
383 We're going to temporarily copy propagate the operands
384 and see if that allows us to simplify this statement. */
385 tree *copy;
386 ssa_op_iter iter;
387 use_operand_p use_p;
388 unsigned int num, i = 0;
389
390 num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
391 copy = XCNEWVEC (tree, num);
392
393 /* Make a copy of the uses & vuses into USES_COPY, then cprop into
394 the operands. */
395 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
396 {
397 tree tmp = NULL;
398 tree use = USE_FROM_PTR (use_p);
399
400 copy[i++] = use;
401 if (TREE_CODE (use) == SSA_NAME)
402 tmp = SSA_NAME_VALUE (use);
403 if (tmp)
404 SET_USE (use_p, tmp);
405 }
406
407 /* Try to fold/lookup the new expression. Inserting the
408 expression into the hash table is unlikely to help. */
409 if (is_gimple_call (stmt))
410 cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
411 else
412 cached_lhs = fold_assignment_stmt (stmt);
413
414 if (!cached_lhs
415 || (TREE_CODE (cached_lhs) != SSA_NAME
416 && !is_gimple_min_invariant (cached_lhs)))
417 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
418
419 /* Restore the statement's original uses/defs. */
420 i = 0;
421 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
422 SET_USE (use_p, copy[i++]);
423
424 free (copy);
425 }
426
427 /* Record the context sensitive equivalence if we were able
428 to simplify this statement.
429
430 If we have traversed a backedge at some point during threading,
431 then always enter something here. Either a real equivalence,
432 or a NULL_TREE equivalence which is effectively invalidation of
433 prior equivalences. */
434 if (cached_lhs
435 && (TREE_CODE (cached_lhs) == SSA_NAME
436 || is_gimple_min_invariant (cached_lhs)))
437 const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
438 cached_lhs);
439 else if (backedge_seen)
440 const_and_copies->invalidate (gimple_get_lhs (stmt));
441 }
442 return stmt;
443 }
444
445 /* Once we have passed a backedge in the CFG when threading, we do not want to
 446    utilize edge equivalences for simplification purposes.  They are no longer
447 necessarily valid. We use this callback rather than the ones provided by
448 DOM/VRP to achieve that effect. */
449 static tree
450 dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
451 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
452 {
453 return NULL_TREE;
454 }
455
456 /* Simplify the control statement at the end of the block E->dest.
457
458 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
459 is available to use/clobber in DUMMY_COND.
460
461 Use SIMPLIFY (a pointer to a callback function) to further simplify
462 a condition using pass specific information.
463
464 Return the simplified condition or NULL if simplification could
465 not be performed.
466
467 The available expression table is referenced via AVAIL_EXPRS_STACK. */
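/* A small sketch (illustrative only): if E->dest ends with

     if (x_4 != 0)

   and traversing E recorded x_4 = 0 (say, from a PHI argument), the
   comparison folds to the invariant boolean false and the caller can then
   determine which outgoing edge is taken.  */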
468
469 static tree
470 simplify_control_stmt_condition (edge e,
471 gimple *stmt,
472 class avail_exprs_stack *avail_exprs_stack,
473 gcond *dummy_cond,
474 pfn_simplify simplify,
475 bool handle_dominating_asserts)
476 {
477 tree cond, cached_lhs;
478 enum gimple_code code = gimple_code (stmt);
479
480 /* For comparisons, we have to update both operands, then try
481 to simplify the comparison. */
482 if (code == GIMPLE_COND)
483 {
484 tree op0, op1;
485 enum tree_code cond_code;
486
487 op0 = gimple_cond_lhs (stmt);
488 op1 = gimple_cond_rhs (stmt);
489 cond_code = gimple_cond_code (stmt);
490
491 /* Get the current value of both operands. */
492 if (TREE_CODE (op0) == SSA_NAME)
493 {
494 for (int i = 0; i < 2; i++)
495 {
496 if (TREE_CODE (op0) == SSA_NAME
497 && SSA_NAME_VALUE (op0))
498 op0 = SSA_NAME_VALUE (op0);
499 else
500 break;
501 }
502 }
503
504 if (TREE_CODE (op1) == SSA_NAME)
505 {
506 for (int i = 0; i < 2; i++)
507 {
508 if (TREE_CODE (op1) == SSA_NAME
509 && SSA_NAME_VALUE (op1))
510 op1 = SSA_NAME_VALUE (op1);
511 else
512 break;
513 }
514 }
515
516 if (handle_dominating_asserts)
517 {
518 /* Now see if the operand was consumed by an ASSERT_EXPR
519 which dominates E->src. If so, we want to replace the
520 operand with the LHS of the ASSERT_EXPR. */
521 if (TREE_CODE (op0) == SSA_NAME)
522 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
523
524 if (TREE_CODE (op1) == SSA_NAME)
525 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
526 }
527
528 /* We may need to canonicalize the comparison. For
529 example, op0 might be a constant while op1 is an
530 SSA_NAME. Failure to canonicalize will cause us to
531 miss threading opportunities. */
532 if (tree_swap_operands_p (op0, op1, false))
533 {
534 cond_code = swap_tree_comparison (cond_code);
535 std::swap (op0, op1);
536 }
537
538 /* Stuff the operator and operands into our dummy conditional
539 expression. */
540 gimple_cond_set_code (dummy_cond, cond_code);
541 gimple_cond_set_lhs (dummy_cond, op0);
542 gimple_cond_set_rhs (dummy_cond, op1);
543
 544       /* We absolutely do not care about any type conversions;
545 we only care about a zero/nonzero value. */
546 fold_defer_overflow_warnings ();
547
548 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
549 if (cached_lhs)
550 while (CONVERT_EXPR_P (cached_lhs))
551 cached_lhs = TREE_OPERAND (cached_lhs, 0);
552
553 fold_undefer_overflow_warnings ((cached_lhs
554 && is_gimple_min_invariant (cached_lhs)),
555 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
556
557 /* If we have not simplified the condition down to an invariant,
558 then use the pass specific callback to simplify the condition. */
559 if (!cached_lhs
560 || !is_gimple_min_invariant (cached_lhs))
561 cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);
562
563 /* If we were just testing that an integral type was != 0, and that
564 failed, just return the first operand. This gives the FSM code a
565 chance to optimize the path. */
566 if (cached_lhs == NULL
567 && cond_code == NE_EXPR)
568 {
569 /* Recover the original operands. They may have been simplified
570 using context sensitive equivalences. Those context sensitive
571 equivalences may not be valid on paths found by the FSM optimizer. */
572 tree op0 = gimple_cond_lhs (stmt);
573 tree op1 = gimple_cond_rhs (stmt);
574
575 if (INTEGRAL_TYPE_P (TREE_TYPE (op0))
576 && TREE_CODE (op0) == SSA_NAME
577 && integer_zerop (op1))
578 return op0;
579 }
580
581 return cached_lhs;
582 }
583
584 if (code == GIMPLE_SWITCH)
585 cond = gimple_switch_index (as_a <gswitch *> (stmt));
586 else if (code == GIMPLE_GOTO)
587 cond = gimple_goto_dest (stmt);
588 else
589 gcc_unreachable ();
590
591 /* We can have conditionals which just test the state of a variable
592 rather than use a relational operator. These are simpler to handle. */
593 if (TREE_CODE (cond) == SSA_NAME)
594 {
595 tree original_lhs = cond;
596 cached_lhs = cond;
597
598 /* Get the variable's current value from the equivalence chains.
599
600 It is possible to get loops in the SSA_NAME_VALUE chains
601 (consider threading the backedge of a loop where we have
 602          a loop invariant SSA_NAME used in the condition).  */
603 if (cached_lhs)
604 {
605 for (int i = 0; i < 2; i++)
606 {
607 if (TREE_CODE (cached_lhs) == SSA_NAME
608 && SSA_NAME_VALUE (cached_lhs))
609 cached_lhs = SSA_NAME_VALUE (cached_lhs);
610 else
611 break;
612 }
613 }
614
615 /* If we're dominated by a suitable ASSERT_EXPR, then
616 update CACHED_LHS appropriately. */
617 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
618 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
619
620 /* If we haven't simplified to an invariant yet, then use the
621 pass specific callback to try and simplify it further. */
622 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
623 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
624
625 /* We couldn't find an invariant. But, callers of this
626 function may be able to do something useful with the
627 unmodified destination. */
628 if (!cached_lhs)
629 cached_lhs = original_lhs;
630 }
631 else
632 cached_lhs = NULL;
633
634 return cached_lhs;
635 }
636
637 /* Copy debug stmts from DEST's chain of single predecessors up to
638 SRC, so that we don't lose the bindings as PHI nodes are introduced
639 when DEST gains new predecessors. */
640 void
641 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
642 {
643 if (!MAY_HAVE_DEBUG_STMTS)
644 return;
645
646 if (!single_pred_p (dest))
647 return;
648
649 gcc_checking_assert (dest != src);
650
651 gimple_stmt_iterator gsi = gsi_after_labels (dest);
652 int i = 0;
653 const int alloc_count = 16; // ?? Should this be a PARAM?
654
655 /* Estimate the number of debug vars overridden in the beginning of
656 DEST, to tell how many we're going to need to begin with. */
657 for (gimple_stmt_iterator si = gsi;
658 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
659 {
660 gimple *stmt = gsi_stmt (si);
661 if (!is_gimple_debug (stmt))
662 break;
663 i++;
664 }
665
666 auto_vec<tree, alloc_count> fewvars;
667 hash_set<tree> *vars = NULL;
668
669 /* If we're already starting with 3/4 of alloc_count, go for a
670 hash_set, otherwise start with an unordered stack-allocated
671 VEC. */
672 if (i * 4 > alloc_count * 3)
673 vars = new hash_set<tree>;
674
675 /* Now go through the initial debug stmts in DEST again, this time
676 actually inserting in VARS or FEWVARS. Don't bother checking for
677 duplicates in FEWVARS. */
678 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
679 {
680 gimple *stmt = gsi_stmt (si);
681 if (!is_gimple_debug (stmt))
682 break;
683
684 tree var;
685
686 if (gimple_debug_bind_p (stmt))
687 var = gimple_debug_bind_get_var (stmt);
688 else if (gimple_debug_source_bind_p (stmt))
689 var = gimple_debug_source_bind_get_var (stmt);
690 else
691 gcc_unreachable ();
692
693 if (vars)
694 vars->add (var);
695 else
696 fewvars.quick_push (var);
697 }
698
699 basic_block bb = dest;
700
701 do
702 {
703 bb = single_pred (bb);
704 for (gimple_stmt_iterator si = gsi_last_bb (bb);
705 !gsi_end_p (si); gsi_prev (&si))
706 {
707 gimple *stmt = gsi_stmt (si);
708 if (!is_gimple_debug (stmt))
709 continue;
710
711 tree var;
712
713 if (gimple_debug_bind_p (stmt))
714 var = gimple_debug_bind_get_var (stmt);
715 else if (gimple_debug_source_bind_p (stmt))
716 var = gimple_debug_source_bind_get_var (stmt);
717 else
718 gcc_unreachable ();
719
720 /* Discard debug bind overlaps. ??? Unlike stmts from src,
721 copied into a new block that will precede BB, debug bind
722 stmts in bypassed BBs may actually be discarded if
723 they're overwritten by subsequent debug bind stmts, which
724 might be a problem once we introduce stmt frontier notes
725 or somesuch. Adding `&& bb == src' to the condition
726 below will preserve all potentially relevant debug
727 notes. */
728 if (vars && vars->add (var))
729 continue;
730 else if (!vars)
731 {
732 int i = fewvars.length ();
733 while (i--)
734 if (fewvars[i] == var)
735 break;
736 if (i >= 0)
737 continue;
738
739 if (fewvars.length () < (unsigned) alloc_count)
740 fewvars.quick_push (var);
741 else
742 {
743 vars = new hash_set<tree>;
744 for (i = 0; i < alloc_count; i++)
745 vars->add (fewvars[i]);
746 fewvars.release ();
747 vars->add (var);
748 }
749 }
750
751 stmt = gimple_copy (stmt);
752 /* ??? Should we drop the location of the copy to denote
753 they're artificial bindings? */
754 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
755 }
756 }
757 while (bb != src && single_pred_p (bb));
758
759 if (vars)
760 delete vars;
761 else if (fewvars.exists ())
762 fewvars.release ();
763 }
764
 765 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
766 need not be duplicated as part of the CFG/SSA updating process).
767
768 If it is threadable, add it to PATH and VISITED and recurse, ultimately
769 returning TRUE from the toplevel call. Otherwise do nothing and
770 return false.
771
772 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
773 try and simplify the condition at the end of TAKEN_EDGE->dest.
774
775 The available expression table is referenced via AVAIL_EXPRS_STACK. */
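/* Illustrative shape of what this handles (a sketch, not from a real dump):

     bb4: <no statements>   -> bb5
     bb5: if (x_2 > 10)     -> bb6, bb7

   bb4 is a pure forwarder and bb5 contains only a control statement, so
   neither needs to be copied; their outgoing edges are appended to PATH as
   EDGE_NO_COPY_SRC_BLOCK when the condition in bb5 is known from the
   recorded equivalences.  */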
776
777 static bool
778 thread_around_empty_blocks (edge taken_edge,
779 gcond *dummy_cond,
780 class avail_exprs_stack *avail_exprs_stack,
781 bool handle_dominating_asserts,
782 pfn_simplify simplify,
783 bitmap visited,
784 vec<jump_thread_edge *> *path,
785 bool *backedge_seen_p)
786 {
787 basic_block bb = taken_edge->dest;
788 gimple_stmt_iterator gsi;
789 gimple *stmt;
790 tree cond;
791
792 /* The key property of these blocks is that they need not be duplicated
793 when threading. Thus they can not have visible side effects such
794 as PHI nodes. */
795 if (!gsi_end_p (gsi_start_phis (bb)))
796 return false;
797
798 /* Skip over DEBUG statements at the start of the block. */
799 gsi = gsi_start_nondebug_bb (bb);
800
801 /* If the block has no statements, but does have a single successor, then
802 it's just a forwarding block and we can thread through it trivially.
803
804 However, note that just threading through empty blocks with single
805 successors is not inherently profitable. For the jump thread to
806 be profitable, we must avoid a runtime conditional.
807
808 By taking the return value from the recursive call, we get the
809 desired effect of returning TRUE when we found a profitable jump
810 threading opportunity and FALSE otherwise.
811
812 This is particularly important when this routine is called after
813 processing a joiner block. Returning TRUE too aggressively in
814 that case results in pointless duplication of the joiner block. */
815 if (gsi_end_p (gsi))
816 {
817 if (single_succ_p (bb))
818 {
819 taken_edge = single_succ_edge (bb);
820 if (!bitmap_bit_p (visited, taken_edge->dest->index))
821 {
822 jump_thread_edge *x
823 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
824 path->safe_push (x);
825 bitmap_set_bit (visited, taken_edge->dest->index);
826 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
827 if (*backedge_seen_p)
828 simplify = dummy_simplify;
829 return thread_around_empty_blocks (taken_edge,
830 dummy_cond,
831 avail_exprs_stack,
832 handle_dominating_asserts,
833 simplify,
834 visited,
835 path,
836 backedge_seen_p);
837 }
838 }
839
840 /* We have a block with no statements, but multiple successors? */
841 return false;
842 }
843
 844   /* The only real statement this block can have is a control
 845      flow altering statement.  Anything else stops the thread.  */
846 stmt = gsi_stmt (gsi);
847 if (gimple_code (stmt) != GIMPLE_COND
848 && gimple_code (stmt) != GIMPLE_GOTO
849 && gimple_code (stmt) != GIMPLE_SWITCH)
850 return false;
851
852 /* If we have traversed a backedge, then we do not want to look
853 at certain expressions in the table that can not be relied upon.
854 Luckily the only code that looked at those expressions is the
855 SIMPLIFY callback, which we replace if we can no longer use it. */
856 if (*backedge_seen_p)
857 simplify = dummy_simplify;
858
859 /* Extract and simplify the condition. */
860 cond = simplify_control_stmt_condition (taken_edge, stmt,
861 avail_exprs_stack, dummy_cond,
862 simplify, handle_dominating_asserts);
863
864 /* If the condition can be statically computed and we have not already
865 visited the destination edge, then add the taken edge to our thread
866 path. */
867 if (cond && is_gimple_min_invariant (cond))
868 {
869 taken_edge = find_taken_edge (bb, cond);
870
871 if (bitmap_bit_p (visited, taken_edge->dest->index))
872 return false;
873 bitmap_set_bit (visited, taken_edge->dest->index);
874
875 jump_thread_edge *x
876 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
877 path->safe_push (x);
878 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
879 if (*backedge_seen_p)
880 simplify = dummy_simplify;
881
882 thread_around_empty_blocks (taken_edge,
883 dummy_cond,
884 avail_exprs_stack,
885 handle_dominating_asserts,
886 simplify,
887 visited,
888 path,
889 backedge_seen_p);
890 return true;
891 }
892
893 return false;
894 }
895
896 /* Return true if the CFG contains at least one path from START_BB to END_BB.
897 When a path is found, record in PATH the blocks from END_BB to START_BB.
898 VISITED_BBS is used to make sure we don't fall into an infinite loop. Bound
899 the recursion to basic blocks belonging to LOOP. */
900
901 static bool
902 fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
903 vec<basic_block, va_gc> *&path,
904 hash_set<basic_block> *visited_bbs, loop_p loop)
905 {
906 if (loop != start_bb->loop_father)
907 return false;
908
909 if (start_bb == end_bb)
910 {
911 vec_safe_push (path, start_bb);
912 return true;
913 }
914
915 if (!visited_bbs->add (start_bb))
916 {
917 edge e;
918 edge_iterator ei;
919 FOR_EACH_EDGE (e, ei, start_bb->succs)
920 if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
921 {
922 vec_safe_push (path, start_bb);
923 return true;
924 }
925 }
926
927 return false;
928 }
929
930 static int max_threaded_paths;
931
932 /* We trace the value of the variable EXPR back through any phi nodes looking
933 for places where it gets a constant value and save the path. Stop after
 934    having recorded MAX_THREADED_PATHS jump threading paths.  */
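/* The motivating shape is a state-machine style loop (sketch only):

     <loop header>:
       state_1 = PHI <0(preheader), 2(bb6), state_9(latch), ...>
       switch (state_1) <...>

   Walking backwards from the switch index through the PHI, the arguments
   that are INTEGER_CSTs identify paths along which the switch target is
   statically known; each such path is recorded as an EDGE_FSM_THREAD jump
   threading path.  */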
935
936 static void
937 fsm_find_control_statement_thread_paths (tree expr,
938 hash_set<basic_block> *visited_bbs,
939 vec<basic_block, va_gc> *&path,
940 bool seen_loop_phi)
941 {
942 tree var = SSA_NAME_VAR (expr);
943 gimple *def_stmt = SSA_NAME_DEF_STMT (expr);
944 basic_block var_bb = gimple_bb (def_stmt);
945
946 if (var == NULL || var_bb == NULL)
947 return;
948
949 /* For the moment we assume that an SSA chain only contains phi nodes, and
950 eventually one of the phi arguments will be an integer constant. In the
951 future, this could be extended to also handle simple assignments of
952 arithmetic operations. */
953 if (gimple_code (def_stmt) != GIMPLE_PHI)
954 return;
955
956 /* Avoid infinite recursion. */
957 if (visited_bbs->add (var_bb))
958 return;
959
960 gphi *phi = as_a <gphi *> (def_stmt);
961 int next_path_length = 0;
962 basic_block last_bb_in_path = path->last ();
963
964 if (loop_containing_stmt (phi)->header == gimple_bb (phi))
965 {
966 /* Do not walk through more than one loop PHI node. */
967 if (seen_loop_phi)
968 return;
969 seen_loop_phi = true;
970 }
971
972 /* Following the chain of SSA_NAME definitions, we jumped from a definition in
973 LAST_BB_IN_PATH to a definition in VAR_BB. When these basic blocks are
974 different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB. */
975 if (var_bb != last_bb_in_path)
976 {
977 edge e;
978 int e_count = 0;
979 edge_iterator ei;
980 vec<basic_block, va_gc> *next_path;
981 vec_alloc (next_path, n_basic_blocks_for_fn (cfun));
982
983 FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
984 {
985 hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;
986
987 if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
988 e->src->loop_father))
989 ++e_count;
990
991 delete visited_bbs;
992
993 /* If there is more than one path, stop. */
994 if (e_count > 1)
995 {
996 vec_free (next_path);
997 return;
998 }
999 }
1000
1001 /* Stop if we have not found a path: this could occur when the recursion
1002 is stopped by one of the bounds. */
1003 if (e_count == 0)
1004 {
1005 vec_free (next_path);
1006 return;
1007 }
1008
1009 /* Make sure we haven't already visited any of the nodes in
1010 NEXT_PATH. Don't add them here to avoid pollution. */
1011 for (unsigned int i = 0; i < next_path->length () - 1; i++)
1012 {
1013 if (visited_bbs->contains ((*next_path)[i]))
1014 {
1015 vec_free (next_path);
1016 return;
1017 }
1018 }
1019
 1020       /* Now add the nodes to VISITED_BBS.  */
1021 for (unsigned int i = 0; i < next_path->length () - 1; i++)
1022 visited_bbs->add ((*next_path)[i]);
1023
1024 /* Append all the nodes from NEXT_PATH to PATH. */
1025 vec_safe_splice (path, next_path);
1026 next_path_length = next_path->length ();
1027 vec_free (next_path);
1028 }
1029
1030 gcc_assert (path->last () == var_bb);
1031
1032 /* Iterate over the arguments of PHI. */
1033 unsigned int i;
1034 for (i = 0; i < gimple_phi_num_args (phi); i++)
1035 {
1036 tree arg = gimple_phi_arg_def (phi, i);
1037 basic_block bbi = gimple_phi_arg_edge (phi, i)->src;
1038
1039 /* Skip edges pointing outside the current loop. */
1040 if (!arg || var_bb->loop_father != bbi->loop_father)
1041 continue;
1042
1043 if (TREE_CODE (arg) == SSA_NAME)
1044 {
1045 vec_safe_push (path, bbi);
1046 /* Recursively follow SSA_NAMEs looking for a constant definition. */
1047 fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
1048 seen_loop_phi);
1049
1050 path->pop ();
1051 continue;
1052 }
1053
1054 if (TREE_CODE (arg) != INTEGER_CST)
1055 continue;
1056
1057 int path_length = path->length ();
 1058       /* A path with fewer than 2 basic blocks should not be jump-threaded.  */
1059 if (path_length < 2)
1060 continue;
1061
1062 if (path_length > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
1063 {
1064 if (dump_file && (dump_flags & TDF_DETAILS))
1065 fprintf (dump_file, "FSM jump-thread path not considered: "
1066 "the number of basic blocks on the path "
1067 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
1068 continue;
1069 }
1070
1071 if (max_threaded_paths <= 0)
1072 {
1073 if (dump_file && (dump_flags & TDF_DETAILS))
1074 fprintf (dump_file, "FSM jump-thread path not considered: "
1075 "the number of previously recorded FSM paths to thread "
1076 "exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
1077 continue;
1078 }
1079
1080 /* Add BBI to the path. */
1081 vec_safe_push (path, bbi);
1082 ++path_length;
1083
1084 int n_insns = 0;
1085 gimple_stmt_iterator gsi;
1086 int j;
1087 loop_p loop = (*path)[0]->loop_father;
1088 bool path_crosses_loops = false;
1089
1090 /* Count the number of instructions on the path: as these instructions
1091 will have to be duplicated, we will not record the path if there are
1092 too many instructions on the path. Also check that all the blocks in
1093 the path belong to a single loop. */
1094 for (j = 1; j < path_length - 1; j++)
1095 {
1096 basic_block bb = (*path)[j];
1097
1098 if (bb->loop_father != loop)
1099 {
1100 path_crosses_loops = true;
1101 break;
1102 }
1103
1104 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1105 {
1106 gimple *stmt = gsi_stmt (gsi);
1107 /* Do not count empty statements and labels. */
1108 if (gimple_code (stmt) != GIMPLE_NOP
1109 && gimple_code (stmt) != GIMPLE_LABEL
1110 && !is_gimple_debug (stmt))
1111 ++n_insns;
1112 }
1113 }
1114
1115 if (path_crosses_loops)
1116 {
1117 if (dump_file && (dump_flags & TDF_DETAILS))
1118 fprintf (dump_file, "FSM jump-thread path not considered: "
1119 "the path crosses loops.\n");
1120 path->pop ();
1121 continue;
1122 }
1123
1124 if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
1125 {
1126 if (dump_file && (dump_flags & TDF_DETAILS))
1127 fprintf (dump_file, "FSM jump-thread path not considered: "
1128 "the number of instructions on the path "
1129 "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
1130 path->pop ();
1131 continue;
1132 }
1133
1134 vec<jump_thread_edge *> *jump_thread_path
1135 = new vec<jump_thread_edge *> ();
1136
1137 /* Record the edges between the blocks in PATH. */
1138 for (j = 0; j < path_length - 1; j++)
1139 {
1140 edge e = find_edge ((*path)[path_length - j - 1],
1141 (*path)[path_length - j - 2]);
1142 gcc_assert (e);
1143 jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
1144 jump_thread_path->safe_push (x);
1145 }
1146
1147 /* Add the edge taken when the control variable has value ARG. */
1148 edge taken_edge = find_taken_edge ((*path)[0], arg);
1149 jump_thread_edge *x
1150 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
1151 jump_thread_path->safe_push (x);
1152
1153 register_jump_thread (jump_thread_path);
1154 --max_threaded_paths;
1155
1156 /* Remove BBI from the path. */
1157 path->pop ();
1158 }
1159
1160 /* Remove all the nodes that we added from NEXT_PATH. */
1161 if (next_path_length)
1162 vec_safe_truncate (path, (path->length () - next_path_length));
1163 }
1164
1165 /* We are exiting E->src, see if E->dest ends with a conditional
1166 jump which has a known value when reached via E.
1167
1168 E->dest can have arbitrary side effects which, if threading is
1169 successful, will be maintained.
1170
1171 Special care is necessary if E is a back edge in the CFG as we
1172 may have already recorded equivalences for E->dest into our
1173 various tables, including the result of the conditional at
1174 the end of E->dest. Threading opportunities are severely
1175 limited in that case to avoid short-circuiting the loop
1176 incorrectly.
1177
1178 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1179 to avoid allocating memory.
1180
1181 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
1182 the simplified condition with left-hand sides of ASSERT_EXPRs they are
1183 used in.
1184
 1185    CONST_AND_COPIES is used to undo temporary equivalences created during
 1186    the walk of E->dest.
1187
1188 SIMPLIFY is a pass-specific function used to simplify statements.
1189
1190 Our caller is responsible for restoring the state of the expression
1191 and const_and_copies stacks.
1192
 1193    A positive return value indicates success.  Zero indicates failure,
 1194    but the block can still be duplicated as a joiner in a jump thread
 1195    path.  A negative value indicates the block should not be duplicated
 1196    and thus is not suitable for a joiner in a jump threading path.  */
1197
1198 static int
1199 thread_through_normal_block (edge e,
1200 gcond *dummy_cond,
1201 bool handle_dominating_asserts,
1202 const_and_copies *const_and_copies,
1203 avail_exprs_stack *avail_exprs_stack,
1204 pfn_simplify simplify,
1205 vec<jump_thread_edge *> *path,
1206 bitmap visited,
1207 bool *backedge_seen_p)
1208 {
1209 /* If we have traversed a backedge, then we do not want to look
1210 at certain expressions in the table that can not be relied upon.
1211 Luckily the only code that looked at those expressions is the
1212 SIMPLIFY callback, which we replace if we can no longer use it. */
1213 if (*backedge_seen_p)
1214 simplify = dummy_simplify;
1215
1216 /* PHIs create temporary equivalences.
1217 Note that if we found a PHI that made the block non-threadable, then
1218 we need to bubble that up to our caller in the same manner we do
1219 when we prematurely stop processing statements below. */
1220 if (!record_temporary_equivalences_from_phis (e, const_and_copies))
1221 return -1;
1222
1223 /* Now walk each statement recording any context sensitive
1224 temporary equivalences we can detect. */
1225 gimple *stmt
1226 = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
1227 avail_exprs_stack,
1228 simplify,
1229 *backedge_seen_p);
1230
 1231   /* There are two reasons STMT might be null, and distinguishing
1232 between them is important.
1233
1234 First the block may not have had any statements. For example, it
1235 might have some PHIs and unconditionally transfer control elsewhere.
1236 Such blocks are suitable for jump threading, particularly as a
1237 joiner block.
1238
1239 The second reason would be if we did not process all the statements
1240 in the block (because there were too many to make duplicating the
 1241      block profitable).  If we did not look at all the statements, then
1242 we may not have invalidated everything needing invalidation. Thus
1243 we must signal to our caller that this block is not suitable for
1244 use as a joiner in a threading path. */
1245 if (!stmt)
1246 {
 1247       /* First case.  The block simply doesn't have any real statements,
 1248          but does have PHIs.  */
1249 if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
1250 && !gsi_end_p (gsi_start_phis (e->dest)))
1251 return 0;
1252
1253 /* Second case. */
1254 return -1;
1255 }
1256
1257 /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
1258 will be taken. */
1259 if (gimple_code (stmt) == GIMPLE_COND
1260 || gimple_code (stmt) == GIMPLE_GOTO
1261 || gimple_code (stmt) == GIMPLE_SWITCH)
1262 {
1263 tree cond;
1264
1265 /* Extract and simplify the condition. */
1266 cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
1267 dummy_cond, simplify,
1268 handle_dominating_asserts);
1269
1270 if (!cond)
1271 return 0;
1272
1273 if (is_gimple_min_invariant (cond))
1274 {
1275 edge taken_edge = find_taken_edge (e->dest, cond);
1276 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
1277
1278 /* DEST could be NULL for a computed jump to an absolute
1279 address. */
1280 if (dest == NULL
1281 || dest == e->dest
1282 || bitmap_bit_p (visited, dest->index))
1283 return 0;
1284
1285 /* Only push the EDGE_START_JUMP_THREAD marker if this is
 1286              the first edge on the path.  */
1287 if (path->length () == 0)
1288 {
1289 jump_thread_edge *x
1290 = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1291 path->safe_push (x);
1292 *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
1293 }
1294
1295 jump_thread_edge *x
1296 = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
1297 path->safe_push (x);
1298 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1299 if (*backedge_seen_p)
1300 simplify = dummy_simplify;
1301
 1302           /* See if we can thread through DEST as well; this helps capture
1303 secondary effects of threading without having to re-run DOM or
1304 VRP.
1305
1306 We don't want to thread back to a block we have already
1307 visited. This may be overly conservative. */
1308 bitmap_set_bit (visited, dest->index);
1309 bitmap_set_bit (visited, e->dest->index);
1310 thread_around_empty_blocks (taken_edge,
1311 dummy_cond,
1312 avail_exprs_stack,
1313 handle_dominating_asserts,
1314 simplify,
1315 visited,
1316 path,
1317 backedge_seen_p);
1318 return 1;
1319 }
1320
1321 if (!flag_expensive_optimizations
1322 || optimize_function_for_size_p (cfun)
1323 || TREE_CODE (cond) != SSA_NAME
1324 || e->dest->loop_father != e->src->loop_father
1325 || loop_depth (e->dest->loop_father) == 0)
1326 return 0;
1327
1328 /* When COND cannot be simplified, try to find paths from a control
1329 statement back through the PHI nodes which would affect that control
1330 statement. */
1331 vec<basic_block, va_gc> *bb_path;
1332 vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
1333 vec_safe_push (bb_path, e->dest);
1334 hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;
1335
1336 max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
1337 fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
1338 false);
1339
1340 delete visited_bbs;
1341 vec_free (bb_path);
1342 }
1343 return 0;
1344 }
1345
1346 /* We are exiting E->src, see if E->dest ends with a conditional
1347 jump which has a known value when reached via E.
1348
1349 Special care is necessary if E is a back edge in the CFG as we
1350 may have already recorded equivalences for E->dest into our
1351 various tables, including the result of the conditional at
1352 the end of E->dest. Threading opportunities are severely
1353 limited in that case to avoid short-circuiting the loop
1354 incorrectly.
1355
1356 Note it is quite common for the first block inside a loop to
1357 end with a conditional which is either always true or always
1358 false when reached via the loop backedge. Thus we do not want
1359 to blindly disable threading across a loop backedge.
1360
1361 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1362 to avoid allocating memory.
1363
1364 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
1365 the simplified condition with left-hand sides of ASSERT_EXPRs they are
1366 used in.
1367
1368 CONST_AND_COPIES is used to undo temporary equivalences created during the
1369 walk of E->dest.
1370
 1371    The available expression table is referenced via AVAIL_EXPRS_STACK.
1372
1373 SIMPLIFY is a pass-specific function used to simplify statements. */
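/* A typical use (a sketch; the stacks and the SIMPLIFY callback belong to the
   client pass such as DOM or VRP, and the sample argument values are only
   illustrative):

     const_and_copies->push_marker ();
     thread_across_edge (dummy_cond, e,
                         /*handle_dominating_asserts=*/false,
                         const_and_copies, avail_exprs_stack, simplify);

   thread_across_edge pops back to that marker itself on every exit path, and
   the registered jump threading paths are later materialized by the updater
   in tree-ssa-threadupdate.c.  */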
1374
1375 void
1376 thread_across_edge (gcond *dummy_cond,
1377 edge e,
1378 bool handle_dominating_asserts,
1379 class const_and_copies *const_and_copies,
1380 class avail_exprs_stack *avail_exprs_stack,
1381 tree (*simplify) (gimple *, gimple *,
1382 class avail_exprs_stack *))
1383 {
1384 bitmap visited = BITMAP_ALLOC (NULL);
1385 bool backedge_seen;
1386
1387 stmt_count = 0;
1388
1389 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1390 bitmap_clear (visited);
1391 bitmap_set_bit (visited, e->src->index);
1392 bitmap_set_bit (visited, e->dest->index);
1393 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1394 if (backedge_seen)
1395 simplify = dummy_simplify;
1396
1397 int threaded = thread_through_normal_block (e, dummy_cond,
1398 handle_dominating_asserts,
1399 const_and_copies,
1400 avail_exprs_stack,
1401 simplify, path,
1402 visited, &backedge_seen);
1403 if (threaded > 0)
1404 {
1405 propagate_threaded_block_debug_into (path->last ()->e->dest,
1406 e->dest);
1407 const_and_copies->pop_to_marker ();
1408 BITMAP_FREE (visited);
1409 register_jump_thread (path);
1410 return;
1411 }
1412 else
1413 {
1414 /* Negative and zero return values indicate no threading was possible,
1415 thus there should be no edges on the thread path and no need to walk
1416 through the vector entries. */
1417 gcc_assert (path->length () == 0);
1418 path->release ();
1419 delete path;
1420
1421 /* A negative status indicates the target block was deemed too big to
1422 duplicate. Just quit now rather than trying to use the block as
1423 a joiner in a jump threading path.
1424
1425 This prevents unnecessary code growth, but more importantly if we
1426 do not look at all the statements in the block, then we may have
1427 missed some invalidations if we had traversed a backedge! */
1428 if (threaded < 0)
1429 {
1430 BITMAP_FREE (visited);
1431 const_and_copies->pop_to_marker ();
1432 return;
1433 }
1434 }
1435
 1436   /* We were unable to determine which edge out of E->dest is taken.  However,
1437 we might still be able to thread through successors of E->dest. This
1438 often occurs when E->dest is a joiner block which then fans back out
1439 based on redundant tests.
1440
1441 If so, we'll copy E->dest and redirect the appropriate predecessor to
1442 the copy. Within the copy of E->dest, we'll thread one or more edges
1443 to points deeper in the CFG.
1444
1445 This is a stopgap until we have a more structured approach to path
1446 isolation. */
1447 {
1448 edge taken_edge;
1449 edge_iterator ei;
1450 bool found;
1451
1452 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1453 we can safely redirect any of the edges. Just punt those cases. */
1454 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1455 if (taken_edge->flags & EDGE_ABNORMAL)
1456 {
1457 const_and_copies->pop_to_marker ();
1458 BITMAP_FREE (visited);
1459 return;
1460 }
1461
1462 /* Look at each successor of E->dest to see if we can thread through it. */
1463 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1464 {
1465 /* Push a fresh marker so we can unwind the equivalences created
1466 for each of E->dest's successors. */
1467 const_and_copies->push_marker ();
1468 if (avail_exprs_stack)
1469 avail_exprs_stack->push_marker ();
1470
1471 /* Avoid threading to any block we have already visited. */
1472 bitmap_clear (visited);
1473 bitmap_set_bit (visited, e->src->index);
1474 bitmap_set_bit (visited, e->dest->index);
1475 bitmap_set_bit (visited, taken_edge->dest->index);
1476 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1477
1478 /* Record whether or not we were able to thread through a successor
1479 of E->dest. */
1480 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1481 path->safe_push (x);
1482
1483 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1484 path->safe_push (x);
1485 found = false;
1486 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1487 backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1488 if (backedge_seen)
1489 simplify = dummy_simplify;
1490 found = thread_around_empty_blocks (taken_edge,
1491 dummy_cond,
1492 avail_exprs_stack,
1493 handle_dominating_asserts,
1494 simplify,
1495 visited,
1496 path,
1497 &backedge_seen);
1498
1499 if (backedge_seen)
1500 simplify = dummy_simplify;
1501
1502 if (!found)
1503 found = thread_through_normal_block (path->last ()->e, dummy_cond,
1504 handle_dominating_asserts,
1505 const_and_copies,
1506 avail_exprs_stack,
1507 simplify, path,
1508 visited, &backedge_seen) > 0;
1509
1510 /* If we were able to thread through a successor of E->dest, then
1511 record the jump threading opportunity. */
1512 if (found)
1513 {
1514 propagate_threaded_block_debug_into (path->last ()->e->dest,
1515 taken_edge->dest);
1516 register_jump_thread (path);
1517 }
1518 else
1519 {
1520 delete_jump_thread_path (path);
1521 }
1522
1523 /* And unwind the equivalence table. */
1524 if (avail_exprs_stack)
1525 avail_exprs_stack->pop_to_marker ();
1526 const_and_copies->pop_to_marker ();
1527 }
1528 BITMAP_FREE (visited);
1529 }
1530
1531 const_and_copies->pop_to_marker ();
1532 }