1 /* SSA Jump Threading
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "predict.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfgloop.h"
31 #include "gimple-iterator.h"
32 #include "tree-cfg.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "params.h"
35 #include "tree-ssa-scopedtables.h"
36 #include "tree-ssa-threadedge.h"
37 #include "tree-ssa-threadbackward.h"
38 #include "tree-ssa-dom.h"
39 #include "gimple-fold.h"
40
41 /* To avoid code explosion due to jump threading, we limit the
42 number of statements we are going to copy. This variable
43 holds the number of statements currently seen that we'll have
44 to copy as part of the jump threading process. */
45 static int stmt_count;
46
47 /* Array to record value-handles per SSA_NAME. */
48 vec<tree> ssa_name_values;
49
50 typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);
51
52 /* Set the value for the SSA name NAME to VALUE. */
53
54 void
55 set_ssa_name_value (tree name, tree value)
56 {
57 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
58 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
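  /* Only the numeric value matters for the equivalence recorded here;
     drop any overflow flag, presumably so a stray TREE_OVERFLOW bit does
     not leak into the IL when this value is later substituted.  */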
59 if (value && TREE_OVERFLOW_P (value))
60 value = drop_tree_overflow (value);
61 ssa_name_values[SSA_NAME_VERSION (name)] = value;
62 }
63
64 /* Initialize the per SSA_NAME value-handles array.  */
65 void
66 threadedge_initialize_values (void)
67 {
68 gcc_assert (!ssa_name_values.exists ());
69 ssa_name_values.create (num_ssa_names);
70 }
71
72 /* Free the per SSA_NAME value-handle array. */
73 void
74 threadedge_finalize_values (void)
75 {
76 ssa_name_values.release ();
77 }
78
79 /* Return TRUE if we may be able to thread an incoming edge into
80 BB to an outgoing edge from BB. Return FALSE otherwise. */
81
82 bool
83 potentially_threadable_block (basic_block bb)
84 {
85 gimple_stmt_iterator gsi;
86
87 /* Special case. We can get blocks that are forwarders, but are
88 not optimized away because they forward from outside a loop
89 to the loop header. We want to thread through them as we can
90 sometimes thread to the loop exit, which is obviously profitable.
91    The interesting case here is when the block has PHIs.  */
92 if (gsi_end_p (gsi_start_nondebug_bb (bb))
93 && !gsi_end_p (gsi_start_phis (bb)))
94 return true;
95
96 /* If BB has a single successor or a single predecessor, then
97 there is no threading opportunity. */
98 if (single_succ_p (bb) || single_pred_p (bb))
99 return false;
100
101 /* If BB does not end with a conditional, switch or computed goto,
102 then there is no threading opportunity. */
103 gsi = gsi_last_bb (bb);
104 if (gsi_end_p (gsi)
105 || ! gsi_stmt (gsi)
106 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
107 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
108 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
109 return false;
110
111 return true;
112 }
113
114 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
115 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
116 BB. If no such ASSERT_EXPR is found, return OP. */
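/* For instance (hypothetical IL): given a dominating statement

       x_7 = ASSERT_EXPR <x_3, x_3 > 0>

   a query for x_3 returns x_7, which typically carries the sharper
   range information that VRP attached to it.  */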
117
118 static tree
119 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
120 {
121 imm_use_iterator imm_iter;
122 gimple *use_stmt;
123 use_operand_p use_p;
124
125 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
126 {
127 use_stmt = USE_STMT (use_p);
128 if (use_stmt != stmt
129 && gimple_assign_single_p (use_stmt)
130 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
131 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
132 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
133 {
134 return gimple_assign_lhs (use_stmt);
135 }
136 }
137 return op;
138 }
139
140 /* Record temporary equivalences created by PHIs at the target of the
141    edge E.  Record unwind information for the equivalences in
142    CONST_AND_COPIES.
143
144    If a PHI which prevents threading is encountered, then return FALSE
145    indicating we should not thread this edge, else return TRUE.
146
147    Any equivalences recorded are context sensitive; our caller is
148    responsible for unwinding them once it is done with this edge.  */
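/* For example (hypothetical IL): when traversing the edge from bb 2 into
   a block containing

       x_3 = PHI <7 (2), y_5 (4)>

   we temporarily record x_3 = 7, an equivalence that later statements
   and the final conditional in the block may use.  */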
149
150 static bool
151 record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
152 {
153 gphi_iterator gsi;
154
155   /* Each PHI creates a temporary equivalence; record them.
156 These are context sensitive equivalences and will be removed
157 later. */
158 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
159 {
160 gphi *phi = gsi.phi ();
161 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
162 tree dst = gimple_phi_result (phi);
163
164 /* If the desired argument is not the same as this PHI's result
165 and it is set by a PHI in E->dest, then we can not thread
166 through E->dest. */
167 if (src != dst
168 && TREE_CODE (src) == SSA_NAME
169 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
170 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
171 return false;
172
173 /* We consider any non-virtual PHI as a statement since it
174      could result in a constant assignment or copy operation.  */
175 if (!virtual_operand_p (dst))
176 stmt_count++;
177
178 const_and_copies->record_const_or_copy (dst, src);
179 }
180 return true;
181 }
182
183 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
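/* For example (hypothetical names): if SSA_NAME_VALUE (x_1) is 4, folding
   the statement y_2 = x_1 + 1 through this hook yields 5.  */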
184
185 static tree
186 threadedge_valueize (tree t)
187 {
188 if (TREE_CODE (t) == SSA_NAME)
189 {
190 tree tem = SSA_NAME_VALUE (t);
191 if (tem)
192 return tem;
193 }
194 return t;
195 }
196
197 /* Try to simplify each statement in E->dest, ultimately leading to
198 a simplification of the COND_EXPR at the end of E->dest.
199
200    Record unwind information for temporary equivalences in CONST_AND_COPIES.
201
202 Use SIMPLIFY (a pointer to a callback function) to further simplify
203 statements using pass specific information.
204
205 We might consider marking just those statements which ultimately
206 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
207 would be recovered by trying to simplify fewer statements.
208
209 If we are able to simplify a statement into the form
210 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
211 a context sensitive equivalence which may help us simplify
212 later statements in E->dest. */
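/* As a concrete (hypothetical) example: if traversing E established
   a_1 = 0, then encountering

       b_2 = a_1 + 3;

   in E->dest lets us record the context sensitive equivalence b_2 = 3,
   which may in turn collapse the conditional at the end of E->dest.  */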
213
214 static gimple *
215 record_temporary_equivalences_from_stmts_at_dest (edge e,
216 const_and_copies *const_and_copies,
217 avail_exprs_stack *avail_exprs_stack,
218 pfn_simplify simplify,
219 bool backedge_seen)
220 {
221 gimple *stmt = NULL;
222 gimple_stmt_iterator gsi;
223 int max_stmt_count;
224
225 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
226
227 /* Walk through each statement in the block recording equivalences
228 we discover. Note any equivalences we discover are context
229 sensitive (ie, are dependent on traversing E) and must be unwound
230 when we're finished processing E. */
231 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
232 {
233 tree cached_lhs = NULL;
234
235 stmt = gsi_stmt (gsi);
236
237 /* Ignore empty statements and labels. */
238 if (gimple_code (stmt) == GIMPLE_NOP
239 || gimple_code (stmt) == GIMPLE_LABEL
240 || is_gimple_debug (stmt))
241 continue;
242
243 /* If the statement has volatile operands, then we assume we
244 can not thread through this block. This is overly
245 conservative in some ways. */
246 if (gimple_code (stmt) == GIMPLE_ASM
247 && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
248 return NULL;
249
250 /* If the statement is a unique builtin, we can not thread
251 through here. */
252 if (gimple_code (stmt) == GIMPLE_CALL
253 && gimple_call_internal_p (stmt)
254 && gimple_call_internal_unique_p (stmt))
255 return NULL;
256
257 /* If duplicating this block is going to cause too much code
258 expansion, then do not thread through this block. */
259 stmt_count++;
260 if (stmt_count > max_stmt_count)
261 return NULL;
262
263 /* If this is not a statement that sets an SSA_NAME to a new
264 value, then do not try to simplify this statement as it will
265 not simplify in any way that is helpful for jump threading. */
266 if ((gimple_code (stmt) != GIMPLE_ASSIGN
267 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
268 && (gimple_code (stmt) != GIMPLE_CALL
269 || gimple_call_lhs (stmt) == NULL_TREE
270 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
271 {
272 /* STMT might still have DEFS and we need to invalidate any known
273 equivalences for them.
274
275 Consider if STMT is a GIMPLE_ASM with one or more outputs that
276 feeds a conditional inside a loop. We might derive an equivalence
277 due to the conditional. */
278 tree op;
279 ssa_op_iter iter;
280
281 if (backedge_seen)
282 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
283 const_and_copies->invalidate (op);
284
285 continue;
286 }
287
288 /* The result of __builtin_object_size depends on all the arguments
289 of a phi node. Temporarily using only one edge produces invalid
290 results. For example
291
292 if (x < 6)
293 goto l;
294 else
295 goto l;
296
297 l:
298 r = PHI <&w[2].a[1](2), &a.a[6](3)>
299 __builtin_object_size (r, 0)
300
301 The result of __builtin_object_size is defined to be the maximum of
302 remaining bytes. If we use only one edge on the phi, the result will
303 change to be the remaining bytes for the corresponding phi argument.
304
305 Similarly for __builtin_constant_p:
306
307 r = PHI <1(2), 2(3)>
308 __builtin_constant_p (r)
309
310 Both PHI arguments are constant, but x ? 1 : 2 is still not
311 constant. */
312
313 if (is_gimple_call (stmt))
314 {
315 tree fndecl = gimple_call_fndecl (stmt);
316 if (fndecl
317 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
318 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
319 {
320 if (backedge_seen)
321 {
322 tree lhs = gimple_get_lhs (stmt);
323 const_and_copies->invalidate (lhs);
324 }
325 continue;
326 }
327 }
328
329 /* At this point we have a statement which assigns an RHS to an
330 SSA_VAR on the LHS. We want to try and simplify this statement
331 to expose more context sensitive equivalences which in turn may
332 allow us to simplify the condition at the end of the loop.
333
334 Handle simple copy operations as well as implied copies from
335 ASSERT_EXPRs. */
336 if (gimple_assign_single_p (stmt)
337 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
338 cached_lhs = gimple_assign_rhs1 (stmt);
339 else if (gimple_assign_single_p (stmt)
340 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
341 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
342 else
343 {
344 /* A statement that is not a trivial copy or ASSERT_EXPR.
345 Try to fold the new expression. Inserting the
346 expression into the hash table is unlikely to help. */
347 /* ??? The DOM callback below can be changed to setting
348 the mprts_hook around the call to thread_across_edge,
349 avoiding the use substitution. The VRP hook should be
350 changed to properly valueize operands itself using
351 SSA_NAME_VALUE in addition to its own lattice. */
352 cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
353 threadedge_valueize);
354 if (!cached_lhs
355 || (TREE_CODE (cached_lhs) != SSA_NAME
356 && !is_gimple_min_invariant (cached_lhs)))
357 {
358 /* We're going to temporarily copy propagate the operands
359 and see if that allows us to simplify this statement. */
360 tree *copy;
361 ssa_op_iter iter;
362 use_operand_p use_p;
363 unsigned int num, i = 0;
364
365 num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
366 copy = XALLOCAVEC (tree, num);
367
368 /* Make a copy of the uses & vuses into USES_COPY, then cprop into
369 the operands. */
370 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
371 {
372 tree tmp = NULL;
373 tree use = USE_FROM_PTR (use_p);
374
375 copy[i++] = use;
376 if (TREE_CODE (use) == SSA_NAME)
377 tmp = SSA_NAME_VALUE (use);
378 if (tmp)
379 SET_USE (use_p, tmp);
380 }
381
382 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
383
384 /* Restore the statement's original uses/defs. */
385 i = 0;
386 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
387 SET_USE (use_p, copy[i++]);
388 }
389 }
390
391 /* Record the context sensitive equivalence if we were able
392 to simplify this statement.
393
394 If we have traversed a backedge at some point during threading,
395 then always enter something here. Either a real equivalence,
396 or a NULL_TREE equivalence which is effectively invalidation of
397 prior equivalences. */
398 if (cached_lhs
399 && (TREE_CODE (cached_lhs) == SSA_NAME
400 || is_gimple_min_invariant (cached_lhs)))
401 const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
402 cached_lhs);
403 else if (backedge_seen)
404 const_and_copies->invalidate (gimple_get_lhs (stmt));
405 }
406 return stmt;
407 }
408
409 /* Once we have passed a backedge in the CFG when threading, we do not want to
410    utilize edge equivalences for simplification purposes.  They are no longer
411 necessarily valid. We use this callback rather than the ones provided by
412 DOM/VRP to achieve that effect. */
413 static tree
414 dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
415 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
416 {
417 return NULL_TREE;
418 }
419
420 /* Simplify the control statement at the end of the block E->dest.
421
422 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
423 is available to use/clobber in DUMMY_COND.
424
425 Use SIMPLIFY (a pointer to a callback function) to further simplify
426 a condition using pass specific information.
427
428 Return the simplified condition or NULL if simplification could
429 not be performed.
430
431 The available expression table is referenced via AVAIL_EXPRS_STACK. */
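/* For example (hypothetical IL): if E->dest ends in

       if (x_1 > 10)

   and traversing E established x_1 = 5, the condition folds to false
   and the taken outgoing edge is known.  */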
432
433 static tree
434 simplify_control_stmt_condition (edge e,
435 gimple *stmt,
436 class avail_exprs_stack *avail_exprs_stack,
437 gcond *dummy_cond,
438 pfn_simplify simplify,
439 bool handle_dominating_asserts)
440 {
441 tree cond, cached_lhs;
442 enum gimple_code code = gimple_code (stmt);
443
444 /* For comparisons, we have to update both operands, then try
445 to simplify the comparison. */
446 if (code == GIMPLE_COND)
447 {
448 tree op0, op1;
449 enum tree_code cond_code;
450
451 op0 = gimple_cond_lhs (stmt);
452 op1 = gimple_cond_rhs (stmt);
453 cond_code = gimple_cond_code (stmt);
454
455 /* Get the current value of both operands. */
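      /* Follow the SSA_NAME_VALUE chain, but only a bounded number of
	 steps; the chain may contain cycles (see the similar walk in the
	 SSA_NAME handling further below).  */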
456 if (TREE_CODE (op0) == SSA_NAME)
457 {
458 for (int i = 0; i < 2; i++)
459 {
460 if (TREE_CODE (op0) == SSA_NAME
461 && SSA_NAME_VALUE (op0))
462 op0 = SSA_NAME_VALUE (op0);
463 else
464 break;
465 }
466 }
467
468 if (TREE_CODE (op1) == SSA_NAME)
469 {
470 for (int i = 0; i < 2; i++)
471 {
472 if (TREE_CODE (op1) == SSA_NAME
473 && SSA_NAME_VALUE (op1))
474 op1 = SSA_NAME_VALUE (op1);
475 else
476 break;
477 }
478 }
479
480 if (handle_dominating_asserts)
481 {
482 /* Now see if the operand was consumed by an ASSERT_EXPR
483 which dominates E->src. If so, we want to replace the
484 operand with the LHS of the ASSERT_EXPR. */
485 if (TREE_CODE (op0) == SSA_NAME)
486 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
487
488 if (TREE_CODE (op1) == SSA_NAME)
489 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
490 }
491
492 /* We may need to canonicalize the comparison. For
493 example, op0 might be a constant while op1 is an
494 SSA_NAME. Failure to canonicalize will cause us to
495 miss threading opportunities. */
496 if (tree_swap_operands_p (op0, op1, false))
497 {
498 cond_code = swap_tree_comparison (cond_code);
499 std::swap (op0, op1);
500 }
501
502 /* Stuff the operator and operands into our dummy conditional
503 expression. */
504 gimple_cond_set_code (dummy_cond, cond_code);
505 gimple_cond_set_lhs (dummy_cond, op0);
506 gimple_cond_set_rhs (dummy_cond, op1);
507
508      /* We absolutely do not care about any type conversions;
509 	 we only care about a zero/nonzero value.  */
510 fold_defer_overflow_warnings ();
511
512 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
513 if (cached_lhs)
514 while (CONVERT_EXPR_P (cached_lhs))
515 cached_lhs = TREE_OPERAND (cached_lhs, 0);
516
517 fold_undefer_overflow_warnings ((cached_lhs
518 && is_gimple_min_invariant (cached_lhs)),
519 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
520
521 /* If we have not simplified the condition down to an invariant,
522 then use the pass specific callback to simplify the condition. */
523 if (!cached_lhs
524 || !is_gimple_min_invariant (cached_lhs))
525 cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);
526
527 /* If we were testing an integer/pointer against a constant, then
528 we can use the FSM code to trace the value of the SSA_NAME. If
529 a value is found, then the condition will collapse to a constant.
530
531 Return the SSA_NAME we want to trace back rather than the full
532 expression and give the FSM threader a chance to find its value. */
533 if (cached_lhs == NULL)
534 {
535 /* Recover the original operands. They may have been simplified
536 using context sensitive equivalences. Those context sensitive
537 equivalences may not be valid on paths found by the FSM optimizer. */
538 tree op0 = gimple_cond_lhs (stmt);
539 tree op1 = gimple_cond_rhs (stmt);
540
541 if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
542 || POINTER_TYPE_P (TREE_TYPE (op0)))
543 && TREE_CODE (op0) == SSA_NAME
544 && TREE_CODE (op1) == INTEGER_CST)
545 return op0;
546 }
547
548 return cached_lhs;
549 }
550
551 if (code == GIMPLE_SWITCH)
552 cond = gimple_switch_index (as_a <gswitch *> (stmt));
553 else if (code == GIMPLE_GOTO)
554 cond = gimple_goto_dest (stmt);
555 else
556 gcc_unreachable ();
557
558 /* We can have conditionals which just test the state of a variable
559 rather than use a relational operator. These are simpler to handle. */
560 if (TREE_CODE (cond) == SSA_NAME)
561 {
562 tree original_lhs = cond;
563 cached_lhs = cond;
564
565 /* Get the variable's current value from the equivalence chains.
566
567 It is possible to get loops in the SSA_NAME_VALUE chains
568 (consider threading the backedge of a loop where we have
569 a loop invariant SSA_NAME used in the condition). */
570 if (cached_lhs)
571 {
572 for (int i = 0; i < 2; i++)
573 {
574 if (TREE_CODE (cached_lhs) == SSA_NAME
575 && SSA_NAME_VALUE (cached_lhs))
576 cached_lhs = SSA_NAME_VALUE (cached_lhs);
577 else
578 break;
579 }
580 }
581
582 /* If we're dominated by a suitable ASSERT_EXPR, then
583 update CACHED_LHS appropriately. */
584 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
585 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
586
587 /* If we haven't simplified to an invariant yet, then use the
588 pass specific callback to try and simplify it further. */
589 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
590 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
591
592 /* We couldn't find an invariant. But, callers of this
593 function may be able to do something useful with the
594 unmodified destination. */
595 if (!cached_lhs)
596 cached_lhs = original_lhs;
597 }
598 else
599 cached_lhs = NULL;
600
601 return cached_lhs;
602 }
603
604 /* Copy debug stmts from DEST's chain of single predecessors up to
605 SRC, so that we don't lose the bindings as PHI nodes are introduced
606 when DEST gains new predecessors. */
607 void
608 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
609 {
610 if (!MAY_HAVE_DEBUG_STMTS)
611 return;
612
613 if (!single_pred_p (dest))
614 return;
615
616 gcc_checking_assert (dest != src);
617
618 gimple_stmt_iterator gsi = gsi_after_labels (dest);
619 int i = 0;
620 const int alloc_count = 16; // ?? Should this be a PARAM?
621
622 /* Estimate the number of debug vars overridden in the beginning of
623 DEST, to tell how many we're going to need to begin with. */
624 for (gimple_stmt_iterator si = gsi;
625 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
626 {
627 gimple *stmt = gsi_stmt (si);
628 if (!is_gimple_debug (stmt))
629 break;
630 i++;
631 }
632
633 auto_vec<tree, alloc_count> fewvars;
634 hash_set<tree> *vars = NULL;
635
636 /* If we're already starting with 3/4 of alloc_count, go for a
637 hash_set, otherwise start with an unordered stack-allocated
638 VEC. */
639 if (i * 4 > alloc_count * 3)
640 vars = new hash_set<tree>;
641
642 /* Now go through the initial debug stmts in DEST again, this time
643 actually inserting in VARS or FEWVARS. Don't bother checking for
644 duplicates in FEWVARS. */
645 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
646 {
647 gimple *stmt = gsi_stmt (si);
648 if (!is_gimple_debug (stmt))
649 break;
650
651 tree var;
652
653 if (gimple_debug_bind_p (stmt))
654 var = gimple_debug_bind_get_var (stmt);
655 else if (gimple_debug_source_bind_p (stmt))
656 var = gimple_debug_source_bind_get_var (stmt);
657 else
658 gcc_unreachable ();
659
660 if (vars)
661 vars->add (var);
662 else
663 fewvars.quick_push (var);
664 }
665
666 basic_block bb = dest;
667
668 do
669 {
670 bb = single_pred (bb);
671 for (gimple_stmt_iterator si = gsi_last_bb (bb);
672 !gsi_end_p (si); gsi_prev (&si))
673 {
674 gimple *stmt = gsi_stmt (si);
675 if (!is_gimple_debug (stmt))
676 continue;
677
678 tree var;
679
680 if (gimple_debug_bind_p (stmt))
681 var = gimple_debug_bind_get_var (stmt);
682 else if (gimple_debug_source_bind_p (stmt))
683 var = gimple_debug_source_bind_get_var (stmt);
684 else
685 gcc_unreachable ();
686
687 /* Discard debug bind overlaps. ??? Unlike stmts from src,
688 copied into a new block that will precede BB, debug bind
689 stmts in bypassed BBs may actually be discarded if
690 they're overwritten by subsequent debug bind stmts, which
691 might be a problem once we introduce stmt frontier notes
692 or somesuch. Adding `&& bb == src' to the condition
693 below will preserve all potentially relevant debug
694 notes. */
695 if (vars && vars->add (var))
696 continue;
697 else if (!vars)
698 {
699 int i = fewvars.length ();
700 while (i--)
701 if (fewvars[i] == var)
702 break;
703 if (i >= 0)
704 continue;
705
706 if (fewvars.length () < (unsigned) alloc_count)
707 fewvars.quick_push (var);
708 else
709 {
710 vars = new hash_set<tree>;
711 for (i = 0; i < alloc_count; i++)
712 vars->add (fewvars[i]);
713 fewvars.release ();
714 vars->add (var);
715 }
716 }
717
718 stmt = gimple_copy (stmt);
719 /* ??? Should we drop the location of the copy to denote
720 they're artificial bindings? */
721 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
722 }
723 }
724 while (bb != src && single_pred_p (bb));
725
726 if (vars)
727 delete vars;
728 else if (fewvars.exists ())
729 fewvars.release ();
730 }
731
732 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
733 need not be duplicated as part of the CFG/SSA updating process).
734
735 If it is threadable, add it to PATH and VISITED and recurse, ultimately
736 returning TRUE from the toplevel call. Otherwise do nothing and
737    return FALSE.
738
739 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
740 try and simplify the condition at the end of TAKEN_EDGE->dest.
741
742 The available expression table is referenced via AVAIL_EXPRS_STACK. */
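/* For example (hypothetical CFG): if TAKEN_EDGE enters an empty forwarding
   block whose single successor ends in a conditional we can already
   evaluate, both edges are appended to PATH and the recursion returns
   TRUE.  */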
743
744 static bool
745 thread_around_empty_blocks (edge taken_edge,
746 gcond *dummy_cond,
747 class avail_exprs_stack *avail_exprs_stack,
748 bool handle_dominating_asserts,
749 pfn_simplify simplify,
750 bitmap visited,
751 vec<jump_thread_edge *> *path,
752 bool *backedge_seen_p)
753 {
754 basic_block bb = taken_edge->dest;
755 gimple_stmt_iterator gsi;
756 gimple *stmt;
757 tree cond;
758
759 /* The key property of these blocks is that they need not be duplicated
760 when threading. Thus they can not have visible side effects such
761 as PHI nodes. */
762 if (!gsi_end_p (gsi_start_phis (bb)))
763 return false;
764
765 /* Skip over DEBUG statements at the start of the block. */
766 gsi = gsi_start_nondebug_bb (bb);
767
768 /* If the block has no statements, but does have a single successor, then
769 it's just a forwarding block and we can thread through it trivially.
770
771 However, note that just threading through empty blocks with single
772 successors is not inherently profitable. For the jump thread to
773 be profitable, we must avoid a runtime conditional.
774
775 By taking the return value from the recursive call, we get the
776 desired effect of returning TRUE when we found a profitable jump
777 threading opportunity and FALSE otherwise.
778
779 This is particularly important when this routine is called after
780 processing a joiner block. Returning TRUE too aggressively in
781 that case results in pointless duplication of the joiner block. */
782 if (gsi_end_p (gsi))
783 {
784 if (single_succ_p (bb))
785 {
786 taken_edge = single_succ_edge (bb);
787 if (!bitmap_bit_p (visited, taken_edge->dest->index))
788 {
789 jump_thread_edge *x
790 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
791 path->safe_push (x);
792 bitmap_set_bit (visited, taken_edge->dest->index);
793 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
794 if (*backedge_seen_p)
795 simplify = dummy_simplify;
796 return thread_around_empty_blocks (taken_edge,
797 dummy_cond,
798 avail_exprs_stack,
799 handle_dominating_asserts,
800 simplify,
801 visited,
802 path,
803 backedge_seen_p);
804 }
805 }
806
807 /* We have a block with no statements, but multiple successors? */
808 return false;
809 }
810
811   /* The only real statement this block can have is a control
812      flow altering statement.  Anything else stops the thread.  */
813 stmt = gsi_stmt (gsi);
814 if (gimple_code (stmt) != GIMPLE_COND
815 && gimple_code (stmt) != GIMPLE_GOTO
816 && gimple_code (stmt) != GIMPLE_SWITCH)
817 return false;
818
819 /* If we have traversed a backedge, then we do not want to look
820 at certain expressions in the table that can not be relied upon.
821 Luckily the only code that looked at those expressions is the
822 SIMPLIFY callback, which we replace if we can no longer use it. */
823 if (*backedge_seen_p)
824 simplify = dummy_simplify;
825
826 /* Extract and simplify the condition. */
827 cond = simplify_control_stmt_condition (taken_edge, stmt,
828 avail_exprs_stack, dummy_cond,
829 simplify, handle_dominating_asserts);
830
831 /* If the condition can be statically computed and we have not already
832 visited the destination edge, then add the taken edge to our thread
833 path. */
834 if (cond && is_gimple_min_invariant (cond))
835 {
836 taken_edge = find_taken_edge (bb, cond);
837
838 if (bitmap_bit_p (visited, taken_edge->dest->index))
839 return false;
840 bitmap_set_bit (visited, taken_edge->dest->index);
841
842 jump_thread_edge *x
843 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
844 path->safe_push (x);
845 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
846 if (*backedge_seen_p)
847 simplify = dummy_simplify;
848
849 thread_around_empty_blocks (taken_edge,
850 dummy_cond,
851 avail_exprs_stack,
852 handle_dominating_asserts,
853 simplify,
854 visited,
855 path,
856 backedge_seen_p);
857 return true;
858 }
859
860 return false;
861 }
862
863 /* We are exiting E->src; see if E->dest ends with a conditional
864 jump which has a known value when reached via E.
865
866 E->dest can have arbitrary side effects which, if threading is
867 successful, will be maintained.
868
869 Special care is necessary if E is a back edge in the CFG as we
870 may have already recorded equivalences for E->dest into our
871 various tables, including the result of the conditional at
872 the end of E->dest. Threading opportunities are severely
873 limited in that case to avoid short-circuiting the loop
874 incorrectly.
875
876 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
877 to avoid allocating memory.
878
879 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
880 the simplified condition with left-hand sides of ASSERT_EXPRs they are
881 used in.
882
883    CONST_AND_COPIES is used to undo temporary equivalences created during
884    the walk of E->dest.
885
886 SIMPLIFY is a pass-specific function used to simplify statements.
887
888 Our caller is responsible for restoring the state of the expression
889 and const_and_copies stacks.
890
891    A positive return value indicates success.  Zero indicates failure, but
892    the block can still be duplicated as a joiner in a jump thread path.
893    A negative value indicates the block should not be duplicated and thus
894    is not suitable for use as a joiner in a jump threading path.  */
895
896 static int
897 thread_through_normal_block (edge e,
898 gcond *dummy_cond,
899 bool handle_dominating_asserts,
900 const_and_copies *const_and_copies,
901 avail_exprs_stack *avail_exprs_stack,
902 pfn_simplify simplify,
903 vec<jump_thread_edge *> *path,
904 bitmap visited,
905 bool *backedge_seen_p)
906 {
907 /* If we have seen a backedge, then we rely solely on the FSM threader
908 to find jump threads. */
909 if (*backedge_seen_p)
910 return 0;
911
912 /* We want to record any equivalences created by traversing E. */
913 if (!handle_dominating_asserts)
914 record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
915
916 /* PHIs create temporary equivalences.
917 Note that if we found a PHI that made the block non-threadable, then
918 we need to bubble that up to our caller in the same manner we do
919 when we prematurely stop processing statements below. */
920 if (!record_temporary_equivalences_from_phis (e, const_and_copies))
921 return -1;
922
923 /* Now walk each statement recording any context sensitive
924 temporary equivalences we can detect. */
925 gimple *stmt
926 = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
927 avail_exprs_stack,
928 simplify,
929 *backedge_seen_p);
930
931   /* There are two reasons STMT might be null, and distinguishing
932 between them is important.
933
934 First the block may not have had any statements. For example, it
935 might have some PHIs and unconditionally transfer control elsewhere.
936 Such blocks are suitable for jump threading, particularly as a
937 joiner block.
938
939 The second reason would be if we did not process all the statements
940 in the block (because there were too many to make duplicating the
941      block profitable).  If we did not look at all the statements, then
942 we may not have invalidated everything needing invalidation. Thus
943 we must signal to our caller that this block is not suitable for
944 use as a joiner in a threading path. */
945 if (!stmt)
946 {
947       /* First case.  The block simply doesn't have any statements, but
948 	 does have PHIs.  */
949 if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
950 && !gsi_end_p (gsi_start_phis (e->dest)))
951 return 0;
952
953 /* Second case. */
954 return -1;
955 }
956
957 /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
958 will be taken. */
959 if (gimple_code (stmt) == GIMPLE_COND
960 || gimple_code (stmt) == GIMPLE_GOTO
961 || gimple_code (stmt) == GIMPLE_SWITCH)
962 {
963 tree cond;
964
965 /* Extract and simplify the condition. */
966 cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
967 dummy_cond, simplify,
968 handle_dominating_asserts);
969
970 if (!cond)
971 return 0;
972
973 if (is_gimple_min_invariant (cond))
974 {
975 edge taken_edge = find_taken_edge (e->dest, cond);
976 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
977
978 /* DEST could be NULL for a computed jump to an absolute
979 address. */
980 if (dest == NULL
981 || dest == e->dest
982 || bitmap_bit_p (visited, dest->index))
983 return 0;
984
985 /* Only push the EDGE_START_JUMP_THREAD marker if this is
986 	 the first edge on the path.  */
987 if (path->length () == 0)
988 {
989 jump_thread_edge *x
990 = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
991 path->safe_push (x);
992 *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
993 }
994
995 jump_thread_edge *x
996 = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
997 path->safe_push (x);
998 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
999 if (*backedge_seen_p)
1000 simplify = dummy_simplify;
1001
1002 /* See if we can thread through DEST as well, this helps capture
1003 secondary effects of threading without having to re-run DOM or
1004 VRP.
1005
1006 We don't want to thread back to a block we have already
1007 visited. This may be overly conservative. */
1008 bitmap_set_bit (visited, dest->index);
1009 bitmap_set_bit (visited, e->dest->index);
1010 thread_around_empty_blocks (taken_edge,
1011 dummy_cond,
1012 avail_exprs_stack,
1013 handle_dominating_asserts,
1014 simplify,
1015 visited,
1016 path,
1017 backedge_seen_p);
1018 return 1;
1019 }
1020 }
1021 return 0;
1022 }
1023
1024 /* We are exiting E->src; see if E->dest ends with a conditional
1025 jump which has a known value when reached via E.
1026
1027 Special care is necessary if E is a back edge in the CFG as we
1028 may have already recorded equivalences for E->dest into our
1029 various tables, including the result of the conditional at
1030 the end of E->dest. Threading opportunities are severely
1031 limited in that case to avoid short-circuiting the loop
1032 incorrectly.
1033
1034 Note it is quite common for the first block inside a loop to
1035 end with a conditional which is either always true or always
1036 false when reached via the loop backedge. Thus we do not want
1037 to blindly disable threading across a loop backedge.
1038
1039 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1040 to avoid allocating memory.
1041
1042 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
1043 the simplified condition with left-hand sides of ASSERT_EXPRs they are
1044 used in.
1045
1046 CONST_AND_COPIES is used to undo temporary equivalences created during the
1047 walk of E->dest.
1048
1049    The available expression table is referenced via AVAIL_EXPRS_STACK.
1050
1051 SIMPLIFY is a pass-specific function used to simplify statements. */
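/* In outline: first try to thread directly through E->dest via
   thread_through_normal_block; if that fails, fall back to the FSM
   (backwards) threader and, where possible, treat E->dest as a joiner
   block and try to thread through each of its successors.  */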
1052
1053 void
1054 thread_across_edge (gcond *dummy_cond,
1055 edge e,
1056 bool handle_dominating_asserts,
1057 class const_and_copies *const_and_copies,
1058 class avail_exprs_stack *avail_exprs_stack,
1059 tree (*simplify) (gimple *, gimple *,
1060 class avail_exprs_stack *))
1061 {
1062 bitmap visited = BITMAP_ALLOC (NULL);
1063 bool backedge_seen;
1064
1065 stmt_count = 0;
1066
1067 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1068 bitmap_clear (visited);
1069 bitmap_set_bit (visited, e->src->index);
1070 bitmap_set_bit (visited, e->dest->index);
1071 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1072 if (backedge_seen)
1073 simplify = dummy_simplify;
1074
1075 int threaded = thread_through_normal_block (e, dummy_cond,
1076 handle_dominating_asserts,
1077 const_and_copies,
1078 avail_exprs_stack,
1079 simplify, path,
1080 visited, &backedge_seen);
1081 if (threaded > 0)
1082 {
1083 propagate_threaded_block_debug_into (path->last ()->e->dest,
1084 e->dest);
1085 const_and_copies->pop_to_marker ();
1086 BITMAP_FREE (visited);
1087 register_jump_thread (path);
1088 return;
1089 }
1090 else
1091 {
1092 /* Negative and zero return values indicate no threading was possible,
1093 thus there should be no edges on the thread path and no need to walk
1094 through the vector entries. */
1095 gcc_assert (path->length () == 0);
1096 path->release ();
1097 delete path;
1098
1099 find_jump_threads_backwards (e);
1100
1101 /* A negative status indicates the target block was deemed too big to
1102 duplicate. Just quit now rather than trying to use the block as
1103 a joiner in a jump threading path.
1104
1105 This prevents unnecessary code growth, but more importantly if we
1106 do not look at all the statements in the block, then we may have
1107 missed some invalidations if we had traversed a backedge! */
1108 if (threaded < 0)
1109 {
1110 BITMAP_FREE (visited);
1111 const_and_copies->pop_to_marker ();
1112 return;
1113 }
1114 }
1115
1116   /* We were unable to determine which outgoing edge from E->dest is taken.  However,
1117 we might still be able to thread through successors of E->dest. This
1118 often occurs when E->dest is a joiner block which then fans back out
1119 based on redundant tests.
1120
1121 If so, we'll copy E->dest and redirect the appropriate predecessor to
1122 the copy. Within the copy of E->dest, we'll thread one or more edges
1123 to points deeper in the CFG.
1124
1125 This is a stopgap until we have a more structured approach to path
1126 isolation. */
1127 {
1128 edge taken_edge;
1129 edge_iterator ei;
1130 bool found;
1131
1132 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1133 we can safely redirect any of the edges. Just punt those cases. */
1134 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1135 if (taken_edge->flags & EDGE_ABNORMAL)
1136 {
1137 const_and_copies->pop_to_marker ();
1138 BITMAP_FREE (visited);
1139 return;
1140 }
1141
1142 /* Look at each successor of E->dest to see if we can thread through it. */
1143 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1144 {
1145 /* Push a fresh marker so we can unwind the equivalences created
1146 for each of E->dest's successors. */
1147 const_and_copies->push_marker ();
1148 if (avail_exprs_stack)
1149 avail_exprs_stack->push_marker ();
1150
1151 /* Avoid threading to any block we have already visited. */
1152 bitmap_clear (visited);
1153 bitmap_set_bit (visited, e->src->index);
1154 bitmap_set_bit (visited, e->dest->index);
1155 bitmap_set_bit (visited, taken_edge->dest->index);
1156 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1157
1158 /* Record whether or not we were able to thread through a successor
1159 of E->dest. */
1160 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1161 path->safe_push (x);
1162
1163 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1164 path->safe_push (x);
1165 found = false;
1166 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1167 backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1168 if (backedge_seen)
1169 simplify = dummy_simplify;
1170 found = thread_around_empty_blocks (taken_edge,
1171 dummy_cond,
1172 avail_exprs_stack,
1173 handle_dominating_asserts,
1174 simplify,
1175 visited,
1176 path,
1177 &backedge_seen);
1178
1179 if (backedge_seen)
1180 simplify = dummy_simplify;
1181
1182 if (!found)
1183 found = thread_through_normal_block (path->last ()->e, dummy_cond,
1184 handle_dominating_asserts,
1185 const_and_copies,
1186 avail_exprs_stack,
1187 simplify, path,
1188 visited, &backedge_seen) > 0;
1189
1190 /* If we were able to thread through a successor of E->dest, then
1191 record the jump threading opportunity. */
1192 if (found)
1193 {
1194 propagate_threaded_block_debug_into (path->last ()->e->dest,
1195 taken_edge->dest);
1196 register_jump_thread (path);
1197 }
1198 else
1199 {
1200 find_jump_threads_backwards (path->last ()->e);
1201 delete_jump_thread_path (path);
1202 }
1203
1204 /* And unwind the equivalence table. */
1205 if (avail_exprs_stack)
1206 avail_exprs_stack->pop_to_marker ();
1207 const_and_copies->pop_to_marker ();
1208 }
1209 BITMAP_FREE (visited);
1210 }
1211
1212 const_and_copies->pop_to_marker ();
1213 }