/* SSA Jump Threading
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "builtins.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
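
/* Illustrative sketch (not from the original sources): a block with
   several predecessors that ends in a conditional, e.g.

     bb4 (preds: bb2, bb3):
       ...
       if (x_1 > 10) goto bb5; else goto bb6;

   is potentially threadable because different incoming edges may
   determine different outgoing edges.  A block with a single
   predecessor or a single successor offers no such choice.  */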

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
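
/* Hypothetical example: given an assertion such as

     x_5 = ASSERT_EXPR <x_3, x_3 > 0>

   in a block dominating BB, a use of x_3 in BB's condition can be
   replaced by x_5, exposing the range information the assertion
   carries for later simplification.  */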

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
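
/* For illustration (an assumed trace, not from the sources): after the
   caller pushes a NULL_TREE marker and equivalences are recorded for
   x_1 and then y_2, the stack holds

     [ ..., NULL_TREE, prev(x_1), x_1, prev(y_2), y_2 ]

   Unwinding pops <y_2, prev(y_2)>, then <x_1, prev(x_1)>, restoring
   each SSA_NAME's previous value, and stops at the marker.  */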

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we cannot thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
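
/* Sketch of the effect (hypothetical GIMPLE): for

     x_5 = PHI <7(2), x_4(3)>

   traversing the edge from bb2 records the context sensitive
   equivalence x_5 == 7, which may later fold a condition such as
   if (x_5 > 3) at the end of the block.  */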

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
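
/* For instance (hypothetical values): if a_1 has the recorded value 7,
   then after temporary copy propagation the statement b_3 = a_1 + 5
   dispatches as a GIMPLE_BINARY_RHS and fold_binary reduces it to 12,
   yielding a new context sensitive equivalence for b_3.  */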

/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.  */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack)
{
  for (unsigned int i = 1; i < num_ssa_names; i++)
    if (ssa_name (i) && SSA_NAME_VALUE (ssa_name (i)) == lhs)
      record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);

  if (SSA_NAME_VALUE (lhs))
    record_temporary_equivalence (lhs, NULL_TREE, stack);
}
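
/* Note the linear scan over all SSA_NAMEs: any name whose recorded
   value is LHS must be invalidated too.  For example (illustrative,
   not from the sources), after recording a_1 == b_2, a redefinition
   of b_2 would otherwise leave a stale equivalence hanging off a_1.  */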

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						  vec<tree> *stack,
						  tree (*simplify) (gimple,
								    gimple),
						  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 cannot thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	{
	  /* STMT might still have DEFS and we need to invalidate any known
	     equivalences for them.

	     Consider if STMT is a GIMPLE_ASM with one or more outputs that
	     feeds a conditional inside a loop.  We might derive an equivalence
	     due to the conditional.  */
	  tree op;
	  ssa_op_iter iter;

	  if (backedge_seen)
	    FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
	      invalidate_equivalences (op, stack);

	  continue;
	}

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    {
	      if (backedge_seen)
		{
		  tree lhs = gimple_get_lhs (stmt);
		  invalidate_equivalences (lhs, stack);
		}
	      continue;
	    }
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the block.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  if (is_gimple_call (stmt))
	    cached_lhs = fold_call_stmt (stmt, false);
	  else
	    cached_lhs = fold_assignment_stmt (stmt);

	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.

	 If we have traversed a backedge at some point during threading,
	 then always enter something here.  Either a real equivalence,
	 or a NULL_TREE equivalence which is effectively invalidation of
	 prior equivalences.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
	invalidate_equivalences (gimple_get_lhs (stmt), stack);
    }
  return stmt;
}

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gimple dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
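
/* Worked example (assumed values, not from the sources): reaching

     if (x_3 != 0)

   via an edge where a PHI recorded x_3 == 0, the operand is replaced
   from the equivalence chains and fold_binary reduces the condition
   to boolean_false_node, so the outgoing edge is statically known.  */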

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return FALSE.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gimple dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path,
			    bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they cannot have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	      if (*backedge_seen_p)
		simplify = dummy_simplify;
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path,
						 backedge_seen_p);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that cannot be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
	simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path,
				  backedge_seen_p);
      return true;
    }

  return false;
}
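
/* Illustrative CFG (not from the sources): if bb4 contains only a
   forwarding goto and bb5 ends in a condition already determined by
   the recorded equivalences, a path reaching bb4 is extended through
   bb4 and bb5 with EDGE_NO_COPY_SRC_BLOCK entries, since neither
   block needs to be duplicated when the jump is threaded.  */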

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value indicates success.  A zero return value
   indicates failure, but the block can still be duplicated as a joiner
   in a jump thread path.  A negative return value indicates the block
   should not be duplicated and thus is not suitable for a joiner in a
   jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gimple dummy_cond,
			     bool handle_dominating_asserts,
			     vec<tree> *stack,
			     tree (*simplify) (gimple, gimple),
			     vec<jump_thread_edge *> *path,
			     bitmap visited,
			     bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that cannot be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
							*backedge_seen_p);

  /* If we didn't look at all the statements, the most likely reason is
     there were too many and thus duplicating this block is not profitable.

     Also note if we do not look at all the statements, then we may not
     have invalidated equivalences that are no longer valid if we threaded
     around a loop.  Thus we must signal to our caller that this block
     is not suitable for use as a joiner in a threading path.  */
  if (!stmt)
    return -1;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	      *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);
	  *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	  if (*backedge_seen_p)
	    simplify = dummy_simplify;

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      handle_dominating_asserts,
				      simplify,
				      visited,
				      path,
				      backedge_seen_p);
	  return 1;
	}
    }
  return 0;
}
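
/* A successful path built here looks schematically like

     EDGE_START_JUMP_THREAD (e), EDGE_COPY_SRC_BLOCK (taken_edge),
     [EDGE_NO_COPY_SRC_BLOCK ...]

   i.e. the thread starts at E, E->dest is copied, and any trailing
   empty blocks are traversed without copying.  (Schematic summary,
   not additional source text.)  */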

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    vec<tree> *stack,
		    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
					      handle_dominating_asserts,
					      stack, simplify, path,
					      visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  remove_temporary_equivalences (stack);
	  return;
	}
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of
     E->dest.  This often occurs when E->dest is a joiner block which then
     fans back out based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  remove_temporary_equivalences (stack);
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	stack->safe_push (NULL_TREE);

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = false;
	backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
	backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	if (backedge_seen)
	  simplify = dummy_simplify;
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    handle_dominating_asserts,
					    simplify,
					    visited,
					    path,
					    &backedge_seen);

	if (backedge_seen)
	  simplify = dummy_simplify;

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       handle_dominating_asserts,
					       stack, simplify, path, visited,
					       &backedge_seen) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    delete_jump_thread_path (path);
	  }

	/* And unwind the equivalence table.  */
	remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}
1232 }