/* SSA Jump Threading
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"
#include "cfganal.h"
#include "alloc-pool.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

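/* Jump threading looks for a path of edges along which the condition
   at the end of a block is known, then reroutes execution around the
   redundant conditional.  An illustrative source-level sketch (not
   taken from any particular testcase):

	if (a > 10)
	  x = 1;
	else
	  x = 0;
	if (x)
	  foo ();
	else
	  bar ();

   On the path through the else arm of the first conditional, X is
   known to be zero, so that path can be threaded directly to the call
   to bar (), bypassing the second test.  */
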
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *,
			     class avail_exprs_stack *,
			     basic_block);

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
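  /* For example (an illustrative sketch, not from any particular
     testcase), such a forwarder might be

	<bb F>:
	  # x_1 = PHI <0 (preheader), x_2 (other pred)>
	  goto <loop header>;

     it has no executable statements, but its PHI can carry a known
     value into the loop's exit test.  */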
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES and EVRP_RANGE_DATA.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */
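/* For instance (illustrative only), traversing edge E into a block
   containing

	# x_1 = PHI <7 (E), y_2 (other)>

   lets us temporarily record x_1 == 7; the equivalence is unwound
   once we are done examining this path.  */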

static bool
record_temporary_equivalences_from_phis (edge e,
					 const_and_copies *const_and_copies,
					 evrp_range_analyzer *evrp_range_analyzer)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we cannot thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 may result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);

      /* Also update the value range associated with DST, using
	 the range from SRC.

	 Note that even if SRC is a constant we need to set a suitable
	 output range so that VR_UNDEFINED ranges do not leak through.  */
      if (evrp_range_analyzer)
	{
	  /* Get an empty new VR we can pass to update_value_range and save
	     away in the VR stack.  */
	  vr_values *vr_values = evrp_range_analyzer->get_vr_values ();
	  value_range *new_vr = vr_values->allocate_value_range ();
	  new (new_vr) value_range ();

	  /* There are three cases to consider:

	     First if SRC is an SSA_NAME, then we can copy the value
	     range from SRC into NEW_VR.

	     Second if SRC is an INTEGER_CST, then we can just set
	     NEW_VR to a singleton range.

	     Otherwise set NEW_VR to varying.  This may be overly
	     conservative.  */
	  if (TREE_CODE (src) == SSA_NAME)
	    new_vr->deep_copy (vr_values->get_value_range (src));
	  else if (TREE_CODE (src) == INTEGER_CST)
	    new_vr->set (src);
	  else
	    new_vr->set_varying (TREE_TYPE (src));

	  /* This is a temporary range for DST, so push it.  */
	  evrp_range_analyzer->push_value_range (dst, new_vr);
	}
    }
  return true;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
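/* As an illustrative sketch (not from any particular testcase): having
   recorded x_3 == 0 from a PHI, the statement "y_4 = x_3 + 1;" folds
   to 1, so we may additionally record the temporary equivalence
   y_4 == 1 for use by later statements on this path.  */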

static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
    const_and_copies *const_and_copies,
    avail_exprs_stack *avail_exprs_stack,
    evrp_range_analyzer *evrp_range_analyzer,
    pfn_simplify simplify)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 cannot thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we cannot thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	{
	  /* If any of the stmts in the PATH's dests are going to be
	     killed due to threading, grow the max count
	     accordingly.  */
	  if (max_stmt_count
	      == PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
	    {
	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
	      if (dump_file)
		fprintf (dump_file, "threading bb %i up to %i stmts\n",
			 e->dest->index, max_stmt_count);
	    }
	  /* If we're still past the limit, we're done.  */
	  if (stmt_count > max_stmt_count)
	    return NULL;
	}

      /* These are temporary ranges, do not reflect them back into
	 the global range data.  */
      if (evrp_range_analyzer)
	evrp_range_analyzer->record_ranges_from_stmt (stmt, true);

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     Try to fold the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  /* ??? The DOM callback below can be changed to setting
	     the mprts_hook around the call to thread_across_edge,
	     avoiding the use substitution.  The VRP hook should be
	     changed to properly valueize operands itself using
	     SSA_NAME_VALUE in addition to its own lattice.  */
	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						       threadedge_valueize);
	  if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
	      && (!cached_lhs
		  || (TREE_CODE (cached_lhs) != SSA_NAME
		      && !is_gimple_min_invariant (cached_lhs))))
	    {
	      /* We're going to temporarily copy propagate the operands
		 and see if that allows us to simplify this statement.  */
	      tree *copy;
	      ssa_op_iter iter;
	      use_operand_p use_p;
	      unsigned int num, i = 0;

	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	      copy = XALLOCAVEC (tree, num);

	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
		 the operands.  */
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		{
		  tree tmp = NULL;
		  tree use = USE_FROM_PTR (use_p);

		  copy[i++] = use;
		  if (TREE_CODE (use) == SSA_NAME)
		    tmp = SSA_NAME_VALUE (use);
		  if (tmp)
		    SET_USE (use_p, tmp);
		}

	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);

	      /* Restore the statement's original uses/defs.  */
	      i = 0;
	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
		SET_USE (use_p, copy[i++]);
	    }
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
						cached_lhs);
    }
  return stmt;
}

static tree simplify_control_stmt_condition_1 (edge, gimple *,
					       class avail_exprs_stack *,
					       tree, enum tree_code, tree,
					       gcond *, pfn_simplify,
					       unsigned);

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  When simplifying a GIMPLE_SWITCH, we may return
   the CASE_LABEL_EXPR that will be taken.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple *stmt,
				 class avail_exprs_stack *avail_exprs_stack,
				 gcond *dummy_cond,
				 pfn_simplify simplify)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      const unsigned recursion_limit = 4;

      cached_lhs
	= simplify_control_stmt_condition_1 (e, stmt, avail_exprs_stack,
					     op0, cond_code, op1,
					     dummy_cond, simplify,
					     recursion_limit);

      /* If we were testing an integer/pointer against a constant, then
	 we can use the FSM code to trace the value of the SSA_NAME.  If
	 a value is found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths found by the FSM optimizer.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
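  /* E.g. (illustrative), "switch (i_2)" or a computed "goto dest_3;"
     test the SSA_NAME directly, so there are no operands to
     canonicalize or fold; we only chase the name's current value.  */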
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	{
	  if (code == GIMPLE_SWITCH)
	    {
	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
		 we found before handing off to VRP.  If simplification is
		 possible, the simplified value will be a CASE_LABEL_EXPR of
		 the label that is proven to be taken.  */
	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
	      gimple_switch_set_index (dummy_switch, cached_lhs);
	      cached_lhs = (*simplify) (dummy_switch, stmt,
					avail_exprs_stack, e->src);
	      ggc_free (dummy_switch);
	    }
	  else
	    cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack, e->src);
	}

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* Recursive helper for simplify_control_stmt_condition.  */

static tree
simplify_control_stmt_condition_1 (edge e,
				   gimple *stmt,
				   class avail_exprs_stack *avail_exprs_stack,
				   tree op0,
				   enum tree_code cond_code,
				   tree op1,
				   gcond *dummy_cond,
				   pfn_simplify simplify,
				   unsigned limit)
{
  if (limit == 0)
    return NULL_TREE;

  /* We may need to canonicalize the comparison.  For
     example, op0 might be a constant while op1 is an
     SSA_NAME.  Failure to canonicalize will cause us to
     miss threading opportunities.  */
  if (tree_swap_operands_p (op0, op1))
    {
      cond_code = swap_tree_comparison (cond_code);
      std::swap (op0, op1);
    }

  /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
     recurse into the LHS to see if there is a dominating ASSERT_EXPR
     of A or of B that makes this condition always true or always false
     along the edge E.  */
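  /* For example (an illustrative sketch), given

	x_3 = a_1 & b_2;
	if (x_3 == 0)

     showing that a_1 == 0 along E proves the test is always true,
     no matter what value b_2 has.  */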
  if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && integer_zerop (op1))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
	       || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
	{
	  enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
	  const tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  const tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  /* Is A != 0 ?  */
	  const tree res1
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs1, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res1 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
	    {
	      /* If A == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If A == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
	    {
	      /* If A != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If A != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  /* Is B != 0 ?  */
	  const tree res2
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs2, NE_EXPR, op1,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res2 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
	    {
	      /* If B == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If B == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
	    {
	      /* If B != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If B != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  if (res1 != NULL_TREE && res2 != NULL_TREE)
	    {
	      if (rhs_code == BIT_AND_EXPR
		  && TYPE_PRECISION (TREE_TYPE (op0)) == 1
		  && integer_nonzerop (res1)
		  && integer_nonzerop (res2))
		{
		  /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true.  */
		  if (cond_code == NE_EXPR)
		    return boolean_true_node;
		  /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_false_node;
		}

	      if (rhs_code == BIT_IOR_EXPR
		  && integer_zerop (res1)
		  && integer_zerop (res2))
		{
		  /* If A == 0 and B == 0 then (A | B) != 0 is false.  */
		  if (cond_code == NE_EXPR)
		    return boolean_false_node;
		  /* If A == 0 and B == 0 then (A | B) == 0 is true.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_true_node;
		}
	    }
	}
      /* Handle (A CMP B) CMP 0.  */
      else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
	       == tcc_comparison)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  tree_code new_cond = gimple_assign_rhs_code (def_stmt);
	  if (cond_code == EQ_EXPR)
	    new_cond = invert_tree_comparison (new_cond, false);

	  tree res
	    = simplify_control_stmt_condition_1 (e, def_stmt, avail_exprs_stack,
						 rhs1, new_cond, rhs2,
						 dummy_cond, simplify,
						 limit - 1);
	  if (res != NULL_TREE && is_gimple_min_invariant (res))
	    return res;
	}
    }

  gimple_cond_set_code (dummy_cond, cond_code);
  gimple_cond_set_lhs (dummy_cond, op0);
  gimple_cond_set_rhs (dummy_cond, op1);

  /* We absolutely do not care about any type conversions; we only
     care about a zero/nonzero value.  */
  fold_defer_overflow_warnings ();

  tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
  if (res)
    while (CONVERT_EXPR_P (res))
      res = TREE_OPERAND (res, 0);

  fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

  /* If we have not simplified the condition down to an invariant,
     then use the pass specific callback to simplify the condition.  */
  if (!res
      || !is_gimple_min_invariant (res))
    res = (*simplify) (dummy_cond, stmt, avail_exprs_stack, e->src);

  return res;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
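/* For instance (illustrative), a bind such as "# DEBUG i => 3" sitting
   in a bypassed predecessor is copied to the start of DEST so the
   binding survives once DEST acquires the new threaded edge.  */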
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_BIND_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else if (gimple_debug_nonbind_marker_p (stmt))
	    continue;
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts.  We
	     want to copy binds for all modified variables, so that we
	     retain a bind to the shared def if there is one, or to a
	     newly introduced PHI node if there is one.  Our bind will
	     end up reset if the value is dead, but that implies the
	     variable couldn't have survived, so it's fine.  We are
	     not actually running the code that performed the binds at
	     this point, we're just adding binds so that they survive
	     the new confluence, so markers should not be copied.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, SIMPLIFY are used to try and simplify the condition at the
   end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */
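/* As an illustrative sketch, TAKEN_EDGE->dest might be simply

	<bb 5>:
	  goto <bb 8>;

   no PHIs and no side effects, so the thread path can be extended
   through it without copying the block.  */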

static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    class avail_exprs_stack *avail_exprs_stack,
			    pfn_simplify simplify,
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they cannot have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 avail_exprs_stack,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
					  avail_exprs_stack, dummy_cond,
					  simplify);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond != NULL_TREE
      && (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
    {
      if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
      else
	taken_edge = find_taken_edge (bb, cond);

      if (!taken_edge
	  || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  avail_exprs_stack,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     const_and_copies *const_and_copies,
			     avail_exprs_stack *avail_exprs_stack,
			     evrp_range_analyzer *evrp_range_analyzer,
			     pfn_simplify simplify,
			     vec<jump_thread_edge *> *path,
			     bitmap visited)
{
  /* We want to record any equivalences created by traversing E.  */
  record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies,
						evrp_range_analyzer))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
							avail_exprs_stack,
							evrp_range_analyzer,
							simplify);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any instructions, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
					      dummy_cond, simplify);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
	{
	  edge taken_edge;
	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	    taken_edge = find_edge (e->dest,
				    label_to_block (cfun, CASE_LABEL (cond)));
	  else
	    taken_edge = find_taken_edge (e->dest, cond);

	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      avail_exprs_stack,
				      simplify,
				      visited,
				      path);
	  return 1;
	}
    }
  return 0;
}

/* There are basic blocks that look like:
   <P0>
   p0 = a CMP b ; or p0 = (INT) (a CMP b)
   goto <X>;

   <P1>
   p1 = c CMP d
   goto <X>;

   <X>
   # phi = PHI <p0 (P0), p1 (P1)>
   if (phi != 0) goto <Y>; else goto <Z>;

   Then edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
   and edges (X,Y) and (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.

   Return true if E is (P0,X) or (P1,X).  */

bool
edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
{
  /* See if there is only one stmt which is gcond.  */
  gcond *gs;
  if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
    return false;

  /* See if gcond's cond is "(phi !=/== 0/1)" in the basic block.  */
  tree cond = gimple_cond_lhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);
  if (TREE_CODE (cond) != SSA_NAME
      || (code != NE_EXPR && code != EQ_EXPR)
      || (!integer_onep (rhs) && !integer_zerop (rhs)))
    return false;
  gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
  if (phi == NULL || gimple_bb (phi) != e->dest)
    return false;

  /* Check if phi's incoming value is CMP.  */
  gassign *def;
  tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
  if (TREE_CODE (value) != SSA_NAME
      || !has_single_use (value)
      || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
    return false;

  /* Or if it is (INT) (a CMP b).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
    {
      value = gimple_assign_rhs1 (def);
      if (TREE_CODE (value) != SSA_NAME
	  || !has_single_use (value)
	  || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
	return false;
    }

  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
    return false;

  return true;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

static void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    class const_and_copies *const_and_copies,
		    class avail_exprs_stack *avail_exprs_stack,
		    class evrp_range_analyzer *evrp_range_analyzer,
		    pfn_simplify simplify)
{
  bitmap visited = BITMAP_ALLOC (NULL);

  const_and_copies->push_marker ();
  avail_exprs_stack->push_marker ();
  if (evrp_range_analyzer)
    evrp_range_analyzer->push_marker ();

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  int threaded;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (e, dummy_cond,
					    const_and_copies,
					    avail_exprs_stack,
					    evrp_range_analyzer,
					    simplify, path,
					    visited);
  else
    threaded = 0;

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      avail_exprs_stack->pop_to_marker ();
      if (evrp_range_analyzer)
	evrp_range_analyzer->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  const_and_copies->pop_to_marker ();
	  avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  return;
	}
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  const_and_copies->pop_to_marker ();
	  avail_exprs_stack->pop_to_marker ();
	  if (evrp_range_analyzer)
	    evrp_range_analyzer->pop_to_marker ();
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  continue;

	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	const_and_copies->push_marker ();
	avail_exprs_stack->push_marker ();
	if (evrp_range_analyzer)
	  evrp_range_analyzer->push_marker ();

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    avail_exprs_stack,
					    simplify,
					    visited,
					    path);

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       const_and_copies,
					       avail_exprs_stack,
					       evrp_range_analyzer,
					       simplify, path,
					       visited) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found
	    || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
	  {
	    if (taken_edge->dest != path->last ()->e->dest)
	      propagate_threaded_block_debug_into (path->last ()->e->dest,
						   taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  delete_jump_thread_path (path);

	/* And unwind the equivalence table.  */
	if (evrp_range_analyzer)
	  evrp_range_analyzer->pop_to_marker ();
	avail_exprs_stack->pop_to_marker ();
	const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  if (evrp_range_analyzer)
    evrp_range_analyzer->pop_to_marker ();
  const_and_copies->pop_to_marker ();
  avail_exprs_stack->pop_to_marker ();
}

/* Examine the outgoing edges from BB and conditionally
   try to thread them.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_outgoing_edges (basic_block bb, gcond *dummy_cond,
		       class const_and_copies *const_and_copies,
		       class avail_exprs_stack *avail_exprs_stack,
		       class evrp_range_analyzer *evrp_range_analyzer,
		       tree (*simplify) (gimple *, gimple *,
					 class avail_exprs_stack *,
					 basic_block))
{
  int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
  gimple *last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & flags) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (dummy_cond, single_succ_edge (bb),
			  const_and_copies, avail_exprs_stack,
			  evrp_range_analyzer, simplify);
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & flags) == 0
	   && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (dummy_cond, true_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (dummy_cond, false_edge,
			    const_and_copies, avail_exprs_stack,
			    evrp_range_analyzer, simplify);
    }
}