/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-threadbackward.h"
#include "tree-ssa-dom.h"
#include "gimple-fold.h"
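
/* A quick illustration of the transformation implemented here (an
   editorial sketch, not an excerpt from any particular testcase):

	if (a_1 > 5)		<-- block B1
	  goto B2;
	else
	  goto B3;

	B2: x_2 = 1;  goto B4;
	B3: x_3 = 0;  goto B4;

	B4: x_4 = PHI <x_2(B2), x_3(B3)>
	    if (x_4 != 0)
	      ...

   When B4 is entered via the edge B2->B4, x_4 is known to be 1, so that
   edge can be "threaded" directly to the destination of the true arm of
   the second conditional, eliminating a runtime test on that path.  */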

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);
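
/* Note on the callback signature above: the first argument is the
   statement to be simplified, the second is the statement currently
   being examined (supplying context), and the third is the caller's
   available-expression stack.  The callback returns the simplified
   value, or NULL_TREE if no simplification was possible.  */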

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
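  /* Scrub TREE_OVERFLOW so the cached value does not carry the overflow
     flag into later folding and comparisons.  */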
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */
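
/* For example, VRP may have inserted

     x_3 = ASSERT_EXPR <x_1, x_1 > 0>

   in a block dominating BB; returning x_3 instead of x_1 lets later
   simplification see the condition VRP attached to x_3.  */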

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto
   CONST_AND_COPIES.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
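  /* For instance, given x_4 = PHI <x_2(E), x_3(F)> in E->dest, traversing
     E lets us temporarily record the equivalence x_4 = x_2.  */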
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                   const_and_copies *const_and_copies,
                                                   avail_exprs_stack *avail_exprs_stack,
                                                   pfn_simplify simplify,
                                                   bool backedge_seen)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
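  /* This limit corresponds to the --param max-jump-thread-duplication-stmts
     knob.  */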

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
        return NULL;

      /* If the statement is a unique builtin, we can not thread
         through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_unique_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              const_and_copies->invalidate (op);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  const_and_copies->invalidate (lhs);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             Try to fold the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          /* ??? The DOM callback below can be changed to setting
             the mprts_hook around the call to thread_across_edge,
             avoiding the use substitution.  The VRP hook should be
             changed to properly valueize operands itself using
             SSA_NAME_VALUE in addition to its own lattice.  */
          cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
                                                       threadedge_valueize);
          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            {
              /* We're going to temporarily copy propagate the operands
                 and see if that allows us to simplify this statement.  */
              tree *copy;
              ssa_op_iter iter;
              use_operand_p use_p;
              unsigned int num, i = 0;

              num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
              copy = XALLOCAVEC (tree, num);

              /* Make a copy of the uses & vuses into USES_COPY, then cprop into
                 the operands.  */
              FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
                {
                  tree tmp = NULL;
                  tree use = USE_FROM_PTR (use_p);

                  copy[i++] = use;
                  if (TREE_CODE (use) == SSA_NAME)
                    tmp = SSA_NAME_VALUE (use);
                  if (tmp)
                    SET_USE (use_p, tmp);
                }

              cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

              /* Restore the statement's original uses/defs.  */
              i = 0;
              FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
                SET_USE (use_p, copy[i++]);
            }
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
                                                cached_lhs);
      else if (backedge_seen)
        const_and_copies->invalidate (gimple_get_lhs (stmt));
    }
  return stmt;
}

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
                class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple *stmt,
                                 class avail_exprs_stack *avail_exprs_stack,
                                 gcond *dummy_cond,
                                 pfn_simplify simplify,
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
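      /* Note: SSA_NAME_VALUE chains can contain cycles (e.g. when threading
         the backedge of a loop with a loop invariant name in the condition),
         so each chain is deliberately followed at most twice here rather
         than being walked to a fixed point.  */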
      if (TREE_CODE (op0) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op0) == SSA_NAME
                  && SSA_NAME_VALUE (op0))
                op0 = SSA_NAME_VALUE (op0);
              else
                break;
            }
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op1) == SSA_NAME
                  && SSA_NAME_VALUE (op1))
                op1 = SSA_NAME_VALUE (op1);
              else
                break;
            }
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          cond_code = swap_tree_comparison (cond_code);
          std::swap (op0, op1);
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);

      /* If we were testing an integer/pointer against a constant, then
         we can use the FSM code to trace the value of the SSA_NAME.  If
         a value is found, then the condition will collapse to a constant.

         Return the SSA_NAME we want to trace back rather than the full
         expression and give the FSM threader a chance to find its value.  */
      if (cached_lhs == NULL)
        {
          /* Recover the original operands.  They may have been simplified
             using context sensitive equivalences.  Those context sensitive
             equivalences may not be valid on paths found by the FSM optimizer.  */
          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);

          if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
               || POINTER_TYPE_P (TREE_TYPE (op0)))
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (op1) == INTEGER_CST)
            return op0;
        }

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (cached_lhs) == SSA_NAME
                  && SSA_NAME_VALUE (cached_lhs))
                cached_lhs = SSA_NAME_VALUE (cached_lhs);
              else
                break;
            }
        }

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

      /* We couldn't find an invariant.  But, callers of this
         function may be able to do something useful with the
         unmodified destination.  */
      if (!cached_lhs)
        cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        vars->add (var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple *stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && vars->add (var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = new hash_set<tree>;
                  for (i = 0; i < alloc_count; i++)
                    vars->add (fewvars[i]);
                  fewvars.release ();
                  vars->add (var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static bool
thread_around_empty_blocks (edge taken_edge,
                            gcond *dummy_cond,
                            class avail_exprs_stack *avail_exprs_stack,
                            bool handle_dominating_asserts,
                            pfn_simplify simplify,
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 avail_exprs_stack,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
                                          avail_exprs_stack, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  avail_exprs_stack,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path;
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
                             gcond *dummy_cond,
                             bool handle_dominating_asserts,
                             const_and_copies *const_and_copies,
                             avail_exprs_stack *avail_exprs_stack,
                             pfn_simplify simplify,
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* We want to record any equivalences created by traversing E.  */
  if (!handle_dominating_asserts)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
                                                         avail_exprs_stack,
                                                         simplify,
                                                         *backedge_seen_p);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First, the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
         does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
          && !gsi_end_p (gsi_start_phis (e->dest)))
        return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
                                              dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (!cond)
        return 0;

      if (is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well; this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      avail_exprs_stack,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }

      if (!flag_expensive_optimizations
          || optimize_function_for_size_p (cfun)
          || !(TREE_CODE (cond) == SSA_NAME
               || (TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison
                   && TREE_CODE (TREE_OPERAND (cond, 0)) == SSA_NAME
                   && TREE_CODE (TREE_OPERAND (cond, 1)) == INTEGER_CST))
          || e->dest->loop_father != e->src->loop_father
          || loop_depth (e->dest->loop_father) == 0)
        return 0;

      /* Extract the SSA_NAME we want to trace backwards if COND is not
         already a bare SSA_NAME.  */
      if (TREE_CODE (cond) != SSA_NAME)
        cond = TREE_OPERAND (cond, 0);

      /* When COND cannot be simplified, try to find paths from a control
         statement back through the PHI nodes which would affect that control
         statement.  */
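      /* This defers the work to the backward (FSM) jump threader in
         tree-ssa-threadbackward.c, which searches for paths along which
         COND collapses to a constant.  */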
      find_jump_threads_backwards (cond, e->dest);
    }
  return 0;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gcond *dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    class const_and_copies *const_and_copies,
                    class avail_exprs_stack *avail_exprs_stack,
                    tree (*simplify) (gimple *, gimple *,
                                      class avail_exprs_stack *))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              const_and_copies,
                                              avail_exprs_stack,
                                              simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          const_and_copies->pop_to_marker ();
          return;
        }
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          const_and_copies->pop_to_marker ();
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        const_and_copies->push_marker ();
        if (avail_exprs_stack)
          avail_exprs_stack->push_marker ();

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            avail_exprs_stack,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               const_and_copies,
                                               avail_exprs_stack,
                                               simplify, path,
                                               visited, &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        if (avail_exprs_stack)
          avail_exprs_stack->pop_to_marker ();
        const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}