/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "tree-inline.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
#include "dbgcnt.h"
#include "alloc-pool.h"
#include "tree-vrp.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"

/* This file implements optimizations on the dominator tree.  */

/* Structure for recording edge equivalences.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures.  */
class edge_info
{
 public:
  typedef std::pair <tree, tree> equiv_pair;
  edge_info (edge);
  ~edge_info ();

  /* Record a simple LHS = RHS equivalence.  This may trigger
     calls to derive_equivalences.  */
  void record_simple_equiv (tree, tree);

  /* If traversing this edge creates simple equivalences, we store
     them as LHS/RHS pairs within this vector.  */
  vec<equiv_pair> simple_equivalences;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;

 private:
  /* Derive equivalences by walking the use-def chains.  */
  void derive_equivalences (tree, tree, int);
};

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
                                                    class const_and_copies *,
                                                    class avail_exprs_stack *);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
                                              class const_and_copies *,
                                              class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
                                           class avail_exprs_stack *);
static void dump_dominator_optimization_stats (FILE *file,
                                               hash_table<expr_elt_hasher> *);

/* Constructor for EDGE_INFO.  An EDGE_INFO instance is always
   associated with an edge E.  */

edge_info::edge_info (edge e)
{
  /* Free the old edge_info associated with E if it exists, and
     associate our new object with E.  */
  free_dom_edge_info (e);
  e->aux = this;

  /* And initialize the embedded vectors.  */
  simple_equivalences = vNULL;
  cond_equivalences = vNULL;
}

/* Destructor just needs to release the vectors.  */

edge_info::~edge_info (void)
{
  this->cond_equivalences.release ();
  this->simple_equivalences.release ();
}

/* NAME is known to have the value VALUE, which must be a constant.

   Walk through its use-def chain to see if there are other equivalences
   we might be able to derive.

   RECURSION_LIMIT controls how far back we recurse through the use-def
   chains.  */
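
/* A minimal sketch of the walk (the SSA names here are purely
   illustrative, not from any real testcase): if this edge tells us
   that d_3 == 5 and the IL contains

       c_2 = (int) b_1;
       d_3 = c_2 + 1;

   where b_1 has a narrower integral type, then from the PLUS_EXPR we
   can also record c_2 == 4 and, through the widening conversion,
   b_1 == 4 (assuming 4 fits in b_1's type).  Each derived equivalence
   is pushed onto simple_equivalences.  */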
void
edge_info::derive_equivalences (tree name, tree value, int recursion_limit)
{
  if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST)
    return;

  /* This records the equivalence for the toplevel object.  Do
     this before checking the recursion limit.  */
  simple_equivalences.safe_push (equiv_pair (name, value));

  /* Limit how far up the use-def chains we are willing to walk.  */
  if (recursion_limit == 0)
    return;

  /* We can walk up the use-def chains to potentially find more
     equivalences.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  if (is_gimple_assign (def_stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (def_stmt);
      switch (code)
        {
        /* If the result of DEF_STMT is known to be zero, then both
           operands of a BIT_IOR_EXPR must be zero as well.  */
        case BIT_IOR_EXPR:
          if (integer_zerop (value))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              tree rhs2 = gimple_assign_rhs2 (def_stmt);

              value = build_zero_cst (TREE_TYPE (rhs1));
              derive_equivalences (rhs1, value, recursion_limit - 1);
              value = build_zero_cst (TREE_TYPE (rhs2));
              derive_equivalences (rhs2, value, recursion_limit - 1);
            }
          break;

        /* If the result of DEF_STMT is known to be nonzero, then we
           may be able to deduce something about the SSA_NAMEs used
           on its RHS.  */
        case BIT_AND_EXPR:
          if (!integer_zerop (value))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              tree rhs2 = gimple_assign_rhs2 (def_stmt);

              /* If either operand has a boolean range, then we
                 know its value must be one, otherwise we just know it
                 is nonzero.  The former is clearly useful; I haven't
                 seen cases where the latter is helpful yet.  */
              if (TREE_CODE (rhs1) == SSA_NAME)
                {
                  if (ssa_name_has_boolean_range (rhs1))
                    {
                      value = build_one_cst (TREE_TYPE (rhs1));
                      derive_equivalences (rhs1, value, recursion_limit - 1);
                    }
                }
              if (TREE_CODE (rhs2) == SSA_NAME)
                {
                  if (ssa_name_has_boolean_range (rhs2))
                    {
                      value = build_one_cst (TREE_TYPE (rhs2));
                      derive_equivalences (rhs2, value, recursion_limit - 1);
                    }
                }
            }
          break;

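        /* E.g. for the BIT_AND_EXPR case above (illustrative names):
           given _5 = b_2 & c_3 and _5 != 0 on this edge, if b_2 has a
           boolean range we can record b_2 == 1, and likewise for c_3.  */
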
        /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
           set via a widening type conversion, then we may be able to record
           additional equivalences.  */
        case NOP_EXPR:
        case CONVERT_EXPR:
          {
            tree rhs = gimple_assign_rhs1 (def_stmt);
            tree rhs_type = TREE_TYPE (rhs);
            if (INTEGRAL_TYPE_P (rhs_type)
                && (TYPE_PRECISION (TREE_TYPE (name))
                    >= TYPE_PRECISION (rhs_type))
                && int_fits_type_p (value, rhs_type))
              derive_equivalences (rhs,
                                   fold_convert (rhs_type, value),
                                   recursion_limit - 1);
            break;
          }

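        /* E.g. for the conversion case above (a sketch with made-up
           names): given c_2 of type char, i_3 = (int) c_2, and
           i_3 == 7 on this edge, we can record c_2 == 7 as well,
           since 7 fits in char.  */
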
        /* We can invert the operation of these codes trivially if
           one of the RHS operands is a constant to produce a known
           value for the other RHS operand.  */
        case POINTER_PLUS_EXPR:
        case PLUS_EXPR:
          {
            tree rhs1 = gimple_assign_rhs1 (def_stmt);
            tree rhs2 = gimple_assign_rhs2 (def_stmt);

            /* If either argument is a constant, then we can compute
               a constant value for the nonconstant argument.  */
            if (TREE_CODE (rhs1) == INTEGER_CST
                && TREE_CODE (rhs2) == SSA_NAME)
              derive_equivalences (rhs2,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs1),
                                   recursion_limit - 1);
            else if (TREE_CODE (rhs2) == INTEGER_CST
                     && TREE_CODE (rhs1) == SSA_NAME)
              derive_equivalences (rhs1,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs2),
                                   recursion_limit - 1);
            break;
          }

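        /* E.g. for the PLUS_EXPR case above (illustrative names):
           from a_1 = b_2 + 5 and a_1 == 7 on this edge we derive
           b_2 == 2 and continue the walk from b_2.  */
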
        /* If one of the operands is a constant, then we can compute
           the value of the other operand.  If both operands are
           SSA_NAMEs, then they must be equal if the result is zero.  */
        case MINUS_EXPR:
          {
            tree rhs1 = gimple_assign_rhs1 (def_stmt);
            tree rhs2 = gimple_assign_rhs2 (def_stmt);

            /* If either argument is a constant, then we can compute
               a constant value for the nonconstant argument.  */
            if (TREE_CODE (rhs1) == INTEGER_CST
                && TREE_CODE (rhs2) == SSA_NAME)
              derive_equivalences (rhs2,
                                   fold_binary (MINUS_EXPR, TREE_TYPE (rhs1),
                                                rhs1, value),
                                   recursion_limit - 1);
            else if (TREE_CODE (rhs2) == INTEGER_CST
                     && TREE_CODE (rhs1) == SSA_NAME)
              derive_equivalences (rhs1,
                                   fold_binary (PLUS_EXPR, TREE_TYPE (rhs1),
                                                value, rhs2),
                                   recursion_limit - 1);
            else if (integer_zerop (value))
              {
                tree cond = build2 (EQ_EXPR, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                record_conditions (&this->cond_equivalences, cond, inverted);
              }
            break;
          }

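        /* For the MINUS_EXPR case above, when neither operand is a
           constant but the result is known to be zero, e.g.
           _4 = b_2 - c_3 with _4 == 0 (made-up names), we record the
           condition b_2 == c_3 (and its inverse) rather than a simple
           equivalence.  */
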
        case EQ_EXPR:
        case NE_EXPR:
          {
            if ((code == EQ_EXPR && integer_onep (value))
                || (code == NE_EXPR && integer_zerop (value)))
              {
                tree rhs1 = gimple_assign_rhs1 (def_stmt);
                tree rhs2 = gimple_assign_rhs2 (def_stmt);

                /* If either argument is a constant, then record the
                   other argument as being the same as that constant.

                   If neither operand is a constant, then we have a
                   conditional name == name equivalence.  */
                if (TREE_CODE (rhs1) == INTEGER_CST)
                  derive_equivalences (rhs2, rhs1, recursion_limit - 1);
                else if (TREE_CODE (rhs2) == INTEGER_CST)
                  derive_equivalences (rhs1, rhs2, recursion_limit - 1);
              }
            else
              {
                tree cond = build2 (code, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                if (integer_zerop (value))
                  std::swap (cond, inverted);
                record_conditions (&this->cond_equivalences, cond, inverted);
              }
            break;
          }

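        /* E.g. for the EQ_EXPR/NE_EXPR case above (made-up names):
           given _5 = b_2 == c_3 and _5 == 1 on this edge, the
           comparison is known true; if c_3 is the constant 4 we can
           record b_2 == 4 and keep walking from b_2.  */
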
        /* For BIT_NOT and NEGATE, we can just apply the operation to the
           VALUE to get the new equivalence.  It will always be a constant
           so we can recurse.  */
        case BIT_NOT_EXPR:
        case NEGATE_EXPR:
          {
            tree rhs = gimple_assign_rhs1 (def_stmt);
            tree res = fold_build1 (code, TREE_TYPE (rhs), value);
            derive_equivalences (rhs, res, recursion_limit - 1);
            break;
          }

        default:
          {
            if (TREE_CODE_CLASS (code) == tcc_comparison)
              {
                tree cond = build2 (code, boolean_type_node,
                                    gimple_assign_rhs1 (def_stmt),
                                    gimple_assign_rhs2 (def_stmt));
                tree inverted = invert_truthvalue (cond);
                if (integer_zerop (value))
                  std::swap (cond, inverted);
                record_conditions (&this->cond_equivalences, cond, inverted);
              }
            break;
          }
        }
    }
}

void
edge_info::record_simple_equiv (tree lhs, tree rhs)
{
  /* If the RHS is a constant, then we may be able to derive
     further equivalences.  Otherwise just record the LHS = RHS
     equivalence.  */
  if (TREE_CODE (rhs) == INTEGER_CST)
    /* The constant 4 caps how deep derive_equivalences will walk
       up the use-def chains.  */
    derive_equivalences (lhs, rhs, 4);
  else
    simple_equivalences.safe_push (equiv_pair (lhs, rhs));
}

/* Free the edge_info data attached to E, if it exists.  */

void
free_dom_edge_info (edge e)
{
  class edge_info *edge_info = (struct edge_info *) e->aux;

  if (edge_info)
    delete edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          free_dom_edge_info (e);
          e->aux = NULL;
        }
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  class edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb
                    = label_to_block (cfun, CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = new class edge_info (e);
                      edge_info->record_simple_equiv (index, x);
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant, as we
             know the value of OP0 on both arms of the branch; i.e., we
             can record an equivalence for OP0 rather than COND.

             However, don't do this if the constant isn't zero or one.
             Such conditionals will get optimized more thoroughly during
             the domwalk.  */
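          /* E.g. for "if (b_1 != 0)" with a boolean-range b_1 (an
             illustrative name), we record b_1 == 1 on the true edge
             and b_1 == 0 on the false edge, rather than equivalences
             for the condition itself.  */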
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && ssa_name_has_boolean_range (op0)
              && is_gimple_min_invariant (op1)
              && (integer_zerop (op1) || integer_onep (op1)))
            {
              tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
              tree false_val = constant_boolean_node (false, TREE_TYPE (op0));

              if (code == EQ_EXPR)
                {
                  edge_info = new class edge_info (true_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? false_val : true_val));
                  edge_info = new class edge_info (false_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? true_val : false_val));
                }
              else
                {
                  edge_info = new class edge_info (true_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? true_val : false_val));
                  edge_info = new class edge_info (false_edge);
                  edge_info->record_simple_equiv (op0,
                                                  (integer_zerop (op1)
                                                   ? false_val : true_val));
                }
            }
          /* This can show up in the IL as a result of copy propagation;
             it will eventually be canonicalized, but we have to cope
             with this case within the pass.  */
          else if (is_gimple_min_invariant (op0)
                   && TREE_CODE (op1) == SSA_NAME)
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = new class edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                edge_info->record_simple_equiv (op1, op0);

              edge_info = new class edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                edge_info->record_simple_equiv (op1, op0);
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = new class edge_info (true_edge);
              record_conditions (&edge_info->cond_equivalences, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                edge_info->record_simple_equiv (op0, op1);

              edge_info = new class edge_info (false_edge);
              record_conditions (&edge_info->cond_equivalences, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                edge_info->record_simple_equiv (op0, op1);
            }
        }
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction,
                      class const_and_copies *const_and_copies,
                      class avail_exprs_stack *avail_exprs_stack,
                      gcond *dummy_cond)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      evrp_range_analyzer (true),
      m_dummy_cond (dummy_cond) { }

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* Unwindable equivalences, both const/copy and expression varieties.  */
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* VRP data.  */
  class evrp_range_analyzer evrp_range_analyzer;

  /* Dummy condition to avoid creating lots of throw away statements.  */
  gcond *m_dummy_cond;

  /* Optimize a single statement within a basic block using the
     various tables maintained by DOM.  Returns the taken edge if
     the statement is a conditional with a statically determined
     value.  */
  edge optimize_stmt (basic_block, gimple_stmt_iterator);
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt),
      may_peel_loop_headers_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      may_peel_loop_headers_p = param;
    }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

 private:
  /* This flag is used to prevent loops from being peeled repeatedly in jump
     threading; it will be removed once we preserve loop structures throughout
     the compilation -- we will be able to mark the affected loops directly in
     jump threading, and avoid peeling them next time.  */
  bool may_peel_loop_headers_p;
}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  gcond *dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
                                         integer_zero_node, NULL, NULL);

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker walker (CDI_DOMINATORS, const_and_copies,
                             avail_exprs_stack, dummy_cond);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  if (cfg_altered)
    {
      FOR_EACH_BB_FN (bb, fun)
        {
          edge_iterator ei;
          edge e;

          /* First see if there are any edges without EDGE_EXECUTABLE
             set.  */
          bool found = false;
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if ((e->flags & EDGE_EXECUTABLE) == 0)
                {
                  found = true;
                  break;
                }
            }

          /* If there were any such edges found, then remove jump threads
             containing any edge leaving BB.  */
          if (found)
            FOR_EACH_EDGE (e, ei, bb->succs)
              remove_jump_threads_including (e);
        }
    }

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags
                     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading has finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now-noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* A hack until we remove threading from tree-vrp.c and bring the
   simplification routine into the dom_opt_dom_walker class.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple *stmt,
                                  gimple *within_stmt ATTRIBUTE_UNUSED,
                                  class avail_exprs_stack *avail_exprs_stack,
                                  basic_block bb ATTRIBUTE_UNUSED)
{
  /* First query our hash table to see if the expression is available
     there.  A non-NULL return value will be either a constant or another
     SSA_NAME.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs)
    return cached_lhs;

  /* If the hash table query failed, query VRP information.  This is
     essentially the same as tree-vrp's simplification routine.  The
     copy in tree-vrp is scheduled for removal in gcc-9.  */
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      cached_lhs
        = x_vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                                 gimple_cond_lhs (cond_stmt),
                                                 gimple_cond_rhs (cond_stmt),
                                                 within_stmt);
      return cached_lhs;
    }

  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
        return NULL_TREE;

      value_range *vr = x_vr_values->get_value_range (op);
      if (vr->undefined_p ()
          || vr->varying_p ()
          || vr->symbolic_p ())
        return NULL_TREE;

      if (vr->kind () == VR_RANGE)
        {
          size_t i, j;

          find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);

          if (i == j)
            {
              tree label = gimple_switch_label (switch_stmt, i);
              tree singleton;

              if (CASE_HIGH (label) != NULL_TREE
                  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
                     && tree_int_cst_compare (CASE_HIGH (label), vr->max ()) >= 0)
                  : (vr->singleton_p (&singleton)
                     && tree_int_cst_equal (CASE_LOW (label), singleton)))
                return label;

              if (i > j)
                return gimple_switch_label (switch_stmt, 0);
            }
        }

      if (vr->kind () == VR_ANTI_RANGE)
        {
          unsigned n = gimple_switch_num_labels (switch_stmt);
          tree min_label = gimple_switch_label (switch_stmt, 1);
          tree max_label = gimple_switch_label (switch_stmt, n - 1);

          /* The default label will be taken only if the anti-range of the
             operand is entirely outside the bounds of all the (non-default)
             case labels.  */
          if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
              && (CASE_HIGH (max_label) != NULL_TREE
                  ? tree_int_cst_compare (vr->max (), CASE_HIGH (max_label)) >= 0
                  : tree_int_cst_compare (vr->max (), CASE_LOW (max_label)) >= 0))
            return gimple_switch_label (switch_stmt, 0);
        }
      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
          && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              || POINTER_TYPE_P (TREE_TYPE (lhs)))
          && stmt_interesting_for_vrp (stmt))
        {
          edge dummy_e;
          tree dummy_tree;
          value_range new_vr;
          x_vr_values->extract_range_from_stmt (stmt, &dummy_e,
                                                &dummy_tree, &new_vr);
          tree singleton;
          if (new_vr.singleton_p (&singleton))
            return singleton;
        }
    }
  return NULL;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}

/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E.  */
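
/* E.g. (a sketch, names invented): if edge E tells us x_1 == 0 and a
   use of x_1 such as y_2 = x_1 + 1 dominates E->dest, then y_2 folds
   to 1 under that equivalence, so y_2 == 1 may also be recorded
   for E.  */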
static void
back_propagate_equivalences (tree lhs, edge e,
                             class const_and_copies *const_and_copies)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  bitmap domby = NULL;
  basic_block dest = e->dest;

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ??? If the code gets re-organized to a worklist to catch more
     indirect opportunities and it is made to handle PHIs then this
     should only consider use_stmts in basic-blocks we have already visited.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
         This is cheaper than the dominator set tests below.  */
      if (dest == gimple_bb (use_stmt))
        continue;

      /* Filter out statements that can never produce a useful
         equivalence.  */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
        continue;

      /* Profiling has shown the domination tests here can be fairly
         expensive.  We get significant improvements by building the
         set of blocks that dominate BB.  We can then just test
         for set membership below.

         We also initialize the set lazily since often the only uses
         are going to be in the same block as DEST.  */
      if (!domby)
        {
          domby = BITMAP_ALLOC (NULL);
          basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
          while (bb)
            {
              bitmap_set_bit (domby, bb->index);
              bb = get_immediate_dominator (CDI_DOMINATORS, bb);
            }
        }

      /* This tests if USE_STMT does not dominate DEST.  */
      if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
        continue;

      /* At this point USE_STMT dominates DEST and may result in a
         useful equivalence.  Try to simplify its RHS to a constant
         or SSA_NAME.  */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
                                                 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
        record_equality (lhs2, res, const_and_copies);
    }

  if (domby)
    BITMAP_FREE (domby);
}

/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
void
record_temporary_equivalences (edge e,
                               class const_and_copies *const_and_copies,
                               class avail_exprs_stack *avail_exprs_stack)
{
  int i;
  class edge_info *edge_info = (class edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        avail_exprs_stack->record_cond (eq);

      edge_info::equiv_pair *seq;
      for (i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
        {
          tree lhs = seq->first;
          if (!lhs || TREE_CODE (lhs) != SSA_NAME)
            continue;

          /* Record the simple NAME = VALUE equivalence.  */
          tree rhs = seq->second;

          /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
             cheaper to compute than the other, then set up the equivalence
             such that we replace the expensive one with the cheap one.

             If they are the same cost to compute, then do not record
             anything.  */
          if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
              int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);

              gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
              int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);

              if (rhs_cost > lhs_cost)
                record_equality (rhs, lhs, const_and_copies);
              else if (rhs_cost < lhs_cost)
                record_equality (lhs, rhs, const_and_copies);
            }
          else
            record_equality (lhs, rhs, const_and_copies);

          /* Any equivalence found for LHS may result in additional
             equivalences for other uses of LHS that we have already
             processed.  */
          back_propagate_equivalences (lhs, e, const_and_copies);
        }
    }
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

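/* E.g. (illustrative): for x_3 = PHI <x_1(e1), x_1(e2)>, every
   executable argument is x_1, so x_3 == x_1 holds unconditionally
   and SSA_NAME_VALUE (x_3) can be set to x_1 with no unwinding
   record.  */
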
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();

      /* We might eliminate the PHI, so advance GSI now.  */
      gsi_next (&gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If the associated edge is not marked as executable, then it
             can be ignored.  */
          if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
            continue;

          t = dom_valueize (t);

          /* If T is an SSA_NAME and its associated edge is a backedge,
             then quit as we cannot utilize this equivalence.  */
          if (TREE_CODE (t) == SSA_NAME
              && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
            break;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi))
        {
          if (may_propagate_copy (lhs, rhs))
            set_ssa_name_value (lhs, rhs);
          else if (virtual_operand_p (lhs))
            {
              gimple *use_stmt;
              imm_use_iterator iter;
              use_operand_p use_p;
              /* For virtual operands we have to propagate into all uses as
                 otherwise we will create overlapping life-ranges.  */
              FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, rhs);
              if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
                SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
              gimple_stmt_iterator tmp_gsi = gsi_for_stmt (phi);
              remove_phi_node (&tmp_gsi, true);
            }
        }
    }
}

/* Record any equivalences created by the incoming edge to BB into
   CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
   incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb,
                                        class const_and_copies *const_and_copies,
                                        class avail_exprs_stack *avail_exprs_stack)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_pred_edge_ignoring_loop_edges (bb, true);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Dump SSA statistics on FILE.  */

static void
dump_dominator_optimization_stats (FILE *file,
                                   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Since no other code cares about the ordering of equality
     comparison operands for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_k)
   [...]
   i_j = i_{j-1}  for each j : 2 <= j <= k-1
   [...]
   i_k = i_{k-1} +/- ...  */

bool
simple_iv_increment_p (gimple *stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple *phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  while (gimple_code (phi) != GIMPLE_PHI)
    {
      /* Follow trivial copies, but not the DEF used in a back edge,
         so that we don't prevent coalescing.  */
      if (!gimple_assign_ssa_name_copy_p (phi))
        return false;
      preinc = gimple_assign_rhs1 (phi);
      phi = SSA_NAME_DEF_STMT (preinc);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

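/* E.g. (illustrative): if x_1 currently has SSA_NAME_VALUE 7 and a
   successor contains p_2 = PHI <x_1(e), ...>, then the x_1 argument
   on edge e is replaced with 7, even though the successor block
   itself may not be dominated by BB.  */
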
static void
cprop_into_successor_phis (basic_block bb,
                           class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      class edge_info *edge_info = (class edge_info *) e->aux;

      if (edge_info)
        {
          edge_info::equiv_pair *seq;
          for (int i = 0; edge_info->simple_equivalences.iterate (i, &seq); ++i)
            {
              tree lhs = seq->first;
              tree rhs = seq->second;

              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                const_and_copies->record_const_or_copy (lhs, rhs);
            }
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}

edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  evrp_range_analyzer.enter (bb);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb, m_const_and_copies,
                                          m_avail_exprs_stack);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
                                      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      evrp_range_analyzer.record_ranges_from_stmt (gsi_stmt (gsi), false);
      taken_edge = this->optimize_stmt (bb, gsi);
    }

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, m_const_and_copies);
  if (taken_edge && !dbg_cnt (dom_unreachable_edges))
    return NULL;

  return taken_edge;
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  x_vr_values = evrp_range_analyzer.get_vr_values ();
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
                         m_avail_exprs_stack,
                         &evrp_range_analyzer,
                         simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  /* These remove expressions local to BB from the tables.  */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
  evrp_range_analyzer.leave (bb);
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES.  */

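/* E.g. (a sketch, names invented): given a dominating a_1 = b_2 + c_3,
   a later d_4 = b_2 + c_3 hits the available-expression table and is
   rewritten to d_4 = a_1, which copy propagation can then clean up.  */
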
static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
                                  class const_and_copies *const_and_copies,
                                  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
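
/* For instance (illustrative): after processing  x_1 = y_2;  we record
   y_2 as the value of x_1, so dominated uses of x_1 can be rewritten
   to use y_2 directly.  */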

static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
                               class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          rhs = dom_valueize (rhs);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* Make sure we can propagate &x + CST.  */
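  /* For instance (illustrative): for  p_1 = &x + 4,  we record a value
     of roughly the form  &MEM[(void *)&x + 4]  for p_1, which the
     propagation machinery can substitute into memory references.  */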
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
        = build_fold_addr_expr (fold_build2 (MEM_REF,
                                             TREE_TYPE (TREE_TYPE (op0)),
                                             unshare_expr (op0),
                                             fold_convert (ptr_type_node,
                                                           op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "==== ASGN ");
          print_generic_expr (dump_file, lhs);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, new_rhs);
          fprintf (dump_file, "\n");
        }

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
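  /* For instance (illustrative): after the store  *p_1 = x_2;  we enter
     the artificial statement  x_2 = *p_1;  into the table, so a
     dominated load from *p_1 can reuse x_2 instead of reloading the
     value from memory.  */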
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */
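
/* For instance (illustrative): if the tables record that x_1 has the
   constant value 5, a use of x_1 in  y_2 = x_1 + 1  is rewritten to
   y_2 = 5 + 1,  which subsequent folding reduces further.  */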

static void
cprop_operand (gimple *stmt, use_operand_p op_p, vr_values *vr_values)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (!val)
    val = vr_values->op_with_constant_singleton_value_range (op);

  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into BIVs.
         See PR23821 and PR62217 for how this can disturb IV and
         number of iteration analysis.  */
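      /* For instance (illustrative): if OP is defined by a loop header
         PHI, it is a basic induction variable; replacing it with a copy
         recorded from an equality test would hide the IV from the
         analyses mentioned above.  */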
      if (TREE_CODE (val) != INTEGER_CST)
        {
          gimple *def = SSA_NAME_DEF_STMT (op);
          if (gimple_code (def) == GIMPLE_PHI
              && gimple_bb (def)->loop_father->header == gimple_bb (def))
            return;
        }

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple *stmt, vr_values *vr_values)
{
  use_operand_p op_p;
  ssa_op_iter iter;
  tree last_copy_propagated_op = NULL;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    {
      tree old_op = USE_FROM_PTR (op_p);

      /* If we have A = B and B = A in the copy propagation tables
         (due to an equality comparison), avoid substituting B for A
         then A for B in the trivially discovered cases.  This allows
         optimization of statements where A and B appear as input
         operands.  */
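      /* For instance (illustrative): after  if (a_1 == b_2)  the tables
         may record both a_1 -> b_2 and b_2 -> a_1; without this guard we
         could substitute b_2 for a_1 and then immediately substitute a_1
         back for that b_2 within the same statement.  */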
      if (old_op != last_copy_propagated_op)
        {
          cprop_operand (stmt, op_p, vr_values);

          tree new_op = USE_FROM_PTR (op_p);
          if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
            last_copy_propagated_op = new_op;
        }
    }
}

/* If STMT contains a relational test, try to convert it into an
   equality test if there is only a single value which can ever
   make the test true.

   For example, if the expression hash table contains:

    TRUE = (i <= 1)

   and STMT contains the test  i >= 1,  then we can safely rewrite
   the test as  i == 1  since there is only a single value for which
   the test is true.

   This is similar to code in VRP.  */

static void
test_for_singularity (gimple *stmt, gcond *dummy_cond,
                      avail_exprs_stack *avail_exprs_stack)
{
  /* We want to support gimple conditionals as well as assignments
     where the RHS contains a conditional.  */
  if (is_gimple_assign (stmt) || gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code code = ERROR_MARK;
      tree lhs, rhs;

      /* Extract the condition of interest from both forms we support.  */
      if (is_gimple_assign (stmt))
        {
          code = gimple_assign_rhs_code (stmt);
          lhs = gimple_assign_rhs1 (stmt);
          rhs = gimple_assign_rhs2 (stmt);
        }
      else if (gimple_code (stmt) == GIMPLE_COND)
        {
          code = gimple_cond_code (as_a <gcond *> (stmt));
          lhs = gimple_cond_lhs (as_a <gcond *> (stmt));
          rhs = gimple_cond_rhs (as_a <gcond *> (stmt));
        }

      /* We're looking for a relational test using LE/GE.  Also note we can
         canonicalize LT/GT tests against constants into LE/GE tests.  */
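      /* For instance (illustrative): i_1 < 5 is treated as i_1 <= 4,
         and i_1 > 5 as i_1 >= 6, before querying the hash table.  */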
      if (code == LE_EXPR || code == GE_EXPR
          || ((code == LT_EXPR || code == GT_EXPR)
              && TREE_CODE (rhs) == INTEGER_CST))
        {
          /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR.  */
          if (code == LT_EXPR)
            rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs),
                               rhs, build_int_cst (TREE_TYPE (rhs), 1));

          if (code == GT_EXPR)
            rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs),
                               rhs, build_int_cst (TREE_TYPE (rhs), 1));

          /* Determine the code we want to check for in the hash table.  */
          enum tree_code test_code;
          if (code == GE_EXPR || code == GT_EXPR)
            test_code = LE_EXPR;
          else
            test_code = GE_EXPR;

          /* Update the dummy statement so we can query the hash tables.  */
          gimple_cond_set_code (dummy_cond, test_code);
          gimple_cond_set_lhs (dummy_cond, lhs);
          gimple_cond_set_rhs (dummy_cond, rhs);
          tree cached_lhs
            = avail_exprs_stack->lookup_avail_expr (dummy_cond, false, false);

          /* If the lookup returned 1 (true), then the expression we
             queried was in the hash table.  As a result there is only
             one value that makes the original conditional true.  Update
             STMT accordingly.  */
          if (cached_lhs && integer_onep (cached_lhs))
            {
              if (is_gimple_assign (stmt))
                {
                  gimple_assign_set_rhs_code (stmt, EQ_EXPR);
                  gimple_assign_set_rhs2 (stmt, rhs);
                  gimple_set_modified (stmt, true);
                }
              else
                {
                  gimple_cond_set_code (as_a <gcond *> (stmt), EQ_EXPR);
                  gimple_cond_set_rhs (as_a <gcond *> (stmt), rhs);
                  gimple_set_modified (stmt, true);
                }
            }
        }
    }
}

/* Optimize the statement in block BB pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.

   3- Very simple redundant store elimination is performed.

   4- We can simplify a condition to a constant or from a relational
      condition to an equality condition.  */

edge
dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt, evrp_range_analyzer.get_vr_values ());

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         the gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments, calls, conditionals and switches that have no
     volatile ops.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
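          /* For instance (illustrative): a remaining
             _1 = __builtin_constant_p (n_2);  is resolved here to
             _1 = 0;.  */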
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && fndecl_built_in_p (callee, BUILT_IN_CONSTANT_P))
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      if (gimple_code (stmt) == GIMPLE_COND)
        {
          tree lhs = gimple_cond_lhs (stmt);
          tree rhs = gimple_cond_rhs (stmt);

          /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
             then this conditional is computable at compile time.  We can just
             shove either 0 or 1 into the LHS, mark the statement as modified
             and all the right things will just happen below.

             Note this would apply to any case where LHS has a range
             narrower than its type implies and RHS is outside that
             narrower range.  Future work.  */
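          /* For instance (illustrative): if b_5 is known to be either 0
             or 1 and the test is  b_5 == 42,  the condition can never be
             true; shoving 0 into the LHS yields  0 == 42,  which the
             folding below resolves.  */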
          if (TREE_CODE (lhs) == SSA_NAME
              && ssa_name_has_boolean_range (lhs)
              && TREE_CODE (rhs) == INTEGER_CST
              && ! (integer_zerop (rhs) || integer_onep (rhs)))
            {
              gimple_cond_set_lhs (as_a <gcond *> (stmt),
                                   fold_convert (TREE_TYPE (lhs),
                                                 integer_zero_node));
              gimple_set_modified (stmt, true);
            }
          else if (TREE_CODE (lhs) == SSA_NAME)
            {
              /* Exploiting EVRP data is not yet fully integrated into DOM
                 but we need to do something for this case to avoid regressing
                 udr4.f90 and new1.C which have unexecutable blocks with
                 undefined behavior that get diagnosed if they're left in the
                 IL because we've attached range information to new
                 SSA_NAMES.  */
              update_stmt_if_modified (stmt);
              edge taken_edge = NULL;
              evrp_range_analyzer.vrp_visit_cond_stmt (as_a <gcond *> (stmt),
                                                       &taken_edge);
              if (taken_edge)
                {
                  if (taken_edge->flags & EDGE_TRUE_VALUE)
                    gimple_cond_make_true (as_a <gcond *> (stmt));
                  else if (taken_edge->flags & EDGE_FALSE_VALUE)
                    gimple_cond_make_false (as_a <gcond *> (stmt));
                  else
                    gcc_unreachable ();
                  gimple_set_modified (stmt, true);
                  update_stmt (stmt);
                  cfg_altered = true;
                  return taken_edge;
                }
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si, m_const_and_copies,
                                        m_avail_exprs_stack);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
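      /* For instance (illustrative): given

           x_2 = *p_1;
           ...
           *p_1 = x_2;

         the store writes back the value already known to be in memory
         and can be removed.  */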
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gassign *new_stmt;
          rhs = dom_valueize (rhs);
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
                                                               false);
          if (cached_lhs && operand_equal_p (rhs, cached_lhs, 0))
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return retval;
            }
        }


      /* If this statement was not redundant, we may still be able to simplify
         it, which may in turn allow other parts of DOM or other passes to do
         a better job.  */
      test_for_singularity (stmt, m_dummy_cond, m_avail_exprs_stack);
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, m_avail_exprs_stack);

  /* If STMT is a GIMPLE_COND or GIMPLE_SWITCH and it was modified, then
     we may know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt),
                               gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
        {
          retval = find_taken_edge (bb, val);
          if (retval)
            {
              /* Fix the condition to be either true or false.  */
              if (gimple_code (stmt) == GIMPLE_COND)
                {
                  if (integer_zerop (val))
                    gimple_cond_make_false (as_a <gcond *> (stmt));
                  else if (integer_onep (val))
                    gimple_cond_make_true (as_a <gcond *> (stmt));
                  else
                    gcc_unreachable ();

                  gimple_set_modified (stmt, true);
                }

              /* Further simplifications may be possible.  */
              cfg_altered = true;
            }
        }

      update_stmt_if_modified (stmt);

      /* If we simplified a statement in such a way as to show that it
         cannot trap, update the EH information and the CFG to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }

      if (!was_noreturn
          && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
        need_noreturn_fixup.safe_push (stmt);
    }
  return retval;
}