gcc/tree-ssa-dom.c
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "cfgloop.h"
33 #include "gimple-fold.h"
34 #include "tree-eh.h"
35 #include "gimple-iterator.h"
36 #include "tree-cfg.h"
37 #include "tree-into-ssa.h"
38 #include "domwalk.h"
39 #include "tree-ssa-propagate.h"
40 #include "tree-ssa-threadupdate.h"
41 #include "params.h"
42 #include "tree-ssa-scopedtables.h"
43 #include "tree-ssa-threadedge.h"
44 #include "tree-ssa-dom.h"
45 #include "gimplify.h"
46 #include "tree-cfgcleanup.h"
47
48 /* This file implements optimizations on the dominator tree. */
49
50 /* Structure for recording known values of a conditional expression
51 at the exits from its block. */
52
53 struct cond_equivalence
54 {
55 struct hashable_expr cond;
56 tree value;
57 };
58
59 /* Structure for recording edge equivalences.
60
61 Computing and storing the edge equivalences instead of creating
62 them on-demand can save significant amounts of time, particularly
63 for pathological cases involving switch statements.
64
65 These structures live for a single iteration of the dominator
66 optimizer in the edge's AUX field. At the end of an iteration we
67 free each of these structures. */
68
69 struct edge_info
70 {
71 /* If this edge creates a simple equivalence, the LHS and RHS of
72 the equivalence will be stored here. */
73 tree lhs;
74 tree rhs;
75
76 /* Traversing an edge may also indicate one or more particular conditions
77 are true or false. */
78 vec<cond_equivalence> cond_equivalences;
79 };
80
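/* An illustrative sketch (the SSA names are hypothetical): for

     if (x_2 == 42)

   the true edge gets the simple equivalence LHS = x_2, RHS = 42,
   and both edges get cond_equivalences such as "x_2 == 42 is true"
   on the true edge and "x_2 == 42 is false" on the false edge.  */
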
81 /* Track whether or not we have changed the control flow graph. */
82 static bool cfg_altered;
83
84 /* Bitmap of blocks that have had EH statements cleaned. We should
85 remove their dead edges eventually. */
86 static bitmap need_eh_cleanup;
87 static vec<gimple *> need_noreturn_fixup;
88
89 /* Statistics for dominator optimizations. */
90 struct opt_stats_d
91 {
92 long num_stmts;
93 long num_exprs_considered;
94 long num_re;
95 long num_const_prop;
96 long num_copy_prop;
97 };
98
99 static struct opt_stats_d opt_stats;
100
101 /* Local functions. */
102 static void optimize_stmt (basic_block, gimple_stmt_iterator,
103 class const_and_copies *,
104 class avail_exprs_stack *);
105 static tree lookup_avail_expr (gimple *, bool, class avail_exprs_stack *);
106 static void record_cond (cond_equivalence *, class avail_exprs_stack *);
107 static void record_equality (tree, tree, class const_and_copies *);
108 static void record_equivalences_from_phis (basic_block);
109 static void record_equivalences_from_incoming_edge (basic_block,
110 class const_and_copies *,
111 class avail_exprs_stack *);
112 static void eliminate_redundant_computations (gimple_stmt_iterator *,
113 class const_and_copies *,
114 class avail_exprs_stack *);
115 static void record_equivalences_from_stmt (gimple *, int,
116 class avail_exprs_stack *);
117 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
118 static void dump_dominator_optimization_stats (FILE *file,
119 hash_table<expr_elt_hasher> *);
120
121
122 /* Free the edge_info data attached to E, if it exists. */
123
124 static void
125 free_edge_info (edge e)
126 {
127 struct edge_info *edge_info = (struct edge_info *)e->aux;
128
129 if (edge_info)
130 {
131 edge_info->cond_equivalences.release ();
132 free (edge_info);
133 }
134 }
135
136 /* Allocate an EDGE_INFO for edge E and attach it to E.
137 Return the new EDGE_INFO structure. */
138
139 static struct edge_info *
140 allocate_edge_info (edge e)
141 {
142 struct edge_info *edge_info;
143
144 /* Free the old one, if it exists. */
145 free_edge_info (e);
146
147 edge_info = XCNEW (struct edge_info);
148
149 e->aux = edge_info;
150 return edge_info;
151 }
152
153 /* Free all EDGE_INFO structures associated with edges in the CFG
154 and clear each edge's AUX field. This leaves the AUX field
155 free for the jump threading code, which stores its redirection
156 targets there when updating the CFG and SSA graph for
157 jump threading. */
158
159 static void
160 free_all_edge_infos (void)
161 {
162 basic_block bb;
163 edge_iterator ei;
164 edge e;
165
166 FOR_EACH_BB_FN (bb, cfun)
167 {
168 FOR_EACH_EDGE (e, ei, bb->preds)
169 {
170 free_edge_info (e);
171 e->aux = NULL;
172 }
173 }
174 }
175
176 /* Build a cond_equivalence record indicating that the comparison
177 CODE holds between operands OP0 and OP1 and push it onto the vector *P. */
178
179 static void
180 build_and_record_new_cond (enum tree_code code,
181 tree op0, tree op1,
182 vec<cond_equivalence> *p,
183 bool val = true)
184 {
185 cond_equivalence c;
186 struct hashable_expr *cond = &c.cond;
187
188 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
189
190 cond->type = boolean_type_node;
191 cond->kind = EXPR_BINARY;
192 cond->ops.binary.op = code;
193 cond->ops.binary.opnd0 = op0;
194 cond->ops.binary.opnd1 = op1;
195
196 c.value = val ? boolean_true_node : boolean_false_node;
197 p->safe_push (c);
198 }
199
200 /* Record that COND is true and INVERTED is false into the edge information
201 structure. Also record that any conditions dominated by COND are true
202 as well.
203
204 For example, if a < b is true, then a <= b must also be true. */
205
206 static void
207 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
208 {
209 tree op0, op1;
210 cond_equivalence c;
211
212 if (!COMPARISON_CLASS_P (cond))
213 return;
214
215 op0 = TREE_OPERAND (cond, 0);
216 op1 = TREE_OPERAND (cond, 1);
217
218 switch (TREE_CODE (cond))
219 {
220 case LT_EXPR:
221 case GT_EXPR:
222 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
223 {
224 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
225 &edge_info->cond_equivalences);
226 build_and_record_new_cond (LTGT_EXPR, op0, op1,
227 &edge_info->cond_equivalences);
228 }
229
230 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
231 ? LE_EXPR : GE_EXPR),
232 op0, op1, &edge_info->cond_equivalences);
233 build_and_record_new_cond (NE_EXPR, op0, op1,
234 &edge_info->cond_equivalences);
235 build_and_record_new_cond (EQ_EXPR, op0, op1,
236 &edge_info->cond_equivalences, false);
237 break;
238
239 case GE_EXPR:
240 case LE_EXPR:
241 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
242 {
243 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
244 &edge_info->cond_equivalences);
245 }
246 break;
247
248 case EQ_EXPR:
249 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
250 {
251 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
252 &edge_info->cond_equivalences);
253 }
254 build_and_record_new_cond (LE_EXPR, op0, op1,
255 &edge_info->cond_equivalences);
256 build_and_record_new_cond (GE_EXPR, op0, op1,
257 &edge_info->cond_equivalences);
258 break;
259
260 case UNORDERED_EXPR:
261 build_and_record_new_cond (NE_EXPR, op0, op1,
262 &edge_info->cond_equivalences);
263 build_and_record_new_cond (UNLE_EXPR, op0, op1,
264 &edge_info->cond_equivalences);
265 build_and_record_new_cond (UNGE_EXPR, op0, op1,
266 &edge_info->cond_equivalences);
267 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
268 &edge_info->cond_equivalences);
269 build_and_record_new_cond (UNLT_EXPR, op0, op1,
270 &edge_info->cond_equivalences);
271 build_and_record_new_cond (UNGT_EXPR, op0, op1,
272 &edge_info->cond_equivalences);
273 break;
274
275 case UNLT_EXPR:
276 case UNGT_EXPR:
277 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
278 ? UNLE_EXPR : UNGE_EXPR),
279 op0, op1, &edge_info->cond_equivalences);
280 build_and_record_new_cond (NE_EXPR, op0, op1,
281 &edge_info->cond_equivalences);
282 break;
283
284 case UNEQ_EXPR:
285 build_and_record_new_cond (UNLE_EXPR, op0, op1,
286 &edge_info->cond_equivalences);
287 build_and_record_new_cond (UNGE_EXPR, op0, op1,
288 &edge_info->cond_equivalences);
289 break;
290
291 case LTGT_EXPR:
292 build_and_record_new_cond (NE_EXPR, op0, op1,
293 &edge_info->cond_equivalences);
294 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
295 &edge_info->cond_equivalences);
296 break;
297
298 default:
299 break;
300 }
301
302 /* Now store the original true and false conditions into the first
303 two slots. */
304 initialize_expr_from_cond (cond, &c.cond);
305 c.value = boolean_true_node;
306 edge_info->cond_equivalences.safe_push (c);
307
308 /* It is possible for INVERTED to be the negation of a comparison,
309 and not a valid RHS or GIMPLE_COND condition. This happens because
310 invert_truthvalue may return such an expression when asked to invert
311 a floating-point comparison. These comparisons are not assumed to
312 obey the trichotomy law. */
313 initialize_expr_from_cond (inverted, &c.cond);
314 c.value = boolean_false_node;
315 edge_info->cond_equivalences.safe_push (c);
316 }
317
318 /* We have finished optimizing BB, record any information implied by
319 taking a specific outgoing edge from BB. */
320
321 static void
322 record_edge_info (basic_block bb)
323 {
324 gimple_stmt_iterator gsi = gsi_last_bb (bb);
325 struct edge_info *edge_info;
326
327 if (! gsi_end_p (gsi))
328 {
329 gimple *stmt = gsi_stmt (gsi);
330 location_t loc = gimple_location (stmt);
331
332 if (gimple_code (stmt) == GIMPLE_SWITCH)
333 {
334 gswitch *switch_stmt = as_a <gswitch *> (stmt);
335 tree index = gimple_switch_index (switch_stmt);
336
337 if (TREE_CODE (index) == SSA_NAME)
338 {
339 int i;
340 int n_labels = gimple_switch_num_labels (switch_stmt);
341 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
342 edge e;
343 edge_iterator ei;
344
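/* First pass over the labels: for each target block remember the
   unique single-valued case label reaching it, or error_mark_node
   when the target is reached by a range, by the default label, or
   by several labels.  E.g. (hypothetical), an unshared "case 3:"
   lets the loop below record i_1 == 3 on the edge to its block.  */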
345 for (i = 0; i < n_labels; i++)
346 {
347 tree label = gimple_switch_label (switch_stmt, i);
348 basic_block target_bb = label_to_block (CASE_LABEL (label));
349 if (CASE_HIGH (label)
350 || !CASE_LOW (label)
351 || info[target_bb->index])
352 info[target_bb->index] = error_mark_node;
353 else
354 info[target_bb->index] = label;
355 }
356
357 FOR_EACH_EDGE (e, ei, bb->succs)
358 {
359 basic_block target_bb = e->dest;
360 tree label = info[target_bb->index];
361
362 if (label != NULL && label != error_mark_node)
363 {
364 tree x = fold_convert_loc (loc, TREE_TYPE (index),
365 CASE_LOW (label));
366 edge_info = allocate_edge_info (e);
367 edge_info->lhs = index;
368 edge_info->rhs = x;
369 }
370 }
371 free (info);
372 }
373 }
374
375 /* A COND_EXPR may create equivalences too. */
376 if (gimple_code (stmt) == GIMPLE_COND)
377 {
378 edge true_edge;
379 edge false_edge;
380
381 tree op0 = gimple_cond_lhs (stmt);
382 tree op1 = gimple_cond_rhs (stmt);
383 enum tree_code code = gimple_cond_code (stmt);
384
385 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
386
387 /* Special case comparing booleans against a constant as we
388 know the value of OP0 on both arms of the branch; i.e., we
389 can record an equivalence for OP0 rather than COND. */
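/* For instance (hypothetical GIMPLE): with _Bool x_1,

     if (x_1 == 0)

   records x_1 = false on the true edge and x_1 = true on the
   false edge, rather than recording the comparison itself.  */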
390 if ((code == EQ_EXPR || code == NE_EXPR)
391 && TREE_CODE (op0) == SSA_NAME
392 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
393 && is_gimple_min_invariant (op1))
394 {
395 if (code == EQ_EXPR)
396 {
397 edge_info = allocate_edge_info (true_edge);
398 edge_info->lhs = op0;
399 edge_info->rhs = (integer_zerop (op1)
400 ? boolean_false_node
401 : boolean_true_node);
402
403 edge_info = allocate_edge_info (false_edge);
404 edge_info->lhs = op0;
405 edge_info->rhs = (integer_zerop (op1)
406 ? boolean_true_node
407 : boolean_false_node);
408 }
409 else
410 {
411 edge_info = allocate_edge_info (true_edge);
412 edge_info->lhs = op0;
413 edge_info->rhs = (integer_zerop (op1)
414 ? boolean_true_node
415 : boolean_false_node);
416
417 edge_info = allocate_edge_info (false_edge);
418 edge_info->lhs = op0;
419 edge_info->rhs = (integer_zerop (op1)
420 ? boolean_false_node
421 : boolean_true_node);
422 }
423 }
424 else if (is_gimple_min_invariant (op0)
425 && (TREE_CODE (op1) == SSA_NAME
426 || is_gimple_min_invariant (op1)))
427 {
428 tree cond = build2 (code, boolean_type_node, op0, op1);
429 tree inverted = invert_truthvalue_loc (loc, cond);
430 bool can_infer_simple_equiv
431 = !(HONOR_SIGNED_ZEROS (op0)
432 && real_zerop (op0));
433 struct edge_info *edge_info;
434
435 edge_info = allocate_edge_info (true_edge);
436 record_conditions (edge_info, cond, inverted);
437
438 if (can_infer_simple_equiv && code == EQ_EXPR)
439 {
440 edge_info->lhs = op1;
441 edge_info->rhs = op0;
442 }
443
444 edge_info = allocate_edge_info (false_edge);
445 record_conditions (edge_info, inverted, cond);
446
447 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
448 {
449 edge_info->lhs = op1;
450 edge_info->rhs = op0;
451 }
452 }
453
454 else if (TREE_CODE (op0) == SSA_NAME
455 && (TREE_CODE (op1) == SSA_NAME
456 || is_gimple_min_invariant (op1)))
457 {
458 tree cond = build2 (code, boolean_type_node, op0, op1);
459 tree inverted = invert_truthvalue_loc (loc, cond);
460 bool can_infer_simple_equiv
461 = !(HONOR_SIGNED_ZEROS (op1)
462 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
463 struct edge_info *edge_info;
464
465 edge_info = allocate_edge_info (true_edge);
466 record_conditions (edge_info, cond, inverted);
467
468 if (can_infer_simple_equiv && code == EQ_EXPR)
469 {
470 edge_info->lhs = op0;
471 edge_info->rhs = op1;
472 }
473
474 edge_info = allocate_edge_info (false_edge);
475 record_conditions (edge_info, inverted, cond);
476
477 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
478 {
479 edge_info->lhs = op0;
480 edge_info->rhs = op1;
481 }
482 }
483 }
484
485 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
486 }
487 }
488
489
490 class dom_opt_dom_walker : public dom_walker
491 {
492 public:
493 dom_opt_dom_walker (cdi_direction direction,
494 class const_and_copies *const_and_copies,
495 class avail_exprs_stack *avail_exprs_stack)
496 : dom_walker (direction),
497 m_const_and_copies (const_and_copies),
498 m_avail_exprs_stack (avail_exprs_stack),
499 m_dummy_cond (NULL) {}
500
501 virtual void before_dom_children (basic_block);
502 virtual void after_dom_children (basic_block);
503
504 private:
505 void thread_across_edge (edge);
506
507 /* Unwindable equivalences, both const/copy and expression varieties. */
508 class const_and_copies *m_const_and_copies;
509 class avail_exprs_stack *m_avail_exprs_stack;
510
511 gcond *m_dummy_cond;
512 };
513
514 /* Jump threading, redundancy elimination and const/copy propagation.
515
516 This pass may expose new symbols that need to be renamed into SSA. For
517 every new symbol exposed, its corresponding bit will be set in
518 VARS_TO_RENAME. */
519
520 namespace {
521
522 const pass_data pass_data_dominator =
523 {
524 GIMPLE_PASS, /* type */
525 "dom", /* name */
526 OPTGROUP_NONE, /* optinfo_flags */
527 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
528 ( PROP_cfg | PROP_ssa ), /* properties_required */
529 0, /* properties_provided */
530 0, /* properties_destroyed */
531 0, /* todo_flags_start */
532 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
533 };
534
535 class pass_dominator : public gimple_opt_pass
536 {
537 public:
538 pass_dominator (gcc::context *ctxt)
539 : gimple_opt_pass (pass_data_dominator, ctxt)
540 {}
541
542 /* opt_pass methods: */
543 opt_pass * clone () { return new pass_dominator (m_ctxt); }
544 virtual bool gate (function *) { return flag_tree_dom != 0; }
545 virtual unsigned int execute (function *);
546
547 }; // class pass_dominator
548
549 unsigned int
550 pass_dominator::execute (function *fun)
551 {
552 memset (&opt_stats, 0, sizeof (opt_stats));
553
554 /* Create our hash tables. */
555 hash_table<expr_elt_hasher> *avail_exprs
556 = new hash_table<expr_elt_hasher> (1024);
557 class avail_exprs_stack *avail_exprs_stack
558 = new class avail_exprs_stack (avail_exprs);
559 class const_and_copies *const_and_copies = new class const_and_copies ();
560 need_eh_cleanup = BITMAP_ALLOC (NULL);
561 need_noreturn_fixup.create (0);
562
563 calculate_dominance_info (CDI_DOMINATORS);
564 cfg_altered = false;
565
566 /* We need to know loop structures in order to avoid destroying them
567 in jump threading. Note that we still can e.g. thread through loop
568 headers to an exit edge, or through loop header to the loop body, assuming
569 that we update the loop info.
570
571 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
572 to several overly conservative bail-outs in jump threading, case
573 gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
574 missing. We should improve jump threading in the future; then
575 LOOPS_HAVE_PREHEADERS won't be needed here. */
576 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
577
578 /* Initialize the value-handle array. */
579 threadedge_initialize_values ();
580
581 /* We need accurate information regarding back edges in the CFG
582 for jump threading; this may include back edges that are not part of
583 a single loop. */
584 mark_dfs_back_edges ();
585
586 /* We want to create the edge info structures before the dominator walk
587 so that they'll be in place for the jump threader, particularly when
588 threading through a join block.
589
590 The conditions will be lazily updated with global equivalences as
591 we reach them during the dominator walk. */
592 basic_block bb;
593 FOR_EACH_BB_FN (bb, fun)
594 record_edge_info (bb);
595
596 /* Recursively walk the dominator tree optimizing statements. */
597 dom_opt_dom_walker walker (CDI_DOMINATORS,
598 const_and_copies,
599 avail_exprs_stack);
600 walker.walk (fun->cfg->x_entry_block_ptr);
601
602 {
603 gimple_stmt_iterator gsi;
604 basic_block bb;
605 FOR_EACH_BB_FN (bb, fun)
606 {
607 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
608 update_stmt_if_modified (gsi_stmt (gsi));
609 }
610 }
611
612 /* If we exposed any new variables, go ahead and put them into
613 SSA form now, before we handle jump threading. This simplifies
614 interactions between rewriting of _DECL nodes into SSA form
615 and rewriting SSA_NAME nodes into SSA form after block
616 duplication and CFG manipulation. */
617 update_ssa (TODO_update_ssa);
618
619 free_all_edge_infos ();
620
621 /* Thread jumps, creating duplicate blocks as needed. */
622 cfg_altered |= thread_through_all_blocks (first_pass_instance);
623
624 if (cfg_altered)
625 free_dominance_info (CDI_DOMINATORS);
626
627 /* Removal of statements may make some EH edges dead. Purge
628 such edges from the CFG as needed. */
629 if (!bitmap_empty_p (need_eh_cleanup))
630 {
631 unsigned i;
632 bitmap_iterator bi;
633
634 /* Jump threading may have created forwarder blocks from blocks
635 needing EH cleanup; the new successor of these blocks, which
636 has inherited from the original block, needs the cleanup.
637 Don't clear bits in the bitmap, as that can break the bitmap
638 iterator. */
639 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
640 {
641 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
642 if (bb == NULL)
643 continue;
644 while (single_succ_p (bb)
645 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
646 bb = single_succ (bb);
647 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
648 continue;
649 if ((unsigned) bb->index != i)
650 bitmap_set_bit (need_eh_cleanup, bb->index);
651 }
652
653 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
654 bitmap_clear (need_eh_cleanup);
655 }
656
657 /* Fixup stmts that became noreturn calls. This may require splitting
658 blocks and thus isn't possible during the dominator walk or before
659 jump threading has finished. Do this in reverse order so we don't
660 inadvertently remove a stmt we want to fix up by first visiting a
661 dominating call that has become noreturn. */
662 while (!need_noreturn_fixup.is_empty ())
663 {
664 gimple *stmt = need_noreturn_fixup.pop ();
665 if (dump_file && dump_flags & TDF_DETAILS)
666 {
667 fprintf (dump_file, "Fixing up noreturn call ");
668 print_gimple_stmt (dump_file, stmt, 0, 0);
669 fprintf (dump_file, "\n");
670 }
671 fixup_noreturn_call (stmt);
672 }
673
674 statistics_counter_event (fun, "Redundant expressions eliminated",
675 opt_stats.num_re);
676 statistics_counter_event (fun, "Constants propagated",
677 opt_stats.num_const_prop);
678 statistics_counter_event (fun, "Copies propagated",
679 opt_stats.num_copy_prop);
680
681 /* Debugging dumps. */
682 if (dump_file && (dump_flags & TDF_STATS))
683 dump_dominator_optimization_stats (dump_file, avail_exprs);
684
685 loop_optimizer_finalize ();
686
687 /* Delete our main hashtable. */
688 delete avail_exprs;
689 avail_exprs = NULL;
690
691 /* Free asserted bitmaps and stacks. */
692 BITMAP_FREE (need_eh_cleanup);
693 need_noreturn_fixup.release ();
694 delete avail_exprs_stack;
695 delete const_and_copies;
696
697 /* Free the value-handle array. */
698 threadedge_finalize_values ();
699
700 return 0;
701 }
702
703 } // anon namespace
704
705 gimple_opt_pass *
706 make_pass_dominator (gcc::context *ctxt)
707 {
708 return new pass_dominator (ctxt);
709 }
710
711
712 /* Given a conditional statement CONDSTMT, convert the
713 condition to a canonical form. */
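/* A hypothetical example: tree_swap_operands_p prefers constants as
   the second operand, so

     if (42 > a_1)    becomes    if (a_1 < 42)

   letting later hash-table lookups treat both spellings alike.  */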
714
715 static void
716 canonicalize_comparison (gcond *condstmt)
717 {
718 tree op0;
719 tree op1;
720 enum tree_code code;
721
722 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
723
724 op0 = gimple_cond_lhs (condstmt);
725 op1 = gimple_cond_rhs (condstmt);
726
727 code = gimple_cond_code (condstmt);
728
729 /* If it would be profitable to swap the operands, then do so to
730 canonicalize the statement, enabling better optimization.
731
732 By placing canonicalization of such expressions here we
733 transparently keep statements in canonical form, even
734 when the statement is modified. */
735 if (tree_swap_operands_p (op0, op1, false))
736 {
737 /* For relationals we need to swap the operands
738 and change the code. */
739 if (code == LT_EXPR
740 || code == GT_EXPR
741 || code == LE_EXPR
742 || code == GE_EXPR)
743 {
744 code = swap_tree_comparison (code);
745
746 gimple_cond_set_code (condstmt, code);
747 gimple_cond_set_lhs (condstmt, op1);
748 gimple_cond_set_rhs (condstmt, op0);
749
750 update_stmt (condstmt);
751 }
752 }
753 }
754
755 /* A trivial wrapper so that we can present the generic jump
756 threading code with a simple API for simplifying statements. */
757 static tree
758 simplify_stmt_for_jump_threading (gimple *stmt,
759 gimple *within_stmt ATTRIBUTE_UNUSED,
760 class avail_exprs_stack *avail_exprs_stack)
761 {
762 return lookup_avail_expr (stmt, false, avail_exprs_stack);
763 }
764
765 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
766
767 static tree
768 dom_valueize (tree t)
769 {
770 if (TREE_CODE (t) == SSA_NAME)
771 {
772 tree tem = SSA_NAME_VALUE (t);
773 if (tem)
774 return tem;
775 }
776 return t;
777 }
778
779 /* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
780 by traversing edge E (which are cached in E->aux).
781
782 Callers are responsible for managing the unwinding markers. */
783 void
784 record_temporary_equivalences (edge e,
785 class const_and_copies *const_and_copies,
786 class avail_exprs_stack *avail_exprs_stack)
787 {
788 int i;
789 struct edge_info *edge_info = (struct edge_info *) e->aux;
790
791 /* If we have info associated with this edge, record it into
792 our equivalence tables. */
793 if (edge_info)
794 {
795 cond_equivalence *eq;
796 tree lhs = edge_info->lhs;
797 tree rhs = edge_info->rhs;
798
799 /* If we have a simple NAME = VALUE equivalence, record it. */
800 if (lhs)
801 record_equality (lhs, rhs, const_and_copies);
802
803 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
804 set via a widening type conversion, then we may be able to record
805 additional equivalences. */
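/* A hypothetical illustration: given

     short s_2;
     long l_3 = (long) s_2;

   recording l_3 == 5 on this edge also lets us record s_2 == 5,
   since the conversion is widening and 5 fits in short.  */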
806 if (lhs
807 && TREE_CODE (lhs) == SSA_NAME
808 && TREE_CODE (rhs) == INTEGER_CST)
809 {
810 gimple *defstmt = SSA_NAME_DEF_STMT (lhs);
811
812 if (defstmt
813 && is_gimple_assign (defstmt)
814 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
815 {
816 tree old_rhs = gimple_assign_rhs1 (defstmt);
817
818 /* If the conversion widens the original value and
819 the constant is in the range of the type of OLD_RHS,
820 then convert the constant and record the equivalence.
821
822 Note that int_fits_type_p does not check the precision
823 if the upper and lower bounds are OK. */
824 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
825 && (TYPE_PRECISION (TREE_TYPE (lhs))
826 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
827 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
828 {
829 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
830 record_equality (old_rhs, newval, const_and_copies);
831 }
832 }
833 }
834
835 /* If LHS is an SSA_NAME with a new equivalence, see whether stmts
836 that use LHS and whose blocks dominate the edge destination
837 simplify, allowing further equivalences to be recorded. */
838 if (lhs && TREE_CODE (lhs) == SSA_NAME)
839 {
840 use_operand_p use_p;
841 imm_use_iterator iter;
842 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
843 {
844 gimple *use_stmt = USE_STMT (use_p);
845
846 /* Only bother to record more equivalences for lhs that
847 can be directly used by e->dest.
848 ??? If the code gets re-organized to a worklist to
849 catch more indirect opportunities and it is made to
850 handle PHIs then this should only consider use_stmts
851 in basic-blocks we have already visited. */
852 if (e->dest == gimple_bb (use_stmt)
853 || !dominated_by_p (CDI_DOMINATORS,
854 e->dest, gimple_bb (use_stmt)))
855 continue;
856 tree lhs2 = gimple_get_lhs (use_stmt);
857 if (lhs2 && TREE_CODE (lhs2) == SSA_NAME)
858 {
859 tree res
860 = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
861 no_follow_ssa_edges);
862 if (res
863 && (TREE_CODE (res) == SSA_NAME
864 || is_gimple_min_invariant (res)))
865 record_equality (lhs2, res, const_and_copies);
866 }
867 }
868 }
869
870 /* If we have 0 = COND or 1 = COND equivalences, record them
871 into our expression hash tables. */
872 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
873 record_cond (eq, avail_exprs_stack);
874 }
875 }
876
877 /* Wrapper for common code to attempt to thread an edge. For example,
878 it handles lazily building the dummy condition and the bookkeeping
879 when jump threading is successful. */
880
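/* E.g. (hypothetical): if traversing E records a_5 = 0 and E->dest
   ends in

     if (a_5 > 3)

   the threader can resolve the condition to false and queue a jump
   thread from E through E->dest to its false successor.  */
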
881 void
882 dom_opt_dom_walker::thread_across_edge (edge e)
883 {
884 if (! m_dummy_cond)
885 m_dummy_cond =
886 gimple_build_cond (NE_EXPR,
887 integer_zero_node, integer_zero_node,
888 NULL, NULL);
889
890 /* Push a marker on both stacks so we can unwind the tables back to their
891 current state. */
892 m_avail_exprs_stack->push_marker ();
893 m_const_and_copies->push_marker ();
894
895 /* Traversing E may result in equivalences we can utilize. */
896 record_temporary_equivalences (e, m_const_and_copies, m_avail_exprs_stack);
897
898 /* With all the edge equivalences in the tables, go ahead and attempt
899 to thread through E->dest. */
900 ::thread_across_edge (m_dummy_cond, e, false,
901 m_const_and_copies, m_avail_exprs_stack,
902 simplify_stmt_for_jump_threading);
903
904 /* And restore the various tables to their state before
905 we threaded this edge.
906
907 XXX The code in tree-ssa-threadedge.c will restore the state of
908 the const_and_copies table. We just have to restore the expression
909 table. */
910 m_avail_exprs_stack->pop_to_marker ();
911 }
912
913 /* PHI nodes can create equivalences too.
914
915 Ignoring any alternatives which are the same as the result, if
916 all the alternatives are equal, then the PHI node creates an
917 equivalence. */
918
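/* A hypothetical example:

     x_3 = PHI <y_1(2), y_1(3), x_3(4)>

   Ignoring the self-referencing alternative, every argument is y_1,
   so x_3 is equivalent to y_1.  */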
919 static void
920 record_equivalences_from_phis (basic_block bb)
921 {
922 gphi_iterator gsi;
923
924 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
925 {
926 gphi *phi = gsi.phi ();
927
928 tree lhs = gimple_phi_result (phi);
929 tree rhs = NULL;
930 size_t i;
931
932 for (i = 0; i < gimple_phi_num_args (phi); i++)
933 {
934 tree t = gimple_phi_arg_def (phi, i);
935
936 /* Ignore alternatives which are the same as our LHS. Since
937 LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
938 can simply compare pointers. */
939 if (lhs == t)
940 continue;
941
942 t = dom_valueize (t);
943
944 /* If we have not processed an alternative yet, then set
945 RHS to this alternative. */
946 if (rhs == NULL)
947 rhs = t;
948 /* If we have processed an alternative (stored in RHS), then
949 see if it is equal to this one. If it isn't, then stop
950 the search. */
951 else if (! operand_equal_for_phi_arg_p (rhs, t))
952 break;
953 }
954
955 /* If we had no interesting alternatives, then all the RHS alternatives
956 must have been the same as LHS. */
957 if (!rhs)
958 rhs = lhs;
959
960 /* If we managed to iterate through each PHI alternative without
961 breaking out of the loop, then we have a PHI which may create
962 a useful equivalence. We do not need to record unwind data for
963 this, since this is a true assignment and not an equivalence
964 inferred from a comparison. All uses of this ssa name are dominated
965 by this assignment, so unwinding just costs time and space. */
966 if (i == gimple_phi_num_args (phi)
967 && may_propagate_copy (lhs, rhs))
968 set_ssa_name_value (lhs, rhs);
969 }
970 }
971
972 /* Ignoring loop backedges, if BB has precisely one incoming edge then
973 return that edge. Otherwise return NULL. */
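/* For a hypothetical loop header with a preheader edge and a latch
   back edge, the back edge's source is dominated by its destination,
   so the back edge is skipped and the preheader edge is returned.  */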
974 static edge
975 single_incoming_edge_ignoring_loop_edges (basic_block bb)
976 {
977 edge retval = NULL;
978 edge e;
979 edge_iterator ei;
980
981 FOR_EACH_EDGE (e, ei, bb->preds)
982 {
983 /* A loop back edge can be identified by the destination of
984 the edge dominating the source of the edge. */
985 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
986 continue;
987
988 /* If we have already seen a non-loop edge, then we must have
989 multiple incoming non-loop edges and thus we return NULL. */
990 if (retval)
991 return NULL;
992
993 /* This is the first non-loop incoming edge we have found. Record
994 it. */
995 retval = e;
996 }
997
998 return retval;
999 }
1000
1001 /* Record any equivalences created by the incoming edge to BB into
1002 CONST_AND_COPIES and AVAIL_EXPRS_STACK. If BB has more than one
1003 incoming edge, then no equivalence is created. */
1004
1005 static void
1006 record_equivalences_from_incoming_edge (basic_block bb,
1007 class const_and_copies *const_and_copies,
1008 class avail_exprs_stack *avail_exprs_stack)
1009 {
1010 edge e;
1011 basic_block parent;
1012
1013 /* If our parent block ended with a control statement, then we may be
1014 able to record some equivalences based on which outgoing edge from
1015 the parent was followed. */
1016 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1017
1018 e = single_incoming_edge_ignoring_loop_edges (bb);
1019
1020 /* If we had a single incoming edge from our parent block, then enter
1021 any data associated with the edge into our tables. */
1022 if (e && e->src == parent)
1023 record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
1024 }
1025
1026 /* Dump statistics for the hash table HTAB. */
1027
1028 static void
1029 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1030 {
1031 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1032 (long) htab.size (),
1033 (long) htab.elements (),
1034 htab.collisions ());
1035 }
1036
1037 /* Dump SSA statistics on FILE. */
1038
1039 static void
1040 dump_dominator_optimization_stats (FILE *file,
1041 hash_table<expr_elt_hasher> *avail_exprs)
1042 {
1043 fprintf (file, "Total number of statements: %6ld\n\n",
1044 opt_stats.num_stmts);
1045 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1046 opt_stats.num_exprs_considered);
1047
1048 fprintf (file, "\nHash table statistics:\n");
1049
1050 fprintf (file, " avail_exprs: ");
1051 htab_statistics (file, *avail_exprs);
1052 }
1053
1054
1055 /* Enter condition equivalence P into AVAIL_EXPRS_HASH.
1056
1057 This indicates that a conditional expression has a known
1058 boolean value. */
1059
1060 static void
1061 record_cond (cond_equivalence *p,
1062 class avail_exprs_stack *avail_exprs_stack)
1063 {
1064 class expr_hash_elt *element = new expr_hash_elt (&p->cond, p->value);
1065 expr_hash_elt **slot;
1066
1067 hash_table<expr_elt_hasher> *avail_exprs = avail_exprs_stack->avail_exprs ();
1068 slot = avail_exprs->find_slot_with_hash (element, element->hash (), INSERT);
1069 if (*slot == NULL)
1070 {
1071 *slot = element;
1072 avail_exprs_stack->record_expr (element, NULL, '1');
1073 }
1074 else
1075 delete element;
1076 }
1077
1078 /* Return the loop depth of the basic block of the defining statement of X.
1079 This number should not be treated as absolutely correct because the loop
1080 information may not be completely up-to-date when dom runs. However, it
1081 will be relatively correct, and as more passes are taught to keep loop info
1082 up to date, the result will become more and more accurate. */
1083
1084 static int
1085 loop_depth_of_name (tree x)
1086 {
1087 gimple *defstmt;
1088 basic_block defbb;
1089
1090 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1091 if (TREE_CODE (x) != SSA_NAME)
1092 return 0;
1093
1094 /* Otherwise return the loop depth of the defining statement's bb.
1095 Note that there may not actually be a bb for this statement, if the
1096 ssa_name is live on entry. */
1097 defstmt = SSA_NAME_DEF_STMT (x);
1098 defbb = gimple_bb (defstmt);
1099 if (!defbb)
1100 return 0;
1101
1102 return bb_loop_depth (defbb);
1103 }
1104
1105 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1106 This constrains the cases in which we may treat this as an assignment. */
1107
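/* A hypothetical example: from a_1 == 7 we record a_1 -> 7; from
   a_1 == b_2 where b_2 already has the known value 7, we likewise
   end up recording a_1 -> 7 via the previously recorded value.  */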
1108 static void
1109 record_equality (tree x, tree y, class const_and_copies *const_and_copies)
1110 {
1111 tree prev_x = NULL, prev_y = NULL;
1112
1113 if (tree_swap_operands_p (x, y, false))
1114 std::swap (x, y);
1115
1116 /* Most of the time tree_swap_operands_p does what we want. But there
1117 are cases where we know one operand is better for copy propagation than
1118 the other. Given no other code cares about ordering of equality
1119 comparison operators for that purpose, we just handle the special cases
1120 here. */
1121 if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
1122 {
1123 /* If one operand is a single use operand, then make it
1124 X. This will preserve its single use properly and if this
1125 conditional is eliminated, the computation of X can be
1126 eliminated as well. */
1127 if (has_single_use (y) && ! has_single_use (x))
1128 std::swap (x, y);
1129 }
1130 if (TREE_CODE (x) == SSA_NAME)
1131 prev_x = SSA_NAME_VALUE (x);
1132 if (TREE_CODE (y) == SSA_NAME)
1133 prev_y = SSA_NAME_VALUE (y);
1134
1135 /* If one of the previous values is invariant, or invariant in more loops
1136 (by depth), then use that.
1137 Otherwise it doesn't matter which value we choose, just so
1138 long as we canonicalize on one value. */
1139 if (is_gimple_min_invariant (y))
1140 ;
1141 else if (is_gimple_min_invariant (x)
1142 /* ??? When threading over backedges the following is important
1143 for correctness. See PR61757. */
1144 || (loop_depth_of_name (x) < loop_depth_of_name (y)))
1145 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1146 else if (prev_x && is_gimple_min_invariant (prev_x))
1147 x = y, y = prev_x, prev_x = prev_y;
1148 else if (prev_y)
1149 y = prev_y;
1150
1151 /* After the swapping, we must have one SSA_NAME. */
1152 if (TREE_CODE (x) != SSA_NAME)
1153 return;
1154
1155 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1156 variable compared against zero. If we're honoring signed zeros,
1157 then we cannot record this value unless we know that the value is
1158 nonzero. */
1159 if (HONOR_SIGNED_ZEROS (x)
1160 && (TREE_CODE (y) != REAL_CST
1161 || real_equal (&dconst0, &TREE_REAL_CST (y))))
1162 return;
1163
1164 const_and_copies->record_const_or_copy (x, y, prev_x);
1165 }
1166
1167 /* Returns true when STMT is a simple iv increment. It detects the
1168 following situation:
1169
1170 i_1 = phi (..., i_2)
1171 i_2 = i_1 +/- ... */
1172
1173 bool
1174 simple_iv_increment_p (gimple *stmt)
1175 {
1176 enum tree_code code;
1177 tree lhs, preinc;
1178 gimple *phi;
1179 size_t i;
1180
1181 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1182 return false;
1183
1184 lhs = gimple_assign_lhs (stmt);
1185 if (TREE_CODE (lhs) != SSA_NAME)
1186 return false;
1187
1188 code = gimple_assign_rhs_code (stmt);
1189 if (code != PLUS_EXPR
1190 && code != MINUS_EXPR
1191 && code != POINTER_PLUS_EXPR)
1192 return false;
1193
1194 preinc = gimple_assign_rhs1 (stmt);
1195 if (TREE_CODE (preinc) != SSA_NAME)
1196 return false;
1197
1198 phi = SSA_NAME_DEF_STMT (preinc);
1199 if (gimple_code (phi) != GIMPLE_PHI)
1200 return false;
1201
1202 for (i = 0; i < gimple_phi_num_args (phi); i++)
1203 if (gimple_phi_arg_def (phi, i) == lhs)
1204 return true;
1205
1206 return false;
1207 }
1208
1209 /* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
1210 successors of BB. */
1211
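/* E.g. (hypothetical): if the edge BB -> DEST carries x_1 = 0 and
   DEST contains  y_2 = PHI <x_1(BB), z_3(...)>,  the alternative in
   BB's slot is rewritten to  y_2 = PHI <0(BB), z_3(...)>.  */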
1212 static void
1213 cprop_into_successor_phis (basic_block bb,
1214 class const_and_copies *const_and_copies)
1215 {
1216 edge e;
1217 edge_iterator ei;
1218
1219 FOR_EACH_EDGE (e, ei, bb->succs)
1220 {
1221 int indx;
1222 gphi_iterator gsi;
1223
1224 /* If this is an abnormal edge, then we do not want to copy propagate
1225 into the PHI alternative associated with this edge. */
1226 if (e->flags & EDGE_ABNORMAL)
1227 continue;
1228
1229 gsi = gsi_start_phis (e->dest);
1230 if (gsi_end_p (gsi))
1231 continue;
1232
1233 /* We may have an equivalence associated with this edge. While
1234 we cannot propagate it into non-dominated blocks, we can
1235 propagate it into PHIs in non-dominated blocks. */
1236
1237 /* Push the unwind marker so we can reset the const and copies
1238 table back to its original state after processing this edge. */
1239 const_and_copies->push_marker ();
1240
1241 /* Extract and record any simple NAME = VALUE equivalences.
1242
1243 Don't bother with [01] = COND equivalences, they're not useful
1244 here. */
1245 struct edge_info *edge_info = (struct edge_info *) e->aux;
1246 if (edge_info)
1247 {
1248 tree lhs = edge_info->lhs;
1249 tree rhs = edge_info->rhs;
1250
1251 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1252 const_and_copies->record_const_or_copy (lhs, rhs);
1253 }
1254
1255 indx = e->dest_idx;
1256 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1257 {
1258 tree new_val;
1259 use_operand_p orig_p;
1260 tree orig_val;
1261 gphi *phi = gsi.phi ();
1262
1263 /* The alternative may be associated with a constant, so verify
1264 it is an SSA_NAME before doing anything with it. */
1265 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1266 orig_val = get_use_from_ptr (orig_p);
1267 if (TREE_CODE (orig_val) != SSA_NAME)
1268 continue;
1269
1270 /* If we have *ORIG_P in our constant/copy table, then replace
1271 ORIG_P with its value in our constant/copy table. */
1272 new_val = SSA_NAME_VALUE (orig_val);
1273 if (new_val
1274 && new_val != orig_val
1275 && (TREE_CODE (new_val) == SSA_NAME
1276 || is_gimple_min_invariant (new_val))
1277 && may_propagate_copy (orig_val, new_val))
1278 propagate_value (orig_p, new_val);
1279 }
1280
1281 const_and_copies->pop_to_marker ();
1282 }
1283 }
1284
1285 void
1286 dom_opt_dom_walker::before_dom_children (basic_block bb)
1287 {
1288 gimple_stmt_iterator gsi;
1289
1290 if (dump_file && (dump_flags & TDF_DETAILS))
1291 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1292
1293 /* Push a marker on the stacks of local information so that we know how
1294 far to unwind when we finalize this block. */
1295 m_avail_exprs_stack->push_marker ();
1296 m_const_and_copies->push_marker ();
1297
1298 record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1299 m_avail_exprs_stack);
1300
1301 /* PHI nodes can create equivalences too. */
1302 record_equivalences_from_phis (bb);
1303
1304 /* Create equivalences from redundant PHIs. PHIs are only truly
1305 redundant when they exist in the same block, so push another
1306 marker and unwind right afterwards. */
1307 m_avail_exprs_stack->push_marker ();
1308 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1309 eliminate_redundant_computations (&gsi, m_const_and_copies,
1310 m_avail_exprs_stack);
1311 m_avail_exprs_stack->pop_to_marker ();
1312
1313 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1314 optimize_stmt (bb, gsi, m_const_and_copies, m_avail_exprs_stack);
1315
1316 /* Now prepare to process dominated blocks. */
1317 record_edge_info (bb);
1318 cprop_into_successor_phis (bb, m_const_and_copies);
1319 }
1320
1321 /* We have finished processing the dominator children of BB, perform
1322 any finalization actions in preparation for leaving this node in
1323 the dominator tree. */
1324
1325 void
1326 dom_opt_dom_walker::after_dom_children (basic_block bb)
1327 {
1328 gimple *last;
1329
1330 /* If we have an outgoing edge to a block with multiple incoming and
1331 outgoing edges, then we may be able to thread the edge, i.e., we
1332 may be able to statically determine which of the outgoing edges
1333 will be traversed when the incoming edge from BB is traversed. */
1334 if (single_succ_p (bb)
1335 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1336 && potentially_threadable_block (single_succ (bb)))
1337 {
1338 thread_across_edge (single_succ_edge (bb));
1339 }
1340 else if ((last = last_stmt (bb))
1341 && gimple_code (last) == GIMPLE_COND
1342 && EDGE_COUNT (bb->succs) == 2
1343 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1344 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1345 {
1346 edge true_edge, false_edge;
1347
1348 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1349
1350 /* Only try to thread the edge if it reaches a target block with
1351 more than one predecessor and more than one successor. */
1352 if (potentially_threadable_block (true_edge->dest))
1353 thread_across_edge (true_edge);
1354
1355 /* Similarly for the ELSE arm. */
1356 if (potentially_threadable_block (false_edge->dest))
1357 thread_across_edge (false_edge);
1358
1359 }
1360
1361 /* These remove expressions local to BB from the tables. */
1362 m_avail_exprs_stack->pop_to_marker ();
1363 m_const_and_copies->pop_to_marker ();
1364 }
1365
1366 /* Search for redundant computations in STMT. If any are found, then
1367 replace them with the variable holding the result of the computation.
1368
1369 If safe, record this expression into AVAIL_EXPRS_STACK and
1370 CONST_AND_COPIES. */
1371
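/* For instance (hypothetical): if a dominating block computed
   t_1 = a_2 + b_3 and that expression is still in the hash table,
   a later  t_4 = a_2 + b_3  is rewritten to  t_4 = t_1.  */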
1372 static void
1373 eliminate_redundant_computations (gimple_stmt_iterator* gsi,
1374 class const_and_copies *const_and_copies,
1375 class avail_exprs_stack *avail_exprs_stack)
1376 {
1377 tree expr_type;
1378 tree cached_lhs;
1379 tree def;
1380 bool insert = true;
1381 bool assigns_var_p = false;
1382
1383 gimple *stmt = gsi_stmt (*gsi);
1384
1385 if (gimple_code (stmt) == GIMPLE_PHI)
1386 def = gimple_phi_result (stmt);
1387 else
1388 def = gimple_get_lhs (stmt);
1389
1390 /* Certain expressions on the RHS can be optimized away, but cannot
1391 themselves be entered into the hash tables. */
1392 if (! def
1393 || TREE_CODE (def) != SSA_NAME
1394 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1395 || gimple_vdef (stmt)
1396 /* Do not record equivalences for increments of ivs. This would create
1397 overlapping live ranges for a very questionable gain. */
1398 || simple_iv_increment_p (stmt))
1399 insert = false;
1400
1401 /* Check if the expression has been computed before. */
1402 cached_lhs = lookup_avail_expr (stmt, insert, avail_exprs_stack);
1403
1404 opt_stats.num_exprs_considered++;
1405
1406 /* Get the type of the expression we are trying to optimize. */
1407 if (is_gimple_assign (stmt))
1408 {
1409 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1410 assigns_var_p = true;
1411 }
1412 else if (gimple_code (stmt) == GIMPLE_COND)
1413 expr_type = boolean_type_node;
1414 else if (is_gimple_call (stmt))
1415 {
1416 gcc_assert (gimple_call_lhs (stmt));
1417 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1418 assigns_var_p = true;
1419 }
1420 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1421 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1422 else if (gimple_code (stmt) == GIMPLE_PHI)
1423 /* We can't propagate into a phi, so the logic below doesn't apply.
1424 Instead record an equivalence between the cached LHS and the
1425 PHI result of this statement, provided they are in the same block.
1426 This should be sufficient to kill the redundant phi. */
1427 {
1428 if (def && cached_lhs)
1429 const_and_copies->record_const_or_copy (def, cached_lhs);
1430 return;
1431 }
1432 else
1433 gcc_unreachable ();
1434
1435 if (!cached_lhs)
1436 return;
1437
1438 /* It is safe to ignore types here since we have already done
1439 type checking in the hashing and equality routines. In fact
1440 type checking here merely gets in the way of constant
1441 propagation. Also, make sure that it is safe to propagate
1442 CACHED_LHS into the expression in STMT. */
1443 if ((TREE_CODE (cached_lhs) != SSA_NAME
1444 && (assigns_var_p
1445 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1446 || may_propagate_copy_into_stmt (stmt, cached_lhs))
1447 {
1448 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1449 || is_gimple_min_invariant (cached_lhs));
1450
1451 if (dump_file && (dump_flags & TDF_DETAILS))
1452 {
1453 fprintf (dump_file, " Replaced redundant expr '");
1454 print_gimple_expr (dump_file, stmt, 0, dump_flags);
1455 fprintf (dump_file, "' with '");
1456 print_generic_expr (dump_file, cached_lhs, dump_flags);
1457 fprintf (dump_file, "'\n");
1458 }
1459
1460 opt_stats.num_re++;
1461
1462 if (assigns_var_p
1463 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1464 cached_lhs = fold_convert (expr_type, cached_lhs);
1465
1466 propagate_tree_value_into_stmt (gsi, cached_lhs);
1467
1468 /* Since it is always necessary to mark the result as modified,
1469 perhaps we should move this into propagate_tree_value_into_stmt
1470 itself. */
1471 gimple_set_modified (gsi_stmt (*gsi), true);
1472 }
1473 }
1474
1475 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1476 the available expressions table or the const_and_copies table.
1477 Detect and record those equivalences into AVAIL_EXPRS_STACK.
1478
1479 We handle only very simple copy equivalences here. The heavy
1480 lifting is done by eliminate_redundant_computations. */
1481
1482 static void
1483 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1484 class avail_exprs_stack *avail_exprs_stack)
1485 {
1486 tree lhs;
1487 enum tree_code lhs_code;
1488
1489 gcc_assert (is_gimple_assign (stmt));
1490
1491 lhs = gimple_assign_lhs (stmt);
1492 lhs_code = TREE_CODE (lhs);
1493
1494 if (lhs_code == SSA_NAME
1495 && gimple_assign_single_p (stmt))
1496 {
1497 tree rhs = gimple_assign_rhs1 (stmt);
1498
1499 /* If the RHS of the assignment is a constant or another variable that
1500 may be propagated, register it in the CONST_AND_COPIES table. We
1501 do not need to record unwind data for this, since this is a true
1502 assignment and not an equivalence inferred from a comparison. All
1503 uses of this ssa name are dominated by this assignment, so unwinding
1504 just costs time and space. */
1505 if (may_optimize_p
1506 && (TREE_CODE (rhs) == SSA_NAME
1507 || is_gimple_min_invariant (rhs)))
1508 {
1509 rhs = dom_valueize (rhs);
1510
1511 if (dump_file && (dump_flags & TDF_DETAILS))
1512 {
1513 fprintf (dump_file, "==== ASGN ");
1514 print_generic_expr (dump_file, lhs, 0);
1515 fprintf (dump_file, " = ");
1516 print_generic_expr (dump_file, rhs, 0);
1517 fprintf (dump_file, "\n");
1518 }
1519
1520 set_ssa_name_value (lhs, rhs);
1521 }
1522 }
1523
1524 /* Make sure we can propagate &x + CST. */
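/* A hypothetical sketch of the transform below:

     p_1 = &a + 4;

   is recorded (modulo the exact printed form) as the equivalence
   p_1 -> &MEM[(void *)&a + 4B], which later uses of p_1 pick up.  */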
1525 if (lhs_code == SSA_NAME
1526 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1527 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1528 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1529 {
1530 tree op0 = gimple_assign_rhs1 (stmt);
1531 tree op1 = gimple_assign_rhs2 (stmt);
1532 tree new_rhs
1533 = build_fold_addr_expr (fold_build2 (MEM_REF,
1534 TREE_TYPE (TREE_TYPE (op0)),
1535 unshare_expr (op0),
1536 fold_convert (ptr_type_node,
1537 op1)));
1538 if (dump_file && (dump_flags & TDF_DETAILS))
1539 {
1540 fprintf (dump_file, "==== ASGN ");
1541 print_generic_expr (dump_file, lhs, 0);
1542 fprintf (dump_file, " = ");
1543 print_generic_expr (dump_file, new_rhs, 0);
1544 fprintf (dump_file, "\n");
1545 }
1546
1547 set_ssa_name_value (lhs, new_rhs);
1548 }
1549
1550 /* A memory store, even an aliased store, creates a useful
1551 equivalence. By exchanging the LHS and RHS, creating suitable
1552 vops and recording the result in the available expression table,
1553 we may be able to expose more redundant loads. */
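/* E.g. (hypothetical): for the store  *p_6 = x_2;  we build the
   artificial statement  x_2 = *p_6;  give it the store's VDEF as
   its VUSE, and enter it into the table, so a dominated load
   y_7 = *p_6;  with the same virtual operand can be replaced by
   x_2.  */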
1554 if (!gimple_has_volatile_ops (stmt)
1555 && gimple_references_memory_p (stmt)
1556 && gimple_assign_single_p (stmt)
1557 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1558 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1559 && !is_gimple_reg (lhs))
1560 {
1561 tree rhs = gimple_assign_rhs1 (stmt);
1562 gassign *new_stmt;
1563
1564 /* Build a new statement with the RHS and LHS exchanged. */
1565 if (TREE_CODE (rhs) == SSA_NAME)
1566 {
1567 /* NOTE tuples. The call to gimple_build_assign below replaced
1568 a call to build_gimple_modify_stmt, which did not set the
1569 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
1570 may cause an SSA validation failure, as the LHS may be a
1571 default-initialized name and should have no definition. I'm
1572 a bit dubious of this, as the artificial statement that we
1573 generate here may in fact be ill-formed, but it is simply
1574 used as an internal device in this pass, and never becomes
1575 part of the CFG. */
1576 gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1577 new_stmt = gimple_build_assign (rhs, lhs);
1578 SSA_NAME_DEF_STMT (rhs) = defstmt;
1579 }
1580 else
1581 new_stmt = gimple_build_assign (rhs, lhs);
1582
1583 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1584
1585 /* Finally enter the statement into the available expression
1586 table. */
1587 lookup_avail_expr (new_stmt, true, avail_exprs_stack);
1588 }
1589 }
1590
1591 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1592 CONST_AND_COPIES. */
1593
1594 static void
1595 cprop_operand (gimple *stmt, use_operand_p op_p)
1596 {
1597 tree val;
1598 tree op = USE_FROM_PTR (op_p);
1599
1600 /* If the operand has a known constant value or it is known to be a
1601 copy of some other variable, use the value or copy stored in
1602 CONST_AND_COPIES. */
1603 val = SSA_NAME_VALUE (op);
1604 if (val && val != op)
1605 {
1606 /* Do not replace hard register operands in asm statements. */
1607 if (gimple_code (stmt) == GIMPLE_ASM
1608 && !may_propagate_copy_into_asm (op))
1609 return;
1610
1611 /* Certain operands are not allowed to be copy propagated due
1612 to their interaction with exception handling and some GCC
1613 extensions. */
1614 if (!may_propagate_copy (op, val))
1615 return;
1616
1617 /* Do not propagate copies into BIVs.
1618 See PR23821 and PR62217 for how this can disturb IV and
1619 number of iteration analysis. */
1620 if (TREE_CODE (val) != INTEGER_CST)
1621 {
1622 gimple *def = SSA_NAME_DEF_STMT (op);
1623 if (gimple_code (def) == GIMPLE_PHI
1624 && gimple_bb (def)->loop_father->header == gimple_bb (def))
1625 return;
1626 }
1627
1628 /* Dump details. */
1629 if (dump_file && (dump_flags & TDF_DETAILS))
1630 {
1631 fprintf (dump_file, " Replaced '");
1632 print_generic_expr (dump_file, op, dump_flags);
1633 fprintf (dump_file, "' with %s '",
1634 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1635 print_generic_expr (dump_file, val, dump_flags);
1636 fprintf (dump_file, "'\n");
1637 }
1638
1639 if (TREE_CODE (val) != SSA_NAME)
1640 opt_stats.num_const_prop++;
1641 else
1642 opt_stats.num_copy_prop++;
1643
1644 propagate_value (op_p, val);
1645
1646 /* And note that we modified this statement. This is now
1647 safe, even if we changed virtual operands since we will
1648 rescan the statement and rewrite its operands again. */
1649 gimple_set_modified (stmt, true);
1650 }
1651 }
1652
1653 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1654 known value for that SSA_NAME (or NULL if no value is known).
1655
1656 Propagate values from CONST_AND_COPIES into the uses, vuses and
1657 vdef_ops of STMT. */
1658
1659 static void
1660 cprop_into_stmt (gimple *stmt)
1661 {
1662 use_operand_p op_p;
1663 ssa_op_iter iter;
1664
1665 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1666 cprop_operand (stmt, op_p);
1667 }
1668
1669 /* Optimize the statement in block BB pointed to by iterator SI
1670 using equivalences from CONST_AND_COPIES and AVAIL_EXPRS_STACK.
1671
1672 We try to perform some simplistic global redundancy elimination and
1673 constant propagation:
1674
1675 1- To detect global redundancy, we keep track of expressions that have
1676 been computed in this block and its dominators. If we find that the
1677 same expression is computed more than once, we eliminate repeated
1678 computations by using the target of the first one.
1679
1680 2- Constant values and copy assignments. This is used to do very
1681 simplistic constant and copy propagation. When a constant or copy
1682 assignment is found, we map the value on the RHS of the assignment to
1683 the variable in the LHS in the CONST_AND_COPIES table. */
1684
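/* A small hypothetical illustration of both effects:

     x_4 = 5;
     a_1 = b_2 + c_3;
     d_5 = b_2 + c_3;   <- redundant, rewritten to d_5 = a_1
     e_6 = x_4;         <- x_4 const-propagated, becoming e_6 = 5  */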
1685 static void
1686 optimize_stmt (basic_block bb, gimple_stmt_iterator si,
1687 class const_and_copies *const_and_copies,
1688 class avail_exprs_stack *avail_exprs_stack)
1689 {
1690 gimple *stmt, *old_stmt;
1691 bool may_optimize_p;
1692 bool modified_p = false;
1693 bool was_noreturn;
1694
1695 old_stmt = stmt = gsi_stmt (si);
1696 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
1697
1698 if (dump_file && (dump_flags & TDF_DETAILS))
1699 {
1700 fprintf (dump_file, "Optimizing statement ");
1701 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1702 }
1703
1704 if (gimple_code (stmt) == GIMPLE_COND)
1705 canonicalize_comparison (as_a <gcond *> (stmt));
1706
1707 update_stmt_if_modified (stmt);
1708 opt_stats.num_stmts++;
1709
1710 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
1711 cprop_into_stmt (stmt);
1712
1713 /* If the statement has been modified with constant replacements,
1714 fold its RHS before checking for redundant computations. */
1715 if (gimple_modified_p (stmt))
1716 {
1717 tree rhs = NULL;
1718
1719 /* Try to fold the statement making sure that STMT is kept
1720 up to date. */
1721 if (fold_stmt (&si))
1722 {
1723 stmt = gsi_stmt (si);
1724 gimple_set_modified (stmt, true);
1725
1726 if (dump_file && (dump_flags & TDF_DETAILS))
1727 {
1728 fprintf (dump_file, " Folded to: ");
1729 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1730 }
1731 }
1732
1733 /* We only need to consider cases that can yield a gimple operand. */
1734 if (gimple_assign_single_p (stmt))
1735 rhs = gimple_assign_rhs1 (stmt);
1736 else if (gimple_code (stmt) == GIMPLE_GOTO)
1737 rhs = gimple_goto_dest (stmt);
1738 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1739 /* This should never be an ADDR_EXPR. */
1740 rhs = gimple_switch_index (swtch_stmt);
1741
1742 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
1743 recompute_tree_invariant_for_addr_expr (rhs);
1744
1745 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
1746 even if fold_stmt updated the stmt already and thus cleared
1747 gimple_modified_p flag on it. */
1748 modified_p = true;
1749 }
1750
1751 /* Check for redundant computations; do this optimization only for
1752 side-effect-free assignments, calls with an LHS, conditionals and switches. */
1753 may_optimize_p = (!gimple_has_side_effects (stmt)
1754 && (is_gimple_assign (stmt)
1755 || (is_gimple_call (stmt)
1756 && gimple_call_lhs (stmt) != NULL_TREE)
1757 || gimple_code (stmt) == GIMPLE_COND
1758 || gimple_code (stmt) == GIMPLE_SWITCH));
1759
1760 if (may_optimize_p)
1761 {
1762 if (gimple_code (stmt) == GIMPLE_CALL)
1763 {
1764 /* Resolve __builtin_constant_p. If it hasn't been
1765 folded to integer_one_node by now, it's fairly
1766 certain that the value simply isn't constant. */
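          /* For instance (hypothetical): a call
             "b_1 = __builtin_constant_p (n_2)" that survives to this
             point is replaced with "b_1 = 0", which typically selects
             the non-constant path in the surrounding code.  */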
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si, const_and_copies,
                                        avail_exprs_stack);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
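      /* For example (hypothetical GIMPLE): once "x_1 = *p_2" has made the
         expression "*p_2" available with value x_1, a later "*p_2 = x_1"
         stores a value that is already in memory.  We detect this by
         looking up the reversed statement "x_1 = *p_2" below and comparing
         its cached LHS against the RHS of the store.  */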
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gassign *new_stmt;
          rhs = dom_valueize (rhs);
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false, avail_exprs_stack);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, avail_exprs_stack);

  /* If STMT is a GIMPLE_COND or GIMPLE_SWITCH and it was modified, then
     we may know where it goes.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt),
                               gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        val = gimple_switch_index (swtch_stmt);

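      /* For example (hypothetical): if propagation turned "if (x_1 > 4)"
         into "if (7 > 4)", VAL folds to 1 and only the true edge out of
         BB can ever be taken.  */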
      if (val && TREE_CODE (val) == INTEGER_CST)
        {
          edge taken_edge = find_taken_edge (bb, val);
          if (taken_edge)
            {
              /* We need to remove any queued jump threads that
                 reference outgoing edges from this block.  */
              edge_iterator ei;
              edge e;
              FOR_EACH_EDGE (e, ei, bb->succs)
                remove_jump_threads_including (e);

              /* Now clean up the control statement at the end of
                 BB and remove unexecutable edges.  */
              remove_ctrl_stmt_and_useless_edges (bb, taken_edge->dest);

              /* Fix up the flags on the single remaining edge.  */
              taken_edge->flags
                &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
              taken_edge->flags |= EDGE_FALLTHRU;

              /* Further simplifications may be possible.  */
              cfg_altered = true;
            }
        }

      /* If we simplified the statement in such a way that we can show it
         cannot trap, update the EH information and the CFG to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }

      if (!was_noreturn
          && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
        need_noreturn_fixup.safe_push (stmt);
    }
}

/* Helper for walk_non_aliased_vuses.  Determine if we arrived at
   the desired memory state.  */

static void *
vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
{
  tree vuse2 = (tree) data;
  if (vuse1 == vuse2)
    return data;

  /* This bounds the stmt walks we perform on reference lookups
     to O(1) instead of O(N) where N is the number of dominating
     stores leading to a candidate.  We re-use the SCCVN param
     for this as it is basically the same complexity.  */
  if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
    return (void *)-1;

  return NULL;
}
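
/* A sketch of the intended use (see lookup_avail_expr below): the walk
   starts from the current statement's VUSE and calls vuse_eq on each
   virtual operand it reaches; arriving at the VUSE recorded in the hash
   table (passed in DATA) proves the relevant memory state is unchanged.  */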

/* Search for an existing instance of STMT in the AVAIL_EXPRS_STACK table.
   If found, return its LHS.  Otherwise, if INSERT is true, insert STMT
   in the table and return NULL_TREE.

   Also, when an expression is first inserted in the table, it is added
   to AVAIL_EXPRS_STACK, so that it can be removed when we finish
   processing this block and its children.  */

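/* For example (hypothetical): the first lookup of "x_1 = a_2 + b_3" with
   INSERT set records "a_2 + b_3 -> x_1" and returns NULL_TREE; a later
   lookup of "y_4 = a_2 + b_3" then returns x_1.  */
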
static tree
lookup_avail_expr (gimple *stmt, bool insert,
                   class avail_exprs_stack *avail_exprs_stack)
{
  expr_hash_elt **slot;
  tree lhs;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  class expr_hash_elt element (stmt, lhs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      element.print (dump_file);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy
     propagator in optimize_stmt.  */
  if (element.expr()->kind == EXPR_SINGLE
      && (TREE_CODE (element.expr()->ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr()->ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  hash_table<expr_elt_hasher> *avail_exprs = avail_exprs_stack->avail_exprs ();
  slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    return NULL_TREE;
  else if (*slot == NULL)
    {
      class expr_hash_elt *element2 = new expr_hash_elt (element);
      *slot = element2;

      avail_exprs_stack->record_expr (element2, NULL, '2');
      return NULL_TREE;
    }

  /* If we found a redundant memory operation, do an alias walk to
     check if we can re-use it.  */
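  /* For example (hypothetical): a load "x_1 = *p_2" recorded with VUSE
     .MEM_10 can still satisfy a later load of *p_2 whose VUSE is .MEM_12,
     provided none of the intervening stores between the two virtual
     operands may alias *p_2; the walk below checks exactly that.  */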
  if (gimple_vuse (stmt) != (*slot)->vop ())
    {
      tree vuse1 = (*slot)->vop ();
      tree vuse2 = gimple_vuse (stmt);
      /* If we have a load of a register and a candidate in the
         hash with vuse1 then try to reach its stmt by walking
         up the virtual use-def chain using walk_non_aliased_vuses.
         But don't do this when removing expressions from the hash.  */
      ao_ref ref;
      if (!(vuse1 && vuse2
            && gimple_assign_single_p (stmt)
            && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
            && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
            && walk_non_aliased_vuses (&ref, vuse2,
                                       vuse_eq, NULL, NULL, vuse1) != NULL))
        {
          if (insert)
            {
              class expr_hash_elt *element2 = new expr_hash_elt (element);

              /* Insert the expr into the hash by replacing the current
                 entry and recording the value to restore in the
                 avail_exprs_stack.  */
              avail_exprs_stack->record_expr (element2, *slot, '2');
              *slot = element2;
            }
          return NULL_TREE;
        }
    }

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = (*slot)->lhs ();

  lhs = dom_valueize (lhs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}