/* Optimization of PHI nodes by converting them into straightline code.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-inline.h"
#include "case-cfn-macros.h"
#include "gimple-fold.h"
#include "internal-fn.h"
static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
                                   tree, tree);
static bool conditional_replacement (basic_block, basic_block,
                                     edge, edge, gphi *, tree, tree);
static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
                                                gimple *);
static int value_replacement (basic_block, basic_block,
                              edge, edge, gimple *, tree, tree);
static bool minmax_replacement (basic_block, basic_block,
                                edge, edge, gimple *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
                             edge, edge, gimple *, tree, tree);
static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
                                                      edge, edge, gimple *,
                                                      tree, tree);
static bool cond_store_replacement (basic_block, basic_block, edge, edge,
                                    hash_set<tree> *);
static bool cond_if_else_store_replacement (basic_block, basic_block,
                                            basic_block);
static hash_set<tree> * get_non_trapping ();
static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
static void hoist_adjacent_loads (basic_block, basic_block,
                                  basic_block, basic_block);
static bool gate_hoist_loads (void);
/* This pass tries to transform conditional stores into unconditional
   ones, enabling further simplifications with the simpler then and else
   blocks.  In particular it replaces this:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS;
     bb2:

   with

     bb0:
       if (cond) goto bb1; else goto bb2;
     bb1:
       condtmp' = *p;
     bb2:
       condtmp = PHI <RHS, condtmp'>
       *p = condtmp;

   This transformation can only be done under several constraints,
   documented below.  It also replaces:

     bb0:
       if (cond) goto bb2; else goto bb1;
     bb1:
       *p = RHS1;
       goto bb3;
     bb2:
       *p = RHS2;
     bb3:

   with

     bb0:
       if (cond) goto bb3; else goto bb1;
     bb1:
     bb3:
       condtmp = PHI <RHS1, RHS2>
       *p = condtmp;  */
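
/* A minimal source-level sketch of the first shape above (names are
   illustrative only, not taken from any testcase):

     if (cond)
       *p = v;          // conditional store in bb1

   is, once *p is known not to trap, rewritten as if it were

     tmp = cond ? v : *p;
     *p = tmp;          // one unconditional store  */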
static unsigned int
tree_ssa_cs_elim (void)
{
  unsigned todo;
  /* ???  We are not interested in loop related info, but the following
     will create it, ICEing as we didn't init loops with pre-headers.
     An interfacing issue of find_data_references_in_bb.  */
  loop_optimizer_init (LOOPS_NORMAL);
  scev_initialize ();
  todo = tree_ssa_phiopt_worker (true, false, false);
  scev_finalize ();
  loop_optimizer_finalize ();
  return todo;
}
/* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1.  */

static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose two edge arguments differ, then
         there is no singleton for these PHIs; return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }
  return phi;
}
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */

static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple *cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
        continue;

      /* Punt if bb1 has no successors, bb2 is missing, or bb2 has no
         successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          std::swap (bb1, bb2);
          std::swap (e1, e2);
        }
      else if (do_store_elim
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!single_succ_p (bb1)
              || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
              || !single_succ_p (bb2)
              || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
              || EDGE_COUNT (bb3->preds) != 2)
            continue;
          if (cond_if_else_store_replacement (bb1, bb2, bb3))
            cfgchanged = true;
          continue;
        }
      else if (do_hoist_loads
               && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
        {
          basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

          if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
              && single_succ_p (bb1)
              && single_succ_p (bb2)
              && single_pred_p (bb1)
              && single_pred_p (bb2)
              && EDGE_COUNT (bb->succs) == 2
              && EDGE_COUNT (bb3->preds) == 2
              /* If one edge or the other is dominant, a conditional move
                 is likely to perform worse than the well-predicted branch.  */
              && !predictable_edge_p (EDGE_SUCC (bb, 0))
              && !predictable_edge_p (EDGE_SUCC (bb, 1)))
            hoist_adjacent_loads (bb, bb1, bb2, bb3);
          continue;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      if (do_store_elim)
        {
          /* bb1 is the middle block, bb2 the join block, bb the split block,
             e1 the fallthrough edge from bb1 to bb2.  We can't do the
             optimization if the join block has more than two predecessors.  */
          if (EDGE_COUNT (bb2->preds) > 2)
            continue;
          if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
            cfgchanged = true;
        }
      else
        {
          gimple_seq phis = phi_nodes (bb2);
          gimple_stmt_iterator gsi;
          bool candorest = true;

          /* Value replacement can work with more than one PHI
             so try that first.  */
          if (!early_p)
            for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
              {
                phi = as_a <gphi *> (gsi_stmt (gsi));
                arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
                arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
                if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
                  {
                    candorest = false;
                    cfgchanged = true;
                    break;
                  }
              }

          if (!candorest)
            continue;

          phi = single_non_singleton_phi_for_edges (phis, e1, e2);
          if (!phi)
            continue;

          arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
          arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

          /* Something is wrong if we cannot find the arguments in the PHI
             node.  */
          gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);

          gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
                                                            arg0, arg1,
                                                            cond_stmt);
          if (newphi != NULL)
            {
              phi = newphi;
              /* factor_out_conditional_conversion may create a new PHI in
                 BB2 and eliminate an existing PHI in BB2.  Recompute values
                 that may be affected by that change.  */
              arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
              arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
              gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
            }

          /* Do the replacement of conditional if it can be done.  */
          if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && conditional_replacement (bb, bb1, e1, e2, phi,
                                               arg0, arg1))
            cfgchanged = true;
          else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
          else if (!early_p
                   && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
                                                                e2, phi, arg0,
                                                                arg1))
            cfgchanged = true;
          else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
            cfgchanged = true;
        }
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
/* Replace PHI node element whose edge is E in block BB with variable NEW.
   Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
   is known to have two edges, one of which must reach BB).  */

static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple *phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
             "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
             cond_block->index,
             bb->index);
}
/* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
   stmt are conversions, factor the conversion out and apply it to the
   result of the PHI stmt instead.  COND_STMT is the controlling predicate.
   Return the newly-created PHI, if any.  */
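
/* An illustrative sketch (SSA names made up):

     bb1: t_4 = (int) a_2;
     bb2: # r_5 = PHI <t_4(bb1), 7(bb0)>

   becomes

     bb2: # new_6 = PHI <a_2(bb1), 7(bb0)>
          r_5 = (int) new_6;

   so the conversion is performed once, on the PHI result.  */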
static gphi *
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
                                   tree arg0, tree arg1, gimple *cond_stmt)
{
  gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  location_t locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return NULL;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
          && TREE_CODE (arg1) != INTEGER_CST))
    return NULL;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!gimple_assign_cast_p (arg0_def_stmt))
    return NULL;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      new_arg0 = TREE_OPERAND (new_arg0, 0);
      if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
        return NULL;
    }

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
         is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
          || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
        return NULL;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
        new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
          && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
        {
          if (gimple_assign_cast_p (arg0_def_stmt))
            {
              /* For the INTEGER_CST case, we are just moving the
                 conversion from one place to another, which can often
                 hurt as the conversion moves further away from the
                 statement that computes the value.  So, perform this
                 only if new_arg0 is an operand of COND_STMT, or
                 if arg0_def_stmt is the only non-debug stmt in
                 its basic block, because then it is possible this
                 could enable further optimizations (minmax replacement
                 etc.).  See PR71016.  */
              if (new_arg0 != gimple_cond_lhs (cond_stmt)
                  && new_arg0 != gimple_cond_rhs (cond_stmt)
                  && gimple_bb (arg0_def_stmt) == e0->src)
                {
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_prev_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    {
                      if (gassign *assign
                            = dyn_cast <gassign *> (gsi_stmt (gsi)))
                        {
                          tree lhs = gimple_assign_lhs (assign);
                          enum tree_code ass_code
                            = gimple_assign_rhs_code (assign);
                          if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
                            return NULL;
                          if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
                            return NULL;
                          gsi_prev_nondebug (&gsi);
                          if (!gsi_end_p (gsi))
                            return NULL;
                        }
                      else
                        return NULL;
                    }
                  gsi = gsi_for_stmt (arg0_def_stmt);
                  gsi_next_nondebug (&gsi);
                  if (!gsi_end_p (gsi))
                    return NULL;
                }
              new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
            }
          else
            return NULL;
        }
      else
        return NULL;
    }

  /* If arg0/arg1 have > 1 use, then this transformation actually increases
     the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return NULL;

  /* If types of new_arg0 and new_arg1 are different bailout.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return NULL;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi));
      fprintf (dump_file,
               " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  release_defs (arg0_def_stmt);

  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
      release_defs (arg1_def_stmt);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);

  return newphi;
}
/* Optimize
   # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
   if (x_5 op cstN) # where op is == or != and N is 1 or 2
     goto bb3;
   else
     goto bb4;
   bb3:
   bb4:
   # r_6 = PHI <cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1

   to r_6 = x_5 + (min (cst3, cst4) - cst1) or
   r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
   of cst3 and cst4 is smaller.  */
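
/* A concrete sketch (constants made up): with x_5 known to be in [10, 11],

     r_6 = (x_5 == 10) ? 7 : 8;

   becomes the branchless

     r_6 = x_5 - 3;     // 10 -> 7, 11 -> 8  */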
static bool
two_value_replacement (basic_block cond_bb, basic_block middle_bb,
                       edge e1, gphi *phi, tree arg0, tree arg1)
{
  /* Only look for adjacent integer constants.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
      || TREE_CODE (arg0) != INTEGER_CST
      || TREE_CODE (arg1) != INTEGER_CST
      || (tree_int_cst_lt (arg0, arg1)
          ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
          : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  gimple *stmt = last_stmt (cond_bb);
  tree lhs = gimple_cond_lhs (stmt);
  tree rhs = gimple_cond_rhs (stmt);

  if (TREE_CODE (lhs) != SSA_NAME
      || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
      || TREE_CODE (rhs) != INTEGER_CST)
    return false;

  switch (gimple_cond_code (stmt))
    {
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return false;
    }

  /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
     conditional_replacement.  */
  if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
      && (integer_zerop (arg0)
          || integer_zerop (arg1)
          || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || (TYPE_PRECISION (TREE_TYPE (arg0))
              <= TYPE_PRECISION (TREE_TYPE (lhs)))))
    return false;

  wide_int min, max;
  if (get_range_info (lhs, &min, &max) != VR_RANGE)
    {
      int prec = TYPE_PRECISION (TREE_TYPE (lhs));
      signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
      min = wi::min_value (prec, sgn);
      max = wi::max_value (prec, sgn);
    }
  if (min + 1 != max
      || (wi::to_wide (rhs) != min
          && wi::to_wide (rhs) != max))
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((gimple_cond_code (stmt) == EQ_EXPR)
      ^ (wi::to_wide (rhs) == max)
      ^ (e1 == false_edge))
    std::swap (arg0, arg1);

  tree type;
  if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
    {
      /* Avoid performing the arithmetics in bool type which has different
         semantics, otherwise prefer unsigned types from the two with
         the same precision.  */
      if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
          || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
        type = TREE_TYPE (lhs);
      else
        type = TREE_TYPE (arg0);
    }
  else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
    type = TREE_TYPE (lhs);
  else
    type = TREE_TYPE (arg0);

  min = wide_int::from (min, TYPE_PRECISION (type),
                        TYPE_SIGN (TREE_TYPE (lhs)));
  wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
                               TYPE_SIGN (TREE_TYPE (arg0)));
  enum tree_code code;
  wi::overflow_type ovf;
  if (tree_int_cst_lt (arg0, arg1))
    {
      code = PLUS_EXPR;
      a -= min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to add a
             to it.  Check if that operation can overflow for those 2 values
             and if yes, force unsigned type.  */
          wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }
  else
    {
      code = MINUS_EXPR;
      a += min;
      if (!TYPE_UNSIGNED (type))
        {
          /* lhs is known to be in range [min, min+1] and we want to subtract
             it from a.  Check if that operation can overflow for those 2
             values and if yes, force unsigned type.  */
          wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
          if (ovf)
            type = unsigned_type_for (type);
        }
    }

  tree arg = wide_int_to_tree (type, a);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
    lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
  tree new_rhs;
  if (code == PLUS_EXPR)
    new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
  else
    new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
  if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
    new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);

  /* Note that we optimized this PHI.  */
  return true;
}
/* The function conditional_replacement does the main work of doing the
   conditional replacement.  Return true if the replacement is done.
   Otherwise return false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from PHI.  Likewise for ARG1.  */
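
/* Sketches of the shapes handled here (source level, names made up):

     r = cond ? 1 : 0;    becomes   r = cond;
     r = cond ? 0 : 1;    becomes   r = !cond;
     r = cond ? -1 : 0;   becomes   r = -(TYPE) cond;
     r = cond ? 4 : 0;    becomes   r = (TYPE) cond << 2;  */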
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gphi *phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple *stmt;
  gassign *new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;
  bool neg = false;
  int shift = 0;
  tree nonzero_arg;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  /* We aren't prepared to handle vectors either (and it is a question
     if it would be worthwhile anyway).  */
  if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
        || POINTER_TYPE_P (TREE_TYPE (arg0)))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
           || POINTER_TYPE_P (TREE_TYPE (arg1))))
    return false;

  /* The PHI arguments have the constants 0 and 1, or 0 and -1 or
     0 and (1 << cst), then convert it to the conditional.  */
  if (integer_zerop (arg0))
    nonzero_arg = arg1;
  else if (integer_zerop (arg1))
    nonzero_arg = arg0;
  else
    return false;
  if (integer_all_onesp (nonzero_arg))
    neg = true;
  else if (integer_pow2p (nonzero_arg))
    {
      shift = tree_log2 (nonzero_arg);
      if (shift && POINTER_TYPE_P (TREE_TYPE (nonzero_arg)))
        return false;
    }
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1) or (0, -1) or (0, (1 << shift)).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND' or dest = (COND) << shift

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge as the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2_loc (gimple_location (stmt),
                          gimple_cond_code (stmt), boolean_type_node,
                          gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && !integer_zerop (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && !integer_zerop (arg1)))
    cond = fold_build1_loc (gimple_location (stmt),
                            TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  if (neg)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build1_loc (gimple_location (stmt),
                              NEGATE_EXPR, TREE_TYPE (cond), cond);
    }
  else if (shift)
    {
      cond = fold_convert_loc (gimple_location (stmt),
                               TREE_TYPE (result), cond);
      cond = fold_build2_loc (gimple_location (stmt),
                              LSHIFT_EXPR, TREE_TYPE (cond), cond,
                              build_int_cst (integer_type_node, shift));
    }

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      location_t locus_0, locus_1;

      new_var2 = make_ssa_name (TREE_TYPE (result));
      new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;

      /* Set the locus to the first argument, unless it doesn't have one.  */
      locus_0 = gimple_phi_arg_location (phi, 0);
      locus_1 = gimple_phi_arg_location (phi, 1);
      if (locus_0 == UNKNOWN_LOCATION)
        locus_0 = locus_1;
      gimple_set_location (new_stmt, locus_0);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Update *ARG which is defined in STMT so that it contains the
   computed value if that seems profitable.  Return true if the
   statement is made dead by that rewriting.  */
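
/* E.g. for "arg = &p->i" with i at offset 0, *ARG is rewritten to "p",
   making the ADDR_EXPR statement dead.  */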
static bool
jump_function_from_stmt (tree *arg, gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (code == ADDR_EXPR)
    {
      /* For arg = &p->i transform it to p, if possible.  */
      tree rhs1 = gimple_assign_rhs1 (stmt);
      poly_int64 offset;
      tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
                                                &offset);
      if (tem
          && TREE_CODE (tem) == MEM_REF
          && known_eq (mem_ref_offset (tem) + offset, 0))
        {
          *arg = TREE_OPERAND (tem, 0);
          return true;
        }
    }
  /* TODO: Much like IPA-CP jump-functions we want to handle constant
     additions symbolically here, and we'd need to update the comparison
     code that compares the arg + cst tuples in our caller.  For now the
     code above exactly handles the VEC_BASE pattern from vec.h.  */
  return false;
}
/* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
   of the form SSA_NAME NE 0.

   If RHS is fed by a simple EQ_EXPR comparison of two values, see if
   the two input values of the EQ_EXPR match arg0 and arg1.

   If so update *code and return TRUE.  Otherwise return FALSE.  */
static bool
rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
                                  enum tree_code *code, const_tree rhs)
{
  /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
     statement.  */
  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def1 = SSA_NAME_DEF_STMT (rhs);

      /* Verify the defining statement has an EQ_EXPR on the RHS.  */
      if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
        {
          /* Finally verify the source operands of the EQ_EXPR are equal
             to arg0 and arg1.  */
          tree op0 = gimple_assign_rhs1 (def1);
          tree op1 = gimple_assign_rhs2 (def1);
          if ((operand_equal_for_phi_arg_p (arg0, op0)
               && operand_equal_for_phi_arg_p (arg1, op1))
              || (operand_equal_for_phi_arg_p (arg0, op1)
                  && operand_equal_for_phi_arg_p (arg1, op0)))
            {
              /* We will perform the optimization.  */
              *code = gimple_assign_rhs_code (def1);
              return true;
            }
        }
    }
  return false;
}
/* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.

   Also return TRUE if arg0/arg1 are equal to the source arguments of
   an EQ comparison feeding a BIT_AND_EXPR which feeds COND.

   Return FALSE otherwise.  */
static bool
operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
                                     enum tree_code *code, gimple *cond)
{
  gimple *def;
  tree lhs = gimple_cond_lhs (cond);
  tree rhs = gimple_cond_rhs (cond);

  if ((operand_equal_for_phi_arg_p (arg0, lhs)
       && operand_equal_for_phi_arg_p (arg1, rhs))
      || (operand_equal_for_phi_arg_p (arg1, lhs)
          && operand_equal_for_phi_arg_p (arg0, rhs)))
    return true;

  /* Now handle more complex case where we have an EQ comparison
     which feeds a BIT_AND_EXPR which feeds COND.

     First verify that COND is of the form SSA_NAME NE 0.  */
  if (*code != NE_EXPR || !integer_zerop (rhs)
      || TREE_CODE (lhs) != SSA_NAME)
    return false;

  /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
  def = SSA_NAME_DEF_STMT (lhs);
  if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
    return false;

  /* Now verify arg0/arg1 correspond to the source arguments of an
     EQ comparison feeding the BIT_AND_EXPR.  */

  tree tmp = gimple_assign_rhs1 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  tmp = gimple_assign_rhs2 (def);
  if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
    return true;

  return false;
}
/* Returns true if ARG is a neutral element for operation CODE
   on the RIGHT side.  */
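
/* E.g. 0 is neutral for x + 0 and for x - 0 (right side only), 1 for
   x * 1 and x / 1 (right side only), and ~0 for x & ~0.  */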
static bool
neutral_element_p (tree_code code, tree arg, bool right)
{
  switch (code)
    {
    case PLUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return integer_zerop (arg);

    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return right && integer_zerop (arg);

    case MULT_EXPR:
      return integer_onep (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return right && integer_onep (arg);

    case BIT_AND_EXPR:
      return integer_all_onesp (arg);

    default:
      return false;
    }
}
/* Returns true if ARG is an absorbing element for operation CODE.  */
static bool
absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
{
  switch (code)
    {
    case BIT_IOR_EXPR:
      return integer_all_onesp (arg);

    case MULT_EXPR:
    case BIT_AND_EXPR:
      return integer_zerop (arg);

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return !right && integer_zerop (arg);

    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return (!right
              && integer_zerop (arg)
              && tree_single_nonzero_warnv_p (rval, NULL));

    default:
      return false;
    }
}
/* The function value_replacement does the main work of doing the value
   replacement.  Return non-zero if the replacement is done.  Otherwise return
   0.  If we remove the middle basic block, return 2.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
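
/* Some sketches of what this catches (names and constants made up):

     r = (x == 52) ? 52 : x;        // reduces to r = x;
     r = (x != 0) ? x + y : y;      // 0 is neutral for +:  r = x + y;
     r = (x != 0) ? x * y : 0;      // 0 absorbs *:  r = x * y;  */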
static int
value_replacement (basic_block cond_bb, basic_block middle_bb,
                   edge e0, edge e1, gimple *phi,
                   tree arg0, tree arg1)
{
  gimple_stmt_iterator gsi;
  gimple *cond;
  edge true_edge, false_edge;
  enum tree_code code;
  bool empty_or_with_defined_p = true;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return 0;

  /* If there is a statement in MIDDLE_BB that defines one of the PHI
     arguments, then adjust arg0 or arg1.  */
  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      gsi_next_nondebug (&gsi);
      if (!is_gimple_assign (stmt))
        {
          if (gimple_code (stmt) != GIMPLE_PREDICT
              && gimple_code (stmt) != GIMPLE_NOP)
            empty_or_with_defined_p = false;
          continue;
        }
      /* Now try to adjust arg0 or arg1 according to the computation
         in the statement.  */
      lhs = gimple_assign_lhs (stmt);
      if (!(lhs == arg0
            && jump_function_from_stmt (&arg0, stmt))
          || (lhs == arg1
              && jump_function_from_stmt (&arg1, stmt)))
        empty_or_with_defined_p = false;
    }

  cond = last_stmt (cond_bb);
  code = gimple_cond_code (cond);

  /* This transformation is only valid for equality comparisons.  */
  if (code != NE_EXPR && code != EQ_EXPR)
    return 0;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* At this point we know we have a COND_EXPR with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.

     There is a single PHI node at the join point (BB) with two arguments.

     We now need to verify that the two arguments in the PHI node match
     the two arguments to the equality comparison.  */

  if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
    {
      edge e;
      tree arg;

      /* For NE_EXPR, we want to build an assignment result = arg where
         arg is the PHI argument associated with the true edge.  For
         EQ_EXPR we want the PHI argument associated with the false edge.  */
      e = (code == NE_EXPR ? true_edge : false_edge);

      /* Unfortunately, E may not reach BB (it may instead have gone to
         OTHER_BLOCK).  If that is the case, then we want the single outgoing
         edge from OTHER_BLOCK which reaches BB and represents the desired
         path from COND_BLOCK.  */
      if (e->dest == middle_bb)
        e = single_succ_edge (e->dest);

      /* Now we know the incoming edge to BB that has the argument for the
         RHS of our new assignment statement.  */
      if (e0 == e)
        arg = arg0;
      else
        arg = arg1;

      /* If the middle basic block was empty or is defining the
         PHI arguments and this is a single phi where the args are different
         for the edges e0 and e1 then we can remove the middle basic block.  */
      if (empty_or_with_defined_p
          && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
                                                 e0, e1) == phi)
        {
          replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
          /* Note that we optimized this PHI.  */
          return 2;
        }
      else
        {
          /* Replace the PHI arguments with arg.  */
          SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
          SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "PHI ");
              print_generic_expr (dump_file, gimple_phi_result (phi));
              fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
                       cond_bb->index);
              print_generic_expr (dump_file, arg);
              fprintf (dump_file, ".\n");
            }
          return 1;
        }
    }

  /* Now optimize (x != 0) ? x + y : y to just x + y.  */
  gsi = gsi_last_nondebug_bb (middle_bb);
  if (gsi_end_p (gsi))
    return 0;

  gimple *assign = gsi_stmt (gsi);
  if (!is_gimple_assign (assign)
      || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
      || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
          && !POINTER_TYPE_P (TREE_TYPE (arg0))))
    return 0;

  /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return 0;

  /* Allow up to 2 cheap preparation statements that prepare argument
     for assign, e.g.:
      if (y_4 != 0)
        goto <bb 3>;
      else
        goto <bb 4>;
     <bb 3>:
      _1 = (int) y_4;
      iftmp.0_6 = x_5(D) r<< _1;
     <bb 4>:
      # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
     or:
      if (y_3(D) == 0)
        goto <bb 4>;
      else
        goto <bb 3>;
     <bb 3>:
      y_4 = y_3(D) & 31;
      _1 = (int) y_4;
      _6 = x_5(D) r<< _1;
     <bb 4>:
      # _2 = PHI <x_5(D)(2), _6(3)>  */
  gimple *prep_stmt[2] = { NULL, NULL };
  int prep_cnt;
  for (prep_cnt = 0; ; prep_cnt++)
    {
      gsi_prev_nondebug (&gsi);
      if (gsi_end_p (gsi))
        break;

      gimple *g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_LABEL)
        break;

      if (prep_cnt == 2 || !is_gimple_assign (g))
        return 0;

      tree lhs = gimple_assign_lhs (g);
      tree rhs1 = gimple_assign_rhs1 (g);
      use_operand_p use_p;
      gimple *use_stmt;
      if (TREE_CODE (lhs) != SSA_NAME
          || TREE_CODE (rhs1) != SSA_NAME
          || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
          || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
          || !single_imm_use (lhs, &use_p, &use_stmt)
          || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
        return 0;
      switch (gimple_assign_rhs_code (g))
        {
        CASE_CONVERT:
          break;
        case PLUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_XOR_EXPR:
          if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
            return 0;
          break;
        default:
          return 0;
        }
      prep_stmt[prep_cnt] = g;
    }

  /* Only transform if it removes the condition.  */
  if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
    return 0;

  /* Size-wise, this is always profitable.  */
  if (optimize_bb_for_speed_p (cond_bb)
      /* The special case is useless if it has a low probability.  */
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
      /* If assign is cheap, there is no point avoiding it.  */
      && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
         >= 3 * estimate_num_insns (cond, &eni_time_weights))
    return 0;

  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  tree rhs2 = gimple_assign_rhs2 (assign);
  enum tree_code code_def = gimple_assign_rhs_code (assign);
  tree cond_lhs = gimple_cond_lhs (cond);
  tree cond_rhs = gimple_cond_rhs (cond);

  /* Propagate the cond_rhs constant through preparation stmts,
     make sure UB isn't invoked while doing that.  */
  for (int i = prep_cnt - 1; i >= 0; --i)
    {
      gimple *g = prep_stmt[i];
      tree grhs1 = gimple_assign_rhs1 (g);
      if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
        return 0;
      cond_lhs = gimple_assign_lhs (g);
      cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
      if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
        {
          cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
                                      gimple_assign_rhs2 (g));
          if (TREE_OVERFLOW (cond_rhs))
            return 0;
        }
      cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
      if (TREE_CODE (cond_rhs) != INTEGER_CST
          || TREE_OVERFLOW (cond_rhs))
        return 0;
    }

  if (((code == NE_EXPR && e1 == false_edge)
       || (code == EQ_EXPR && e1 == true_edge))
      && arg0 == lhs
      && ((arg1 == rhs1
           && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
           && neutral_element_p (code_def, cond_rhs, true))
          || (arg1 == rhs2
              && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
              && neutral_element_p (code_def, cond_rhs, false))
          || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
              && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
                   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
                  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
                      && absorbing_element_p (code_def,
                                              cond_rhs, false, rhs2))))))
    {
      gsi = gsi_for_stmt (cond);
      /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
         def-stmt in:
         if (n_5 != 0)
           goto <bb 3>;
         else
           goto <bb 4>;

         <bb 3>:
         # RANGE [0, 4294967294]
         u_6 = n_5 + 4294967295;

         <bb 4>:
         # u_3 = PHI <u_6(3), 4294967295(2)>  */
      reset_flow_sensitive_info (lhs);
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
          struct range_info_def *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
            duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
                                           phires_range_info);
        }
      gimple_stmt_iterator gsi_from;
      for (int i = prep_cnt - 1; i >= 0; --i)
        {
          tree plhs = gimple_assign_lhs (prep_stmt[i]);
          reset_flow_sensitive_info (plhs);
          gsi_from = gsi_for_stmt (prep_stmt[i]);
          gsi_move_before (&gsi_from, &gsi);
        }
      gsi_from = gsi_for_stmt (assign);
      gsi_move_before (&gsi_from, &gsi);
      replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
      return 2;
    }

  return 0;
}
/* The function minmax_replacement does the main work of doing the minmax
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   BB is the basic block where the replacement is going to be done.  ARG0
   is argument 0 from the PHI.  Likewise for ARG1.  */
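
/* Illustrative sketches (names made up):

     r = (a < b) ? a : b;    // becomes r = MIN_EXPR <a, b>
     r = (a < 8) ? a : 7;    // a < 8 is a <= 7, so r = MIN_EXPR <a, 7>  */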
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple *phi,
                    tree arg0, tree arg1)
{
  tree result;
  edge true_edge, false_edge;
  enum tree_code minmax, ass_code;
  tree smaller, larger, arg_true, arg_false;
  gimple_stmt_iterator gsi, gsi_from;

  tree type = TREE_TYPE (PHI_RESULT (phi));

  /* The optimization may be unsafe due to NaNs.  */
  if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
    return false;

  gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
  enum tree_code cmp = gimple_cond_code (cond);
  tree rhs = gimple_cond_rhs (cond);

  /* Turn EQ/NE of extreme values to order comparisons.  */
  if ((cmp == NE_EXPR || cmp == EQ_EXPR)
      && TREE_CODE (rhs) == INTEGER_CST
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
    {
      if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::min_value (TREE_TYPE (rhs)) + 1);
        }
      else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
        {
          cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
          rhs = wide_int_to_tree (TREE_TYPE (rhs),
                                  wi::max_value (TREE_TYPE (rhs)) - 1);
        }
    }

  /* This transformation is only valid for order comparisons.  Record which
     operand is smaller/larger if the result of the comparison is true.  */
  tree alt_smaller = NULL_TREE;
  tree alt_larger = NULL_TREE;
  if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
      smaller = gimple_cond_lhs (cond);
      larger = rhs;
      /* If we have smaller < CST it is equivalent to smaller <= CST-1.
         Likewise smaller <= CST is equivalent to smaller < CST+1.  */
      if (TREE_CODE (larger) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
        {
          if (cmp == LT_EXPR)
            {
              wi::overflow_type overflow;
              wide_int alt = wi::sub (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
          else
            {
              wi::overflow_type overflow;
              wide_int alt = wi::add (wi::to_wide (larger), 1,
                                      TYPE_SIGN (TREE_TYPE (larger)),
                                      &overflow);
              if (! overflow)
                alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
            }
        }
    }
  else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      smaller = rhs;
      larger = gimple_cond_lhs (cond);
      /* If we have larger > CST it is equivalent to larger >= CST+1.
         Likewise larger >= CST is equivalent to larger > CST-1.  */
      if (TREE_CODE (smaller) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
        {
          wi::overflow_type overflow;
          if (cmp == GT_EXPR)
            {
              wide_int alt = wi::add (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
          else
            {
              wide_int alt = wi::sub (wi::to_wide (smaller), 1,
                                      TYPE_SIGN (TREE_TYPE (smaller)),
                                      &overflow);
              if (! overflow)
                alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
            }
        }
    }
  else
    return false;

  /* Handle the special case of (signed_type)x < 0 being equivalent
     to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
     to x <= MAX_VAL(signed_type).  */
  if ((cmp == GE_EXPR || cmp == LT_EXPR)
      && INTEGRAL_TYPE_P (type)
      && TYPE_UNSIGNED (type)
      && integer_zerop (rhs))
    {
      tree op = gimple_cond_lhs (cond);
      if (TREE_CODE (op) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (op))
          && !TYPE_UNSIGNED (TREE_TYPE (op)))
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (op);
          if (gimple_assign_cast_p (def_stmt))
            {
              tree op1 = gimple_assign_rhs1 (def_stmt);
              if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
                  && TYPE_UNSIGNED (TREE_TYPE (op1))
                  && (TYPE_PRECISION (TREE_TYPE (op))
                      == TYPE_PRECISION (TREE_TYPE (op1)))
                  && useless_type_conversion_p (type, TREE_TYPE (op1)))
                {
                  wide_int w1 = wi::max_value (TREE_TYPE (op));
                  wide_int w2 = wi::add (w1, 1);
                  if (cmp == LT_EXPR)
                    {
                      larger = op1;
                      smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_larger = NULL_TREE;
                    }
                  else
                    {
                      smaller = op1;
                      larger = wide_int_to_tree (TREE_TYPE (op1), w1);
                      alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
                      alt_smaller = NULL_TREE;
                    }
                }
            }
        }
    }

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* Forward the edges over the middle basic block.  */
  if (true_edge->dest == middle_bb)
    true_edge = EDGE_SUCC (true_edge->dest, 0);
  if (false_edge->dest == middle_bb)
    false_edge = EDGE_SUCC (false_edge->dest, 0);

  if (true_edge == e0)
    {
      gcc_assert (false_edge == e1);
      arg_true = arg0;
      arg_false = arg1;
    }
  else
    {
      gcc_assert (false_edge == e0);
      gcc_assert (true_edge == e1);
      arg_true = arg1;
      arg_false = arg0;
    }

  if (empty_block_p (middle_bb))
    {
      if ((operand_equal_for_phi_arg_p (arg_true, smaller)
           || (alt_smaller
               && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
          && (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        {
          /* Case

             if (smaller < larger)
               rslt = smaller;
             else
               rslt = larger;  */
          minmax = MIN_EXPR;
        }
      else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
                || (alt_smaller
                    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
               && (operand_equal_for_phi_arg_p (arg_true, larger)
                   || (alt_larger
                       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
        minmax = MAX_EXPR;
      else
        return false;
    }
  else
    {
      /* Recognize the following case, assuming d <= u:

         if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

      gimple *assign = last_and_only_stmt (middle_bb);
      tree lhs, op0, op1, bound;

      if (!assign
          || gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

      lhs = gimple_assign_lhs (assign);
      ass_code = gimple_assign_rhs_code (assign);
      if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
        return false;
      op0 = gimple_assign_rhs1 (assign);
      op1 = gimple_assign_rhs2 (assign);

      if (true_edge->src == middle_bb)
        {
          /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_true))
            return false;

          if (operand_equal_for_phi_arg_p (arg_false, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND <= LARGER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_false,
                                                       alt_smaller)))
            {
              /* Case

                 if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger)
                  || (alt_larger
                      && operand_equal_for_phi_arg_p (op0, alt_larger)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger)
                       || (alt_larger
                           && operand_equal_for_phi_arg_p (op1, alt_larger)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }
      else
        {
          /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
          if (!operand_equal_for_phi_arg_p (lhs, arg_false))
            return false;

          if (operand_equal_for_phi_arg_p (arg_true, larger)
              || (alt_larger
                  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
              if (ass_code != MIN_EXPR)
                return false;

              minmax = MAX_EXPR;
              if (operand_equal_for_phi_arg_p (op0, smaller)
                  || (alt_smaller
                      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, smaller)
                       || (alt_smaller
                           && operand_equal_for_phi_arg_p (op1, alt_smaller)))
                bound = op0;
              else
                return false;

              /* We need BOUND >= LARGER.  */
              if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                  bound, larger)))
                return false;
            }
          else if (operand_equal_for_phi_arg_p (arg_true, smaller)
                   || (alt_smaller
                       && operand_equal_for_phi_arg_p (arg_true,
                                                       alt_smaller)))
            {
              /* Case

                 if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
              if (ass_code != MAX_EXPR)
                return false;

              minmax = MIN_EXPR;
              if (operand_equal_for_phi_arg_p (op0, larger))
                bound = op1;
              else if (operand_equal_for_phi_arg_p (op1, larger))
                bound = op0;
              else
                return false;

              /* We need BOUND <= SMALLER.  */
              if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                  bound, smaller)))
                return false;
            }
          else
            return false;
        }

      /* Move the statement from the middle block.  */
      gsi = gsi_last_bb (cond_bb);
      gsi_from = gsi_last_nondebug_bb (middle_bb);
      reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
                                                          SSA_OP_DEF));
      gsi_move_before (&gsi_from, &gsi);
    }

  /* Emit the statement to compute min/max.  */
  gimple_seq stmts = NULL;
  tree phi_result = PHI_RESULT (phi);
  result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
  /* Duplicate range info if we're the only things setting the target PHI.  */
  if (!gimple_seq_empty_p (stmts)
      && EDGE_COUNT (gimple_bb (phi)->preds) == 2
      && !POINTER_TYPE_P (TREE_TYPE (phi_result))
      && SSA_NAME_RANGE_INFO (phi_result))
    duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
                                   SSA_NAME_RANGE_INFO (phi_result));

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  return true;
}
/* Convert

   <bb 2>
   if (b_4(D) != 0)
     goto <bb 3>
   else
     goto <bb 4>

   <bb 3>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <0(2), _9(3)>

   Into
   <bb 2>
   _2 = (unsigned long) b_4(D);
   _9 = __builtin_popcountl (_2);
   OR
   _9 = __builtin_popcountl (b_4(D));

   <bb 4>
   c_12 = PHI <_9(2)>

   Similarly for __builtin_clz or __builtin_ctz if
   C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
   instead of 0 above it uses the value from that macro.  */
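
/* At the source level the effect is (a sketch):

     r = b ? __builtin_popcountl (b) : 0;

   becomes simply

     r = __builtin_popcountl (b);

   since popcount of zero is zero anyway; likewise for clz/ctz when the
   target defines their value at zero.  */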
static bool
cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
                                          basic_block middle_bb,
                                          edge e1, edge e2, gimple *phi,
                                          tree arg0, tree arg1)
{
  gimple *cond;
  gimple_stmt_iterator gsi, gsi_from;
  gimple *call;
  gimple *cast = NULL;
  tree lhs, arg;

  /* Check that
     _2 = (unsigned long) b_4(D);
     _9 = __builtin_popcountl (_2);
     OR
     _9 = __builtin_popcountl (b_4(D));
     are the only stmts in the middle_bb.  */

  gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
  if (gsi_end_p (gsi))
    return false;
  cast = gsi_stmt (gsi);
  gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    {
      call = gsi_stmt (gsi);
      gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
        return false;
    }
  else
    {
      call = cast;
      cast = NULL;
    }

  /* Check that we have a popcount/clz/ctz builtin.  */
  if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
    return false;

  arg = gimple_call_arg (call, 0);
  lhs = gimple_get_lhs (call);

  if (lhs == NULL_TREE)
    return false;

  combined_fn cfn = gimple_call_combined_fn (call);
  internal_fn ifn = IFN_LAST;
  int val = 0;
  switch (cfn)
    {
    CASE_CFN_POPCOUNT:
      break;
    CASE_CFN_CLZ:
      if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
        {
          tree type = TREE_TYPE (arg);
          if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
              && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
                                            val) == 2)
            {
              ifn = IFN_CLZ;
              break;
            }
        }
      return false;
    CASE_CFN_CTZ:
      if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
        {
          tree type = TREE_TYPE (arg);
          if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
              && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
                                            val) == 2)
            {
              ifn = IFN_CTZ;
              break;
            }
        }
      return false;
    default:
      return false;
    }

  if (cast)
    {
      /* We have a cast stmt feeding popcount/clz/ctz builtin.  */
      /* Check that we have a cast prior to that.  */
      if (gimple_code (cast) != GIMPLE_ASSIGN
          || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
        return false;
      /* Result of the cast stmt is the argument to the builtin.  */
      if (arg != gimple_assign_lhs (cast))
        return false;
      arg = gimple_assign_rhs1 (cast);
    }

  cond = last_stmt (cond_bb);

  /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
     builtin.  */
  if (gimple_code (cond) != GIMPLE_COND
      || (gimple_cond_code (cond) != NE_EXPR
          && gimple_cond_code (cond) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond))
      || arg != gimple_cond_lhs (cond))
    return false;

  /* Canonicalize.  */
  if ((e2->flags & EDGE_TRUE_VALUE
       && gimple_cond_code (cond) == NE_EXPR)
      || (e1->flags & EDGE_TRUE_VALUE
          && gimple_cond_code (cond) == EQ_EXPR))
    {
      std::swap (arg0, arg1);
      std::swap (e1, e2);
    }

  /* Check PHI arguments.  */
  if (lhs != arg0
      || TREE_CODE (arg1) != INTEGER_CST
      || wi::to_wide (arg1) != val)
    return false;

  /* And insert the popcount/clz/ctz builtin and cast stmt before the
     cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  if (cast)
    {
      gsi_from = gsi_for_stmt (cast);
      gsi_move_before (&gsi_from, &gsi);
      reset_flow_sensitive_info (gimple_get_lhs (cast));
    }
  gsi_from = gsi_for_stmt (call);
  if (ifn == IFN_LAST || gimple_call_internal_p (call))
    gsi_move_before (&gsi_from, &gsi);
  else
    {
      /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
         the latter is well defined at zero.  */
      call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
      gimple_call_set_lhs (call, lhs);
      gsi_insert_before (&gsi, call, GSI_SAME_STMT);
      gsi_remove (&gsi_from, true);
    }
  reset_flow_sensitive_info (lhs);

  /* Now update the PHI and remove unneeded bbs.  */
  replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
  return true;
}
/* The function abs_replacement does the main work of doing the absolute
   replacement.  Return true if the replacement is done.  Otherwise return
   false.
   bb is the basic block where the replacement is going to be done.  arg0
   is argument 0 from the phi.  Likewise for arg1.  */
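
/* Sketches (names made up):

     r = (a < 0) ? -a : a;    // becomes r = ABS_EXPR <a>
     r = (a > 0) ? -a : a;    // becomes r' = ABS_EXPR <a>;  r = -r';  */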
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple *phi, tree arg0, tree arg1)
{
  tree result;
  gassign *new_stmt;
  gimple *cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple *assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (arg1))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  /* If the code negates only iff positive then make sure to not
     introduce undefined behavior when negating or computing the absolute.
     ??? We could use range info if present to check for arg1 == INT_MIN.  */
  if (negate
      && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
          && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
    return false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    lhs = make_ssa_name (TREE_TYPE (result));
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
         added ABS_EXPR statement (which we know is the first statement
         in the block).  */
      new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
/* Auxiliary functions to determine the set of memory accesses which
   can't trap because they are preceded by accesses to the same memory
   portion.  We do that for MEM_REFs, so we only need to track
   the SSA_NAME of the pointer indirectly referenced.  The algorithm
   simply is a walk over all instructions in dominator order.  When
   we see a MEM_REF we determine if we've already seen the same
   ref anywhere up to the root of the dominator tree.  If we have, the
   current access can't trap.  If we don't see any dominating access,
   the current access might trap, but might also make later accesses
   non-trapping, so we remember it.  We need to be careful with loads
   or stores, for instance a load might not trap, while a store would,
   so if we see a dominating read access this doesn't mean that a later
   write access would not trap.  Hence we also need to differentiate the
   type of access(es) seen.

   ??? We currently are very conservative and assume that a load might
   trap even if a store doesn't (write-only memory).  This probably is
   overly conservative.

   As a special case, for !TREE_ADDRESSABLE automatic variables we can
   ignore whether an access is a load or a store, because the local
   stack is always writable.  */
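
/* For instance (a sketch):

     *p = 0;            // may trap, but dominates the access below
     if (cond)
       *p = v;          // same ref seen above, so this store can't trap

   Recording the first access is what later lets cselim make the second
   store unconditional.  */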
/* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
   basic block an *_REF through it was seen, which would constitute a
   no-trap region for same accesses.

   Size is needed to support 2 MEM_REFs of different types, like
   MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
   OEP_ADDRESS_OF.  */
struct ref_to_bb
{
  tree exp;
  HOST_WIDE_INT size;
  unsigned int phase;
  basic_block bb;
};
2139 /* Hashtable helpers. */
2141 struct refs_hasher
: free_ptr_hash
<ref_to_bb
>
2143 static inline hashval_t
hash (const ref_to_bb
*);
2144 static inline bool equal (const ref_to_bb
*, const ref_to_bb
*);
2147 /* Used for quick clearing of the hash-table when we see calls.
2148 Hash entries with phase < nt_call_phase are invalid. */
2149 static unsigned int nt_call_phase
;

/* The hash function.  */

inline hashval_t
refs_hasher::hash (const ref_to_bb *n)
{
  inchash::hash hstate;
  inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
  hstate.add_hwi (n->size);
  return hstate.end ();
}

/* The equality function of *P1 and *P2.  */

inline bool
refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
{
  return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
	 && n1->size == n2->size;
}

class nontrapping_dom_walker : public dom_walker
{
public:
  nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
    : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
  {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:

  /* We see the expression EXP in basic block BB.  If it's an interesting
     expression (a MEM_REF through an SSA_NAME) possibly insert the
     expression into the set NONTRAP or the hash table of seen expressions.
     STORE is true if this expression is on the LHS, otherwise it's on
     the RHS.  */
  void add_or_mark_expr (basic_block, tree, bool);

  hash_set<tree> *m_nontrapping;

  /* The hash table for remembering what we've seen.  */
  hash_table<refs_hasher> m_seen_refs;
};

/* Called by walk_dominator_tree, when entering the block BB.  */

edge
nontrapping_dom_walker::before_dom_children (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi;

  /* If we haven't seen all our predecessors, clear the hash-table.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((((size_t)e->src->aux) & 2) == 0)
      {
	nt_call_phase++;
	break;
      }

  /* Mark this BB as being on the path to dominator root and as visited.  */
  bb->aux = (void*)(1 | 2);

  /* And walk the statements in order.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
	  || (is_gimple_call (stmt)
	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
	nt_call_phase++;
      else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
	{
	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
	}
    }
  return NULL;
}

/* Called by walk_dominator_tree, when basic block BB is exited.  */

void
nontrapping_dom_walker::after_dom_children (basic_block bb)
{
  /* This BB isn't on the path to dominator root anymore.  */
  bb->aux = (void*)2;
}

/* We see the expression EXP in basic block BB.  If it's an interesting
   expression of:
     1) MEM_REF
     2) ARRAY_REF
     3) COMPONENT_REF
   possibly insert the expression into the set NONTRAP or the hash table
   of seen expressions.  STORE is true if this expression is on the LHS,
   otherwise it's on the RHS.  */
void
nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
  HOST_WIDE_INT size;

  if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
       || TREE_CODE (exp) == COMPONENT_REF)
      && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
    {
      struct ref_to_bb map;
      ref_to_bb **slot;
      struct ref_to_bb *r2bb;
      basic_block found_bb = 0;

      if (!store)
	{
	  tree base = get_base_address (exp);
	  /* Only record a LOAD of a local variable without address-taken, as
	     the local stack is always writable.  This allows cselim on a STORE
	     with a dominating LOAD.  */
	  if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
	    return;
	}

      /* Try to find the last seen *_REF, which can trap.  */
      map.exp = exp;
      map.size = size;
      slot = m_seen_refs.find_slot (&map, INSERT);
      r2bb = *slot;
      if (r2bb && r2bb->phase >= nt_call_phase)
	found_bb = r2bb->bb;

      /* If we've found a trapping *_REF, _and_ it dominates EXP
	 (it's in a basic block on the path from us to the dominator root)
	 then we can't trap.  */
      if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
	{
	  m_nontrapping->add (exp);
	}
      else
	{
	  /* EXP might trap, so insert it into the hash table.  */
	  if (r2bb)
	    {
	      r2bb->phase = nt_call_phase;
	      r2bb->bb = bb;
	    }
	  else
	    {
	      r2bb = XNEW (struct ref_to_bb);
	      r2bb->phase = nt_call_phase;
	      r2bb->bb = bb;
	      r2bb->exp = exp;
	      r2bb->size = size;
	      *slot = r2bb;
	    }
	}
    }
}
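
/* For illustration (informal): given a local aggregate 'struct S s;'
   whose address is never taken, a read such as

     tmp_1 = s.f;

   is recorded even though it is only a load, because the local stack is
   always writable; a conditional store to s.f in a dominated block then
   ends up in the non-trapping set and may be sunk.  A load through an
   arbitrary pointer is not recorded, since readable memory is not
   necessarily writable.  */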

/* This is the entry point of gathering non-trapping memory accesses.
   It will do a dominator walk over the whole function, and it will
   make use of the bb->aux pointers.  It returns a set of trees
   (the MEM_REFs themselves) which can't trap.  */
static hash_set<tree> *
get_non_trapping (void)
{
  nt_call_phase = 0;
  hash_set<tree> *nontrap = new hash_set<tree>;
  /* We're going to do a dominator walk, so ensure that we have
     dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
    .walk (cfun->cfg->x_entry_block_ptr);

  clear_aux_for_blocks ();
  return nontrap;
}

/* Do the main work of conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
   MIDDLE_BB:
     something
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that MIDDLE_BB contains only one store, that that store
   doesn't trap (not via NOTRAP, but via checking if an access to the same
   memory location dominates us, or the store is to a local addressable
   object) and that the store has a "simple" RHS.  */
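
/* For example (a source-level sketch of the transformation):

     if (cond)                becomes      tmp = cond ? v : *p;
       *p = v;                             *p = tmp;

   The store becomes unconditional, which is only valid when *p is known
   not to trap, e.g. because of a dominating access to the same location
   or because *p is a local variable.  */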

static bool
cond_store_replacement (basic_block middle_bb, basic_block join_bb,
			edge e0, edge e1, hash_set<tree> *nontrap)
{
  gimple *assign = last_and_only_stmt (middle_bb);
  tree lhs, rhs, name, name2;
  gphi *newphi;
  gimple *new_stmt;
  gimple_stmt_iterator gsi;
  location_t locus;

  /* Check if middle_bb contains only one store.  */
  if (!assign
      || !gimple_assign_single_p (assign)
      || gimple_has_volatile_ops (assign))
    return false;

  /* And no PHI nodes so all uses in the single stmt are also
     available where we insert to.  */
  if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
    return false;

  locus = gimple_location (assign);
  lhs = gimple_assign_lhs (assign);
  rhs = gimple_assign_rhs1 (assign);
  if ((TREE_CODE (lhs) != MEM_REF
       && TREE_CODE (lhs) != ARRAY_REF
       && TREE_CODE (lhs) != COMPONENT_REF)
      || !is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Prove that we can move the store down.  We could also check
     TREE_THIS_NOTRAP here, but in that case we also could move stores,
     whose value is not available readily, which we want to avoid.  */
  if (!nontrap->contains (lhs))
    {
      /* If LHS is an access to a local variable without address-taken
	 (or when we allow data races) and known not to trap, we could
	 always safely move down the store.  */
      tree base = get_base_address (lhs);
      if (!auto_var_p (base)
	  || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
	  || tree_could_trap_p (lhs))
	return false;
    }

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the single store.  */
  gsi = gsi_for_stmt (assign);
  unlink_stmt_vdef (assign);
  gsi_remove (&gsi, true);
  release_defs (assign);

  /* Make both store and load use alias-set zero as we have to
     deal with the case of the store being a conditional change
     of the dynamic type.  */
  lhs = unshare_expr (lhs);
  tree *basep = &lhs;
  while (handled_component_p (*basep))
    basep = &TREE_OPERAND (*basep, 0);
  if (TREE_CODE (*basep) == MEM_REF
      || TREE_CODE (*basep) == TARGET_MEM_REF)
    TREE_OPERAND (*basep, 1)
      = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
  else
    *basep = build2 (MEM_REF, TREE_TYPE (*basep),
		     build_fold_addr_expr (*basep),
		     build_zero_cst (ptr_type_node));

  /* 2) Insert a load from the memory of the store to the temporary
        on the edge which did not contain the store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  new_stmt = gimple_build_assign (name, lhs);
  gimple_set_location (new_stmt, locus);
  lhs = unshare_expr (lhs);
  /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
     warnings.  */
  TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
  gsi_insert_on_edge (e1, new_stmt);

  /* 3) Create a PHI node at the join block, with one argument
	holding the old RHS, and the other holding the temporary
	where we stored the old memory contents.  */
  name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name2, join_bb);
  add_phi_arg (newphi, rhs, e0, locus);
  add_phi_arg (newphi, name, e1, locus);

  lhs = unshare_expr (lhs);
  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 4) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nConditional store replacement happened!");
      fprintf (dump_file, "\nReplaced the store with a load.");
      fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
      print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
    }

  return true;
}

/* Do the main work of conditional store replacement.  */

static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
				  basic_block join_bb, gimple *then_assign,
				  gimple *else_assign)
{
  tree lhs_base, lhs, then_rhs, else_rhs, name;
  location_t then_locus, else_locus;
  gimple_stmt_iterator gsi;
  gphi *newphi;
  gimple *new_stmt;

  if (then_assign == NULL
      || !gimple_assign_single_p (then_assign)
      || gimple_clobber_p (then_assign)
      || gimple_has_volatile_ops (then_assign)
      || else_assign == NULL
      || !gimple_assign_single_p (else_assign)
      || gimple_clobber_p (else_assign)
      || gimple_has_volatile_ops (else_assign))
    return false;

  lhs = gimple_assign_lhs (then_assign);
  if (!is_gimple_reg_type (TREE_TYPE (lhs))
      || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
    return false;

  lhs_base = get_base_address (lhs);
  if (lhs_base == NULL_TREE
      || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
    return false;

  then_rhs = gimple_assign_rhs1 (then_assign);
  else_rhs = gimple_assign_rhs1 (else_assign);
  then_locus = gimple_location (then_assign);
  else_locus = gimple_location (else_assign);

  /* Now we've checked the constraints, so do the transformation:
     1) Remove the stores.  */
  gsi = gsi_for_stmt (then_assign);
  unlink_stmt_vdef (then_assign);
  gsi_remove (&gsi, true);
  release_defs (then_assign);

  gsi = gsi_for_stmt (else_assign);
  unlink_stmt_vdef (else_assign);
  gsi_remove (&gsi, true);
  release_defs (else_assign);

  /* 2) Create a PHI node at the join block, with one argument
	holding the RHS of the then-branch store and the other
	holding the RHS of the else-branch store.  */
  name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
  newphi = create_phi_node (name, join_bb);
  add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
  add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);

  new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));

  /* 3) Insert that PHI node.  */
  gsi = gsi_after_labels (join_bb);
  if (gsi_end_p (gsi))
    {
      gsi = gsi_last_bb (join_bb);
      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }
  else
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  return true;
}

/* Return the single store in BB with VDEF or NULL if there are
   other stores in the BB or loads following the store.  */
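
/* For instance (informal): in a block like

     *p_1 = a_2;
     tmp_3 = *q_4;

   the store is not trailing: the following load uses its VDEF in the
   same block, so NULL is returned and no sinking is attempted.  */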

static gimple *
single_trailing_store_in_bb (basic_block bb, tree vdef)
{
  if (SSA_NAME_IS_DEFAULT_DEF (vdef))
    return NULL;
  gimple *store = SSA_NAME_DEF_STMT (vdef);
  if (gimple_bb (store) != bb
      || gimple_code (store) == GIMPLE_PHI)
    return NULL;

  /* Verify there is no other store in this BB.  */
  if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
      && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
      && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
    return NULL;

  /* Verify there is no load or store after the store.  */
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
    if (USE_STMT (use_p) != store
	&& gimple_bb (USE_STMT (use_p)) == bb)
      return NULL;

  return store;
}

/* Conditional store replacement.  We already know
   that the recognized pattern looks like so:

   split:
     if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
   THEN_BB:
     ...
     X = Y;
     ...
     goto JOIN_BB;
   ELSE_BB:
     ...
     X = Z;
     ...
     fallthrough (edge E0)
   JOIN_BB:
     some more

   We check that it is safe to sink the store to JOIN_BB by verifying that
   there are no read-after-write or write-after-write dependencies in
   THEN_BB and ELSE_BB.  */
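
/* For example (a source-level sketch):

     if (cond)                becomes      tmp = cond ? a : b;
       *p = a;                             *p = tmp;
     else
       *p = b;

   Both branches store to the same location, so the sunk store is
   unconditional and introduces neither a trap nor a data race.  */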

static bool
cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
				basic_block join_bb)
{
  vec<data_reference_p> then_datarefs, else_datarefs;
  vec<ddr_p> then_ddrs, else_ddrs;
  gimple *then_store, *else_store;
  bool found, ok = false, res;
  struct data_dependence_relation *ddr;
  data_reference_p then_dr, else_dr;
  int i, j;
  tree then_lhs, else_lhs;
  basic_block blocks[3];

  /* Handle the case with single store in THEN_BB and ELSE_BB.  That is
     cheap enough to always handle as it allows us to elide dependence
     checking.  */
  gphi *vphi = NULL;
  for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
       gsi_next (&si))
    if (virtual_operand_p (gimple_phi_result (si.phi ())))
      {
	vphi = si.phi ();
	break;
      }
  if (!vphi)
    return false;
  tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
  tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
  gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
  if (then_assign)
    {
      gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
      if (else_assign)
	return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
						 then_assign, else_assign);
    }

  /* If either vectorization or if-conversion is disabled then do
     not sink any stores.  */
  if (param_max_stores_to_sink == 0
      || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
      || !flag_tree_loop_if_convert)
    return false;

  /* Find data references.  */
  then_datarefs.create (1);
  else_datarefs.create (1);
  if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
       == chrec_dont_know)
      || !then_datarefs.length ()
      || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
	  == chrec_dont_know)
      || !else_datarefs.length ())
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Find pairs of stores with equal LHS.  */
  auto_vec<gimple *, 1> then_stores, else_stores;
  FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
    {
      if (DR_IS_READ (then_dr))
	continue;

      then_store = DR_STMT (then_dr);
      then_lhs = gimple_get_lhs (then_store);
      if (then_lhs == NULL_TREE)
	continue;
      found = false;

      FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
	{
	  if (DR_IS_READ (else_dr))
	    continue;

	  else_store = DR_STMT (else_dr);
	  else_lhs = gimple_get_lhs (else_store);
	  if (else_lhs == NULL_TREE)
	    continue;

	  if (operand_equal_p (then_lhs, else_lhs, 0))
	    {
	      found = true;
	      break;
	    }
	}

      if (!found)
	continue;

      then_stores.safe_push (then_store);
      else_stores.safe_push (else_store);
    }

  /* No pairs of stores found.  */
  if (!then_stores.length ()
      || then_stores.length () > (unsigned) param_max_stores_to_sink)
    {
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }

  /* Compute and check data dependencies in both basic blocks.  */
  then_ddrs.create (1);
  else_ddrs.create (1);
  if (!compute_all_dependences (then_datarefs, &then_ddrs,
				vNULL, false)
      || !compute_all_dependences (else_datarefs, &else_ddrs,
				   vNULL, false))
    {
      free_dependence_relations (then_ddrs);
      free_dependence_relations (else_ddrs);
      free_data_refs (then_datarefs);
      free_data_refs (else_datarefs);
      return false;
    }
  blocks[0] = then_bb;
  blocks[1] = else_bb;
  blocks[2] = join_bb;
  renumber_gimple_stmt_uids_in_blocks (blocks, 3);

  /* Check that there are no read-after-write or write-after-write dependencies
     in THEN_BB.  */
  FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Check that there are no read-after-write or write-after-write dependencies
     in ELSE_BB.  */
  FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
    {
      struct data_reference *dra = DDR_A (ddr);
      struct data_reference *drb = DDR_B (ddr);

      if (DDR_ARE_DEPENDENT (ddr) != chrec_known
	  && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
	       && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
	      || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
		  && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
	      || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
	{
	  free_dependence_relations (then_ddrs);
	  free_dependence_relations (else_ddrs);
	  free_data_refs (then_datarefs);
	  free_data_refs (else_datarefs);
	  return false;
	}
    }

  /* Sink stores with same LHS.  */
  FOR_EACH_VEC_ELT (then_stores, i, then_store)
    {
      else_store = else_stores[i];
      res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
					      then_store, else_store);
      ok = ok || res;
    }

  free_dependence_relations (then_ddrs);
  free_dependence_relations (else_ddrs);
  free_data_refs (then_datarefs);
  free_data_refs (else_datarefs);

  return ok;
}

/* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */

static bool
local_mem_dependence (gimple *stmt, basic_block bb)
{
  tree vuse = gimple_vuse (stmt);
  gimple *def;

  if (!vuse)
    return false;

  def = SSA_NAME_DEF_STMT (vuse);
  return (def && gimple_bb (def) == bb);
}

/* Given a "diamond" control-flow pattern where BB0 tests a condition,
   BB1 and BB2 are "then" and "else" blocks dependent on this test,
   and BB3 rejoins control flow following BB1 and BB2, look for
   opportunities to hoist loads as follows.  If BB3 contains a PHI of
   two loads, one each occurring in BB1 and BB2, and the loads are
   provably of adjacent fields in the same structure, then move both
   loads into BB0.  Of course this can only be done if there are no
   dependencies preventing such motion.

   One of the hoisted loads will always be speculative, so the
   transformation is currently conservative:

    - The fields must be strictly adjacent.
    - The two fields must occupy a single memory block that is
      guaranteed to not cross a page boundary.

   The last is difficult to prove, as such memory blocks should be
   aligned on the minimum of the stack alignment boundary and the
   alignment guaranteed by heap allocation interfaces.  Thus we rely
   on a parameter for the alignment value.

   Provided a good value is used for the last case, the first
   restriction could possibly be relaxed.  */
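
/* For instance (informal): given

     struct S { int f1; int f2; int f3; } *s;

   a PHI of loads of s->f1 and s->f2 is a candidate, because the fields
   are strictly adjacent, while a PHI of s->f1 and s->f3 is not, because
   the DECL_CHAIN walk below finds f2 between them.  */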

static void
hoist_adjacent_loads (basic_block bb0, basic_block bb1,
		      basic_block bb2, basic_block bb3)
{
  int param_align = param_l1_cache_line_size;
  unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
  gphi_iterator gsi;

  /* Walk the phis in bb3 looking for an opportunity.  We are looking
     for phis of two SSA names, one each of which is defined in bb1 and
     bb2.  */
  for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi_stmt = gsi.phi ();
      gimple *def1, *def2;
      tree arg1, arg2, ref1, ref2, field1, field2;
      tree tree_offset1, tree_offset2, tree_size2, next;
      int offset1, offset2, size2;
      unsigned align1;
      gimple_stmt_iterator gsi2;
      basic_block bb_for_def1, bb_for_def2;

      if (gimple_phi_num_args (phi_stmt) != 2
	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
	continue;

      arg1 = gimple_phi_arg_def (phi_stmt, 0);
      arg2 = gimple_phi_arg_def (phi_stmt, 1);

      if (TREE_CODE (arg1) != SSA_NAME
	  || TREE_CODE (arg2) != SSA_NAME
	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
	continue;

      def1 = SSA_NAME_DEF_STMT (arg1);
      def2 = SSA_NAME_DEF_STMT (arg2);

      if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
	continue;

      /* Check the mode of the arguments to be sure a conditional move
	 can be generated for it.  */
      if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
	  == CODE_FOR_nothing)
	continue;

      /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
      if (!gimple_assign_single_p (def1)
	  || !gimple_assign_single_p (def2)
	  || gimple_has_volatile_ops (def1)
	  || gimple_has_volatile_ops (def2))
	continue;

      ref1 = gimple_assign_rhs1 (def1);
      ref2 = gimple_assign_rhs1 (def2);

      if (TREE_CODE (ref1) != COMPONENT_REF
	  || TREE_CODE (ref2) != COMPONENT_REF)
	continue;

      /* The zeroth operand of the two component references must be
	 identical.  It is not sufficient to compare get_base_address of
	 the two references, because this could allow for different
	 elements of the same array in the two trees.  It is not safe to
	 assume that the existence of one array element implies the
	 existence of a different one.  */
      if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
	continue;

      field1 = TREE_OPERAND (ref1, 1);
      field2 = TREE_OPERAND (ref2, 1);

      /* Check for field adjacency, and ensure field1 comes first.  */
      for (next = DECL_CHAIN (field1);
	   next && TREE_CODE (next) != FIELD_DECL;
	   next = DECL_CHAIN (next))
	;

      if (next != field2)
	{
	  for (next = DECL_CHAIN (field2);
	       next && TREE_CODE (next) != FIELD_DECL;
	       next = DECL_CHAIN (next))
	    ;

	  if (next != field1)
	    continue;

	  std::swap (field1, field2);
	  std::swap (def1, def2);
	}

      bb_for_def1 = gimple_bb (def1);
      bb_for_def2 = gimple_bb (def2);

      /* Check for proper alignment of the first field.  */
      tree_offset1 = bit_position (field1);
      tree_offset2 = bit_position (field2);
      tree_size2 = DECL_SIZE (field2);

      if (!tree_fits_uhwi_p (tree_offset1)
	  || !tree_fits_uhwi_p (tree_offset2)
	  || !tree_fits_uhwi_p (tree_size2))
	continue;

      offset1 = tree_to_uhwi (tree_offset1);
      offset2 = tree_to_uhwi (tree_offset2);
      size2 = tree_to_uhwi (tree_size2);
      align1 = DECL_ALIGN (field1) % param_align_bits;

      if (offset1 % BITS_PER_UNIT != 0)
	continue;

      /* For profitability, the two field references should fit within
	 a single cache line.  */
      if (align1 + offset2 - offset1 + size2 > param_align_bits)
	continue;
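
      /* E.g. (informal, assuming a 64-byte, i.e. 512-bit, cache line):
	 two adjacent 32-bit fields at bit offsets 0 and 32 with
	 align1 == 0 give 0 + 32 - 0 + 32 = 64 <= 512, so the pair fits
	 in one line and the hoist is considered profitable.  */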

      /* The two expressions cannot be dependent upon vdefs defined
	 in bb1/bb2.  */
      if (local_mem_dependence (def1, bb_for_def1)
	  || local_mem_dependence (def2, bb_for_def2))
	continue;

      /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
	 bb0.  We hoist the first one first so that a cache miss is handled
	 efficiently regardless of hardware cache-fill policy.  */
      gsi2 = gsi_for_stmt (def1);
      gsi_move_to_bb_end (&gsi2, bb0);
      gsi2 = gsi_for_stmt (def2);
      gsi_move_to_bb_end (&gsi2, bb0);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\nHoisting adjacent loads from %d and %d into %d: \n",
		   bb_for_def1->index, bb_for_def2->index, bb0->index);
	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
	}
    }
}

/* Determine whether we should attempt to hoist adjacent loads out of
   diamond patterns in pass_phiopt.  Always hoist loads if
   -fhoist-adjacent-loads is specified and the target machine has
   both a conditional move instruction and a defined cache line size.  */

static bool
gate_hoist_loads (void)
{
  return (flag_hoist_adjacent_loads == 1
	  && param_l1_cache_line_size
	  && HAVE_conditional_move);
}

/* This pass tries to replace an if-then-else block with an
   assignment.  We have four kinds of transformations.  Some of these
   transformations are also performed by the ifcvt RTL optimizer.

   Conditional Replacement
   -----------------------

   This transformation, implemented in conditional_replacement,
   replaces

     bb0:
      if (cond) goto bb2; else goto bb1;
     bb1:
     bb2:
      x = PHI <0 (bb1), 1 (bb0), ...>;

   with

     bb0:
      x' = cond;
      goto bb2;
     bb2:
      x = PHI <x' (bb0), ...>;

   We remove bb1 as it becomes unreachable.  This occurs often due to
   gimplification of conditionals.

   Value Replacement
   -----------------

   This transformation, implemented in value_replacement, replaces

     bb0:
       if (a != b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <a (bb1), b (bb0), ...>;

   with

     bb0:
     bb2:
       x = PHI <b (bb0), ...>;

   This opportunity can sometimes occur as a result of other
   optimizations.


   Another case caught by value replacement looks like this:

     bb0:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       if (t3 != 0) goto bb1; else goto bb2;
     bb1:
     bb2:
       x = PHI (CONST, a)

   Gets replaced with:
     bb0:
     bb2:
       t1 = a == CONST;
       t2 = b > c;
       t3 = t1 & t2;
       x = a;

   ABS Replacement
   ---------------

   This transformation, implemented in abs_replacement, replaces

     bb0:
       if (a >= 0) goto bb2; else goto bb1;
     bb1:
       x = -a;
     bb2:
       x = PHI <x (bb1), a (bb0), ...>;

   with

     bb0:
       x' = ABS_EXPR< a >;
     bb2:
       x = PHI <x' (bb0), ...>;

   MIN/MAX Replacement
   -------------------

   This transformation, implemented in minmax_replacement, replaces

     bb0:
       if (a <= b) goto bb2; else goto bb1;
     bb1:
     bb2:
       x = PHI <b (bb1), a (bb0), ...>;

   with

     bb0:
       x' = MIN_EXPR (a, b)
     bb2:
       x = PHI <x' (bb0), ...>;

   A similar transformation is done for MAX_EXPR.


   This pass also performs a fifth transformation of a slightly different
   flavor.

   Factor conversion in COND_EXPR
   ------------------------------

   This transformation factors the conversion out of COND_EXPR with
   factor_out_conditional_conversion.

   For example:
   if (a <= CST) goto <bb 3>; else goto <bb 4>;
   <bb 3>:
   tmp = (int) a;
   <bb 4>:
   tmp = PHI <tmp, CST>

   Gets replaced with:
   if (a <= CST) goto <bb 3>; else goto <bb 4>;
   <bb 3>:
   <bb 4>:
   tmp = PHI <a, CST>
   tmp2 = (int) tmp

   Adjacent Load Hoisting
   ----------------------

   This transformation replaces

     bb0:
       if (...) goto bb2; else goto bb1;
     bb1:
       x1 = (<expr>).field1;
       goto bb3;
     bb2:
       x2 = (<expr>).field2;
     bb3:
       # x = PHI <x1, x2>;

   with

     bb0:
       x1 = (<expr>).field1;
       x2 = (<expr>).field2;
       if (...) goto bb2; else goto bb1;
     bb1:
       goto bb3;
     bb2:
     bb3:
       # x = PHI <x1, x2>;

   The purpose of this transformation is to enable generation of conditional
   move instructions such as Intel CMOV or PowerPC ISEL.  Because one of
   the loads is speculative, the transformation is restricted to very
   specific cases to avoid introducing a page fault.  We are looking for
   the common idiom:

     if (...)
       x = y->left;
     else
       x = y->right;

   where left and right are typically adjacent pointers in a tree structure.  */

namespace {

const pass_data pass_data_phiopt =
{
  GIMPLE_PASS, /* type */
  "phiopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_phiopt : public gimple_opt_pass
{
public:
  pass_phiopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phiopt (m_ctxt); }
  void set_pass_param (unsigned n, bool param)
    {
      gcc_assert (n == 0);
      early_p = param;
    }
  virtual bool gate (function *) { return flag_ssa_phiopt; }
  virtual unsigned int execute (function *)
    {
      return tree_ssa_phiopt_worker (false,
				     !early_p ? gate_hoist_loads () : false,
				     early_p);
    }

private:
  bool early_p;
}; // class pass_phiopt

} // anon namespace

gimple_opt_pass *
make_pass_phiopt (gcc::context *ctxt)
{
  return new pass_phiopt (ctxt);
}

namespace {

const pass_data pass_data_cselim =
{
  GIMPLE_PASS, /* type */
  "cselim", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHIOPT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cselim : public gimple_opt_pass
{
public:
  pass_cselim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cselim, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_cselim; }
  virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }

}; // class pass_cselim

} // anon namespace

gimple_opt_pass *
make_pass_cselim (gcc::context *ctxt)
{
  return new pass_cselim (ctxt);
}