/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2015 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited again.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.
   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.
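	For instance, if a_9 has an UNDEFINED value and a_10 is
	CONSTANT 100, the meet of PHI <a_9, a_10> is CONSTANT 100:
	the undefined argument is simply assumed to also take the
	value 100.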
   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "real.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
#include "wide-int-print.h"
#include "builtins.h"
#include "tree-chkp.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct ccp_prop_value_t {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For X
       with a CONSTANT lattice value X & ~mask == value & ~mask.  The
       zero bits in the mask cover constant values.  The ones mean no
       information.  */
    widest_int mask;
};
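/* As an illustration (values chosen for this example only): an entry
   with value == 0x10 and mask == 0xf describes every X with
   X & ~0xf == 0x10, i.e. any X in [0x10, 0x1f].  Bit 4 and all higher
   bits are known; the low four bits carry no information.  */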
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Extend NONZERO_BITS to a full mask, with the upper bits being set.  */

static widest_int
extend_mask (const wide_int &nonzero_bits)
{
  return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
	  | widest_int::from (nonzero_bits, UNSIGNED));
}
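/* Illustration: with an 8-bit NONZERO_BITS of 0x0f the result has the
   low four bits set (those bits may be nonzero and are therefore
   unknown) and all bits from bit 8 upwards set by the wi::mask term;
   only bits 4-7, which get_nonzero_bits proved to be zero, end up
   marked as known in the resulting mask.  */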
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	  if (flag_tree_bit_ccp)
	    {
	      wide_int nonzero_bits = get_nonzero_bits (var);
	      if (nonzero_bits != -1)
		{
		  val.lattice_val = CONSTANT;
		  val.value = build_zero_cst (TREE_TYPE (var));
		  val.mask = extend_mask (nonzero_bits);
		}
	    }
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = -1;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline ccp_prop_value_t *
get_value (tree var)
{
  ccp_prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  ccp_prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = -1;
}
/* For integer constants, make sure to drop TREE_OVERFLOW.  */

static void
canonicalize_value (ccp_prop_value_t *val)
{
  if (val->lattice_val != CONSTANT)
    return;

  if (TREE_OVERFLOW_P (val->value))
    val->value = drop_tree_overflow (val->value);
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));

  /* Otherwise constant values have to agree.  */
  if (operand_equal_p (old_val.value, new_val.value, 0))
    return true;

  /* At least the kinds and types should agree now.  */
  if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
      || !types_compatible_p (TREE_TYPE (old_val.value),
			      TREE_TYPE (new_val.value)))
    return false;

  /* For floats and !HONOR_NANS allow transitions from (partial) NaN
     to non-NaN.  */
  tree type = TREE_TYPE (new_val.value);
  if (SCALAR_FLOAT_TYPE_P (type)
      && !HONOR_NANS (type))
    {
      if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
	return true;
    }
  else if (VECTOR_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
	if (!REAL_VALUE_ISNAN
	       (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
	    && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
				 VECTOR_CST_ELT (new_val.value, i), 0))
	  return false;
      return true;
    }
  else if (COMPLEX_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
	  && !operand_equal_p (TREE_REALPART (old_val.value),
			       TREE_REALPART (new_val.value), 0))
	return false;
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
			       TREE_IMAGPART (new_val.value), 0))
	return false;
      return true;
    }
  return false;
}
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, ccp_prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ???  This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      widest_int diff = (wi::to_widest (new_val.value)
			 ^ wi::to_widest (old_val->value));
      new_val.mask = new_val.mask | old_val->mask | diff;
    }

  gcc_checking_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || new_val.mask != old_val->mask)))
    {
      /* ???  We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
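/* Illustration of the mask widening above: moving from CONSTANT 4
   (mask 0) to CONSTANT 6 (mask 0) computes diff == 4 ^ 6 == 2, so the
   recorded value becomes a partial constant with mask 2: bit 1 is no
   longer known, while all other bits stay known.  This keeps the
   per-bit lattice monotone across iterations.  */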
static ccp_prop_value_t get_value_for_expr (tree, bool);
static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
			       tree, const widest_int &, const widest_int &,
			       tree, const widest_int &, const widest_int &);
/* Return a widest_int that can be used for bitwise simplifications
   from VAL.  */

static widest_int
value_to_wide_int (ccp_prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return wi::to_widest (val.value);

  return 0;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  ccp_prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
	      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
	      : -1).and_not (align / BITS_PER_UNIT - 1);
  val.lattice_val
    = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
  else
    val.value = NULL_TREE;

  return val;
}
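/* Illustration: for an address known to be 8-byte aligned with a
   4-byte offset, get_pointer_alignment_1 reports align == 64 (bits)
   and bitpos == 32, so the low three bits are cleared from the mask
   (they are known) and the value records the misalignment of 4
   bytes.  */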
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static ccp_prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  ccp_prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = 0;
      canonicalize_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = -1;
      val.value = NULL_TREE;
    }
  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */
static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  bool has_nsa_operand;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  has_nsa_operand = false;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      ccp_prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;

      if (SSA_NAME_IS_DEFAULT_DEF (use)
	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
	has_nsa_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  if (has_undefined_operand
      && code == GIMPLE_CALL
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
	/* These 3 builtins use the first argument just as a magic
	   way how to find out a decl uid.  */
      case IFN_GOMP_SIMD_LANE:
      case IFN_GOMP_SIMD_VF:
      case IFN_GOMP_SIMD_LAST_LANE:
	has_undefined_operand = false;
	break;
      default:
	break;
      }

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  break;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  Also we can combine the stmt with definitions from
     operands whose definitions are not simulated again.  */
  if (has_constant_operand
      || has_nsa_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call or a call to function with
     assume_aligned/alloc_align attribute, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl, fntype = gimple_call_fntype (stmt);
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)
	      && !lookup_attribute ("assume_aligned",
				    TYPE_ATTRIBUTES (fntype))
	      && !lookup_attribute ("alloc_align",
				    TYPE_ATTRIBUTES (fntype))))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  n_const_val = num_ssa_names;
  const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gphi_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gphi *phi = i.phi ();

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}

/* Debug count support.  Reset the values of ssa names
   VARYING when the total number ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = -1;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice or nonzero bits from partially
     constant integers.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      ccp_prop_value_t *val;
      unsigned int tem, align;

      if (!name
	  || (!POINTER_TYPE_P (TREE_TYPE (name))
	      && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
		  /* Don't record nonzero bits before IPA to avoid
		     using too much memory.  */
		  || first_pass_instance)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name)))
	{
	  /* Trailing mask bits specify the alignment, trailing value
	     bits the misalignment.  */
	  tem = val->mask.to_uhwi ();
	  align = (tem & -tem);
	  if (align > 1)
	    set_ptr_info_alignment (get_ptr_info (name), align,
				    (TREE_INT_CST_LOW (val->value)
				     & (align - 1)));
	}
      else
	{
	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
	  wide_int nonzero_bits = wide_int::from (val->mask, precision,
						  UNSIGNED) | val->value;
	  nonzero_bits &= get_nonzero_bits (name);
	  set_nonzero_bits (name, nonzero_bits);
	}
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
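/* A note on the alignment derivation above: TEM & -TEM isolates the
   lowest set bit of the mask.  For instance, a mask whose low bits
   are 0xf8 gives 0xf8 & -0xf8 == 8, so the pointer is 8-byte aligned
   and the low three value bits hold its misalignment.  */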
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = (val1->mask | val2->mask
		    | (wi::to_widest (val1->value)
		       ^ wi::to_widest (val2->value)));
      if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      ccp_prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
}
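/* Illustration of the INTEGER_CST meet above: CONSTANT 3 (mask 0) meet
   CONSTANT 100 (mask 0) yields a combined mask of 3 ^ 100 == 0x67, so
   the result is a partial constant in which only the bits where 3 and
   100 agree remain known; the value drops to VARYING only when no bits
   agree at all (all mask bits set).  */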
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gphi *phi)
{
  unsigned i;
  ccp_prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      new_val = *old_val;
      break;

    case UNDEFINED:
      new_val.lattice_val = UNDEFINED;
      new_val.value = NULL_TREE;
      break;

    default:
      gcc_unreachable ();
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}

/* Return the constant value for OP, but signal to not follow SSA
   edges if the definition may be simulated again.  */

static tree
valueize_op_1 (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      /* If the definition may be simulated again we cannot follow
	 this SSA edge as the SSA propagator does not necessarily
	 re-visit the use.  */
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && prop_simulate_again_p (def_stmt))
	return NULL_TREE;
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt,
					     valueize_op, valueize_op_1);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  widest_int *val, widest_int *mask,
		  tree rtype, const widest_int &rval, const widest_int &rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	widest_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm, type, 1, 0);
	break;
      }

    CASE_CONVERT:
      {
	signop sgn;

	/* First extend mask and value according to the original type.  */
	sgn = TYPE_SIGN (rtype);
	*mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
	*val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);

	/* Then extend mask and value according to the target type.  */
	sgn = TYPE_SIGN (type);
	*mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
	*val = wi::ext (*val, TYPE_PRECISION (type), sgn);
	break;
      }

    default:
      *mask = -1;
      break;
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   widest_int *val, widest_int *mask,
		   tree r1type, const widest_int &r1val,
		   const widest_int &r1mask, tree r2type,
		   const widest_int &r2val, const widest_int &r2mask)
{
  signop sgn = TYPE_SIGN (type);
  int width = TYPE_PRECISION (type);
  bool swap_p = false;

  /* Assume we'll get a constant result.  Use an initial non varying
     value, we fall back to varying in the end if necessary.  */
  *mask = -1;

  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = (r1mask | r2mask)
	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;
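    /* Illustration of the two formulas above: for x & y with
       x == 0xf0 (mask 0) and y completely unknown (mask -1), the
       result is value 0 with mask 0xf0 -- the bits where x is zero
       are known zero regardless of y, and only bits 4-7 still depend
       on y.  */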
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RROTATE_EXPR)
		    code = LROTATE_EXPR;
		  else
		    code = RROTATE_EXPR;
		}
	      if (code == RROTATE_EXPR)
		{
		  *mask = wi::rrotate (r1mask, shift, width);
		  *val = wi::rrotate (r1val, shift, width);
		}
	      else
		{
		  *mask = wi::lrotate (r1mask, shift, width);
		  *val = wi::lrotate (r1val, shift, width);
		}
	    }
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ???  We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RSHIFT_EXPR)
		    code = LSHIFT_EXPR;
		  else
		    code = RSHIFT_EXPR;
		}
	      if (code == RSHIFT_EXPR)
		{
		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
		}
	      else
		{
		  *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
		  *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
		}
	    }
	}
      break;
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
	lo = wi::ext (lo, width, sgn);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
	hi = wi::ext (hi, width, sgn);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = wi::ext (*mask, width, sgn);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }
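    /* Worked example of the carry trick above: adding 0x0c (mask 0)
       to a value known to be 0 or 1 (value 0, mask 1) gives
       lo == 0x0c and hi == 0x0d, so lo ^ hi == 1; only bit 0 of the
       sum is unknown and the result is 0x0c with mask 1.  */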
    case MINUS_EXPR:
      {
	widest_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = wi::ctz (r1val | r1mask);
	int r2tz = wi::ctz (r2val | r2mask);
	if (r1tz + r2tz >= width)
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
			     width, sgn);
	    *val = 0;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	widest_int m = r1mask | r2mask;
	if (r1val.and_not (m) != r2val.and_not (m))
	  {
	    *mask = 0;
	    *val = ((code == EQ_EXPR) ? 0 : 1);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }
    case GE_EXPR:
    case GT_EXPR:
      swap_p = true;
      code = swap_tree_comparison (code);
      /* Fall through.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;

	const widest_int &o1val = swap_p ? r2val : r1val;
	const widest_int &o1mask = swap_p ? r2mask : r1mask;
	const widest_int &o2val = swap_p ? r1val : r2val;
	const widest_int &o2mask = swap_p ? r1mask : r2mask;

	/* If the most significant bits are not known we know nothing.  */
	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	sgn = TYPE_SIGN (r1type);

	/* If we know the most significant bits we know the values
	   value ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
	minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
	if (maxmin < 0)  /* o1 is less than o2.  */
	  {
	    *mask = 0;
	    *val = 1;
	  }
	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = 0;
	    *val = (code == LE_EXPR ? 1 : 0);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    default:;
    }
}
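/* Illustration of the cross comparison in the LT_EXPR/LE_EXPR case
   above: comparing an operand with value 0 and mask 3 (some value in
   [0, 3]) against the constant 8 gives maxmin = cmp (3, 8) < 0, so the
   relation holds for every possible bit assignment and the comparison
   folds to 1 with mask 0.  */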
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static ccp_prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  ccp_prop_value_t rval = get_value_for_expr (rhs, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ???  Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static ccp_prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
  ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || wi::sext (r1val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || wi::sext (r2val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ???  Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
/* Return the propagation value for __builtin_assume_aligned
   and functions with assume_aligned or alloc_aligned attribute.
   For __builtin_assume_aligned, ATTR is NULL_TREE,
   for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
   is false, for alloc_aligned attribute ATTR is non-NULL and
   ALLOC_ALIGNED is true.  */

static ccp_prop_value_t
bit_value_assume_aligned (gimple stmt, tree attr, ccp_prop_value_t ptrval,
			  bool alloc_aligned)
{
  tree align, misalign = NULL_TREE, type;
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  ccp_prop_value_t alignval;
  widest_int value, mask;
  ccp_prop_value_t val;

  if (attr == NULL_TREE)
    {
      tree ptr = gimple_call_arg (stmt, 0);
      type = TREE_TYPE (ptr);
      ptrval = get_value_for_expr (ptr, true);
    }
  else
    {
      tree lhs = gimple_call_lhs (stmt);
      type = TREE_TYPE (lhs);
    }

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
  if (attr == NULL_TREE)
    {
      /* Get aligni and misaligni from __builtin_assume_aligned.  */
      align = gimple_call_arg (stmt, 1);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (gimple_call_num_args (stmt) > 2)
	{
	  misalign = gimple_call_arg (stmt, 2);
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  else
    {
      /* Get aligni and misaligni from assume_aligned or
	 alloc_align attributes.  */
      if (TREE_VALUE (attr) == NULL_TREE)
	return ptrval;
      attr = TREE_VALUE (attr);
      align = TREE_VALUE (attr);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (alloc_aligned)
	{
	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
	    return ptrval;
	  align = gimple_call_arg (stmt, aligni - 1);
	  if (!tree_fits_uhwi_p (align))
	    return ptrval;
	  aligni = tree_to_uhwi (align);
	}
      else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
	{
	  misalign = TREE_VALUE (TREE_CHAIN (attr));
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
    return ptrval;

  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_wide_int (ptrval), ptrval.mask,
		     type, value_to_wide_int (alignval), alignval.mask);
  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
      gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
      value |= misaligni;
      /* ???  Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
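/* Illustration: for q_2 = __builtin_assume_aligned (p_1, 16, 4) we get
   ALIGNI == 16 and MISALIGNI == 4, so the low four bits of the result
   become known (the mask is cleared by the BIT_AND with -16 and the
   value is OR-ed with the misalignment 4); whatever was already known
   about p_1 is kept in the remaining bits.  */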
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches. */

static ccp_prop_value_t
evaluate_stmt (gimple stmt)
{
  ccp_prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (as_a <gswitch *> (stmt));
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt)
	  || (gimple_assign_single_p (stmt)
	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
	    switch (get_gimple_rhs_class (subcode))
	      {
	      case GIMPLE_SINGLE_RHS:
		val = get_value_for_expr (rhs1, true);
		break;

	      case GIMPLE_UNARY_RHS:
		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
		break;

	      case GIMPLE_BINARY_RHS:
		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
				       gimple_assign_rhs2 (stmt));
		break;

	      default:;
	      }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
			   / BITS_PER_UNIT - 1);
	      break;

	    case BUILT_IN_ALLOCA:
	    case BUILT_IN_ALLOCA_WITH_ALIGN:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
		       : BIGGEST_ALIGNMENT);
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
	      break;

	    case BUILT_IN_ALIGNED_ALLOC:
	      {
		tree align = get_constant_value (gimple_call_arg (stmt, 0));
		if (align
		    && tree_fits_uhwi_p (align))
		  {
		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
		    if (aligni > 1
			/* align must be power-of-two */
			&& (aligni & (aligni - 1)) == 0)
		      {
			val.lattice_val = CONSTANT;
			val.value = build_int_cst (ptr_type_node, 0);
			val.mask = -aligni;
		      }
		  }
		break;
	      }

	    default:;
	    }
	}
      if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
	{
	  tree fntype = gimple_call_fntype (stmt);

	  if (fntype)
	    {
	      tree attrs = lookup_attribute ("assume_aligned",
					     TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, false);
	      attrs = lookup_attribute ("alloc_align",
					TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, true);
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (flag_tree_bit_ccp
      && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
	  || (!is_constant && likelyvalue != UNDEFINED))
      && gimple_get_lhs (stmt)
      && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
    {
      tree lhs = gimple_get_lhs (stmt);
      wide_int nonzero_bits = get_nonzero_bits (lhs);
      if (nonzero_bits != -1)
	{
	  if (!is_constant)
	    {
	      val.lattice_val = CONSTANT;
	      val.value = build_zero_cst (TREE_TYPE (lhs));
	      val.mask = extend_mask (nonzero_bits);
	      is_constant = true;
	    }
	  else
	    {
	      if (wi::bit_and_not (val.value, nonzero_bits) != 0)
		val.value = wide_int_to_tree (TREE_TYPE (lhs),
					      nonzero_bits & val.value);
	      if (nonzero_bits == 0)
		val.mask = 0;
	      else
		val.mask = val.mask & extend_mask (nonzero_bits);
	    }
	}
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
	 had UNDEFINED operands, then the result of the statement
	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
	{
	  val.lattice_val = likelyvalue;
	  val.mask = 0;
	}
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	}

      val.value = NULL_TREE;
    }

  return val;
}
typedef hash_table <pointer_hash <gimple_statement_base> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
				     gimple_htab **visited)
{
  gimple stmt;
  gassign *clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_constructor (TREE_TYPE (var),
				     NULL);
	TREE_THIS_VOLATILE (clobber) = 1;
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (!*visited)
	  *visited = new gimple_htab (10);

	slot = (*visited)->find_slot (stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else if (gimple_assign_ssa_name_copy_p (stmt))
      insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
					   visited);
    else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
      continue;
    else
      gcc_assert (is_gimple_debug (stmt));
}
/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	return;

      *i = gsi_last_bb (dom);
    }
}
/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator
   when a previous pass (such as DOM) duplicated it along multiple paths
   to a BB.  In that case the function gives up without inserting the
   clobbers.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple stmt;
  tree saved_val;
  gimple_htab *visited = NULL;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  delete visited;
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !tree_fits_uhwi_p (arg))
    return NULL_TREE;

  size = tree_to_uhwi (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
	bool singleton_p;
	unsigned uid;
	singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
	gcc_assert (singleton_p);
	SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
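/* As an illustrative sketch (hypothetical GIMPLE names, and assuming
   the size passes the threshold check above), this fold turns

     ptr_5 = __builtin_alloca_with_align (16, 64);

   into the address of a new fixed-size local, roughly

     char tmp[16] __attribute__((aligned (8)));
     ptr_5 = &tmp;

   after which the callers pair the now-dead stack save/restore with
   clobbers via insert_clobbers_for_var.  */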
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	ccp_prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || val.mask != 0)
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (cond_stmt);
	else
	  gimple_cond_make_true (cond_stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  ccp_prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}

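/* For instance, given the statements

     x_1 = 10;
     y_2 = x_1;

   visiting the first assignment sets x_1's lattice value to CONSTANT
   10; visiting the copy then propagates that same lattice value to
   y_2 directly, without re-evaluating the RHS.  */
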
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  ccp_prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || TREE_CODE (val.value) != INTEGER_CST)
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}

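/* For instance, if the predicate of

     if (x_1 > 0)

   evaluates to the constant 1, find_taken_edge returns the successor
   edge flagged EDGE_TRUE_VALUE, and only that edge needs to be fed to
   the propagation engine.  */
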
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
	/* If the statement is an assignment that produces a single
	   output value, evaluate its RHS to see if the lattice value of
	   its output has changed.  */
	return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
	/* A value-returning call also performs an assignment.  */
	if (gimple_call_lhs (stmt) != NULL_TREE)
	  return visit_assignment (stmt, output_p);
	break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
	/* If STMT is a conditional branch, see if we can determine
	   which branch will be taken.  */
	/* FIXME.  It appears that we should be able to optimize
	   computed GOTOs here as well.  */
	return visit_cond_stmt (stmt, taken_edge_p);

      default:
	break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      ccp_prop_value_t v = { VARYING, NULL_TREE, -1 };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}

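/* A statement that reaches the default path above is, e.g., an asm:

     __asm__ ("" : "=r" (x_3));

   CCP cannot evaluate what the asm computes, so x_3 and any virtual
   definitions of the asm are pessimistically marked VARYING.  */
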
/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    todo = (TODO_cleanup_cfg | TODO_update_ssa);
  free_dominance_info (CDI_DOMINATORS);
  return todo;
}

namespace {

const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_address_taken, /* todo_flags_finish */
};

class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_ccp (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_ccp != 0; }
  virtual unsigned int execute (function *) { return do_ssa_ccp (); }

}; // class pass_ccp

} // anon namespace

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}

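/* The pass manager creates the pass object through this factory
   function; because pass_ccp::clone returns a fresh instance, CCP can
   be (and is) scheduled several times in the pass pipeline (see
   passes.def).  */
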
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */
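
/* Such save/restore pairs typically come from the gimplifier, which
   wraps scopes declaring variable-length arrays (or calling alloca)
   in __builtin_stack_save/__builtin_stack_restore, e.g.

     void
     g (int n)
     {
       char buf[n];
       use_buffer (buf);
     }

   where use_buffer stands for an arbitrary call.  After inlining,
   several such restores can end up in one block, making all but the
   last one redundant.  */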

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }

 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}

/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */
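
/* Concretely, on such targets this rewrites

     __builtin_va_start (&ap, 0)   into   ap = __builtin_next_arg (0)
     __builtin_va_copy (&d, s)     into   d = s
     __builtin_va_end (&ap)        into   nothing (the call is deleted).  */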

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}

/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */
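
/* For example, given

     <bb 2>:
     if (x_1 != 0)
       goto <bb 3>;
     else
       goto <bb 4>;

     <bb 4>:
     __builtin_unreachable ();

   the edge into bb 4 is the FALSE edge, so the gcond in bb 2 is
   rewritten with gimple_cond_make_true and bb 4 can no longer be
   reached.  */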

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  if (flag_sanitize & SANITIZE_UNREACHABLE)
    return false;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	  update_stmt (cond_stmt);
	}
      else
	{
	  /* TODO: handle other cases, e.g. a switch statement.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}

/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */
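
/* For example, a __builtin_constant_p call that survives to this point
   is resolved to 0, __builtin_assume_aligned (p_1, 16) is replaced by
   its first argument p_1, and redundant __builtin_stack_restore calls
   are removed via optimize_stack_restore above.  */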

namespace {

const pass_data pass_data_fold_builtins =
{
  GIMPLE_PASS, /* type */
  "fab", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_fold_builtins : public gimple_opt_pass
{
public:
  pass_fold_builtins (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fold_builtins, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_fold_builtins

unsigned int
pass_fold_builtins::execute (function *fun)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
		 after the last GIMPLE DSE they aren't needed and might
		 unnecessarily keep the SSA_NAMEs live.  */
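	      /* E.g. an end-of-scope marker such as

		   MEM[(struct S *)ptr_7] ={v} {CLOBBER};

		 generates no code at this point and would only keep
		 ptr_7 artificially live.  */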
	      if (gimple_clobber_p (stmt))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  if (TREE_CODE (lhs) == MEM_REF
		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
		    {
		      unlink_stmt_vdef (stmt);
		      gsi_remove (&i, true);
		      release_defs (stmt);
		      continue;
		    }
		}
	      gsi_next (&i);
	      continue;
	    }

	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  fcode = DECL_FUNCTION_CODE (callee);
	  if (fold_stmt (&i))
	    ;
	  else
	    {
	      tree result = NULL_TREE;
	      switch (DECL_FUNCTION_CODE (callee))
		{
		case BUILT_IN_CONSTANT_P:
		  /* Resolve __builtin_constant_p.  If it hasn't been
		     folded to integer_one_node by now, it's fairly
		     certain that the value simply isn't constant.  */
		  result = integer_zero_node;
		  break;

		case BUILT_IN_ASSUME_ALIGNED:
		  /* Remove __builtin_assume_aligned.  */
		  result = gimple_call_arg (stmt, 0);
		  break;

		case BUILT_IN_STACK_RESTORE:
		  result = optimize_stack_restore (i);
		  if (result)
		    break;
		  gsi_next (&i);
		  continue;

		case BUILT_IN_UNREACHABLE:
		  if (optimize_unreachable (i))
		    cfg_changed = true;
		  break;

		case BUILT_IN_VA_START:
		case BUILT_IN_VA_END:
		case BUILT_IN_VA_COPY:
		  /* These shouldn't be folded before pass_stdarg.  */
		  result = optimize_stdarg_builtin (stmt);
		  if (result)
		    break;
		  /* FALLTHRU */

		default:;
		}

	      if (!result)
		{
		  gsi_next (&i);
		  continue;
		}

	      if (!update_call_from_tree (&i, result))
		gimplify_and_update_call_from_tree (&i, result);
	    }

	  todoflags |= TODO_update_address_taken;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

} // anon namespace

gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}