gcc/tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
3 2010, 2011, 2012 Free Software Foundation, Inc.
4 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
5 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published by the
11 Free Software Foundation; either version 3, or (at your option) any
12 later version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 /* Conditional constant propagation (CCP) is based on the SSA
24 propagation engine (tree-ssa-propagate.c). Constant assignments of
25 the form VAR = CST are propagated from the assignments into uses of
26 VAR, which in turn may generate new constants. The simulation uses
27 a four level lattice to keep track of constant values associated
28 with SSA names. Given an SSA name V_i, it may take one of the
29 following values:
30
31 UNINITIALIZED -> the initial state of the value. This value
32 is replaced with a correct initial value
33 the first time the value is used, so the
34 rest of the pass does not need to care about
35 it. Using this value simplifies initialization
36 of the pass, and prevents us from needlessly
37 scanning statements that are never reached.
38
39 UNDEFINED -> V_i is a local variable whose definition
40 has not been processed yet. Therefore we
41 don't yet know if its value is a constant
42 or not.
43
44 CONSTANT -> V_i has been found to hold a constant
45 value C.
46
47 VARYING -> V_i cannot take a constant value, or if it
48 does, it is not possible to determine it
49 at compile time.
50
51 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
52
53 1- In ccp_visit_stmt, we are interested in assignments whose RHS
54 evaluates into a constant and conditional jumps whose predicate
55 evaluates into a boolean true or false. When an assignment of
56 the form V_i = CONST is found, V_i's lattice value is set to
57 CONSTANT and CONST is associated with it. This causes the
58 propagation engine to add all the SSA edges coming out the
59 assignment into the worklists, so that statements that use V_i
60 can be visited.
61
62 If the statement is a conditional with a constant predicate, we
63 mark the outgoing edges as executable or not executable
64 depending on the predicate's value. This is then used when
65 visiting PHI nodes to know when a PHI argument can be ignored.
66
67
68 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
69 same constant C, then the LHS of the PHI is set to C. This
70 evaluation is known as the "meet operation". Since one of the
71 goals of this evaluation is to optimistically return constant
72 values as often as possible, it uses two main short cuts:
73
74 - If an argument is flowing in through a non-executable edge, it
75 is ignored. This is useful in cases like this:
76
77 if (PRED)
78 a_9 = 3;
79 else
80 a_10 = 100;
81 a_11 = PHI (a_9, a_10)
82
83 If PRED is known to always evaluate to false, then we can
84 assume that a_11 will always take its value from a_10, meaning
85        that instead of considering it VARYING (a_9 and a_10 have
86 different values), we can consider it CONSTANT 100.
87
88 - If an argument has an UNDEFINED value, then it does not affect
89 the outcome of the meet operation. If a variable V_i has an
90 UNDEFINED value, it means that either its defining statement
91 hasn't been visited yet or V_i has no defining statement, in
92 which case the original symbol 'V' is being used
93 uninitialized. Since 'V' is a local variable, the compiler
94 may assume any initial value for it.
95
96
97 After propagation, every variable V_i that ends up with a lattice
98 value of CONSTANT will have the associated constant value in the
99 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
100 final substitution and folding.
101
102 References:
103
104 Constant propagation with conditional branches,
105 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
106
107 Building an Optimizing Compiler,
108 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
109
110 Advanced Compiler Design and Implementation,
111 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
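/* Illustrative example (not part of the original commentary, simplified):
   for a function such as

     int f (void)
     {
       int p = 0;
       int a;
       if (p)
         a = 9;
       else
         a = 3;
       return a + 1;
     }

   the predicate is found to be constantly false, so the edge into the
   "then" arm is never marked executable, the PHI node merging the two
   definitions of 'a' meets only the constant 3, and the return value
   folds to the constant 4.  */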
112
113 #include "config.h"
114 #include "system.h"
115 #include "coretypes.h"
116 #include "tm.h"
117 #include "tree.h"
118 #include "flags.h"
119 #include "tm_p.h"
120 #include "basic-block.h"
121 #include "function.h"
122 #include "gimple-pretty-print.h"
123 #include "tree-flow.h"
124 #include "tree-pass.h"
125 #include "tree-ssa-propagate.h"
126 #include "value-prof.h"
127 #include "langhooks.h"
128 #include "target.h"
129 #include "diagnostic-core.h"
130 #include "dbgcnt.h"
131 #include "gimple-fold.h"
132 #include "params.h"
133
134
135 /* Possible lattice values. */
136 typedef enum
137 {
138 UNINITIALIZED,
139 UNDEFINED,
140 CONSTANT,
141 VARYING
142 } ccp_lattice_t;
143
144 struct prop_value_d {
145 /* Lattice value. */
146 ccp_lattice_t lattice_val;
147
148 /* Propagated value. */
149 tree value;
150
151 /* Mask that applies to the propagated value during CCP. For
152 X with a CONSTANT lattice value X & ~mask == value & ~mask. */
153 double_int mask;
154 };
155
156 typedef struct prop_value_d prop_value_t;
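/* Illustrative example (not part of the original commentary): a CONSTANT
   entry with value == 0x8 and mask == 0x3 stands for the bit pattern
   ...0010xx, i.e. every bit is known except bits 0 and 1, and any runtime
   value X it represents satisfies X & ~mask == value & ~mask.  */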
157
158 /* Array of propagated constant values.  After propagation,
159    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  */
164 static prop_value_t *const_val;
165
166 static void canonicalize_float_value (prop_value_t *);
167 static bool ccp_fold_stmt (gimple_stmt_iterator *);
168
169 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
170
171 static void
172 dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
173 {
174 switch (val.lattice_val)
175 {
176 case UNINITIALIZED:
177 fprintf (outf, "%sUNINITIALIZED", prefix);
178 break;
179 case UNDEFINED:
180 fprintf (outf, "%sUNDEFINED", prefix);
181 break;
182 case VARYING:
183 fprintf (outf, "%sVARYING", prefix);
184 break;
185 case CONSTANT:
186 fprintf (outf, "%sCONSTANT ", prefix);
187 if (TREE_CODE (val.value) != INTEGER_CST
188 || double_int_zero_p (val.mask))
189 print_generic_expr (outf, val.value, dump_flags);
190 else
191 {
192 double_int cval = double_int_and_not (tree_to_double_int (val.value),
193 val.mask);
194 fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
195 prefix, cval.high, cval.low);
196 fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
197 val.mask.high, val.mask.low);
198 }
199 break;
200 default:
201 gcc_unreachable ();
202 }
203 }
204
205
206 /* Print lattice value VAL to stderr. */
207
208 void debug_lattice_value (prop_value_t val);
209
210 DEBUG_FUNCTION void
211 debug_lattice_value (prop_value_t val)
212 {
213 dump_lattice_value (stderr, "", val);
214 fprintf (stderr, "\n");
215 }
216
217
218 /* Compute a default value for variable VAR and store it in the
219 CONST_VAL array. The following rules are used to get default
220 values:
221
222 1- Global and static variables that are declared constant are
223 considered CONSTANT.
224
225 2- Any other value is considered UNDEFINED. This is useful when
226 considering PHI nodes. PHI arguments that are undefined do not
227 change the constant value of the PHI node, which allows for more
228 constants to be propagated.
229
230 3- Variables defined by statements other than assignments and PHI
231 nodes are considered VARYING.
232
233 4- Initial values of variables that are not GIMPLE registers are
234 considered VARYING. */
235
236 static prop_value_t
237 get_default_value (tree var)
238 {
239 prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
240 gimple stmt;
241
242 stmt = SSA_NAME_DEF_STMT (var);
243
244 if (gimple_nop_p (stmt))
245 {
246 /* Variables defined by an empty statement are those used
247 before being initialized. If VAR is a local variable, we
248 can assume initially that it is UNDEFINED, otherwise we must
249 consider it VARYING. */
250 if (!virtual_operand_p (var)
251 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
252 val.lattice_val = UNDEFINED;
253 else
254 {
255 val.lattice_val = VARYING;
256 val.mask = double_int_minus_one;
257 }
258 }
259 else if (is_gimple_assign (stmt)
260 /* Value-returning GIMPLE_CALL statements assign to
261 a variable, and are treated similarly to GIMPLE_ASSIGN. */
262 || (is_gimple_call (stmt)
263 && gimple_call_lhs (stmt) != NULL_TREE)
264 || gimple_code (stmt) == GIMPLE_PHI)
265 {
266 tree cst;
267 if (gimple_assign_single_p (stmt)
268 && DECL_P (gimple_assign_rhs1 (stmt))
269 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
270 {
271 val.lattice_val = CONSTANT;
272 val.value = cst;
273 }
274 else
275 /* Any other variable defined by an assignment or a PHI node
276 is considered UNDEFINED. */
277 val.lattice_val = UNDEFINED;
278 }
279 else
280 {
281 /* Otherwise, VAR will never take on a constant value. */
282 val.lattice_val = VARYING;
283 val.mask = double_int_minus_one;
284 }
285
286 return val;
287 }
288
289
290 /* Get the constant value associated with variable VAR. */
291
292 static inline prop_value_t *
293 get_value (tree var)
294 {
295 prop_value_t *val;
296
297 if (const_val == NULL)
298 return NULL;
299
300 val = &const_val[SSA_NAME_VERSION (var)];
301 if (val->lattice_val == UNINITIALIZED)
302 *val = get_default_value (var);
303
304 canonicalize_float_value (val);
305
306 return val;
307 }
308
309 /* Return the constant tree value associated with VAR. */
310
311 static inline tree
312 get_constant_value (tree var)
313 {
314 prop_value_t *val;
315 if (TREE_CODE (var) != SSA_NAME)
316 {
317 if (is_gimple_min_invariant (var))
318 return var;
319 return NULL_TREE;
320 }
321 val = get_value (var);
322 if (val
323 && val->lattice_val == CONSTANT
324 && (TREE_CODE (val->value) != INTEGER_CST
325 || double_int_zero_p (val->mask)))
326 return val->value;
327 return NULL_TREE;
328 }
329
330 /* Sets the value associated with VAR to VARYING. */
331
332 static inline void
333 set_value_varying (tree var)
334 {
335 prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
336
337 val->lattice_val = VARYING;
338 val->value = NULL_TREE;
339 val->mask = double_int_minus_one;
340 }
341
342 /* For float types, modify the value of VAL to make ccp work correctly
343 for non-standard values (-0, NaN):
344
345 If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
346 If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
347 This is to fix the following problem (see PR 29921): Suppose we have
348
349 x = 0.0 * y
350
351    and we set the value of y to NaN.  This causes the value of x to be set to NaN.
352 When we later determine that y is in fact VARYING, fold uses the fact
353 that HONOR_NANS is false, and we try to change the value of x to 0,
354 causing an ICE. With HONOR_NANS being false, the real appearance of
355 NaN would cause undefined behavior, though, so claiming that y (and x)
356 are UNDEFINED initially is correct. */
357
358 static void
359 canonicalize_float_value (prop_value_t *val)
360 {
361 enum machine_mode mode;
362 tree type;
363 REAL_VALUE_TYPE d;
364
365 if (val->lattice_val != CONSTANT
366 || TREE_CODE (val->value) != REAL_CST)
367 return;
368
369 d = TREE_REAL_CST (val->value);
370 type = TREE_TYPE (val->value);
371 mode = TYPE_MODE (type);
372
373 if (!HONOR_SIGNED_ZEROS (mode)
374 && REAL_VALUE_MINUS_ZERO (d))
375 {
376 val->value = build_real (type, dconst0);
377 return;
378 }
379
380 if (!HONOR_NANS (mode)
381 && REAL_VALUE_ISNAN (d))
382 {
383 val->lattice_val = UNDEFINED;
384 val->value = NULL;
385 return;
386 }
387 }
388
389 /* Return whether the lattice transition is valid. */
390
391 static bool
392 valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
393 {
394 /* Lattice transitions must always be monotonically increasing in
395 value. */
396 if (old_val.lattice_val < new_val.lattice_val)
397 return true;
398
399 if (old_val.lattice_val != new_val.lattice_val)
400 return false;
401
402 if (!old_val.value && !new_val.value)
403 return true;
404
405 /* Now both lattice values are CONSTANT. */
406
407 /* Allow transitioning from PHI <&x, not executable> == &x
408 to PHI <&x, &y> == common alignment. */
409 if (TREE_CODE (old_val.value) != INTEGER_CST
410 && TREE_CODE (new_val.value) == INTEGER_CST)
411 return true;
412
413 /* Bit-lattices have to agree in the still valid bits. */
414 if (TREE_CODE (old_val.value) == INTEGER_CST
415 && TREE_CODE (new_val.value) == INTEGER_CST)
416 return double_int_equal_p
417 (double_int_and_not (tree_to_double_int (old_val.value),
418 new_val.mask),
419 double_int_and_not (tree_to_double_int (new_val.value),
420 new_val.mask));
421
422 /* Otherwise constant values have to agree. */
423 return operand_equal_p (old_val.value, new_val.value, 0);
424 }
425
426 /* Set the value for variable VAR to NEW_VAL. Return true if the new
427 value is different from VAR's previous value. */
428
429 static bool
430 set_lattice_value (tree var, prop_value_t new_val)
431 {
432 /* We can deal with old UNINITIALIZED values just fine here. */
433 prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
434
435 canonicalize_float_value (&new_val);
436
437 /* We have to be careful to not go up the bitwise lattice
438 represented by the mask.
439 ??? This doesn't seem to be the best place to enforce this. */
440 if (new_val.lattice_val == CONSTANT
441 && old_val->lattice_val == CONSTANT
442 && TREE_CODE (new_val.value) == INTEGER_CST
443 && TREE_CODE (old_val->value) == INTEGER_CST)
444 {
445 double_int diff;
446 diff = double_int_xor (tree_to_double_int (new_val.value),
447 tree_to_double_int (old_val->value));
448 new_val.mask = double_int_ior (new_val.mask,
449 double_int_ior (old_val->mask, diff));
450 }
451
452 gcc_assert (valid_lattice_transition (*old_val, new_val));
453
454 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
455 caller that this was a non-transition. */
456 if (old_val->lattice_val != new_val.lattice_val
457 || (new_val.lattice_val == CONSTANT
458 && TREE_CODE (new_val.value) == INTEGER_CST
459 && (TREE_CODE (old_val->value) != INTEGER_CST
460 || !double_int_equal_p (new_val.mask, old_val->mask))))
461 {
462 /* ??? We would like to delay creation of INTEGER_CSTs from
463          partially constant values here.  */
464
465 if (dump_file && (dump_flags & TDF_DETAILS))
466 {
467 dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
468 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
469 }
470
471 *old_val = new_val;
472
473 gcc_assert (new_val.lattice_val != UNINITIALIZED);
474 return true;
475 }
476
477 return false;
478 }
479
480 static prop_value_t get_value_for_expr (tree, bool);
481 static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
482 static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
483 tree, double_int, double_int,
484 tree, double_int, double_int);
485
486 /* Return a double_int that can be used for bitwise simplifications
487 from VAL. */
488
489 static double_int
490 value_to_double_int (prop_value_t val)
491 {
492 if (val.value
493 && TREE_CODE (val.value) == INTEGER_CST)
494 return tree_to_double_int (val.value);
495 else
496 return double_int_zero;
497 }
498
499 /* Return the value for the address expression EXPR based on alignment
500 information. */
501
502 static prop_value_t
503 get_value_from_alignment (tree expr)
504 {
505 tree type = TREE_TYPE (expr);
506 prop_value_t val;
507 unsigned HOST_WIDE_INT bitpos;
508 unsigned int align;
509
510 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
511
512 get_pointer_alignment_1 (expr, &align, &bitpos);
513 val.mask
514 = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
515 ? double_int_mask (TYPE_PRECISION (type))
516 : double_int_minus_one,
517 uhwi_to_double_int (align / BITS_PER_UNIT - 1));
518 val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
519 if (val.lattice_val == CONSTANT)
520 val.value
521 = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
522 else
523 val.value = NULL_TREE;
524
525 return val;
526 }
527
528 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
529 return constant bits extracted from alignment information for
530 invariant addresses. */
531
532 static prop_value_t
533 get_value_for_expr (tree expr, bool for_bits_p)
534 {
535 prop_value_t val;
536
537 if (TREE_CODE (expr) == SSA_NAME)
538 {
539 val = *get_value (expr);
540 if (for_bits_p
541 && val.lattice_val == CONSTANT
542 && TREE_CODE (val.value) == ADDR_EXPR)
543 val = get_value_from_alignment (val.value);
544 }
545 else if (is_gimple_min_invariant (expr)
546 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
547 {
548 val.lattice_val = CONSTANT;
549 val.value = expr;
550 val.mask = double_int_zero;
551 canonicalize_float_value (&val);
552 }
553 else if (TREE_CODE (expr) == ADDR_EXPR)
554 val = get_value_from_alignment (expr);
555 else
556 {
557 val.lattice_val = VARYING;
558 val.mask = double_int_minus_one;
559 val.value = NULL_TREE;
560 }
561 return val;
562 }
563
564 /* Return the likely CCP lattice value for STMT.
565
566 If STMT has no operands, then return CONSTANT.
567
568 Else if undefinedness of operands of STMT cause its value to be
569 undefined, then return UNDEFINED.
570
571 Else if any operands of STMT are constants, then return CONSTANT.
572
573 Else return VARYING. */
574
575 static ccp_lattice_t
576 likely_value (gimple stmt)
577 {
578 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
579 tree use;
580 ssa_op_iter iter;
581 unsigned i;
582
583 enum gimple_code code = gimple_code (stmt);
584
585 /* This function appears to be called only for assignments, calls,
586 conditionals, and switches, due to the logic in visit_stmt. */
587 gcc_assert (code == GIMPLE_ASSIGN
588 || code == GIMPLE_CALL
589 || code == GIMPLE_COND
590 || code == GIMPLE_SWITCH);
591
592 /* If the statement has volatile operands, it won't fold to a
593 constant value. */
594 if (gimple_has_volatile_ops (stmt))
595 return VARYING;
596
597 /* Arrive here for more complex cases. */
598 has_constant_operand = false;
599 has_undefined_operand = false;
600 all_undefined_operands = true;
601 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
602 {
603 prop_value_t *val = get_value (use);
604
605 if (val->lattice_val == UNDEFINED)
606 has_undefined_operand = true;
607 else
608 all_undefined_operands = false;
609
610 if (val->lattice_val == CONSTANT)
611 has_constant_operand = true;
612 }
613
614   /* There may be constants in regular rhs operands.  For calls we
615      have to ignore the lhs, fndecl and static chain; for other
616      statements only the lhs.  */
617 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
618 i < gimple_num_ops (stmt); ++i)
619 {
620 tree op = gimple_op (stmt, i);
621 if (!op || TREE_CODE (op) == SSA_NAME)
622 continue;
623 if (is_gimple_min_invariant (op))
624 has_constant_operand = true;
625 }
626
627 if (has_constant_operand)
628 all_undefined_operands = false;
629
630 /* If the operation combines operands like COMPLEX_EXPR make sure to
631 not mark the result UNDEFINED if only one part of the result is
632 undefined. */
633 if (has_undefined_operand && all_undefined_operands)
634 return UNDEFINED;
635 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
636 {
637 switch (gimple_assign_rhs_code (stmt))
638 {
639 /* Unary operators are handled with all_undefined_operands. */
640 case PLUS_EXPR:
641 case MINUS_EXPR:
642 case POINTER_PLUS_EXPR:
643 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
644 Not bitwise operators, one VARYING operand may specify the
645 result completely. Not logical operators for the same reason.
646 Not COMPLEX_EXPR as one VARYING operand makes the result partly
647 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
648 the undefined operand may be promoted. */
649 return UNDEFINED;
650
651 case ADDR_EXPR:
652 /* If any part of an address is UNDEFINED, like the index
653 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
654 return UNDEFINED;
655
656 default:
657 ;
658 }
659 }
660 /* If there was an UNDEFINED operand but the result may be not UNDEFINED
661 fall back to CONSTANT. During iteration UNDEFINED may still drop
662 to CONSTANT. */
663 if (has_undefined_operand)
664 return CONSTANT;
665
666 /* We do not consider virtual operands here -- load from read-only
667 memory may have only VARYING virtual operands, but still be
668 constant. */
669 if (has_constant_operand
670 || gimple_references_memory_p (stmt))
671 return CONSTANT;
672
673 return VARYING;
674 }
675
676 /* Returns true if STMT cannot be constant. */
677
678 static bool
679 surely_varying_stmt_p (gimple stmt)
680 {
681 /* If the statement has operands that we cannot handle, it cannot be
682 constant. */
683 if (gimple_has_volatile_ops (stmt))
684 return true;
685
686   /* If it is a call that does not return a value, or is a direct
687      call to a function that is not a builtin, it is varying.  */
688 if (is_gimple_call (stmt))
689 {
690 tree fndecl;
691 if (!gimple_call_lhs (stmt)
692 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
693 && !DECL_BUILT_IN (fndecl)))
694 return true;
695 }
696
697 /* Any other store operation is not interesting. */
698 else if (gimple_vdef (stmt))
699 return true;
700
701 /* Anything other than assignments and conditional jumps are not
702 interesting for CCP. */
703 if (gimple_code (stmt) != GIMPLE_ASSIGN
704 && gimple_code (stmt) != GIMPLE_COND
705 && gimple_code (stmt) != GIMPLE_SWITCH
706 && gimple_code (stmt) != GIMPLE_CALL)
707 return true;
708
709 return false;
710 }
711
712 /* Initialize local data structures for CCP. */
713
714 static void
715 ccp_initialize (void)
716 {
717 basic_block bb;
718
719 const_val = XCNEWVEC (prop_value_t, num_ssa_names);
720
721 /* Initialize simulation flags for PHI nodes and statements. */
722 FOR_EACH_BB (bb)
723 {
724 gimple_stmt_iterator i;
725
726 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
727 {
728 gimple stmt = gsi_stmt (i);
729 bool is_varying;
730
731          /* If the statement is a control insn, then we must make
732             sure it is simulated at least once.  Failing to do so
733             means that its outgoing edges will never get added.  */
734 if (stmt_ends_bb_p (stmt))
735 is_varying = false;
736 else
737 is_varying = surely_varying_stmt_p (stmt);
738
739 if (is_varying)
740 {
741 tree def;
742 ssa_op_iter iter;
743
744 /* If the statement will not produce a constant, mark
745 all its outputs VARYING. */
746 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
747 set_value_varying (def);
748 }
749 prop_set_simulate_again (stmt, !is_varying);
750 }
751 }
752
753 /* Now process PHI nodes. We never clear the simulate_again flag on
754 phi nodes, since we do not know which edges are executable yet,
755      except for phi nodes for virtual operands.  */
756 FOR_EACH_BB (bb)
757 {
758 gimple_stmt_iterator i;
759
760 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
761 {
762 gimple phi = gsi_stmt (i);
763
764 if (virtual_operand_p (gimple_phi_result (phi)))
765 prop_set_simulate_again (phi, false);
766 else
767 prop_set_simulate_again (phi, true);
768 }
769 }
770 }
771
772 /* Debug count support.  Reset the lattice values of SSA names to
773    VARYING when the total number of SSA names analyzed exceeds
774    the specified debug count.  */
775
776 static void
777 do_dbg_cnt (void)
778 {
779 unsigned i;
780 for (i = 0; i < num_ssa_names; i++)
781 {
782 if (!dbg_cnt (ccp))
783 {
784 const_val[i].lattice_val = VARYING;
785 const_val[i].mask = double_int_minus_one;
786 const_val[i].value = NULL_TREE;
787 }
788 }
789 }
790
791
792 /* Do final substitution of propagated values, clean up the flowgraph and
793 free allocated storage.
794
795 Return TRUE when something was optimized. */
796
797 static bool
798 ccp_finalize (void)
799 {
800 bool something_changed;
801 unsigned i;
802
803 do_dbg_cnt ();
804
805 /* Derive alignment and misalignment information from partially
806 constant pointers in the lattice. */
807 for (i = 1; i < num_ssa_names; ++i)
808 {
809 tree name = ssa_name (i);
810 prop_value_t *val;
811 unsigned int tem, align;
812
813 if (!name
814 || !POINTER_TYPE_P (TREE_TYPE (name)))
815 continue;
816
817 val = get_value (name);
818 if (val->lattice_val != CONSTANT
819 || TREE_CODE (val->value) != INTEGER_CST)
820 continue;
821
822 /* Trailing constant bits specify the alignment, trailing value
823 bits the misalignment. */
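      /* Illustrative example (not part of the original commentary): if the
         low three bits of a pointer value are known to be 101 (so
         val->mask.low is ~7 and the value ends in ...101), then tem & -tem
         yields 8, and the pointer gets an alignment of 8 with a
         misalignment of 5.  */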
824 tem = val->mask.low;
825 align = (tem & -tem);
826 if (align > 1)
827 set_ptr_info_alignment (get_ptr_info (name), align,
828 TREE_INT_CST_LOW (val->value) & (align - 1));
829 }
830
831 /* Perform substitutions based on the known constant values. */
832 something_changed = substitute_and_fold (get_constant_value,
833 ccp_fold_stmt, true);
834
835 free (const_val);
836 const_val = NULL;
837   return something_changed;
838 }
839
840
841 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
842 in VAL1.
843
844 any M UNDEFINED = any
845 any M VARYING = VARYING
846 Ci M Cj = Ci if (i == j)
847 Ci M Cj = VARYING if (i != j)
848 */
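/* Illustrative example (not part of the original commentary): meeting the
   fully known constants 0x12 and 0x16 with the INTEGER_CST rule below
   yields value 0x12 with mask 0x04, i.e. all bits stay known except bit 2;
   the result only drops to VARYING when the mask becomes all ones.  */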
849
850 static void
851 ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
852 {
853 if (val1->lattice_val == UNDEFINED)
854 {
855 /* UNDEFINED M any = any */
856 *val1 = *val2;
857 }
858 else if (val2->lattice_val == UNDEFINED)
859 {
860 /* any M UNDEFINED = any
861 Nothing to do. VAL1 already contains the value we want. */
862 ;
863 }
864 else if (val1->lattice_val == VARYING
865 || val2->lattice_val == VARYING)
866 {
867 /* any M VARYING = VARYING. */
868 val1->lattice_val = VARYING;
869 val1->mask = double_int_minus_one;
870 val1->value = NULL_TREE;
871 }
872 else if (val1->lattice_val == CONSTANT
873 && val2->lattice_val == CONSTANT
874 && TREE_CODE (val1->value) == INTEGER_CST
875 && TREE_CODE (val2->value) == INTEGER_CST)
876 {
877 /* Ci M Cj = Ci if (i == j)
878 Ci M Cj = VARYING if (i != j)
879
880 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
881 drop to varying. */
882 val1->mask
883 = double_int_ior (double_int_ior (val1->mask,
884 val2->mask),
885 double_int_xor (tree_to_double_int (val1->value),
886 tree_to_double_int (val2->value)));
887 if (double_int_minus_one_p (val1->mask))
888 {
889 val1->lattice_val = VARYING;
890 val1->value = NULL_TREE;
891 }
892 }
893 else if (val1->lattice_val == CONSTANT
894 && val2->lattice_val == CONSTANT
895 && simple_cst_equal (val1->value, val2->value) == 1)
896 {
897 /* Ci M Cj = Ci if (i == j)
898 Ci M Cj = VARYING if (i != j)
899
900 VAL1 already contains the value we want for equivalent values. */
901 }
902 else if (val1->lattice_val == CONSTANT
903 && val2->lattice_val == CONSTANT
904 && (TREE_CODE (val1->value) == ADDR_EXPR
905 || TREE_CODE (val2->value) == ADDR_EXPR))
906 {
907 /* When not equal addresses are involved try meeting for
908 alignment. */
909 prop_value_t tem = *val2;
910 if (TREE_CODE (val1->value) == ADDR_EXPR)
911 *val1 = get_value_for_expr (val1->value, true);
912 if (TREE_CODE (val2->value) == ADDR_EXPR)
913 tem = get_value_for_expr (val2->value, true);
914 ccp_lattice_meet (val1, &tem);
915 }
916 else
917 {
918 /* Any other combination is VARYING. */
919 val1->lattice_val = VARYING;
920 val1->mask = double_int_minus_one;
921 val1->value = NULL_TREE;
922 }
923 }
924
925
926 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
927 lattice values to determine PHI_NODE's lattice value. The value of a
928    PHI node is determined by calling ccp_lattice_meet with all the arguments
929 of the PHI node that are incoming via executable edges. */
930
931 static enum ssa_prop_result
932 ccp_visit_phi_node (gimple phi)
933 {
934 unsigned i;
935 prop_value_t *old_val, new_val;
936
937 if (dump_file && (dump_flags & TDF_DETAILS))
938 {
939 fprintf (dump_file, "\nVisiting PHI node: ");
940 print_gimple_stmt (dump_file, phi, 0, dump_flags);
941 }
942
943 old_val = get_value (gimple_phi_result (phi));
944 switch (old_val->lattice_val)
945 {
946 case VARYING:
947 return SSA_PROP_VARYING;
948
949 case CONSTANT:
950 new_val = *old_val;
951 break;
952
953 case UNDEFINED:
954 new_val.lattice_val = UNDEFINED;
955 new_val.value = NULL_TREE;
956 break;
957
958 default:
959 gcc_unreachable ();
960 }
961
962 for (i = 0; i < gimple_phi_num_args (phi); i++)
963 {
964 /* Compute the meet operator over all the PHI arguments flowing
965 through executable edges. */
966 edge e = gimple_phi_arg_edge (phi, i);
967
968 if (dump_file && (dump_flags & TDF_DETAILS))
969 {
970 fprintf (dump_file,
971 "\n Argument #%d (%d -> %d %sexecutable)\n",
972 i, e->src->index, e->dest->index,
973 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
974 }
975
976       /* If the incoming edge is executable, compute the meet operator for
977 the existing value of the PHI node and the current PHI argument. */
978 if (e->flags & EDGE_EXECUTABLE)
979 {
980 tree arg = gimple_phi_arg (phi, i)->def;
981 prop_value_t arg_val = get_value_for_expr (arg, false);
982
983 ccp_lattice_meet (&new_val, &arg_val);
984
985 if (dump_file && (dump_flags & TDF_DETAILS))
986 {
987 fprintf (dump_file, "\t");
988 print_generic_expr (dump_file, arg, dump_flags);
989 dump_lattice_value (dump_file, "\tValue: ", arg_val);
990 fprintf (dump_file, "\n");
991 }
992
993 if (new_val.lattice_val == VARYING)
994 break;
995 }
996 }
997
998 if (dump_file && (dump_flags & TDF_DETAILS))
999 {
1000 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1001 fprintf (dump_file, "\n\n");
1002 }
1003
1004 /* Make the transition to the new value. */
1005 if (set_lattice_value (gimple_phi_result (phi), new_val))
1006 {
1007 if (new_val.lattice_val == VARYING)
1008 return SSA_PROP_VARYING;
1009 else
1010 return SSA_PROP_INTERESTING;
1011 }
1012 else
1013 return SSA_PROP_NOT_INTERESTING;
1014 }
1015
1016 /* Return the constant value for OP, or OP itself if it is not constant.  */
1017
1018 static tree
1019 valueize_op (tree op)
1020 {
1021 if (TREE_CODE (op) == SSA_NAME)
1022 {
1023 tree tem = get_constant_value (op);
1024 if (tem)
1025 return tem;
1026 }
1027 return op;
1028 }
1029
1030 /* CCP specific front-end to the non-destructive constant folding
1031 routines.
1032
1033 Attempt to simplify the RHS of STMT knowing that one or more
1034 operands are constants.
1035
1036 If simplification is possible, return the simplified RHS,
1037 otherwise return the original RHS or NULL_TREE. */
1038
1039 static tree
1040 ccp_fold (gimple stmt)
1041 {
1042 location_t loc = gimple_location (stmt);
1043 switch (gimple_code (stmt))
1044 {
1045 case GIMPLE_COND:
1046 {
1047 /* Handle comparison operators that can appear in GIMPLE form. */
1048 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1049 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1050 enum tree_code code = gimple_cond_code (stmt);
1051 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1052 }
1053
1054 case GIMPLE_SWITCH:
1055 {
1056 /* Return the constant switch index. */
1057 return valueize_op (gimple_switch_index (stmt));
1058 }
1059
1060 case GIMPLE_ASSIGN:
1061 case GIMPLE_CALL:
1062 return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);
1063
1064 default:
1065 gcc_unreachable ();
1066 }
1067 }
1068
1069 /* Apply the operation CODE in type TYPE to the value, mask pair
1070 RVAL and RMASK representing a value of type RTYPE and set
1071 the value, mask pair *VAL and *MASK to the result. */
1072
1073 static void
1074 bit_value_unop_1 (enum tree_code code, tree type,
1075 double_int *val, double_int *mask,
1076 tree rtype, double_int rval, double_int rmask)
1077 {
1078 switch (code)
1079 {
1080 case BIT_NOT_EXPR:
1081 *mask = rmask;
1082 *val = double_int_not (rval);
1083 break;
1084
1085 case NEGATE_EXPR:
1086 {
1087 double_int temv, temm;
1088 /* Return ~rval + 1. */
1089 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1090 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1091 type, temv, temm,
1092 type, double_int_one, double_int_zero);
1093 break;
1094 }
1095
1096 CASE_CONVERT:
1097 {
1098 bool uns;
1099
1100 /* First extend mask and value according to the original type. */
1101 uns = TYPE_UNSIGNED (rtype);
1102 *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
1103 *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
1104
1105 /* Then extend mask and value according to the target type. */
1106 uns = TYPE_UNSIGNED (type);
1107 *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
1108 *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
1109 break;
1110 }
1111
1112 default:
1113 *mask = double_int_minus_one;
1114 break;
1115 }
1116 }
1117
1118 /* Apply the operation CODE in type TYPE to the value, mask pairs
1119    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1120 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1121
1122 static void
1123 bit_value_binop_1 (enum tree_code code, tree type,
1124 double_int *val, double_int *mask,
1125 tree r1type, double_int r1val, double_int r1mask,
1126 tree r2type, double_int r2val, double_int r2mask)
1127 {
1128 bool uns = TYPE_UNSIGNED (type);
1129   /* Assume we'll get a constant result.  Use an initial varying value;
1130      we fall back to varying in the end if necessary.  */
1131 *mask = double_int_minus_one;
1132 switch (code)
1133 {
1134 case BIT_AND_EXPR:
1135 /* The mask is constant where there is a known not
1136 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1137 *mask = double_int_and (double_int_ior (r1mask, r2mask),
1138 double_int_and (double_int_ior (r1val, r1mask),
1139 double_int_ior (r2val, r2mask)));
1140 *val = double_int_and (r1val, r2val);
1141 break;
1142
1143 case BIT_IOR_EXPR:
1144 /* The mask is constant where there is a known
1145 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1146 *mask = double_int_and_not
1147 (double_int_ior (r1mask, r2mask),
1148 double_int_ior (double_int_and_not (r1val, r1mask),
1149 double_int_and_not (r2val, r2mask)));
1150 *val = double_int_ior (r1val, r2val);
1151 break;
1152
1153 case BIT_XOR_EXPR:
1154 /* m1 | m2 */
1155 *mask = double_int_ior (r1mask, r2mask);
1156 *val = double_int_xor (r1val, r2val);
1157 break;
1158
1159 case LROTATE_EXPR:
1160 case RROTATE_EXPR:
1161 if (double_int_zero_p (r2mask))
1162 {
1163 HOST_WIDE_INT shift = r2val.low;
1164 if (code == RROTATE_EXPR)
1165 shift = -shift;
1166 *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
1167 *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
1168 }
1169 break;
1170
1171 case LSHIFT_EXPR:
1172 case RSHIFT_EXPR:
1173 /* ??? We can handle partially known shift counts if we know
1174 its sign. That way we can tell that (x << (y | 8)) & 255
1175 is zero. */
1176 if (double_int_zero_p (r2mask))
1177 {
1178 HOST_WIDE_INT shift = r2val.low;
1179 if (code == RSHIFT_EXPR)
1180 shift = -shift;
1181 /* We need to know if we are doing a left or a right shift
1182 to properly shift in zeros for left shift and unsigned
1183 right shifts and the sign bit for signed right shifts.
1184 For signed right shifts we shift in varying in case
1185 the sign bit was varying. */
1186 if (shift > 0)
1187 {
1188 *mask = double_int_lshift (r1mask, shift,
1189 TYPE_PRECISION (type), false);
1190 *val = double_int_lshift (r1val, shift,
1191 TYPE_PRECISION (type), false);
1192 }
1193 else if (shift < 0)
1194 {
1195 shift = -shift;
1196 *mask = double_int_rshift (r1mask, shift,
1197 TYPE_PRECISION (type), !uns);
1198 *val = double_int_rshift (r1val, shift,
1199 TYPE_PRECISION (type), !uns);
1200 }
1201 else
1202 {
1203 *mask = r1mask;
1204 *val = r1val;
1205 }
1206 }
1207 break;
1208
1209 case PLUS_EXPR:
1210 case POINTER_PLUS_EXPR:
1211 {
1212 double_int lo, hi;
1213 /* Do the addition with unknown bits set to zero, to give carry-ins of
1214 zero wherever possible. */
1215 lo = double_int_add (double_int_and_not (r1val, r1mask),
1216 double_int_and_not (r2val, r2mask));
1217 lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
1218 /* Do the addition with unknown bits set to one, to give carry-ins of
1219 one wherever possible. */
1220 hi = double_int_add (double_int_ior (r1val, r1mask),
1221 double_int_ior (r2val, r2mask));
1222 hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
1223 /* Each bit in the result is known if (a) the corresponding bits in
1224 both inputs are known, and (b) the carry-in to that bit position
1225 is known. We can check condition (b) by seeing if we got the same
1226 result with minimised carries as with maximised carries. */
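        /* Illustrative example (not part of the original commentary):
           adding a fully known 0b0100 (r1val = 4, r1mask = 0) to a value
           known to be 0b00x0 (r2val = 0, r2mask = 2) gives lo = 4 and
           hi = 6, so lo ^ hi = 2 and only bit 1 of the sum is unknown.  */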
1227 *mask = double_int_ior (double_int_ior (r1mask, r2mask),
1228 double_int_xor (lo, hi));
1229 *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
1230 /* It shouldn't matter whether we choose lo or hi here. */
1231 *val = lo;
1232 break;
1233 }
1234
1235 case MINUS_EXPR:
1236 {
1237 double_int temv, temm;
1238 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1239 r2type, r2val, r2mask);
1240 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1241 r1type, r1val, r1mask,
1242 r2type, temv, temm);
1243 break;
1244 }
1245
1246 case MULT_EXPR:
1247 {
1248 /* Just track trailing zeros in both operands and transfer
1249 them to the other. */
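        /* Illustrative example (not part of the original commentary): if one
           operand is known to end in two zero bits and the other in one zero
           bit, the product ends in at least three zero bits, so the low three
           bits of the result become known zeros.  */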
1250 int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
1251 int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
1252 if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
1253 {
1254 *mask = double_int_zero;
1255 *val = double_int_zero;
1256 }
1257 else if (r1tz + r2tz > 0)
1258 {
1259 *mask = double_int_not (double_int_mask (r1tz + r2tz));
1260 *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
1261 *val = double_int_zero;
1262 }
1263 break;
1264 }
1265
1266 case EQ_EXPR:
1267 case NE_EXPR:
1268 {
1269 double_int m = double_int_ior (r1mask, r2mask);
1270 if (!double_int_equal_p (double_int_and_not (r1val, m),
1271 double_int_and_not (r2val, m)))
1272 {
1273 *mask = double_int_zero;
1274 *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
1275 }
1276 else
1277 {
1278 /* We know the result of a comparison is always one or zero. */
1279 *mask = double_int_one;
1280 *val = double_int_zero;
1281 }
1282 break;
1283 }
1284
1285 case GE_EXPR:
1286 case GT_EXPR:
1287 {
1288 double_int tem = r1val;
1289 r1val = r2val;
1290 r2val = tem;
1291 tem = r1mask;
1292 r1mask = r2mask;
1293 r2mask = tem;
1294 code = swap_tree_comparison (code);
1295 }
1296 /* Fallthru. */
1297 case LT_EXPR:
1298 case LE_EXPR:
1299 {
1300 int minmax, maxmin;
1301 /* If the most significant bits are not known we know nothing. */
1302 if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
1303 break;
1304
1305 /* For comparisons the signedness is in the comparison operands. */
1306 uns = TYPE_UNSIGNED (r1type);
1307
1308         /* If we know the most significant bits we know the value
1309            ranges by means of treating varying bits as zero
1310 or one. Do a cross comparison of the max/min pairs. */
1311 maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
1312 double_int_and_not (r2val, r2mask), uns);
1313 minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
1314 double_int_ior (r2val, r2mask), uns);
1315 if (maxmin < 0) /* r1 is less than r2. */
1316 {
1317 *mask = double_int_zero;
1318 *val = double_int_one;
1319 }
1320 else if (minmax > 0) /* r1 is not less or equal to r2. */
1321 {
1322 *mask = double_int_zero;
1323 *val = double_int_zero;
1324 }
1325 else if (maxmin == minmax) /* r1 and r2 are equal. */
1326 {
1327 /* This probably should never happen as we'd have
1328 folded the thing during fully constant value folding. */
1329 *mask = double_int_zero;
1330 *val = (code == LE_EXPR ? double_int_one : double_int_zero);
1331 }
1332 else
1333 {
1334 /* We know the result of a comparison is always one or zero. */
1335 *mask = double_int_one;
1336 *val = double_int_zero;
1337 }
1338 break;
1339 }
1340
1341 default:;
1342 }
1343 }
1344
1345 /* Return the propagation value when applying the operation CODE to
1346 the value RHS yielding type TYPE. */
1347
1348 static prop_value_t
1349 bit_value_unop (enum tree_code code, tree type, tree rhs)
1350 {
1351 prop_value_t rval = get_value_for_expr (rhs, true);
1352 double_int value, mask;
1353 prop_value_t val;
1354
1355 if (rval.lattice_val == UNDEFINED)
1356 return rval;
1357
1358 gcc_assert ((rval.lattice_val == CONSTANT
1359 && TREE_CODE (rval.value) == INTEGER_CST)
1360 || double_int_minus_one_p (rval.mask));
1361 bit_value_unop_1 (code, type, &value, &mask,
1362 TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
1363 if (!double_int_minus_one_p (mask))
1364 {
1365 val.lattice_val = CONSTANT;
1366 val.mask = mask;
1367 /* ??? Delay building trees here. */
1368 val.value = double_int_to_tree (type, value);
1369 }
1370 else
1371 {
1372 val.lattice_val = VARYING;
1373 val.value = NULL_TREE;
1374 val.mask = double_int_minus_one;
1375 }
1376 return val;
1377 }
1378
1379 /* Return the propagation value when applying the operation CODE to
1380 the values RHS1 and RHS2 yielding type TYPE. */
1381
1382 static prop_value_t
1383 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1384 {
1385 prop_value_t r1val = get_value_for_expr (rhs1, true);
1386 prop_value_t r2val = get_value_for_expr (rhs2, true);
1387 double_int value, mask;
1388 prop_value_t val;
1389
1390 if (r1val.lattice_val == UNDEFINED
1391 || r2val.lattice_val == UNDEFINED)
1392 {
1393 val.lattice_val = VARYING;
1394 val.value = NULL_TREE;
1395 val.mask = double_int_minus_one;
1396 return val;
1397 }
1398
1399 gcc_assert ((r1val.lattice_val == CONSTANT
1400 && TREE_CODE (r1val.value) == INTEGER_CST)
1401 || double_int_minus_one_p (r1val.mask));
1402 gcc_assert ((r2val.lattice_val == CONSTANT
1403 && TREE_CODE (r2val.value) == INTEGER_CST)
1404 || double_int_minus_one_p (r2val.mask));
1405 bit_value_binop_1 (code, type, &value, &mask,
1406 TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
1407 TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
1408 if (!double_int_minus_one_p (mask))
1409 {
1410 val.lattice_val = CONSTANT;
1411 val.mask = mask;
1412 /* ??? Delay building trees here. */
1413 val.value = double_int_to_tree (type, value);
1414 }
1415 else
1416 {
1417 val.lattice_val = VARYING;
1418 val.value = NULL_TREE;
1419 val.mask = double_int_minus_one;
1420 }
1421 return val;
1422 }
1423
1424 /* Return the propagation value when applying __builtin_assume_aligned to
1425 its arguments. */
1426
1427 static prop_value_t
1428 bit_value_assume_aligned (gimple stmt)
1429 {
1430 tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
1431 tree type = TREE_TYPE (ptr);
1432 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1433 prop_value_t ptrval = get_value_for_expr (ptr, true);
1434 prop_value_t alignval;
1435 double_int value, mask;
1436 prop_value_t val;
1437 if (ptrval.lattice_val == UNDEFINED)
1438 return ptrval;
1439 gcc_assert ((ptrval.lattice_val == CONSTANT
1440 && TREE_CODE (ptrval.value) == INTEGER_CST)
1441 || double_int_minus_one_p (ptrval.mask));
1442 align = gimple_call_arg (stmt, 1);
1443 if (!host_integerp (align, 1))
1444 return ptrval;
1445 aligni = tree_low_cst (align, 1);
1446 if (aligni <= 1
1447 || (aligni & (aligni - 1)) != 0)
1448 return ptrval;
1449 if (gimple_call_num_args (stmt) > 2)
1450 {
1451 misalign = gimple_call_arg (stmt, 2);
1452 if (!host_integerp (misalign, 1))
1453 return ptrval;
1454 misaligni = tree_low_cst (misalign, 1);
1455 if (misaligni >= aligni)
1456 return ptrval;
1457 }
1458 align = build_int_cst_type (type, -aligni);
1459 alignval = get_value_for_expr (align, true);
1460 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1461 type, value_to_double_int (ptrval), ptrval.mask,
1462 type, value_to_double_int (alignval), alignval.mask);
1463 if (!double_int_minus_one_p (mask))
1464 {
1465 val.lattice_val = CONSTANT;
1466 val.mask = mask;
1467 gcc_assert ((mask.low & (aligni - 1)) == 0);
1468 gcc_assert ((value.low & (aligni - 1)) == 0);
1469 value.low |= misaligni;
1470 /* ??? Delay building trees here. */
1471 val.value = double_int_to_tree (type, value);
1472 }
1473 else
1474 {
1475 val.lattice_val = VARYING;
1476 val.value = NULL_TREE;
1477 val.mask = double_int_minus_one;
1478 }
1479 return val;
1480 }
1481
1482 /* Evaluate statement STMT.
1483 Valid only for assignments, calls, conditionals, and switches. */
1484
1485 static prop_value_t
1486 evaluate_stmt (gimple stmt)
1487 {
1488 prop_value_t val;
1489 tree simplified = NULL_TREE;
1490 ccp_lattice_t likelyvalue = likely_value (stmt);
1491 bool is_constant = false;
1492 unsigned int align;
1493
1494 if (dump_file && (dump_flags & TDF_DETAILS))
1495 {
1496 fprintf (dump_file, "which is likely ");
1497 switch (likelyvalue)
1498 {
1499 case CONSTANT:
1500 fprintf (dump_file, "CONSTANT");
1501 break;
1502 case UNDEFINED:
1503 fprintf (dump_file, "UNDEFINED");
1504 break;
1505 case VARYING:
1506 fprintf (dump_file, "VARYING");
1507 break;
1508 default:;
1509 }
1510 fprintf (dump_file, "\n");
1511 }
1512
1513 /* If the statement is likely to have a CONSTANT result, then try
1514 to fold the statement to determine the constant value. */
1515 /* FIXME. This is the only place that we call ccp_fold.
1516 Since likely_value never returns CONSTANT for calls, we will
1517 not attempt to fold them, including builtins that may profit. */
1518 if (likelyvalue == CONSTANT)
1519 {
1520 fold_defer_overflow_warnings ();
1521 simplified = ccp_fold (stmt);
1522 is_constant = simplified && is_gimple_min_invariant (simplified);
1523 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1524 if (is_constant)
1525 {
1526 /* The statement produced a constant value. */
1527 val.lattice_val = CONSTANT;
1528 val.value = simplified;
1529 val.mask = double_int_zero;
1530 }
1531 }
1532 /* If the statement is likely to have a VARYING result, then do not
1533 bother folding the statement. */
1534 else if (likelyvalue == VARYING)
1535 {
1536 enum gimple_code code = gimple_code (stmt);
1537 if (code == GIMPLE_ASSIGN)
1538 {
1539 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1540
1541 /* Other cases cannot satisfy is_gimple_min_invariant
1542 without folding. */
1543 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1544 simplified = gimple_assign_rhs1 (stmt);
1545 }
1546 else if (code == GIMPLE_SWITCH)
1547 simplified = gimple_switch_index (stmt);
1548 else
1549 /* These cannot satisfy is_gimple_min_invariant without folding. */
1550 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1551 is_constant = simplified && is_gimple_min_invariant (simplified);
1552 if (is_constant)
1553 {
1554 /* The statement produced a constant value. */
1555 val.lattice_val = CONSTANT;
1556 val.value = simplified;
1557 val.mask = double_int_zero;
1558 }
1559 }
1560
1561 /* Resort to simplification for bitwise tracking. */
1562 if (flag_tree_bit_ccp
1563 && (likelyvalue == CONSTANT || is_gimple_call (stmt))
1564 && !is_constant)
1565 {
1566 enum gimple_code code = gimple_code (stmt);
1567 tree fndecl;
1568 val.lattice_val = VARYING;
1569 val.value = NULL_TREE;
1570 val.mask = double_int_minus_one;
1571 if (code == GIMPLE_ASSIGN)
1572 {
1573 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1574 tree rhs1 = gimple_assign_rhs1 (stmt);
1575 switch (get_gimple_rhs_class (subcode))
1576 {
1577 case GIMPLE_SINGLE_RHS:
1578 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1579 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1580 val = get_value_for_expr (rhs1, true);
1581 break;
1582
1583 case GIMPLE_UNARY_RHS:
1584 if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1585 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1586 && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
1587 || POINTER_TYPE_P (gimple_expr_type (stmt))))
1588 val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
1589 break;
1590
1591 case GIMPLE_BINARY_RHS:
1592 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1593 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1594 {
1595 tree lhs = gimple_assign_lhs (stmt);
1596 tree rhs2 = gimple_assign_rhs2 (stmt);
1597 val = bit_value_binop (subcode,
1598 TREE_TYPE (lhs), rhs1, rhs2);
1599 }
1600 break;
1601
1602 default:;
1603 }
1604 }
1605 else if (code == GIMPLE_COND)
1606 {
1607 enum tree_code code = gimple_cond_code (stmt);
1608 tree rhs1 = gimple_cond_lhs (stmt);
1609 tree rhs2 = gimple_cond_rhs (stmt);
1610 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1611 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1612 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1613 }
1614 else if (code == GIMPLE_CALL
1615 && (fndecl = gimple_call_fndecl (stmt))
1616 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1617 {
1618 switch (DECL_FUNCTION_CODE (fndecl))
1619 {
1620 case BUILT_IN_MALLOC:
1621 case BUILT_IN_REALLOC:
1622 case BUILT_IN_CALLOC:
1623 case BUILT_IN_STRDUP:
1624 case BUILT_IN_STRNDUP:
1625 val.lattice_val = CONSTANT;
1626 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1627 val.mask = shwi_to_double_int
1628 (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
1629 / BITS_PER_UNIT - 1));
1630 break;
1631
1632 case BUILT_IN_ALLOCA:
1633 case BUILT_IN_ALLOCA_WITH_ALIGN:
1634 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1635 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1636 : BIGGEST_ALIGNMENT);
1637 val.lattice_val = CONSTANT;
1638 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1639 val.mask = shwi_to_double_int
1640 (~(((HOST_WIDE_INT) align)
1641 / BITS_PER_UNIT - 1));
1642 break;
1643
1644 /* These builtins return their first argument, unmodified. */
1645 case BUILT_IN_MEMCPY:
1646 case BUILT_IN_MEMMOVE:
1647 case BUILT_IN_MEMSET:
1648 case BUILT_IN_STRCPY:
1649 case BUILT_IN_STRNCPY:
1650 case BUILT_IN_MEMCPY_CHK:
1651 case BUILT_IN_MEMMOVE_CHK:
1652 case BUILT_IN_MEMSET_CHK:
1653 case BUILT_IN_STRCPY_CHK:
1654 case BUILT_IN_STRNCPY_CHK:
1655 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1656 break;
1657
1658 case BUILT_IN_ASSUME_ALIGNED:
1659 val = bit_value_assume_aligned (stmt);
1660 break;
1661
1662 default:;
1663 }
1664 }
1665 is_constant = (val.lattice_val == CONSTANT);
1666 }
1667
1668 if (!is_constant)
1669 {
1670 /* The statement produced a nonconstant value. If the statement
1671 had UNDEFINED operands, then the result of the statement
1672 should be UNDEFINED. Otherwise, the statement is VARYING. */
1673 if (likelyvalue == UNDEFINED)
1674 {
1675 val.lattice_val = likelyvalue;
1676 val.mask = double_int_zero;
1677 }
1678 else
1679 {
1680 val.lattice_val = VARYING;
1681 val.mask = double_int_minus_one;
1682 }
1683
1684 val.value = NULL_TREE;
1685 }
1686
1687 return val;
1688 }
1689
1690 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1691 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
1692
1693 static void
1694 insert_clobber_before_stack_restore (tree saved_val, tree var, htab_t *visited)
1695 {
1696 gimple stmt, clobber_stmt;
1697 tree clobber;
1698 imm_use_iterator iter;
1699 gimple_stmt_iterator i;
1700 gimple *slot;
1701
1702 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1703 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1704 {
1705 clobber = build_constructor (TREE_TYPE (var), NULL);
1706 TREE_THIS_VOLATILE (clobber) = 1;
1707 clobber_stmt = gimple_build_assign (var, clobber);
1708
1709 i = gsi_for_stmt (stmt);
1710 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
1711 }
1712 else if (gimple_code (stmt) == GIMPLE_PHI)
1713 {
1714 if (*visited == NULL)
1715 *visited = htab_create (10, htab_hash_pointer, htab_eq_pointer, NULL);
1716
1717 slot = (gimple *)htab_find_slot (*visited, stmt, INSERT);
1718 if (*slot != NULL)
1719 continue;
1720
1721 *slot = stmt;
1722 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
1723 visited);
1724 }
1725 else
1726 gcc_assert (is_gimple_debug (stmt));
1727 }
1728
1729 /* Advance the iterator to the previous non-debug gimple statement in the same
1730 or dominating basic block. */
1731
1732 static inline void
1733 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
1734 {
1735 basic_block dom;
1736
1737 gsi_prev_nondebug (i);
1738 while (gsi_end_p (*i))
1739 {
1740 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
1741 if (dom == NULL || dom == ENTRY_BLOCK_PTR)
1742 return;
1743
1744 *i = gsi_last_bb (dom);
1745 }
1746 }
1747
1748 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
1749 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
1750
1751    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
1752 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
1753 that case the function gives up without inserting the clobbers. */
1754
1755 static void
1756 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
1757 {
1758 gimple stmt;
1759 tree saved_val;
1760 htab_t visited = NULL;
1761
1762 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
1763 {
1764 stmt = gsi_stmt (i);
1765
1766 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
1767 continue;
1768
1769 saved_val = gimple_call_lhs (stmt);
1770 if (saved_val == NULL_TREE)
1771 continue;
1772
1773 insert_clobber_before_stack_restore (saved_val, var, &visited);
1774 break;
1775 }
1776
1777 if (visited != NULL)
1778 htab_delete (visited);
1779 }
1780
1781 /* Detects a __builtin_alloca_with_align with a constant size argument.
1782    If found, declares a fixed-size array and returns its address; otherwise
1783    returns NULL_TREE.  */
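/* Illustrative sketch (hypothetical GIMPLE, not taken from an actual dump):
   a call

     p_1 = __builtin_alloca_with_align (16, 64);

   whose size argument is constant and below the threshold is replaced by
   the address of a compiler-generated 16-byte local array whose DECL_ALIGN
   is set to the requested 64 bits, i.e. roughly p_1 = &tmp_array;  */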
1784
1785 static tree
1786 fold_builtin_alloca_with_align (gimple stmt)
1787 {
1788 unsigned HOST_WIDE_INT size, threshold, n_elem;
1789 tree lhs, arg, block, var, elem_type, array_type;
1790
1791 /* Get lhs. */
1792 lhs = gimple_call_lhs (stmt);
1793 if (lhs == NULL_TREE)
1794 return NULL_TREE;
1795
1796 /* Detect constant argument. */
1797 arg = get_constant_value (gimple_call_arg (stmt, 0));
1798 if (arg == NULL_TREE
1799 || TREE_CODE (arg) != INTEGER_CST
1800 || !host_integerp (arg, 1))
1801 return NULL_TREE;
1802
1803 size = TREE_INT_CST_LOW (arg);
1804
1805 /* Heuristic: don't fold large allocas. */
1806 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
1807 /* In case the alloca is located at function entry, it has the same lifetime
1808 as a declared array, so we allow a larger size. */
1809 block = gimple_block (stmt);
1810 if (!(cfun->after_inlining
1811 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
1812 threshold /= 10;
1813 if (size > threshold)
1814 return NULL_TREE;
1815
1816 /* Declare array. */
1817 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
1818 n_elem = size * 8 / BITS_PER_UNIT;
1819 array_type = build_array_type_nelts (elem_type, n_elem);
1820 var = create_tmp_var (array_type, NULL);
1821 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
1822 {
1823 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
1824 if (pi != NULL && !pi->pt.anything)
1825 {
1826 bool singleton_p;
1827 unsigned uid;
1828 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
1829 gcc_assert (singleton_p);
1830 SET_DECL_PT_UID (var, uid);
1831 }
1832 }
1833
1834 /* Fold alloca to the address of the array. */
1835 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
1836 }
1837
1838 /* Fold the stmt at *GSI with CCP specific information that propagating
1839 and regular folding does not catch. */
1840
1841 static bool
1842 ccp_fold_stmt (gimple_stmt_iterator *gsi)
1843 {
1844 gimple stmt = gsi_stmt (*gsi);
1845
1846 switch (gimple_code (stmt))
1847 {
1848 case GIMPLE_COND:
1849 {
1850 prop_value_t val;
1851 /* Statement evaluation will handle type mismatches in constants
1852 more gracefully than the final propagation. This allows us to
1853 fold more conditionals here. */
1854 val = evaluate_stmt (stmt);
1855 if (val.lattice_val != CONSTANT
1856 || !double_int_zero_p (val.mask))
1857 return false;
1858
1859 if (dump_file)
1860 {
1861 fprintf (dump_file, "Folding predicate ");
1862 print_gimple_expr (dump_file, stmt, 0, 0);
1863 fprintf (dump_file, " to ");
1864 print_generic_expr (dump_file, val.value, 0);
1865 fprintf (dump_file, "\n");
1866 }
1867
1868 if (integer_zerop (val.value))
1869 gimple_cond_make_false (stmt);
1870 else
1871 gimple_cond_make_true (stmt);
1872
1873 return true;
1874 }
1875
1876 case GIMPLE_CALL:
1877 {
1878 tree lhs = gimple_call_lhs (stmt);
1879 int flags = gimple_call_flags (stmt);
1880 tree val;
1881 tree argt;
1882 bool changed = false;
1883 unsigned i;
1884
1885 /* If the call was folded into a constant make sure it goes
1886 away even if we cannot propagate into all uses because of
1887 type issues. */
1888 if (lhs
1889 && TREE_CODE (lhs) == SSA_NAME
1890 && (val = get_constant_value (lhs))
1891 /* Don't optimize away calls that have side-effects. */
1892 && (flags & (ECF_CONST|ECF_PURE)) != 0
1893 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
1894 {
1895 tree new_rhs = unshare_expr (val);
1896 bool res;
1897 if (!useless_type_conversion_p (TREE_TYPE (lhs),
1898 TREE_TYPE (new_rhs)))
1899 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
1900 res = update_call_from_tree (gsi, new_rhs);
1901 gcc_assert (res);
1902 return true;
1903 }
1904
1905 /* Internal calls provide no argument types, so the extra laxity
1906 for normal calls does not apply. */
1907 if (gimple_call_internal_p (stmt))
1908 return false;
1909
1910 /* The heuristic of fold_builtin_alloca_with_align differs before and
1911    after inlining, so we don't require the arg to have newly changed into
1912    a constant for folding; it only needs to be constant.  */
1913 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
1914 {
1915 tree new_rhs = fold_builtin_alloca_with_align (stmt);
1916 if (new_rhs)
1917 {
1918 bool res = update_call_from_tree (gsi, new_rhs);
1919 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
1920 gcc_assert (res);
1921 insert_clobbers_for_var (*gsi, var);
1922 return true;
1923 }
1924 }
1925
1926 /* Propagate into the call arguments.  Compared to replace_uses_in
1927    this can use the argument slot types for type verification
1928    instead of the current argument type.  We can also safely
1929    drop qualifiers here, as we are dealing with constants anyway.  */
1930 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
1931 for (i = 0; i < gimple_call_num_args (stmt) && argt;
1932 ++i, argt = TREE_CHAIN (argt))
1933 {
1934 tree arg = gimple_call_arg (stmt, i);
1935 if (TREE_CODE (arg) == SSA_NAME
1936 && (val = get_constant_value (arg))
1937 && useless_type_conversion_p
1938 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
1939 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
1940 {
1941 gimple_call_set_arg (stmt, i, unshare_expr (val));
1942 changed = true;
1943 }
1944 }
1945
1946 return changed;
1947 }
1948
1949 case GIMPLE_ASSIGN:
1950 {
1951 tree lhs = gimple_assign_lhs (stmt);
1952 tree val;
1953
1954 /* If we have a load that turned out to be constant, replace it,
1955    as we cannot propagate into all uses in all cases.  */
1956 if (gimple_assign_single_p (stmt)
1957 && TREE_CODE (lhs) == SSA_NAME
1958 && (val = get_constant_value (lhs)))
1959 {
1960 tree rhs = unshare_expr (val);
1961 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
1962 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
1963 gimple_assign_set_rhs_from_tree (gsi, rhs);
1964 return true;
1965 }
1966
1967 return false;
1968 }
1969
1970 default:
1971 return false;
1972 }
1973 }
1974
1975 /* Visit the assignment statement STMT. Set the value of its LHS to the
1976 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
1977 creates virtual definitions, set the value of each new name to that
1978 of the RHS (if we can derive a constant out of the RHS).
1979 Value-returning call statements also perform an assignment, and
1980 are handled here. */
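/* A hedged example of the intended behaviour (SSA names hypothetical):
   for a statement

     x_1 = y_2 + 3;

   where the lattice already records y_2 as CONSTANT 4, evaluate_stmt
   yields CONSTANT 7; if that changes x_1's lattice value, *OUTPUT_P is
   set to x_1 and SSA_PROP_INTERESTING is returned.  */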
1981
1982 static enum ssa_prop_result
1983 visit_assignment (gimple stmt, tree *output_p)
1984 {
1985 prop_value_t val;
1986 enum ssa_prop_result retval;
1987
1988 tree lhs = gimple_get_lhs (stmt);
1989
1990 gcc_assert (gimple_code (stmt) != GIMPLE_CALL
1991 || gimple_call_lhs (stmt) != NULL_TREE);
1992
1993 if (gimple_assign_single_p (stmt)
1994 && gimple_assign_rhs_code (stmt) == SSA_NAME)
1995 /* For a simple copy operation, we copy the lattice values. */
1996 val = *get_value (gimple_assign_rhs1 (stmt));
1997 else
1998 /* Evaluate the statement, which could be
1999 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2000 val = evaluate_stmt (stmt);
2001
2002 retval = SSA_PROP_NOT_INTERESTING;
2003
2004 /* Set the lattice value of the statement's output. */
2005 if (TREE_CODE (lhs) == SSA_NAME)
2006 {
2007 /* If STMT is an assignment to an SSA_NAME, we only have one
2008 value to set. */
2009 if (set_lattice_value (lhs, val))
2010 {
2011 *output_p = lhs;
2012 if (val.lattice_val == VARYING)
2013 retval = SSA_PROP_VARYING;
2014 else
2015 retval = SSA_PROP_INTERESTING;
2016 }
2017 }
2018
2019 return retval;
2020 }
2021
2022
2023 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2024 if it can determine which edge will be taken. Otherwise, return
2025 SSA_PROP_VARYING. */
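/* A hedged example (SSA names and block numbers hypothetical): for

     if (x_1 == 0) goto <bb 4>; else goto <bb 5>;

   with x_1 known to be CONSTANT 0, the condition evaluates to true,
   find_taken_edge returns the edge to bb 4, and SSA_PROP_INTERESTING is
   reported so only that edge is added to the simulation worklist.  */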
2026
2027 static enum ssa_prop_result
2028 visit_cond_stmt (gimple stmt, edge *taken_edge_p)
2029 {
2030 prop_value_t val;
2031 basic_block block;
2032
2033 block = gimple_bb (stmt);
2034 val = evaluate_stmt (stmt);
2035 if (val.lattice_val != CONSTANT
2036 || !double_int_zero_p (val.mask))
2037 return SSA_PROP_VARYING;
2038
2039 /* Find which edge out of the conditional block will be taken and add it
2040 to the worklist. If no single edge can be determined statically,
2041 return SSA_PROP_VARYING to feed all the outgoing edges to the
2042 propagation engine. */
2043 *taken_edge_p = find_taken_edge (block, val.value);
2044 if (*taken_edge_p)
2045 return SSA_PROP_INTERESTING;
2046 else
2047 return SSA_PROP_VARYING;
2048 }
2049
2050
2051 /* Evaluate statement STMT. If the statement produces an output value and
2052 its evaluation changes the lattice value of its output, return
2053 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2054 output value.
2055
2056 If STMT is a conditional branch and we can determine its truth
2057 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2058 value, return SSA_PROP_VARYING. */
2059
2060 static enum ssa_prop_result
2061 ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
2062 {
2063 tree def;
2064 ssa_op_iter iter;
2065
2066 if (dump_file && (dump_flags & TDF_DETAILS))
2067 {
2068 fprintf (dump_file, "\nVisiting statement:\n");
2069 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2070 }
2071
2072 switch (gimple_code (stmt))
2073 {
2074 case GIMPLE_ASSIGN:
2075 /* If the statement is an assignment that produces a single
2076 output value, evaluate its RHS to see if the lattice value of
2077 its output has changed. */
2078 return visit_assignment (stmt, output_p);
2079
2080 case GIMPLE_CALL:
2081 /* A value-returning call also performs an assignment. */
2082 if (gimple_call_lhs (stmt) != NULL_TREE)
2083 return visit_assignment (stmt, output_p);
2084 break;
2085
2086 case GIMPLE_COND:
2087 case GIMPLE_SWITCH:
2088 /* If STMT is a conditional branch, see if we can determine
2089 which branch will be taken. */
2090 /* FIXME. It appears that we should be able to optimize
2091 computed GOTOs here as well. */
2092 return visit_cond_stmt (stmt, taken_edge_p);
2093
2094 default:
2095 break;
2096 }
2097
2098 /* Any other kind of statement is not interesting for constant
2099 propagation and, therefore, not worth simulating. */
2100 if (dump_file && (dump_flags & TDF_DETAILS))
2101 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2102
2103 /* Definitions made by statements other than assignments to
2104 SSA_NAMEs represent unknown modifications to their outputs.
2105 Mark them VARYING. */
2106 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2107 {
2108 prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
2109 set_lattice_value (def, v);
2110 }
2111
2112 return SSA_PROP_VARYING;
2113 }
2114
2115
2116 /* Main entry point for SSA Conditional Constant Propagation. */
2117
2118 static unsigned int
2119 do_ssa_ccp (void)
2120 {
2121 unsigned int todo = 0;
2122 calculate_dominance_info (CDI_DOMINATORS);
2123 ccp_initialize ();
2124 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2125 if (ccp_finalize ())
2126 todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
2127 free_dominance_info (CDI_DOMINATORS);
2128 return todo;
2129 }
2130
2131
2132 static bool
2133 gate_ccp (void)
2134 {
2135 return flag_tree_ccp != 0;
2136 }
2137
2138
2139 struct gimple_opt_pass pass_ccp =
2140 {
2141 {
2142 GIMPLE_PASS,
2143 "ccp", /* name */
2144 gate_ccp, /* gate */
2145 do_ssa_ccp, /* execute */
2146 NULL, /* sub */
2147 NULL, /* next */
2148 0, /* static_pass_number */
2149 TV_TREE_CCP, /* tv_id */
2150 PROP_cfg | PROP_ssa, /* properties_required */
2151 0, /* properties_provided */
2152 0, /* properties_destroyed */
2153 0, /* todo_flags_start */
2154 TODO_verify_ssa
2155 | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
2156 }
2157 };
2158
2159
2160
2161 /* Try to optimize out __builtin_stack_restore. Optimize it out
2162 if there is another __builtin_stack_restore in the same basic
2163 block and no calls or ASM_EXPRs are in between, or if this block's
2164 only outgoing edge is to EXIT_BLOCK and there are no calls or
2165 ASM_EXPRs after this __builtin_stack_restore. */
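/* A hedged GIMPLE sketch of the first case (names hypothetical):

     __builtin_stack_restore (saved_1);   <-- redundant, optimized out
     a_2 = b_3 + 1;                       <-- no calls or asms in between
     __builtin_stack_restore (saved_1);   <-- the later restore suffices  */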
2166
2167 static tree
2168 optimize_stack_restore (gimple_stmt_iterator i)
2169 {
2170 tree callee;
2171 gimple stmt;
2172
2173 basic_block bb = gsi_bb (i);
2174 gimple call = gsi_stmt (i);
2175
2176 if (gimple_code (call) != GIMPLE_CALL
2177 || gimple_call_num_args (call) != 1
2178 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2179 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2180 return NULL_TREE;
2181
2182 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2183 {
2184 stmt = gsi_stmt (i);
2185 if (gimple_code (stmt) == GIMPLE_ASM)
2186 return NULL_TREE;
2187 if (gimple_code (stmt) != GIMPLE_CALL)
2188 continue;
2189
2190 callee = gimple_call_fndecl (stmt);
2191 if (!callee
2192 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2193 /* All regular builtins are ok, just obviously not alloca. */
2194 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2195 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2196 return NULL_TREE;
2197
2198 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2199 goto second_stack_restore;
2200 }
2201
2202 if (!gsi_end_p (i))
2203 return NULL_TREE;
2204
2205 /* Allow zero successors, or a single successor going to the exit block.  */
2206 switch (EDGE_COUNT (bb->succs))
2207 {
2208 case 0:
2209 break;
2210 case 1:
2211 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
2212 return NULL_TREE;
2213 break;
2214 default:
2215 return NULL_TREE;
2216 }
2217 second_stack_restore:
2218
2219 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2220 If there are multiple uses, then the last one should remove the call.
2221 In any case, whether the call to __builtin_stack_save can be removed
2222 or not is irrelevant to removing the call to __builtin_stack_restore. */
2223 if (has_single_use (gimple_call_arg (call, 0)))
2224 {
2225 gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2226 if (is_gimple_call (stack_save))
2227 {
2228 callee = gimple_call_fndecl (stack_save);
2229 if (callee
2230 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2231 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2232 {
2233 gimple_stmt_iterator stack_save_gsi;
2234 tree rhs;
2235
2236 stack_save_gsi = gsi_for_stmt (stack_save);
2237 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2238 update_call_from_tree (&stack_save_gsi, rhs);
2239 }
2240 }
2241 }
2242
2243 /* No effect, so the statement will be deleted. */
2244 return integer_zero_node;
2245 }
2246
2247 /* If the va_list type is a simple pointer and nothing special is needed,
2248    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2249    optimize __builtin_va_end (&ap) away as a no-op, and turn __builtin_va_copy
2250    into a simple pointer assignment.  */
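/* For instance (a hedged sketch, assuming the target's va_list is a plain
   char * and ap, dst and src have that type):

     __builtin_va_start (&ap, 0);    becomes   ap = __builtin_next_arg (0);
     __builtin_va_copy (&dst, src);  becomes   dst = src;
     __builtin_va_end (&ap);         is deleted as a no-op.  */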
2251
2252 static tree
2253 optimize_stdarg_builtin (gimple call)
2254 {
2255 tree callee, lhs, rhs, cfun_va_list;
2256 bool va_list_simple_ptr;
2257 location_t loc = gimple_location (call);
2258
2259 if (gimple_code (call) != GIMPLE_CALL)
2260 return NULL_TREE;
2261
2262 callee = gimple_call_fndecl (call);
2263
2264 cfun_va_list = targetm.fn_abi_va_list (callee);
2265 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2266 && (TREE_TYPE (cfun_va_list) == void_type_node
2267 || TREE_TYPE (cfun_va_list) == char_type_node);
2268
2269 switch (DECL_FUNCTION_CODE (callee))
2270 {
2271 case BUILT_IN_VA_START:
2272 if (!va_list_simple_ptr
2273 || targetm.expand_builtin_va_start != NULL
2274 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2275 return NULL_TREE;
2276
2277 if (gimple_call_num_args (call) != 2)
2278 return NULL_TREE;
2279
2280 lhs = gimple_call_arg (call, 0);
2281 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2282 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2283 != TYPE_MAIN_VARIANT (cfun_va_list))
2284 return NULL_TREE;
2285
2286 lhs = build_fold_indirect_ref_loc (loc, lhs);
2287 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2288 1, integer_zero_node);
2289 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2290 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2291
2292 case BUILT_IN_VA_COPY:
2293 if (!va_list_simple_ptr)
2294 return NULL_TREE;
2295
2296 if (gimple_call_num_args (call) != 2)
2297 return NULL_TREE;
2298
2299 lhs = gimple_call_arg (call, 0);
2300 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2301 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2302 != TYPE_MAIN_VARIANT (cfun_va_list))
2303 return NULL_TREE;
2304
2305 lhs = build_fold_indirect_ref_loc (loc, lhs);
2306 rhs = gimple_call_arg (call, 1);
2307 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2308 != TYPE_MAIN_VARIANT (cfun_va_list))
2309 return NULL_TREE;
2310
2311 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2312 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2313
2314 case BUILT_IN_VA_END:
2315 /* No effect, so the statement will be deleted. */
2316 return integer_zero_node;
2317
2318 default:
2319 gcc_unreachable ();
2320 }
2321 }
2322
2323 /* Attempt to make the block containing the __builtin_unreachable at I
2324    unreachable by changing the incoming jumps.  Return true if at least one jump was changed.  */
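/* A hedged sketch (block numbers hypothetical): if bb 5 starts with
   __builtin_unreachable () and a predecessor ends in

     if (x_1 > 0) goto <bb 5>; else goto <bb 6>;

   the condition is rewritten with gimple_cond_make_false so the edge
   into bb 5 can never be taken.  */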
2325
2326 static bool
2327 optimize_unreachable (gimple_stmt_iterator i)
2328 {
2329 basic_block bb = gsi_bb (i);
2330 gimple_stmt_iterator gsi;
2331 gimple stmt;
2332 edge_iterator ei;
2333 edge e;
2334 bool ret;
2335
2336 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2337 {
2338 stmt = gsi_stmt (gsi);
2339
2340 if (is_gimple_debug (stmt))
2341 continue;
2342
2343 if (gimple_code (stmt) == GIMPLE_LABEL)
2344 {
2345 /* Verify we do not need to preserve the label. */
2346 if (FORCED_LABEL (gimple_label_label (stmt)))
2347 return false;
2348
2349 continue;
2350 }
2351
2352 /* Only handle the case that __builtin_unreachable is the first statement
2353 in the block. We rely on DCE to remove stmts without side-effects
2354 before __builtin_unreachable. */
2355 if (gsi_stmt (gsi) != gsi_stmt (i))
2356 return false;
2357 }
2358
2359 ret = false;
2360 FOR_EACH_EDGE (e, ei, bb->preds)
2361 {
2362 gsi = gsi_last_bb (e->src);
2363 if (gsi_end_p (gsi))
2364 continue;
2365
2366 stmt = gsi_stmt (gsi);
2367 if (gimple_code (stmt) == GIMPLE_COND)
2368 {
2369 if (e->flags & EDGE_TRUE_VALUE)
2370 gimple_cond_make_false (stmt);
2371 else if (e->flags & EDGE_FALSE_VALUE)
2372 gimple_cond_make_true (stmt);
2373 else
2374 gcc_unreachable ();
2375 }
2376 else
2377 {
2378 /* TODO: handle other cases, e.g. a switch statement.  */
2379 continue;
2380 }
2381
2382 ret = true;
2383 }
2384
2385 return ret;
2386 }
2387
2388 /* A simple pass that attempts to fold all builtin functions. This pass
2389 is run after we've propagated as many constants as we can. */
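/* Hedged examples of the kind of folding done below (SSA names
   hypothetical): a leftover

     t_1 = __builtin_constant_p (x_2);

   that has not become integer_one_node by this point is resolved to 0,
   and __builtin_assume_aligned (p_3, 16) is simply replaced by its first
   argument p_3.  */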
2390
2391 static unsigned int
2392 execute_fold_all_builtins (void)
2393 {
2394 bool cfg_changed = false;
2395 basic_block bb;
2396 unsigned int todoflags = 0;
2397
2398 FOR_EACH_BB (bb)
2399 {
2400 gimple_stmt_iterator i;
2401 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2402 {
2403 gimple stmt, old_stmt;
2404 tree callee, result;
2405 enum built_in_function fcode;
2406
2407 stmt = gsi_stmt (i);
2408
2409 if (gimple_code (stmt) != GIMPLE_CALL)
2410 {
2411 gsi_next (&i);
2412 continue;
2413 }
2414 callee = gimple_call_fndecl (stmt);
2415 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2416 {
2417 gsi_next (&i);
2418 continue;
2419 }
2420 fcode = DECL_FUNCTION_CODE (callee);
2421
2422 result = gimple_fold_builtin (stmt);
2423
2424 if (result)
2425 gimple_remove_stmt_histograms (cfun, stmt);
2426
2427 if (!result)
2428 switch (DECL_FUNCTION_CODE (callee))
2429 {
2430 case BUILT_IN_CONSTANT_P:
2431 /* Resolve __builtin_constant_p. If it hasn't been
2432 folded to integer_one_node by now, it's fairly
2433 certain that the value simply isn't constant. */
2434 result = integer_zero_node;
2435 break;
2436
2437 case BUILT_IN_ASSUME_ALIGNED:
2438 /* Remove __builtin_assume_aligned. */
2439 result = gimple_call_arg (stmt, 0);
2440 break;
2441
2442 case BUILT_IN_STACK_RESTORE:
2443 result = optimize_stack_restore (i);
2444 if (result)
2445 break;
2446 gsi_next (&i);
2447 continue;
2448
2449 case BUILT_IN_UNREACHABLE:
2450 if (optimize_unreachable (i))
2451 cfg_changed = true;
2452 break;
2453
2454 case BUILT_IN_VA_START:
2455 case BUILT_IN_VA_END:
2456 case BUILT_IN_VA_COPY:
2457 /* These shouldn't be folded before pass_stdarg. */
2458 result = optimize_stdarg_builtin (stmt);
2459 if (result)
2460 break;
2461 /* FALLTHRU */
2462
2463 default:
2464 gsi_next (&i);
2465 continue;
2466 }
2467
2468 if (result == NULL_TREE)
2469 break;
2470
2471 if (dump_file && (dump_flags & TDF_DETAILS))
2472 {
2473 fprintf (dump_file, "Simplified\n ");
2474 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2475 }
2476
2477 old_stmt = stmt;
2478 if (!update_call_from_tree (&i, result))
2479 {
2480 gimplify_and_update_call_from_tree (&i, result);
2481 todoflags |= TODO_update_address_taken;
2482 }
2483
2484 stmt = gsi_stmt (i);
2485 update_stmt (stmt);
2486
2487 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2488 && gimple_purge_dead_eh_edges (bb))
2489 cfg_changed = true;
2490
2491 if (dump_file && (dump_flags & TDF_DETAILS))
2492 {
2493 fprintf (dump_file, "to\n ");
2494 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2495 fprintf (dump_file, "\n");
2496 }
2497
2498 /* Retry the same statement if it changed into another
2499 builtin, there might be new opportunities now. */
2500 if (gimple_code (stmt) != GIMPLE_CALL)
2501 {
2502 gsi_next (&i);
2503 continue;
2504 }
2505 callee = gimple_call_fndecl (stmt);
2506 if (!callee
2507 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2508 || DECL_FUNCTION_CODE (callee) == fcode)
2509 gsi_next (&i);
2510 }
2511 }
2512
2513 /* Delete unreachable blocks. */
2514 if (cfg_changed)
2515 todoflags |= TODO_cleanup_cfg;
2516
2517 return todoflags;
2518 }
2519
2520
2521 struct gimple_opt_pass pass_fold_builtins =
2522 {
2523 {
2524 GIMPLE_PASS,
2525 "fab", /* name */
2526 NULL, /* gate */
2527 execute_fold_all_builtins, /* execute */
2528 NULL, /* sub */
2529 NULL, /* next */
2530 0, /* static_pass_number */
2531 TV_NONE, /* tv_id */
2532 PROP_cfg | PROP_ssa, /* properties_required */
2533 0, /* properties_provided */
2534 0, /* properties_destroyed */
2535 0, /* todo_flags_start */
2536 TODO_verify_ssa
2537 | TODO_update_ssa /* todo_flags_finish */
2538 }
2539 };