/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010, 2011, 2012 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c). Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants. The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names. Given an SSA name V_i, it may take one of the
   following values:

   UNINITIALIZED  ->  the initial state of the value. This value
                      is replaced with a correct initial value
                      the first time the value is used, so the
                      rest of the pass does not need to care about
                      it. Using this value simplifies initialization
                      of the pass, and prevents us from needlessly
                      scanning statements that are never reached.

   UNDEFINED      ->  V_i is a local variable whose definition
                      has not been processed yet. Therefore we
                      don't yet know if its value is a constant
                      or not.

   CONSTANT       ->  V_i has been found to hold a constant
                      value C.

   VARYING        ->  V_i cannot take a constant value, or if it
                      does, it is not possible to determine it
                      at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false. When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it. This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value. This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.


   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C. This
      evaluation is known as the "meet operation". Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
        is ignored. This is useful in cases like this:

            if (PRED)
              a_9 = 3;
            else
              a_10 = 100;
            a_11 = PHI (a_9, a_10)

        If PRED is known to always evaluate to false, then we can
        assume that a_11 will always take its value from a_10, meaning
        that instead of considering it VARYING (a_9 and a_10 have
        different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
        the outcome of the meet operation. If a variable V_i has an
        UNDEFINED value, it means that either its defining statement
        hasn't been visited yet or V_i has no defining statement, in
        which case the original symbol 'V' is being used
        uninitialized. Since 'V' is a local variable, the compiler
        may assume any initial value for it.
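
        For instance, in the example above, if a_10 were still UNDEFINED
        when the PHI node is visited (its defining statement not yet
        simulated), the meet of CONSTANT 3 and UNDEFINED would simply be
        CONSTANT 3: the UNDEFINED argument is ignored.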


   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "gimple-fold.h"
#include "params.h"
#include "hash-table.h"


/* Possible lattice values. */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
  /* Lattice value. */
  ccp_lattice_t lattice_val;

  /* Propagated value. */
  tree value;

  /* Mask that applies to the propagated value during CCP. For
     X with a CONSTANT lattice value X & ~mask == value & ~mask. */
  double_int mask;
};
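
/* A worked example of the mask semantics above (illustration only):
   with VALUE == 8 (binary 1000) and MASK == 3 (binary 0011), the two
   low bits of X are unknown and all remaining bits are known to match
   VALUE, so X is one of 8, 9, 10 or 11. A mask of all zeros means X
   is fully known; a mask of all ones carries no information at all. */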

typedef struct prop_value_d prop_value_t;

/* Array of propagated constant values. After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). */
static prop_value_t *const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);

/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
          || double_int_zero_p (val.mask))
        {
          fprintf (outf, "%sCONSTANT ", prefix);
          print_generic_expr (outf, val.value, dump_flags);
        }
      else
        {
          double_int cval = double_int_and_not (tree_to_double_int (val.value),
                                                val.mask);
          fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
                   prefix, cval.high, cval.low);
          fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
                   val.mask.high, val.mask.low);
        }
      break;
    default:
      gcc_unreachable ();
    }
}


/* Print lattice value VAL to stderr. */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}


/* Compute a default value for variable VAR and store it in the
   CONST_VAL array. The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED. This is useful when
      considering PHI nodes. PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING. */

static prop_value_t
get_default_value (tree var)
{
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
         before being initialized. If VAR is a local variable, we
         can assume initially that it is UNDEFINED, otherwise we must
         consider it VARYING. */
      if (!virtual_operand_p (var)
          && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
        val.lattice_val = UNDEFINED;
      else
        {
          val.lattice_val = VARYING;
          val.mask = double_int_minus_one;
        }
    }
  else if (is_gimple_assign (stmt)
           /* Value-returning GIMPLE_CALL statements assign to
              a variable, and are treated similarly to GIMPLE_ASSIGN. */
           || (is_gimple_call (stmt)
               && gimple_call_lhs (stmt) != NULL_TREE)
           || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
          && DECL_P (gimple_assign_rhs1 (stmt))
          && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
        {
          val.lattice_val = CONSTANT;
          val.value = cst;
        }
      else
        /* Any other variable defined by an assignment or a PHI node
           is considered UNDEFINED. */
        val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value. */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}


/* Get the constant value associated with variable VAR. */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}

/* Return the constant tree value associated with VAR. */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
        return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
          || double_int_zero_p (val->mask)))
    return val->value;
  return NULL_TREE;
}

/* Sets the value associated with VAR to VARYING. */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}

/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
   This is to fix the following problem (see PR 29921): Suppose we have

     x = 0.0 * y

   and we set value of y to NaN. This causes value of x to be set to NaN.
   When we later determine that y is in fact VARYING, fold uses the fact
   that HONOR_NANS is false, and we try to change the value of x to 0,
   causing an ICE. With HONOR_NANS being false, the real appearance of
   NaN would cause undefined behavior, though, so claiming that y (and x)
   are UNDEFINED initially is correct. */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}

/* Return whether the lattice transition is valid. */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value. */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT. */

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment. */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits. */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return double_int_equal_p
             (double_int_and_not (tree_to_double_int (old_val.value),
                                  new_val.mask),
              double_int_and_not (tree_to_double_int (new_val.value),
                                  new_val.mask));

  /* Otherwise constant values have to agree. */
  return operand_equal_p (old_val.value, new_val.value, 0);
}

/* Set the value for variable VAR to NEW_VAL. Return true if the new
   value is different from VAR's previous value. */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here. */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this. */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = double_int_xor (tree_to_double_int (new_val.value),
                             tree_to_double_int (old_val->value));
      new_val.mask = double_int_ior (new_val.mask,
                                     double_int_ior (old_val->mask, diff));
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition. */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
          && TREE_CODE (new_val.value) == INTEGER_CST
          && (TREE_CODE (old_val->value) != INTEGER_CST
              || !double_int_equal_p (new_val.mask, old_val->mask))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
         partially constants here. */

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
          fprintf (dump_file, ". Adding SSA edges to worklist.\n");
        }

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}

static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
                               tree, double_int, double_int,
                               tree, double_int, double_int);

/* Return a double_int that can be used for bitwise simplifications
   from VAL. */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);
  else
    return double_int_zero;
}

/* Return the value for the address expression EXPR based on alignment
   information. */

static prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask
    = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
                          ? double_int_mask (TYPE_PRECISION (type))
                          : double_int_minus_one,
                          uhwi_to_double_int (align / BITS_PER_UNIT - 1));
  val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value
      = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
  else
    val.value = NULL_TREE;

  return val;
}
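
/* For example (illustration only): if get_pointer_alignment_1 reports
   a 16-byte alignment (ALIGN == 128 bits) with BITPOS == 32 bits, the
   mask above gets its low four bits cleared while the remaining bits
   of the pointer's precision stay set, and the value becomes 4: the
   pointer is known to equal 4 modulo 16. */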

/* Return the value for the tree operand EXPR. If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses. */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
          && val.lattice_val == CONSTANT
          && TREE_CODE (val.value) == ADDR_EXPR)
        val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
           && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }
  return val;
}

/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of the operands of STMT causes its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING. */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt. */
  gcc_assert (code == GIMPLE_ASSIGN
              || code == GIMPLE_CALL
              || code == GIMPLE_COND
              || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value. */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases. */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
        has_undefined_operand = true;
      else
        all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
        has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands. For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs. */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
        continue;
      if (is_gimple_min_invariant (op))
        has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined. */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
        {
        /* Unary operators are handled with all_undefined_operands. */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case POINTER_PLUS_EXPR:
          /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
             Not bitwise operators, one VARYING operand may specify the
             result completely. Not logical operators for the same reason.
             Not COMPLEX_EXPR as one VARYING operand makes the result partly
             not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
             the undefined operand may be promoted. */
          return UNDEFINED;

        case ADDR_EXPR:
          /* If any part of an address is UNDEFINED, like the index
             of an ARRAY_EXPR, then treat the result as UNDEFINED. */
          return UNDEFINED;

        default:
          ;
        }
    }
  /* If there was an UNDEFINED operand but the result may not be
     UNDEFINED, fall back to CONSTANT. During iteration UNDEFINED
     may still drop to CONSTANT. */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant. */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}

/* Returns true if STMT cannot be constant. */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant. */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call that does not return a value, or is a direct call
     to a function that is not a builtin, it is varying. */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
          || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
              && !DECL_BUILT_IN (fndecl)))
        return true;
    }

  /* Any other store operation is not interesting. */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps is not
     interesting for CCP. */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}

/* Initialize local data structures for CCP. */

static void
ccp_initialize (void)
{
  basic_block bb;

  const_val = XCNEWVEC (prop_value_t, num_ssa_names);

  /* Initialize simulation flags for PHI nodes and statements. */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gimple stmt = gsi_stmt (i);
          bool is_varying;

          /* If the statement is a control insn, we always want to
             simulate it at least once. Failure to do so means that
             those edges will never get added. */
          if (stmt_ends_bb_p (stmt))
            is_varying = false;
          else
            is_varying = surely_varying_stmt_p (stmt);

          if (is_varying)
            {
              tree def;
              ssa_op_iter iter;

              /* If the statement will not produce a constant, mark
                 all its outputs VARYING. */
              FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
                set_value_varying (def);
            }
          prop_set_simulate_again (stmt, !is_varying);
        }
    }

  /* Now process PHI nodes. We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp. */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gimple phi = gsi_stmt (i);

          if (virtual_operand_p (gimple_phi_result (phi)))
            prop_set_simulate_again (phi, false);
          else
            prop_set_simulate_again (phi, true);
        }
    }
}

/* Debug count support. Reset the values of ssa names to VARYING when
   the total number of ssa names analyzed is beyond the debug count
   specified. */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
        {
          const_val[i].lattice_val = VARYING;
          const_val[i].mask = double_int_minus_one;
          const_val[i].value = NULL_TREE;
        }
    }
}


/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized. */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice. */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      unsigned int tem, align;

      if (!name
          || !POINTER_TYPE_P (TREE_TYPE (name)))
        continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
          || TREE_CODE (val->value) != INTEGER_CST)
        continue;

      /* Trailing constant bits specify the alignment, trailing value
         bits the misalignment. */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align > 1)
        set_ptr_info_alignment (get_ptr_info (name), align,
                                TREE_INT_CST_LOW (val->value) & (align - 1));
    }

  /* Perform substitutions based on the known constant values. */
  something_changed = substitute_and_fold (get_constant_value,
                                           ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}


/* Compute the meet operator between *VAL1 and *VAL2. Store the result
   in VAL1.

   any M UNDEFINED = any
   any M VARYING = VARYING
   Ci M Cj = Ci if (i == j)
   Ci M Cj = VARYING if (i != j)
   */

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
         Nothing to do. VAL1 already contains the value we want. */
      ;
    }
  else if (val1->lattice_val == VARYING
           || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING. */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && TREE_CODE (val1->value) == INTEGER_CST
           && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci if (i == j)
         Ci M Cj = VARYING if (i != j)

         For INTEGER_CSTs mask unequal bits. If no equal bits remain,
         drop to varying. */
      val1->mask
        = double_int_ior (double_int_ior (val1->mask,
                                          val2->mask),
                          double_int_xor (tree_to_double_int (val1->value),
                                          tree_to_double_int (val2->value)));
      if (double_int_minus_one_p (val1->mask))
        {
          val1->lattice_val = VARYING;
          val1->value = NULL_TREE;
        }
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci if (i == j)
         Ci M Cj = VARYING if (i != j)

         VAL1 already contains the value we want for equivalent values. */
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && (TREE_CODE (val1->value) == ADDR_EXPR
               || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When unequal addresses are involved, try meeting for
         alignment. */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
        *val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
        tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING. */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}
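
/* A worked example of the INTEGER_CST meet above (illustration only):
   meeting CONSTANT 4 (binary 100) with CONSTANT 6 (binary 110), both
   with zero masks, yields mask 4 XOR 6 == 2: bit 1 becomes unknown
   while the other bits stay known, describing "1x0". The result only
   drops to VARYING when the combined mask is all ones. */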


/* Loop through the PHI node's arguments and compare their lattice
   values to determine the PHI node's lattice value. The value of a
   PHI node is determined by calling ccp_lattice_meet with all the
   arguments of the PHI node that are incoming via executable edges. */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      new_val = *old_val;
      break;

    case UNDEFINED:
      new_val.lattice_val = UNDEFINED;
      new_val.value = NULL_TREE;
      break;

    default:
      gcc_unreachable ();
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
         through executable edges. */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\n Argument #%d (%d -> %d %sexecutable)\n",
                   i, e->src->index, e->dest->index,
                   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
        }

      /* If the incoming edge is executable, compute the meet operator for
         the existing value of the PHI node and the current PHI argument. */
      if (e->flags & EDGE_EXECUTABLE)
        {
          tree arg = gimple_phi_arg (phi, i)->def;
          prop_value_t arg_val = get_value_for_expr (arg, false);

          ccp_lattice_meet (&new_val, &arg_val);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\t");
              print_generic_expr (dump_file, arg, dump_flags);
              dump_lattice_value (dump_file, "\tValue: ", arg_val);
              fprintf (dump_file, "\n");
            }

          if (new_val.lattice_val == VARYING)
            break;
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value. */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
        return SSA_PROP_VARYING;
      else
        return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}

/* Return the constant value for OP or OP otherwise. */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
        return tem;
    }
  return op;
}

/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE. */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        /* Handle comparison operators that can appear in GIMPLE form. */
        tree op0 = valueize_op (gimple_cond_lhs (stmt));
        tree op1 = valueize_op (gimple_cond_rhs (stmt));
        enum tree_code code = gimple_cond_code (stmt);
        return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
        /* Return the constant switch index. */
        return valueize_op (gimple_switch_index (stmt));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);

    default:
      gcc_unreachable ();
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result. */

static void
bit_value_unop_1 (enum tree_code code, tree type,
                  double_int *val, double_int *mask,
                  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = double_int_not (rval);
      break;

    case NEGATE_EXPR:
      {
        double_int temv, temm;
        /* Return ~rval + 1. */
        bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
                           type, temv, temm,
                           type, double_int_one, double_int_zero);
        break;
      }

    CASE_CONVERT:
      {
        bool uns;

        /* First extend mask and value according to the original type. */
        uns = TYPE_UNSIGNED (rtype);
        *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
        *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);

        /* Then extend mask and value according to the target type. */
        uns = TYPE_UNSIGNED (type);
        *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
        *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
        break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}

/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */

static void
bit_value_binop_1 (enum tree_code code, tree type,
                   double_int *val, double_int *mask,
                   tree r1type, double_int r1val, double_int r1mask,
                   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = TYPE_UNSIGNED (type);
  /* Assume we'll get a constant result. Use an initial varying value,
     we fall back to varying in the end if necessary. */
  *mask = double_int_minus_one;
  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
         set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
      *mask = double_int_and (double_int_ior (r1mask, r2mask),
                              double_int_and (double_int_ior (r1val, r1mask),
                                              double_int_ior (r2val, r2mask)));
      *val = double_int_and (r1val, r2val);
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
         set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
      *mask = double_int_and_not
                (double_int_ior (r1mask, r2mask),
                 double_int_ior (double_int_and_not (r1val, r1mask),
                                 double_int_and_not (r2val, r2mask)));
      *val = double_int_ior (r1val, r2val);
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2 */
      *mask = double_int_ior (r1mask, r2mask);
      *val = double_int_xor (r1val, r2val);
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (double_int_zero_p (r2mask))
        {
          HOST_WIDE_INT shift = r2val.low;
          if (code == RROTATE_EXPR)
            shift = -shift;
          *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
          *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
        }
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
         their sign. That way we can tell that (x << (y | 8)) & 255
         is zero. */
      if (double_int_zero_p (r2mask))
        {
          HOST_WIDE_INT shift = r2val.low;
          if (code == RSHIFT_EXPR)
            shift = -shift;
          /* We need to know if we are doing a left or a right shift
             to properly shift in zeros for left shift and unsigned
             right shifts and the sign bit for signed right shifts.
             For signed right shifts we shift in varying in case
             the sign bit was varying. */
          if (shift > 0)
            {
              *mask = double_int_lshift (r1mask, shift,
                                         TYPE_PRECISION (type), false);
              *val = double_int_lshift (r1val, shift,
                                        TYPE_PRECISION (type), false);
            }
          else if (shift < 0)
            {
              shift = -shift;
              *mask = double_int_rshift (r1mask, shift,
                                         TYPE_PRECISION (type), !uns);
              *val = double_int_rshift (r1val, shift,
                                        TYPE_PRECISION (type), !uns);
            }
          else
            {
              *mask = r1mask;
              *val = r1val;
            }
        }
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
        double_int lo, hi;
        /* Do the addition with unknown bits set to zero, to give carry-ins of
           zero wherever possible. */
        lo = double_int_add (double_int_and_not (r1val, r1mask),
                             double_int_and_not (r2val, r2mask));
        lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
        /* Do the addition with unknown bits set to one, to give carry-ins of
           one wherever possible. */
        hi = double_int_add (double_int_ior (r1val, r1mask),
                             double_int_ior (r2val, r2mask));
        hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
        /* Each bit in the result is known if (a) the corresponding bits in
           both inputs are known, and (b) the carry-in to that bit position
           is known. We can check condition (b) by seeing if we got the same
           result with minimised carries as with maximised carries. */
        *mask = double_int_ior (double_int_ior (r1mask, r2mask),
                                double_int_xor (lo, hi));
        *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
        /* It shouldn't matter whether we choose lo or hi here. */
        *val = lo;
        break;
      }
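
      /* A worked example of the addition above (illustration only):
         with r1 known to be 4 (value 0100, mask 0000) and r2 either
         0 or 4 (value 0000, mask 0100), LO == 4, HI == 8, and
         LO ^ HI == 1100, so *MASK becomes 0100 | 1100 == 1100: the
         two low bits are known zero while bits 2 and 3 are unknown,
         which matches the two possible sums 4 and 8. */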

    case MINUS_EXPR:
      {
        double_int temv, temm;
        bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
                          r2type, r2val, r2mask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
                           r1type, r1val, r1mask,
                           r2type, temv, temm);
        break;
      }

    case MULT_EXPR:
      {
        /* Just track trailing zeros in both operands and transfer
           them to the other. */
        int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
        int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
        if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
          {
            *mask = double_int_zero;
            *val = double_int_zero;
          }
        else if (r1tz + r2tz > 0)
          {
            *mask = double_int_not (double_int_mask (r1tz + r2tz));
            *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
            *val = double_int_zero;
          }
        break;
      }
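
      /* A worked example of the multiplication above (illustration
         only): if r1 has two known trailing zero bits and r2 has one,
         the product has at least three, so *MASK clears the low three
         bits and *VAL is zero; e.g. 12 * 2 == 24 (binary 11000). */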

    case EQ_EXPR:
    case NE_EXPR:
      {
        double_int m = double_int_ior (r1mask, r2mask);
        if (!double_int_equal_p (double_int_and_not (r1val, m),
                                 double_int_and_not (r2val, m)))
          {
            *mask = double_int_zero;
            *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
          }
        else
          {
            /* We know the result of a comparison is always one or zero. */
            *mask = double_int_one;
            *val = double_int_zero;
          }
        break;
      }

    case GE_EXPR:
    case GT_EXPR:
      {
        double_int tem = r1val;
        r1val = r2val;
        r2val = tem;
        tem = r1mask;
        r1mask = r2mask;
        r2mask = tem;
        code = swap_tree_comparison (code);
      }
      /* Fallthru. */
    case LT_EXPR:
    case LE_EXPR:
      {
        int minmax, maxmin;
        /* If the most significant bits are not known we know nothing. */
        if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
          break;

        /* For comparisons the signedness is in the comparison operands. */
        uns = TYPE_UNSIGNED (r1type);

        /* If we know the most significant bits we know the value
           ranges by means of treating varying bits as zero
           or one. Do a cross comparison of the max/min pairs. */
        maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
                                 double_int_and_not (r2val, r2mask), uns);
        minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
                                 double_int_ior (r2val, r2mask), uns);
        if (maxmin < 0)  /* r1 is less than r2. */
          {
            *mask = double_int_zero;
            *val = double_int_one;
          }
        else if (minmax > 0)  /* r1 is not less or equal to r2. */
          {
            *mask = double_int_zero;
            *val = double_int_zero;
          }
        else if (maxmin == minmax)  /* r1 and r2 are equal. */
          {
            /* This probably should never happen as we'd have
               folded the thing during fully constant value folding. */
            *mask = double_int_zero;
            *val = (code == LE_EXPR ? double_int_one : double_int_zero);
          }
        else
          {
            /* We know the result of a comparison is always one or zero. */
            *mask = double_int_one;
            *val = double_int_zero;
          }
        break;
      }
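
      /* A worked example of the cross comparison above (illustration
         only): for r1 <= r2 with r1 in "0x" (value 0, mask 1, i.e. 0
         or 1) and r2 known to be 2, MAXMIN compares 1 with 2 and is
         negative, so r1 is always less than r2 and the result is the
         known constant 1. */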

    default:;
    }
}

/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE. */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
               && TREE_CODE (rval.value) == INTEGER_CST)
              || double_int_minus_one_p (rval.mask));
  bit_value_unop_1 (code, type, &value, &mask,
                    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here. */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}

/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE. */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
               && TREE_CODE (r1val.value) == INTEGER_CST)
              || double_int_minus_one_p (r1val.mask));
  gcc_assert ((r2val.lattice_val == CONSTANT
               && TREE_CODE (r2val.value) == INTEGER_CST)
              || double_int_minus_one_p (r2val.mask));
  bit_value_binop_1 (code, type, &value, &mask,
                     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
                     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here. */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}

/* Return the propagation value when applying __builtin_assume_aligned to
   its arguments. */

static prop_value_t
bit_value_assume_aligned (gimple stmt)
{
  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
  tree type = TREE_TYPE (ptr);
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  prop_value_t ptrval = get_value_for_expr (ptr, true);
  prop_value_t alignval;
  double_int value, mask;
  prop_value_t val;
  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
               && TREE_CODE (ptrval.value) == INTEGER_CST)
              || double_int_minus_one_p (ptrval.mask));
  align = gimple_call_arg (stmt, 1);
  if (!host_integerp (align, 1))
    return ptrval;
  aligni = tree_low_cst (align, 1);
  if (aligni <= 1
      || (aligni & (aligni - 1)) != 0)
    return ptrval;
  if (gimple_call_num_args (stmt) > 2)
    {
      misalign = gimple_call_arg (stmt, 2);
      if (!host_integerp (misalign, 1))
        return ptrval;
      misaligni = tree_low_cst (misalign, 1);
      if (misaligni >= aligni)
        return ptrval;
    }
  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
                     type, value_to_double_int (ptrval), ptrval.mask,
                     type, value_to_double_int (alignval), alignval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.low & (aligni - 1)) == 0);
      gcc_assert ((value.low & (aligni - 1)) == 0);
      value.low |= misaligni;
      /* ??? Delay building trees here. */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}

/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches. */

static prop_value_t
evaluate_stmt (gimple stmt)
{
  prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
        {
        case CONSTANT:
          fprintf (dump_file, "CONSTANT");
          break;
        case UNDEFINED:
          fprintf (dump_file, "UNDEFINED");
          break;
        case VARYING:
          fprintf (dump_file, "VARYING");
          break;
        default:;
        }
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value. */
  /* FIXME. This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit. */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
        {
          /* The statement produced a constant value. */
          val.lattice_val = CONSTANT;
          val.value = simplified;
          val.mask = double_int_zero;
        }
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement. */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
        {
          enum tree_code subcode = gimple_assign_rhs_code (stmt);

          /* Other cases cannot satisfy is_gimple_min_invariant
             without folding. */
          if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
            simplified = gimple_assign_rhs1 (stmt);
        }
      else if (code == GIMPLE_SWITCH)
        simplified = gimple_switch_index (stmt);
      else
        /* These cannot satisfy is_gimple_min_invariant without folding. */
        gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
        {
          /* The statement produced a constant value. */
          val.lattice_val = CONSTANT;
          val.value = simplified;
          val.mask = double_int_zero;
        }
    }

  /* Resort to simplification for bitwise tracking. */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      tree fndecl;
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
        {
          enum tree_code subcode = gimple_assign_rhs_code (stmt);
          tree rhs1 = gimple_assign_rhs1 (stmt);
          switch (get_gimple_rhs_class (subcode))
            {
            case GIMPLE_SINGLE_RHS:
              if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                val = get_value_for_expr (rhs1, true);
              break;

            case GIMPLE_UNARY_RHS:
              if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
                      || POINTER_TYPE_P (gimple_expr_type (stmt))))
                val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
              break;

            case GIMPLE_BINARY_RHS:
              if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
                  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
                {
                  tree lhs = gimple_assign_lhs (stmt);
                  tree rhs2 = gimple_assign_rhs2 (stmt);
                  val = bit_value_binop (subcode,
                                         TREE_TYPE (lhs), rhs1, rhs2);
                }
              break;

            default:;
            }
        }
      else if (code == GIMPLE_COND)
        {
          enum tree_code code = gimple_cond_code (stmt);
          tree rhs1 = gimple_cond_lhs (stmt);
          tree rhs2 = gimple_cond_rhs (stmt);
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
              || POINTER_TYPE_P (TREE_TYPE (rhs1)))
            val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
        }
      else if (code == GIMPLE_CALL
               && (fndecl = gimple_call_fndecl (stmt))
               && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
        {
          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case BUILT_IN_MALLOC:
            case BUILT_IN_REALLOC:
            case BUILT_IN_CALLOC:
            case BUILT_IN_STRDUP:
            case BUILT_IN_STRNDUP:
              val.lattice_val = CONSTANT;
              val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
              val.mask = shwi_to_double_int
                           (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
                              / BITS_PER_UNIT - 1));
              break;

            case BUILT_IN_ALLOCA:
            case BUILT_IN_ALLOCA_WITH_ALIGN:
              align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
                       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
                       : BIGGEST_ALIGNMENT);
              val.lattice_val = CONSTANT;
              val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
              val.mask = shwi_to_double_int
                           (~(((HOST_WIDE_INT) align)
                              / BITS_PER_UNIT - 1));
              break;

            /* These builtins return their first argument, unmodified. */
            case BUILT_IN_MEMCPY:
            case BUILT_IN_MEMMOVE:
            case BUILT_IN_MEMSET:
            case BUILT_IN_STRCPY:
            case BUILT_IN_STRNCPY:
            case BUILT_IN_MEMCPY_CHK:
            case BUILT_IN_MEMMOVE_CHK:
            case BUILT_IN_MEMSET_CHK:
            case BUILT_IN_STRCPY_CHK:
            case BUILT_IN_STRNCPY_CHK:
              val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
              break;

            case BUILT_IN_ASSUME_ALIGNED:
              val = bit_value_assume_aligned (stmt);
              break;

            default:;
            }
        }
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value. If the statement
         had UNDEFINED operands, then the result of the statement
         should be UNDEFINED. Otherwise, the statement is VARYING. */
      if (likelyvalue == UNDEFINED)
        {
          val.lattice_val = likelyvalue;
          val.mask = double_int_zero;
        }
      else
        {
          val.lattice_val = VARYING;
          val.mask = double_int_minus_one;
        }

      val.value = NULL_TREE;
    }

  return val;
}

typedef hash_table <gimple_statement_d, typed_pointer_hash<gimple_statement_d>,
                    typed_pointer_equal<gimple_statement_d>,
                    typed_null_remove<gimple_statement_d> >
  gimple_htab;

/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
                                     gimple_htab *visited)
{
  gimple stmt, clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
        clobber = build_constructor (TREE_TYPE (var), NULL);
        TREE_THIS_VOLATILE (clobber) = 1;
        clobber_stmt = gimple_build_assign (var, clobber);

        i = gsi_for_stmt (stmt);
        gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
        if (!visited->is_created ())
          visited->create (10);

        slot = visited->find_slot (stmt, INSERT);
        if (*slot != NULL)
          continue;

        *slot = stmt;
        insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
                                             visited);
      }
    else
      gcc_assert (is_gimple_debug (stmt));
}

/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block. */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
        return;

      *i = gsi_last_bb (dom);
    }
}

/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator
   when a previous pass (such as DOM) duplicated it along multiple paths
   to a BB. In that case the function gives up without inserting the
   clobbers. */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple stmt;
  tree saved_val;
  gimple_htab visited;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
        continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
        continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  if (visited.is_created ())
    visited.dispose ();
}

/* Detects a __builtin_alloca_with_align with a constant size argument.
   Declares a fixed-size array and returns its address if one is found;
   otherwise returns NULL_TREE. */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs. */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument. */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !host_integerp (arg, 1))
    return NULL_TREE;

  size = TREE_INT_CST_LOW (arg);

  /* Heuristic: don't fold large allocas. */
  threshold = (unsigned HOST_WIDE_INT) PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size. */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
        && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array. */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type, NULL);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
        bool singleton_p;
        unsigned uid;
        singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
        gcc_assert (singleton_p);
        SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array. */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
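
/* For example (an illustrative sketch; the SSA name and the DECL name
   below are hypothetical, not from the original sources):

     p_5 = __builtin_alloca_with_align (16, 64);

   with a size below the threshold is folded to the address of a fresh
   16-element character array whose DECL_ALIGN is 64 bits:

     char D.1234[16];
     p_5 = (char *) &D.1234;  */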
1844
1845 /* Fold the stmt at *GSI with CCP specific information that propagating
1846 and regular folding does not catch. */
1847
1848 static bool
1849 ccp_fold_stmt (gimple_stmt_iterator *gsi)
1850 {
1851 gimple stmt = gsi_stmt (*gsi);
1852
1853 switch (gimple_code (stmt))
1854 {
1855 case GIMPLE_COND:
1856 {
1857 prop_value_t val;
1858 /* Statement evaluation will handle type mismatches in constants
1859 more gracefully than the final propagation. This allows us to
1860 fold more conditionals here. */
1861 val = evaluate_stmt (stmt);
1862 if (val.lattice_val != CONSTANT
1863 || !double_int_zero_p (val.mask))
1864 return false;
1865
1866 if (dump_file)
1867 {
1868 fprintf (dump_file, "Folding predicate ");
1869 print_gimple_expr (dump_file, stmt, 0, 0);
1870 fprintf (dump_file, " to ");
1871 print_generic_expr (dump_file, val.value, 0);
1872 fprintf (dump_file, "\n");
1873 }
1874
1875 if (integer_zerop (val.value))
1876 gimple_cond_make_false (stmt);
1877 else
1878 gimple_cond_make_true (stmt);
1879
1880 return true;
1881 }
1882
1883 case GIMPLE_CALL:
1884 {
1885 tree lhs = gimple_call_lhs (stmt);
1886 int flags = gimple_call_flags (stmt);
1887 tree val;
1888 tree argt;
1889 bool changed = false;
1890 unsigned i;
1891
1892 /* If the call was folded into a constant make sure it goes
1893 away even if we cannot propagate into all uses because of
1894 type issues. */
1895 if (lhs
1896 && TREE_CODE (lhs) == SSA_NAME
1897 && (val = get_constant_value (lhs))
1898 /* Don't optimize away calls that have side-effects. */
1899 && (flags & (ECF_CONST|ECF_PURE)) != 0
1900 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
1901 {
1902 tree new_rhs = unshare_expr (val);
1903 bool res;
1904 if (!useless_type_conversion_p (TREE_TYPE (lhs),
1905 TREE_TYPE (new_rhs)))
1906 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
1907 res = update_call_from_tree (gsi, new_rhs);
1908 gcc_assert (res);
1909 return true;
1910 }
1911
1912 /* Internal calls provide no argument types, so the extra laxity
1913 for normal calls does not apply. */
1914 if (gimple_call_internal_p (stmt))
1915 return false;
1916
1917 /* The heuristic of fold_builtin_alloca_with_align differs before and
1918 after inlining, so we don't require the arg to have changed into a
1919 constant for folding; it only needs to be constant. */
1920 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
1921 {
1922 tree new_rhs = fold_builtin_alloca_with_align (stmt);
1923 if (new_rhs)
1924 {
1925 bool res = update_call_from_tree (gsi, new_rhs);
1926 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
1927 gcc_assert (res);
1928 insert_clobbers_for_var (*gsi, var);
1929 return true;
1930 }
1931 }
1932
1933 /* Propagate into the call arguments. Compared to replace_uses_in
1934 this can use the argument slot types for type verification
1935 instead of the current argument type. We also can safely
1936 drop qualifiers here as we are dealing with constants anyway. */
1937 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
1938 for (i = 0; i < gimple_call_num_args (stmt) && argt;
1939 ++i, argt = TREE_CHAIN (argt))
1940 {
1941 tree arg = gimple_call_arg (stmt, i);
1942 if (TREE_CODE (arg) == SSA_NAME
1943 && (val = get_constant_value (arg))
1944 && useless_type_conversion_p
1945 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
1946 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
1947 {
1948 gimple_call_set_arg (stmt, i, unshare_expr (val));
1949 changed = true;
1950 }
1951 }
1952
1953 return changed;
1954 }
1955
1956 case GIMPLE_ASSIGN:
1957 {
1958 tree lhs = gimple_assign_lhs (stmt);
1959 tree val;
1960
1961 /* If we have a load that turned out to be constant replace it
1962 as we cannot propagate into all uses in all cases. */
1963 if (gimple_assign_single_p (stmt)
1964 && TREE_CODE (lhs) == SSA_NAME
1965 && (val = get_constant_value (lhs)))
1966 {
1967 tree rhs = unshare_expr (val);
1968 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
1969 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
1970 gimple_assign_set_rhs_from_tree (gsi, rhs);
1971 return true;
1972 }
1973
1974 return false;
1975 }
1976
1977 default:
1978 return false;
1979 }
1980 }
1981
1982 /* Visit the assignment statement STMT. Set the value of its LHS to the
1983 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
1984 creates virtual definitions, set the value of each new name to that
1985 of the RHS (if we can derive a constant out of the RHS).
1986 Value-returning call statements also perform an assignment, and
1987 are handled here. */
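/* E.g. (illustrative): when visiting

     y_2 = x_1 + 1;

   with x_1 known to be CONSTANT 3, the lattice value of y_2 is set to
   CONSTANT 4, y_2 is stored in *OUTPUT_P, and SSA_PROP_INTERESTING is
   returned so that the uses of y_2 are simulated in turn.  */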
1988
1989 static enum ssa_prop_result
1990 visit_assignment (gimple stmt, tree *output_p)
1991 {
1992 prop_value_t val;
1993 enum ssa_prop_result retval;
1994
1995 tree lhs = gimple_get_lhs (stmt);
1996
1997 gcc_assert (gimple_code (stmt) != GIMPLE_CALL
1998 || gimple_call_lhs (stmt) != NULL_TREE);
1999
2000 if (gimple_assign_single_p (stmt)
2001 && gimple_assign_rhs_code (stmt) == SSA_NAME)
2002 /* For a simple copy operation, we copy the lattice values. */
2003 val = *get_value (gimple_assign_rhs1 (stmt));
2004 else
2005 /* Evaluate the statement, which could be
2006 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2007 val = evaluate_stmt (stmt);
2008
2009 retval = SSA_PROP_NOT_INTERESTING;
2010
2011 /* Set the lattice value of the statement's output. */
2012 if (TREE_CODE (lhs) == SSA_NAME)
2013 {
2014 /* If STMT is an assignment to an SSA_NAME, we only have one
2015 value to set. */
2016 if (set_lattice_value (lhs, val))
2017 {
2018 *output_p = lhs;
2019 if (val.lattice_val == VARYING)
2020 retval = SSA_PROP_VARYING;
2021 else
2022 retval = SSA_PROP_INTERESTING;
2023 }
2024 }
2025
2026 return retval;
2027 }
2028
2029
2030 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2031 if it can determine which edge will be taken. Otherwise, return
2032 SSA_PROP_VARYING. */
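/* E.g. (illustrative): for

     if (x_1 > 10)

   with x_1 known to be CONSTANT 42, the edge for the true arm is stored
   in *TAKEN_EDGE_P and only that edge is fed to the propagation engine.  */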
2033
2034 static enum ssa_prop_result
2035 visit_cond_stmt (gimple stmt, edge *taken_edge_p)
2036 {
2037 prop_value_t val;
2038 basic_block block;
2039
2040 block = gimple_bb (stmt);
2041 val = evaluate_stmt (stmt);
2042 if (val.lattice_val != CONSTANT
2043 || !double_int_zero_p (val.mask))
2044 return SSA_PROP_VARYING;
2045
2046 /* Find which edge out of the conditional block will be taken and add it
2047 to the worklist. If no single edge can be determined statically,
2048 return SSA_PROP_VARYING to feed all the outgoing edges to the
2049 propagation engine. */
2050 *taken_edge_p = find_taken_edge (block, val.value);
2051 if (*taken_edge_p)
2052 return SSA_PROP_INTERESTING;
2053 else
2054 return SSA_PROP_VARYING;
2055 }
2056
2057
2058 /* Evaluate statement STMT. If the statement produces an output value and
2059 its evaluation changes the lattice value of its output, return
2060 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2061 output value.
2062
2063 If STMT is a conditional branch and we can determine its truth
2064 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2065 value, return SSA_PROP_VARYING. */
2066
2067 static enum ssa_prop_result
2068 ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
2069 {
2070 tree def;
2071 ssa_op_iter iter;
2072
2073 if (dump_file && (dump_flags & TDF_DETAILS))
2074 {
2075 fprintf (dump_file, "\nVisiting statement:\n");
2076 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2077 }
2078
2079 switch (gimple_code (stmt))
2080 {
2081 case GIMPLE_ASSIGN:
2082 /* If the statement is an assignment that produces a single
2083 output value, evaluate its RHS to see if the lattice value of
2084 its output has changed. */
2085 return visit_assignment (stmt, output_p);
2086
2087 case GIMPLE_CALL:
2088 /* A value-returning call also performs an assignment. */
2089 if (gimple_call_lhs (stmt) != NULL_TREE)
2090 return visit_assignment (stmt, output_p);
2091 break;
2092
2093 case GIMPLE_COND:
2094 case GIMPLE_SWITCH:
2095 /* If STMT is a conditional branch, see if we can determine
2096 which branch will be taken. */
2097 /* FIXME. It appears that we should be able to optimize
2098 computed GOTOs here as well. */
2099 return visit_cond_stmt (stmt, taken_edge_p);
2100
2101 default:
2102 break;
2103 }
2104
2105 /* Any other kind of statement is not interesting for constant
2106 propagation and, therefore, not worth simulating. */
2107 if (dump_file && (dump_flags & TDF_DETAILS))
2108 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2109
2110 /* Definitions made by statements other than assignments to
2111 SSA_NAMEs represent unknown modifications to their outputs.
2112 Mark them VARYING. */
2113 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2114 {
2115 prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
2116 set_lattice_value (def, v);
2117 }
2118
2119 return SSA_PROP_VARYING;
2120 }
2121
2122
2123 /* Main entry point for SSA Conditional Constant Propagation. */
2124
2125 static unsigned int
2126 do_ssa_ccp (void)
2127 {
2128 unsigned int todo = 0;
2129 calculate_dominance_info (CDI_DOMINATORS);
2130 ccp_initialize ();
2131 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2132 if (ccp_finalize ())
2133 todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
2134 free_dominance_info (CDI_DOMINATORS);
2135 return todo;
2136 }
2137
2138
2139 static bool
2140 gate_ccp (void)
2141 {
2142 return flag_tree_ccp != 0;
2143 }
2144
2145
2146 struct gimple_opt_pass pass_ccp =
2147 {
2148 {
2149 GIMPLE_PASS,
2150 "ccp", /* name */
2151 gate_ccp, /* gate */
2152 do_ssa_ccp, /* execute */
2153 NULL, /* sub */
2154 NULL, /* next */
2155 0, /* static_pass_number */
2156 TV_TREE_CCP, /* tv_id */
2157 PROP_cfg | PROP_ssa, /* properties_required */
2158 0, /* properties_provided */
2159 0, /* properties_destroyed */
2160 0, /* todo_flags_start */
2161 TODO_verify_ssa
2162 | TODO_verify_stmts | TODO_ggc_collect /* todo_flags_finish */
2163 }
2164 };
2165
2166
2167
2168 /* Try to optimize out __builtin_stack_restore. Optimize it out
2169 if there is another __builtin_stack_restore in the same basic
2170 block and no calls or ASM_EXPRs are in between, or if this block's
2171 only outgoing edge is to EXIT_BLOCK and there are no calls or
2172 ASM_EXPRs after this __builtin_stack_restore. */
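/* An illustrative sketch (GIMPLE-like pseudo code):

     __builtin_stack_restore (saved_1);   <-- the call at I, removable
     ...                                  <-- no calls or asm in between
     __builtin_stack_restore (saved_2);

   The first restore has no observable effect: nothing between the two
   calls can allocate stack space, and the second restore resets the
   stack pointer regardless.  */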
2173
2174 static tree
2175 optimize_stack_restore (gimple_stmt_iterator i)
2176 {
2177 tree callee;
2178 gimple stmt;
2179
2180 basic_block bb = gsi_bb (i);
2181 gimple call = gsi_stmt (i);
2182
2183 if (gimple_code (call) != GIMPLE_CALL
2184 || gimple_call_num_args (call) != 1
2185 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2186 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2187 return NULL_TREE;
2188
2189 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2190 {
2191 stmt = gsi_stmt (i);
2192 if (gimple_code (stmt) == GIMPLE_ASM)
2193 return NULL_TREE;
2194 if (gimple_code (stmt) != GIMPLE_CALL)
2195 continue;
2196
2197 callee = gimple_call_fndecl (stmt);
2198 if (!callee
2199 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2200 /* All regular builtins are ok, just obviously not alloca. */
2201 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2202 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2203 return NULL_TREE;
2204
2205 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2206 goto second_stack_restore;
2207 }
2208
2209 if (!gsi_end_p (i))
2210 return NULL_TREE;
2211
2212 /* Allow a single successor that is the exit block, or no successors. */
2213 switch (EDGE_COUNT (bb->succs))
2214 {
2215 case 0:
2216 break;
2217 case 1:
2218 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
2219 return NULL_TREE;
2220 break;
2221 default:
2222 return NULL_TREE;
2223 }
2224 second_stack_restore:
2225
2226 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2227 If there are multiple uses, then the last one should remove the call.
2228 In any case, whether the call to __builtin_stack_save can be removed
2229 or not is irrelevant to removing the call to __builtin_stack_restore. */
2230 if (has_single_use (gimple_call_arg (call, 0)))
2231 {
2232 gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2233 if (is_gimple_call (stack_save))
2234 {
2235 callee = gimple_call_fndecl (stack_save);
2236 if (callee
2237 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2238 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2239 {
2240 gimple_stmt_iterator stack_save_gsi;
2241 tree rhs;
2242
2243 stack_save_gsi = gsi_for_stmt (stack_save);
2244 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2245 update_call_from_tree (&stack_save_gsi, rhs);
2246 }
2247 }
2248 }
2249
2250 /* No effect, so the statement will be deleted. */
2251 return integer_zero_node;
2252 }
2253
2254 /* If the va_list type is a simple pointer and nothing special is needed,
2255 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2256 optimize __builtin_va_end (&ap) out as a NOP, and optimize
2257 __builtin_va_copy into a simple pointer assignment. */
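/* An illustrative summary for such a target:

     __builtin_va_start (&ap, 0)    ->  ap = __builtin_next_arg (0)
     __builtin_va_copy (&dst, src)  ->  dst = src
     __builtin_va_end (&ap)         ->  deleted  */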
2258
2259 static tree
2260 optimize_stdarg_builtin (gimple call)
2261 {
2262 tree callee, lhs, rhs, cfun_va_list;
2263 bool va_list_simple_ptr;
2264 location_t loc = gimple_location (call);
2265
2266 if (gimple_code (call) != GIMPLE_CALL)
2267 return NULL_TREE;
2268
2269 callee = gimple_call_fndecl (call);
2270
2271 cfun_va_list = targetm.fn_abi_va_list (callee);
2272 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2273 && (TREE_TYPE (cfun_va_list) == void_type_node
2274 || TREE_TYPE (cfun_va_list) == char_type_node);
2275
2276 switch (DECL_FUNCTION_CODE (callee))
2277 {
2278 case BUILT_IN_VA_START:
2279 if (!va_list_simple_ptr
2280 || targetm.expand_builtin_va_start != NULL
2281 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2282 return NULL_TREE;
2283
2284 if (gimple_call_num_args (call) != 2)
2285 return NULL_TREE;
2286
2287 lhs = gimple_call_arg (call, 0);
2288 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2289 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2290 != TYPE_MAIN_VARIANT (cfun_va_list))
2291 return NULL_TREE;
2292
2293 lhs = build_fold_indirect_ref_loc (loc, lhs);
2294 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2295 1, integer_zero_node);
2296 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2297 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2298
2299 case BUILT_IN_VA_COPY:
2300 if (!va_list_simple_ptr)
2301 return NULL_TREE;
2302
2303 if (gimple_call_num_args (call) != 2)
2304 return NULL_TREE;
2305
2306 lhs = gimple_call_arg (call, 0);
2307 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2308 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2309 != TYPE_MAIN_VARIANT (cfun_va_list))
2310 return NULL_TREE;
2311
2312 lhs = build_fold_indirect_ref_loc (loc, lhs);
2313 rhs = gimple_call_arg (call, 1);
2314 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2315 != TYPE_MAIN_VARIANT (cfun_va_list))
2316 return NULL_TREE;
2317
2318 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2319 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2320
2321 case BUILT_IN_VA_END:
2322 /* No effect, so the statement will be deleted. */
2323 return integer_zero_node;
2324
2325 default:
2326 gcc_unreachable ();
2327 }
2328 }
2329
2330 /* Attempt to make the block containing the call to __builtin_unreachable at I
2331 unreachable by changing the incoming jumps.  Return true if at least one jump was changed. */
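/* E.g. (illustrative): if bb3 consists solely of a call to
   __builtin_unreachable, a predecessor ending in

     if (x_1 != 0) goto bb3; else goto bb4;

   has its condition forced to false, so bb3 loses the incoming edge and
   can be deleted by the following CFG cleanup.  */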
2332
2333 static bool
2334 optimize_unreachable (gimple_stmt_iterator i)
2335 {
2336 basic_block bb = gsi_bb (i);
2337 gimple_stmt_iterator gsi;
2338 gimple stmt;
2339 edge_iterator ei;
2340 edge e;
2341 bool ret;
2342
2343 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2344 {
2345 stmt = gsi_stmt (gsi);
2346
2347 if (is_gimple_debug (stmt))
2348 continue;
2349
2350 if (gimple_code (stmt) == GIMPLE_LABEL)
2351 {
2352 /* Verify we do not need to preserve the label. */
2353 if (FORCED_LABEL (gimple_label_label (stmt)))
2354 return false;
2355
2356 continue;
2357 }
2358
2359 /* Only handle the case that __builtin_unreachable is the first statement
2360 in the block. We rely on DCE to remove stmts without side-effects
2361 before __builtin_unreachable. */
2362 if (gsi_stmt (gsi) != gsi_stmt (i))
2363 return false;
2364 }
2365
2366 ret = false;
2367 FOR_EACH_EDGE (e, ei, bb->preds)
2368 {
2369 gsi = gsi_last_bb (e->src);
2370 if (gsi_end_p (gsi))
2371 continue;
2372
2373 stmt = gsi_stmt (gsi);
2374 if (gimple_code (stmt) == GIMPLE_COND)
2375 {
2376 if (e->flags & EDGE_TRUE_VALUE)
2377 gimple_cond_make_false (stmt);
2378 else if (e->flags & EDGE_FALSE_VALUE)
2379 gimple_cond_make_true (stmt);
2380 else
2381 gcc_unreachable ();
2382 }
2383 else
2384 {
2385 /* TODO: handle other cases, e.g. a switch statement. */
2386 continue;
2387 }
2388
2389 ret = true;
2390 }
2391
2392 return ret;
2393 }
2394
2395 /* A simple pass that attempts to fold all builtin functions. This pass
2396 is run after we've propagated as many constants as we can. */
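/* E.g. (illustrative): any __builtin_constant_p call still left at this
   point is resolved to 0, and a leftover call such as strlen of a string
   literal would be folded to its constant length.  */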
2397
2398 static unsigned int
2399 execute_fold_all_builtins (void)
2400 {
2401 bool cfg_changed = false;
2402 basic_block bb;
2403 unsigned int todoflags = 0;
2404
2405 FOR_EACH_BB (bb)
2406 {
2407 gimple_stmt_iterator i;
2408 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2409 {
2410 gimple stmt, old_stmt;
2411 tree callee, result;
2412 enum built_in_function fcode;
2413
2414 stmt = gsi_stmt (i);
2415
2416 if (gimple_code (stmt) != GIMPLE_CALL)
2417 {
2418 gsi_next (&i);
2419 continue;
2420 }
2421 callee = gimple_call_fndecl (stmt);
2422 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2423 {
2424 gsi_next (&i);
2425 continue;
2426 }
2427 fcode = DECL_FUNCTION_CODE (callee);
2428
2429 result = gimple_fold_builtin (stmt);
2430
2431 if (result)
2432 gimple_remove_stmt_histograms (cfun, stmt);
2433
2434 if (!result)
2435 switch (DECL_FUNCTION_CODE (callee))
2436 {
2437 case BUILT_IN_CONSTANT_P:
2438 /* Resolve __builtin_constant_p. If it hasn't been
2439 folded to integer_one_node by now, it's fairly
2440 certain that the value simply isn't constant. */
2441 result = integer_zero_node;
2442 break;
2443
2444 case BUILT_IN_ASSUME_ALIGNED:
2445 /* Remove __builtin_assume_aligned. */
2446 result = gimple_call_arg (stmt, 0);
2447 break;
2448
2449 case BUILT_IN_STACK_RESTORE:
2450 result = optimize_stack_restore (i);
2451 if (result)
2452 break;
2453 gsi_next (&i);
2454 continue;
2455
2456 case BUILT_IN_UNREACHABLE:
2457 if (optimize_unreachable (i))
2458 cfg_changed = true;
2459 break;
2460
2461 case BUILT_IN_VA_START:
2462 case BUILT_IN_VA_END:
2463 case BUILT_IN_VA_COPY:
2464 /* These shouldn't be folded before pass_stdarg. */
2465 result = optimize_stdarg_builtin (stmt);
2466 if (result)
2467 break;
2468 /* FALLTHRU */
2469
2470 default:
2471 gsi_next (&i);
2472 continue;
2473 }
2474
2475 if (result == NULL_TREE)
2476 break;
2477
2478 if (dump_file && (dump_flags & TDF_DETAILS))
2479 {
2480 fprintf (dump_file, "Simplified\n ");
2481 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2482 }
2483
2484 old_stmt = stmt;
2485 if (!update_call_from_tree (&i, result))
2486 {
2487 gimplify_and_update_call_from_tree (&i, result);
2488 todoflags |= TODO_update_address_taken;
2489 }
2490
2491 stmt = gsi_stmt (i);
2492 update_stmt (stmt);
2493
2494 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2495 && gimple_purge_dead_eh_edges (bb))
2496 cfg_changed = true;
2497
2498 if (dump_file && (dump_flags & TDF_DETAILS))
2499 {
2500 fprintf (dump_file, "to\n ");
2501 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2502 fprintf (dump_file, "\n");
2503 }
2504
2505 /* Retry the same statement if it changed into another
2506 builtin; there might be new folding opportunities now. */
2507 if (gimple_code (stmt) != GIMPLE_CALL)
2508 {
2509 gsi_next (&i);
2510 continue;
2511 }
2512 callee = gimple_call_fndecl (stmt);
2513 if (!callee
2514 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2515 || DECL_FUNCTION_CODE (callee) == fcode)
2516 gsi_next (&i);
2517 }
2518 }
2519
2520 /* Delete unreachable blocks. */
2521 if (cfg_changed)
2522 todoflags |= TODO_cleanup_cfg;
2523
2524 return todoflags;
2525 }
2526
2527
2528 struct gimple_opt_pass pass_fold_builtins =
2529 {
2530 {
2531 GIMPLE_PASS,
2532 "fab", /* name */
2533 NULL, /* gate */
2534 execute_fold_all_builtins, /* execute */
2535 NULL, /* sub */
2536 NULL, /* next */
2537 0, /* static_pass_number */
2538 TV_NONE, /* tv_id */
2539 PROP_cfg | PROP_ssa, /* properties_required */
2540 0, /* properties_provided */
2541 0, /* properties_destroyed */
2542 0, /* todo_flags_start */
2543 TODO_verify_ssa
2544 | TODO_update_ssa /* todo_flags_finish */
2545 }
2546 };