1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "gimple-pretty-print.h"
31 #include "bitmap.h"
32 #include "pointer-set.h"
33 #include "tree-ssa-alias.h"
34 #include "internal-fn.h"
35 #include "gimple-expr.h"
36 #include "is-a.h"
37 #include "gimple.h"
38 #include "gimple-iterator.h"
39 #include "gimple-ssa.h"
40 #include "tree-phinodes.h"
41 #include "ssa-iterators.h"
42 #include "tree-ssa.h"
43 #include "tree-inline.h"
44 #include "hashtab.h"
45 #include "tree-pass.h"
46 #include "diagnostic-core.h"
47
48 /* This implements the pass that does predicate aware warning on uses of
49 possibly uninitialized variables. The pass first collects the set of
50 possibly uninitialized SSA names. For each such name, it walks through
51 all its immediate uses. For each immediate use, it rebuilds the condition
52 expression (the predicate) that guards the use. The predicate is then
53 examined to see if the variable is always defined under that same condition.
54 This is done either by pruning the unrealizable paths that lead to the
55 default definitions or by checking if the predicate set that guards the
56 defining paths is a superset of the use predicate. */
57
58
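/* Illustrative sketch (editorial addition, not part of the original pass
   sources): a minimal, hypothetical input of the kind the predicate-aware
   analysis is meant to handle without a false positive.  The names are
   made up for illustration only.

     int x;                      // no initializer
     if (cond)
       x = compute ();           // defining path guarded by 'cond'
     ...
     if (cond)
       use (x);                  // use guarded by the same predicate

   The use predicate (cond) is covered by the definition predicate (cond),
   so no warning should be issued; if the second guard were absent, the use
   would be reachable along the path where 'x' only has its default
   definition and a -Wmaybe-uninitialized warning would be appropriate.  */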
59 /* Pointer set of potentially undefined ssa names, i.e.,
60 ssa names that are defined by a phi with operands that
61 are not defined or are potentially undefined. */
62 static struct pointer_set_t *possibly_undefined_names = 0;
63
64 /* Bit mask handling macros. */
65 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
66 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
67 #define MASK_EMPTY(mask) (mask == 0)
68
69 /* Returns the position (starting from LSB) of the first
70 set bit in MASK. Returns -1 if the mask is empty. */
71 static int
72 get_mask_first_set_bit (unsigned mask)
73 {
74 int pos = 0;
75 if (mask == 0)
76 return -1;
77
78 while ((mask & (1 << pos)) == 0)
79 pos++;
80
81 return pos;
82 }
83 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
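/* Worked example (editorial addition): for a hypothetical phi like
     v_3 = PHI <v_1(D), 5, v_2(D)>
   where operands 0 and 2 are (possibly) undefined and operand 1 is the
   constant 5, compute_uninit_opnds_pos below would build the mask 0b101,
   so MASK_TEST_BIT (mask, 0) and MASK_TEST_BIT (mask, 2) hold and
   MASK_FIRST_SET_BIT (mask) is 0.  */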
84
85 /* Return true if T, an SSA_NAME, has an undefined value. */
86 static bool
87 has_undefined_value_p (tree t)
88 {
89 return (ssa_undefined_value_p (t)
90 || (possibly_undefined_names
91 && pointer_set_contains (possibly_undefined_names, t)));
92 }
93
94
95
96 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
97 is set on SSA_NAME_VAR. */
98
99 static inline bool
100 uninit_undefined_value_p (tree t) {
101 if (!has_undefined_value_p (t))
102 return false;
103 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
104 return false;
105 return true;
106 }
107
108 /* Emit warnings for uninitialized variables. This is done in two passes.
109
110 The first pass notices real uses of SSA names with undefined values.
111 Such uses are unconditionally uninitialized, and we can be certain that
112 such a use is a mistake. This pass is run before most optimizations,
113 so that we catch as many as we can.
114
115 The second pass follows PHI nodes to find uses that are potentially
116 uninitialized. In this case we can't necessarily prove that the use
117 is really uninitialized. This pass is run after most optimizations,
118 so that we thread as many jumps as possible, and delete as much dead
119 code as possible, in order to reduce false positives. We also look
120 again for plain uninitialized variables, since optimization may have
121 changed conditionally uninitialized to unconditionally uninitialized. */
122
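/* Illustrative sketch (editorial addition): the two passes catch different
   shapes of code.  The variable names are hypothetical.

     int a, b;
     use (a);                    // pass 1: 'a' is uninitialized on every
                                 // path, warn with -Wuninitialized
     if (cond)
       b = 1;
     use (b);                    // pass 2: 'b' is uninitialized only when
                                 // !cond, warn with -Wmaybe-uninitialized

   Running the second pass late lets jump threading and dead code removal
   delete unreachable paths first, reducing false positives.  */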
123 /* Emit a warning for EXPR based on variable VAR at the point in the
124 program where T, an SSA_NAME, is used uninitialized. The exact
125 warning text is in GMSGID, and DATA is the statement context, which
126 may be null. WC is the warning code. */
127
128 static void
129 warn_uninit (enum opt_code wc, tree t,
130 tree expr, tree var, const char *gmsgid, void *data)
131 {
132 gimple context = (gimple) data;
133 location_t location, cfun_loc;
134 expanded_location xloc, floc;
135
136 if (!has_undefined_value_p (t))
137 return;
138
139 /* TREE_NO_WARNING either means we already warned, or the front end
140 wishes to suppress the warning. */
141 if ((context
142 && (gimple_no_warning_p (context)
143 || (gimple_assign_single_p (context)
144 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
145 || TREE_NO_WARNING (expr))
146 return;
147
148 location = (context != NULL && gimple_has_location (context))
149 ? gimple_location (context)
150 : DECL_SOURCE_LOCATION (var);
151 location = linemap_resolve_location (line_table, location,
152 LRK_SPELLING_LOCATION,
153 NULL);
154 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
155 xloc = expand_location (location);
156 floc = expand_location (cfun_loc);
157 if (warning_at (location, wc, gmsgid, expr))
158 {
159 TREE_NO_WARNING (expr) = 1;
160
161 if (location == DECL_SOURCE_LOCATION (var))
162 return;
163 if (xloc.file != floc.file
164 || linemap_location_before_p (line_table,
165 location, cfun_loc)
166 || linemap_location_before_p (line_table,
167 cfun->function_end_locus,
168 location))
169 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
170 }
171 }
172
173 static unsigned int
174 warn_uninitialized_vars (bool warn_possibly_uninitialized)
175 {
176 gimple_stmt_iterator gsi;
177 basic_block bb;
178
179 FOR_EACH_BB (bb)
180 {
181 bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
182 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
183 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
184 {
185 gimple stmt = gsi_stmt (gsi);
186 use_operand_p use_p;
187 ssa_op_iter op_iter;
188 tree use;
189
190 if (is_gimple_debug (stmt))
191 continue;
192
193 /* We only do data flow with SSA_NAMEs, so that's all we
194 can warn about. */
195 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
196 {
197 use = USE_FROM_PTR (use_p);
198 if (always_executed)
199 warn_uninit (OPT_Wuninitialized, use,
200 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
201 "%qD is used uninitialized in this function",
202 stmt);
203 else if (warn_possibly_uninitialized)
204 warn_uninit (OPT_Wmaybe_uninitialized, use,
205 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
206 "%qD may be used uninitialized in this function",
207 stmt);
208 }
209
210 /* For memory the only cheap thing we can do is see if we
211 have a use of the default def of the virtual operand.
212 ??? Note that at -O0 we do not have virtual operands.
213 ??? Not so cheap would be to use the alias oracle via
214 walk_aliased_vdefs, if we don't find any aliasing vdef
215 warn as is-used-uninitialized, if we don't find an aliasing
216 vdef that kills our use (stmt_kills_ref_p), warn as
217 may-be-used-uninitialized. But this walk is quadratic and
218 so must be limited which means we would miss warning
219 opportunities. */
220 use = gimple_vuse (stmt);
221 if (use
222 && gimple_assign_single_p (stmt)
223 && !gimple_vdef (stmt)
224 && SSA_NAME_IS_DEFAULT_DEF (use))
225 {
226 tree rhs = gimple_assign_rhs1 (stmt);
227 tree base = get_base_address (rhs);
228
229 /* Do not warn if it can be initialized outside this function. */
230 if (TREE_CODE (base) != VAR_DECL
231 || DECL_HARD_REGISTER (base)
232 || is_global_var (base))
233 continue;
234
235 if (always_executed)
236 warn_uninit (OPT_Wuninitialized, use,
237 gimple_assign_rhs1 (stmt), base,
238 "%qE is used uninitialized in this function",
239 stmt);
240 else if (warn_possibly_uninitialized)
241 warn_uninit (OPT_Wmaybe_uninitialized, use,
242 gimple_assign_rhs1 (stmt), base,
243 "%qE may be used uninitialized in this function",
244 stmt);
245 }
246 }
247 }
248
249 return 0;
250 }
251
252 /* Checks if the operand OPND of PHI is defined by
253 another phi with one operand defined by this PHI,
254 while the rest of the operands are all defined. If so,
255 returns true to skip this operand as being
256 redundant. Can be enhanced to be more general. */
257
258 static bool
259 can_skip_redundant_opnd (tree opnd, gimple phi)
260 {
261 gimple op_def;
262 tree phi_def;
263 int i, n;
264
265 phi_def = gimple_phi_result (phi);
266 op_def = SSA_NAME_DEF_STMT (opnd);
267 if (gimple_code (op_def) != GIMPLE_PHI)
268 return false;
269 n = gimple_phi_num_args (op_def);
270 for (i = 0; i < n; ++i)
271 {
272 tree op = gimple_phi_arg_def (op_def, i);
273 if (TREE_CODE (op) != SSA_NAME)
274 continue;
275 if (op != phi_def && uninit_undefined_value_p (op))
276 return false;
277 }
278
279 return true;
280 }
281
282 /* Returns a bit mask holding the positions of arguments in PHI
283 that have empty (or possibly empty) definitions. */
284
285 static unsigned
286 compute_uninit_opnds_pos (gimple phi)
287 {
288 size_t i, n;
289 unsigned uninit_opnds = 0;
290
291 n = gimple_phi_num_args (phi);
292 /* Bail out for phi with too many args. */
293 if (n > 32)
294 return 0;
295
296 for (i = 0; i < n; ++i)
297 {
298 tree op = gimple_phi_arg_def (phi, i);
299 if (TREE_CODE (op) == SSA_NAME
300 && uninit_undefined_value_p (op)
301 && !can_skip_redundant_opnd (op, phi))
302 {
303 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
304 {
305 /* Ignore SSA_NAMEs that appear on abnormal edges
306 somewhere. */
307 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
308 continue;
309 }
310 MASK_SET_BIT (uninit_opnds, i);
311 }
312 }
313 return uninit_opnds;
314 }
315
316 /* Find the immediate postdominator PDOM of the specified
317 basic block BLOCK. */
318
319 static inline basic_block
320 find_pdom (basic_block block)
321 {
322 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
323 return EXIT_BLOCK_PTR_FOR_FN (cfun);
324 else
325 {
326 basic_block bb
327 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
328 if (! bb)
329 return EXIT_BLOCK_PTR_FOR_FN (cfun);
330 return bb;
331 }
332 }
333
334 /* Find the immediate dominator DOM of the specified
335 basic block BLOCK. */
336
337 static inline basic_block
338 find_dom (basic_block block)
339 {
340 if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
341 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
342 else
343 {
344 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
345 if (! bb)
346 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
347 return bb;
348 }
349 }
350
351 /* Returns true if BB1 is postdominating BB2 and BB1 is
352 not a loop exit bb. The loop exit bb check is simple and does
353 not cover all cases. */
354
355 static bool
356 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
357 {
358 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
359 return false;
360
361 if (single_pred_p (bb1) && !single_succ_p (bb2))
362 return false;
363
364 return true;
365 }
366
367 /* Find the closest postdominator of a specified BB, which is control
368 equivalent to BB. */
369
370 static inline basic_block
371 find_control_equiv_block (basic_block bb)
372 {
373 basic_block pdom;
374
375 pdom = find_pdom (bb);
376
377 /* Skip the postdominating bb that is also loop exit. */
378 if (!is_non_loop_exit_postdominating (pdom, bb))
379 return NULL;
380
381 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
382 return pdom;
383
384 return NULL;
385 }
386
387 #define MAX_NUM_CHAINS 8
388 #define MAX_CHAIN_LEN 5
389 #define MAX_POSTDOM_CHECK 8
390
391 /* Computes the control dependence chains (paths of edges)
392 for DEP_BB up to the dominating basic block BB (the head node of a
393 chain should be dominated by it). CD_CHAINS is a pointer to a
394 dynamic array holding the result chains. CUR_CD_CHAIN is the current
395 chain being computed. *NUM_CHAINS is the total number of chains. The
396 function returns true if the information is successfully computed,
397 and false if there is no control dependence or it cannot be computed. */
398
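/* Worked example (editorial addition, hypothetical CFG): for

     bb1: if (a) goto bb2; else goto bb4;
     bb2: if (b) goto bb3; else goto bb4;
     bb3: <DEP_BB>
     bb4: ...

   the single control dependence chain computed for bb3 starting at bb1 is
   the edge sequence [bb1->bb2, bb2->bb3]; bb3 executes only when both
   branches are taken, which is later turned into the composite predicate
   a .AND. b by convert_control_dep_chain_into_preds.  */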
399 static bool
400 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
401 vec<edge> *cd_chains,
402 size_t *num_chains,
403 vec<edge> *cur_cd_chain)
404 {
405 edge_iterator ei;
406 edge e;
407 size_t i;
408 bool found_cd_chain = false;
409 size_t cur_chain_len = 0;
410
411 if (EDGE_COUNT (bb->succs) < 2)
412 return false;
413
414 /* Could use a set instead. */
415 cur_chain_len = cur_cd_chain->length ();
416 if (cur_chain_len > MAX_CHAIN_LEN)
417 return false;
418
419 for (i = 0; i < cur_chain_len; i++)
420 {
421 edge e = (*cur_cd_chain)[i];
422 /* cycle detected. */
423 if (e->src == bb)
424 return false;
425 }
426
427 FOR_EACH_EDGE (e, ei, bb->succs)
428 {
429 basic_block cd_bb;
430 int post_dom_check = 0;
431 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
432 continue;
433
434 cd_bb = e->dest;
435 cur_cd_chain->safe_push (e);
436 while (!is_non_loop_exit_postdominating (cd_bb, bb))
437 {
438 if (cd_bb == dep_bb)
439 {
440 /* Found a direct control dependence. */
441 if (*num_chains < MAX_NUM_CHAINS)
442 {
443 cd_chains[*num_chains] = cur_cd_chain->copy ();
444 (*num_chains)++;
445 }
446 found_cd_chain = true;
447 /* check path from next edge. */
448 break;
449 }
450
451 /* Now check if DEP_BB is indirectly control dependent on BB. */
452 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
453 num_chains, cur_cd_chain))
454 {
455 found_cd_chain = true;
456 break;
457 }
458
459 cd_bb = find_pdom (cd_bb);
460 post_dom_check++;
461 if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || post_dom_check >
462 MAX_POSTDOM_CHECK)
463 break;
464 }
465 cur_cd_chain->pop ();
466 gcc_assert (cur_cd_chain->length () == cur_chain_len);
467 }
468 gcc_assert (cur_cd_chain->length () == cur_chain_len);
469
470 return found_cd_chain;
471 }
472
473 typedef struct use_pred_info
474 {
475 gimple cond;
476 bool invert;
477 } *use_pred_info_t;
478
479
480
481 /* Converts the chains of control dependence edges into a set of
482 predicates. A control dependence chain is represented by a vector of
483 edges. DEP_CHAINS points to an array of dependence chains.
484 NUM_CHAINS is the size of the chain array. One edge in a dependence
485 chain is mapped to a predicate expression represented by the
486 use_pred_info_t type. One dependence chain is converted to a composite
487 predicate that is the result of ANDing the use_pred_info_t predicates
488 mapped to its edges. A composite predicate is represented by a vector
489 of use_pred_info_t. On return, *PREDS points to the resulting array of
490 composite predicates. *NUM_PREDS is the number of composite predicates. */
491
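/* Worked example (editorial addition, hypothetical GIMPLE): a chain whose
   edges leave "if (a_1 != 0)" through its true arm and "if (b_2 > 3)"
   through its false arm becomes the composite predicate

     { cond = (a_1 != 0), invert = false } .AND.
     { cond = (b_2 > 3),  invert = true  }

   i.e. (a_1 != 0) && !(b_2 > 3).  Multiple chains are combined with .OR.
   by the callers, matching the dump format used in dump_predicates.  */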
492 static bool
493 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
494 size_t num_chains,
495 vec<use_pred_info_t> **preds,
496 size_t *num_preds)
497 {
498 bool has_valid_pred = false;
499 size_t i, j;
500 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
501 return false;
502
503 /* Now convert the control dep chain into a set
504 of predicates. */
505 typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
506 *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
507 *num_preds = num_chains;
508
509 for (i = 0; i < num_chains; i++)
510 {
511 vec<edge> one_cd_chain = dep_chains[i];
512
513 has_valid_pred = false;
514 for (j = 0; j < one_cd_chain.length (); j++)
515 {
516 gimple cond_stmt;
517 gimple_stmt_iterator gsi;
518 basic_block guard_bb;
519 use_pred_info_t one_pred;
520 edge e;
521
522 e = one_cd_chain[j];
523 guard_bb = e->src;
524 gsi = gsi_last_bb (guard_bb);
525 if (gsi_end_p (gsi))
526 {
527 has_valid_pred = false;
528 break;
529 }
530 cond_stmt = gsi_stmt (gsi);
531 if (gimple_code (cond_stmt) == GIMPLE_CALL
532 && EDGE_COUNT (e->src->succs) >= 2)
533 {
534 /* Ignore EH edge. Can add assertion
535 on the other edge's flag. */
536 continue;
537 }
538 /* Skip if there is essentially one successor. */
539 if (EDGE_COUNT (e->src->succs) == 2)
540 {
541 edge e1;
542 edge_iterator ei1;
543 bool skip = false;
544
545 FOR_EACH_EDGE (e1, ei1, e->src->succs)
546 {
547 if (EDGE_COUNT (e1->dest->succs) == 0)
548 {
549 skip = true;
550 break;
551 }
552 }
553 if (skip)
554 continue;
555 }
556 if (gimple_code (cond_stmt) != GIMPLE_COND)
557 {
558 has_valid_pred = false;
559 break;
560 }
561 one_pred = XNEW (struct use_pred_info);
562 one_pred->cond = cond_stmt;
563 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
564 (*preds)[i].safe_push (one_pred);
565 has_valid_pred = true;
566 }
567
568 if (!has_valid_pred)
569 break;
570 }
571 return has_valid_pred;
572 }
573
574 /* Computes all control dependence chains for USE_BB. The control
575 dependence chains are then converted to an array of composite
576 predicates pointed to by PREDS. PHI_BB is the basic block of
577 the phi whose result is used in USE_BB. */
578
579 static bool
580 find_predicates (vec<use_pred_info_t> **preds,
581 size_t *num_preds,
582 basic_block phi_bb,
583 basic_block use_bb)
584 {
585 size_t num_chains = 0, i;
586 vec<edge> *dep_chains = 0;
587 vec<edge> cur_chain = vNULL;
588 bool has_valid_pred = false;
589 basic_block cd_root = 0;
590
591 typedef vec<edge> vec_edge_heap;
592 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
593
594 /* First find the closest bb that is control equivalent to PHI_BB
595 that also dominates USE_BB. */
596 cd_root = phi_bb;
597 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
598 {
599 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
600 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
601 cd_root = ctrl_eq_bb;
602 else
603 break;
604 }
605
606 compute_control_dep_chain (cd_root, use_bb,
607 dep_chains, &num_chains,
608 &cur_chain);
609
610 has_valid_pred
611 = convert_control_dep_chain_into_preds (dep_chains,
612 num_chains,
613 preds,
614 num_preds);
615 /* Free individual chain */
616 cur_chain.release ();
617 for (i = 0; i < num_chains; i++)
618 dep_chains[i].release ();
619 free (dep_chains);
620 return has_valid_pred;
621 }
622
623 /* Computes the set of incoming edges of PHI that have non-empty
624 definitions of a phi chain. The collection will be done
625 recursively on operands that are defined by phis. CD_ROOT
626 is the control dependence root. *EDGES holds the result, and
627 VISITED_PHIS is a pointer set for detecting cycles. */
628
629 static void
630 collect_phi_def_edges (gimple phi, basic_block cd_root,
631 vec<edge> *edges,
632 struct pointer_set_t *visited_phis)
633 {
634 size_t i, n;
635 edge opnd_edge;
636 tree opnd;
637
638 if (pointer_set_insert (visited_phis, phi))
639 return;
640
641 n = gimple_phi_num_args (phi);
642 for (i = 0; i < n; i++)
643 {
644 opnd_edge = gimple_phi_arg_edge (phi, i);
645 opnd = gimple_phi_arg_def (phi, i);
646
647 if (TREE_CODE (opnd) != SSA_NAME)
648 {
649 if (dump_file && (dump_flags & TDF_DETAILS))
650 {
651 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
652 print_gimple_stmt (dump_file, phi, 0, 0);
653 }
654 edges->safe_push (opnd_edge);
655 }
656 else
657 {
658 gimple def = SSA_NAME_DEF_STMT (opnd);
659
660 if (gimple_code (def) == GIMPLE_PHI
661 && dominated_by_p (CDI_DOMINATORS,
662 gimple_bb (def), cd_root))
663 collect_phi_def_edges (def, cd_root, edges,
664 visited_phis);
665 else if (!uninit_undefined_value_p (opnd))
666 {
667 if (dump_file && (dump_flags & TDF_DETAILS))
668 {
669 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
670 print_gimple_stmt (dump_file, phi, 0, 0);
671 }
672 edges->safe_push (opnd_edge);
673 }
674 }
675 }
676 }
677
678 /* For each use edge of PHI, computes all control dependence chains.
679 The control dependence chains are then converted to an array of
680 composite predicates pointed to by PREDS. */
681
682 static bool
683 find_def_preds (vec<use_pred_info_t> **preds,
684 size_t *num_preds, gimple phi)
685 {
686 size_t num_chains = 0, i, n;
687 vec<edge> *dep_chains = 0;
688 vec<edge> cur_chain = vNULL;
689 vec<edge> def_edges = vNULL;
690 bool has_valid_pred = false;
691 basic_block phi_bb, cd_root = 0;
692 struct pointer_set_t *visited_phis;
693
694 typedef vec<edge> vec_edge_heap;
695 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
696
697 phi_bb = gimple_bb (phi);
698 /* First find the closest dominating bb to be
699 the control dependence root. */
700 cd_root = find_dom (phi_bb);
701 if (!cd_root)
702 return false;
703
704 visited_phis = pointer_set_create ();
705 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
706 pointer_set_destroy (visited_phis);
707
708 n = def_edges.length ();
709 if (n == 0)
710 return false;
711
712 for (i = 0; i < n; i++)
713 {
714 size_t prev_nc, j;
715 edge opnd_edge;
716
717 opnd_edge = def_edges[i];
718 prev_nc = num_chains;
719 compute_control_dep_chain (cd_root, opnd_edge->src,
720 dep_chains, &num_chains,
721 &cur_chain);
722 /* Free individual chain */
723 cur_chain.release ();
724
725 /* Now update the newly added chains with
726 the phi operand edge: */
727 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
728 {
729 if (prev_nc == num_chains
730 && num_chains < MAX_NUM_CHAINS)
731 num_chains++;
732 for (j = prev_nc; j < num_chains; j++)
733 {
734 dep_chains[j].safe_push (opnd_edge);
735 }
736 }
737 }
738
739 has_valid_pred
740 = convert_control_dep_chain_into_preds (dep_chains,
741 num_chains,
742 preds,
743 num_preds);
744 for (i = 0; i < num_chains; i++)
745 dep_chains[i].release ();
746 free (dep_chains);
747 return has_valid_pred;
748 }
749
750 /* Dumps the predicates (PREDS) for USESTMT. */
751
752 static void
753 dump_predicates (gimple usestmt, size_t num_preds,
754 vec<use_pred_info_t> *preds,
755 const char* msg)
756 {
757 size_t i, j;
758 vec<use_pred_info_t> one_pred_chain;
759 fprintf (dump_file, msg);
760 print_gimple_stmt (dump_file, usestmt, 0, 0);
761 fprintf (dump_file, "is guarded by :\n");
762 /* do some dumping here: */
763 for (i = 0; i < num_preds; i++)
764 {
765 size_t np;
766
767 one_pred_chain = preds[i];
768 np = one_pred_chain.length ();
769
770 for (j = 0; j < np; j++)
771 {
772 use_pred_info_t one_pred
773 = one_pred_chain[j];
774 if (one_pred->invert)
775 fprintf (dump_file, " (.NOT.) ");
776 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
777 if (j < np - 1)
778 fprintf (dump_file, "(.AND.)\n");
779 }
780 if (i < num_preds - 1)
781 fprintf (dump_file, "(.OR.)\n");
782 }
783 }
784
785 /* Destroys the predicate set *PREDS. */
786
787 static void
788 destroy_predicate_vecs (size_t n,
789 vec<use_pred_info_t> * preds)
790 {
791 size_t i, j;
792 for (i = 0; i < n; i++)
793 {
794 for (j = 0; j < preds[i].length (); j++)
795 free (preds[i][j]);
796 preds[i].release ();
797 }
798 free (preds);
799 }
800
801
802 /* Computes the 'normalized' conditional code with operand
803 swapping and condition inversion. */
804
805 static enum tree_code
806 get_cmp_code (enum tree_code orig_cmp_code,
807 bool swap_cond, bool invert)
808 {
809 enum tree_code tc = orig_cmp_code;
810
811 if (swap_cond)
812 tc = swap_tree_comparison (orig_cmp_code);
813 if (invert)
814 tc = invert_tree_comparison (tc, false);
815
816 switch (tc)
817 {
818 case LT_EXPR:
819 case LE_EXPR:
820 case GT_EXPR:
821 case GE_EXPR:
822 case EQ_EXPR:
823 case NE_EXPR:
824 break;
825 default:
826 return ERROR_MARK;
827 }
828 return tc;
829 }
830
831 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
832 all values x in the range satisfy (x CMPC BOUNDARY) == true. */
833
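/* Worked example (editorial addition): with BOUNDARY = 5 and CMPC = LT_EXPR,
   the guarded range is { x : x < 5 }, so is_value_included_in (3, 5, LT_EXPR)
   is true and is_value_included_in (7, 5, LT_EXPR) is false.  For GT/GE/NE
   the code below inverts the comparison first and flips the result, so only
   EQ/LT/LE need to be evaluated directly.  */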
834 static bool
835 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
836 {
837 bool inverted = false;
838 bool is_unsigned;
839 bool result;
840
841 /* Only handle integer constants here. */
842 if (TREE_CODE (val) != INTEGER_CST
843 || TREE_CODE (boundary) != INTEGER_CST)
844 return true;
845
846 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
847
848 if (cmpc == GE_EXPR || cmpc == GT_EXPR
849 || cmpc == NE_EXPR)
850 {
851 cmpc = invert_tree_comparison (cmpc, false);
852 inverted = true;
853 }
854
855 if (is_unsigned)
856 {
857 if (cmpc == EQ_EXPR)
858 result = tree_int_cst_equal (val, boundary);
859 else if (cmpc == LT_EXPR)
860 result = INT_CST_LT_UNSIGNED (val, boundary);
861 else
862 {
863 gcc_assert (cmpc == LE_EXPR);
864 result = (tree_int_cst_equal (val, boundary)
865 || INT_CST_LT_UNSIGNED (val, boundary));
866 }
867 }
868 else
869 {
870 if (cmpc == EQ_EXPR)
871 result = tree_int_cst_equal (val, boundary);
872 else if (cmpc == LT_EXPR)
873 result = INT_CST_LT (val, boundary);
874 else
875 {
876 gcc_assert (cmpc == LE_EXPR);
877 result = (tree_int_cst_equal (val, boundary)
878 || INT_CST_LT (val, boundary));
879 }
880 }
881
882 if (inverted)
883 result ^= 1;
884
885 return result;
886 }
887
888 /* Returns true if PRED is common among all the predicate
889 chains (PREDS) (and therefore can be factored out).
890 NUM_PRED_CHAINS is the size of the array PREDS. */
891
892 static bool
893 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
894 vec<use_pred_info_t> *preds,
895 size_t num_pred_chains)
896 {
897 size_t i, j, n;
898
899 /* Trivial case. */
900 if (num_pred_chains == 1)
901 return true;
902
903 for (i = 1; i < num_pred_chains; i++)
904 {
905 bool found = false;
906 vec<use_pred_info_t> one_chain = preds[i];
907 n = one_chain.length ();
908 for (j = 0; j < n; j++)
909 {
910 use_pred_info_t pred2
911 = one_chain[j];
912 /* We can relax the condition comparison to not
913 use address comparison. However, the most common
914 case is that multiple control-dependent paths share
915 a common path prefix, so address comparison should
916 be OK. */
917
918 if (pred2->cond == pred->cond
919 && pred2->invert == pred->invert)
920 {
921 found = true;
922 break;
923 }
924 }
925 if (!found)
926 return false;
927 }
928 return true;
929 }
930
931 /* Forward declaration. */
932 static bool
933 is_use_properly_guarded (gimple use_stmt,
934 basic_block use_bb,
935 gimple phi,
936 unsigned uninit_opnds,
937 struct pointer_set_t *visited_phis);
938
939 /* Returns true if all uninitialized opnds are pruned. Returns false
940 otherwise. PHI is the phi node with uninitialized operands,
941 UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
942 FLAG_DEF is the statement defining the flag guarding the use of the
943 PHI output, BOUNDARY_CST is the const value used in the predicate
944 associated with the flag, CMP_CODE is the comparison code used in
945 the predicate, VISITED_PHIS is the pointer set of phis visited, and
946 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
947 that are also phis.
948
949 Example scenario:
950
951 BB1:
952 flag_1 = phi <0, 1> // (1)
953 var_1 = phi <undef, some_val>
954
955
956 BB2:
957 flag_2 = phi <0, flag_1, flag_1> // (2)
958 var_2 = phi <undef, var_1, var_1>
959 if (flag_2 == 1)
960 goto BB3;
961
962 BB3:
963 use of var_2 // (3)
964
965 Because some flag arg in (1) is not constant, if we do not look into the
966 flag phis recursively, it is conservatively treated as unknown and var_1
967 is thought to flow into the use at (3). Since var_1 is potentially uninitialized,
968 a false warning will be emitted. Checking recursively into (1), the compiler can
969 find out that only some_val (which is defined) can flow into (3), which is OK.
970
971 */
972
973 static bool
974 prune_uninit_phi_opnds_in_unrealizable_paths (
975 gimple phi, unsigned uninit_opnds,
976 gimple flag_def, tree boundary_cst,
977 enum tree_code cmp_code,
978 struct pointer_set_t *visited_phis,
979 bitmap *visited_flag_phis)
980 {
981 unsigned i;
982
983 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
984 {
985 tree flag_arg;
986
987 if (!MASK_TEST_BIT (uninit_opnds, i))
988 continue;
989
990 flag_arg = gimple_phi_arg_def (flag_def, i);
991 if (!is_gimple_constant (flag_arg))
992 {
993 gimple flag_arg_def, phi_arg_def;
994 tree phi_arg;
995 unsigned uninit_opnds_arg_phi;
996
997 if (TREE_CODE (flag_arg) != SSA_NAME)
998 return false;
999 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
1000 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
1001 return false;
1002
1003 phi_arg = gimple_phi_arg_def (phi, i);
1004 if (TREE_CODE (phi_arg) != SSA_NAME)
1005 return false;
1006
1007 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
1008 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
1009 return false;
1010
1011 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1012 return false;
1013
1014 if (!*visited_flag_phis)
1015 *visited_flag_phis = BITMAP_ALLOC (NULL);
1016
1017 if (bitmap_bit_p (*visited_flag_phis,
1018 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
1019 return false;
1020
1021 bitmap_set_bit (*visited_flag_phis,
1022 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1023
1024 /* Now recursively prune the uninitialized phi args. */
1025 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1026 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
1027 phi_arg_def, uninit_opnds_arg_phi,
1028 flag_arg_def, boundary_cst, cmp_code,
1029 visited_phis, visited_flag_phis))
1030 return false;
1031
1032 bitmap_clear_bit (*visited_flag_phis,
1033 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1034 continue;
1035 }
1036
1037 /* Now check if the constant is in the guarded range. */
1038 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1039 {
1040 tree opnd;
1041 gimple opnd_def;
1042
1043 /* Now we know that this undefined edge is not
1044 pruned. If the operand is defined by another phi,
1045 we can further prune the incoming edges of that
1046 phi by checking the predicates of this operand. */
1047
1048 opnd = gimple_phi_arg_def (phi, i);
1049 opnd_def = SSA_NAME_DEF_STMT (opnd);
1050 if (gimple_code (opnd_def) == GIMPLE_PHI)
1051 {
1052 edge opnd_edge;
1053 unsigned uninit_opnds2
1054 = compute_uninit_opnds_pos (opnd_def);
1055 gcc_assert (!MASK_EMPTY (uninit_opnds2));
1056 opnd_edge = gimple_phi_arg_edge (phi, i);
1057 if (!is_use_properly_guarded (phi,
1058 opnd_edge->src,
1059 opnd_def,
1060 uninit_opnds2,
1061 visited_phis))
1062 return false;
1063 }
1064 else
1065 return false;
1066 }
1067 }
1068
1069 return true;
1070 }
1071
1072 /* A helper function that determines if the predicate set
1073 of the use is not overlapping with that of the uninit paths.
1074 The most common scenario of a guarded use is in Example 1:
1075 Example 1:
1076 if (some_cond)
1077 {
1078 x = ...;
1079 flag = true;
1080 }
1081
1082 ... some code ...
1083
1084 if (flag)
1085 use (x);
1086
1087 The real world examples are usually more complicated, but similar
1088 and usually result from inlining:
1089
1090 bool init_func (int * x)
1091 {
1092 if (some_cond)
1093 return false;
1094 *x = ..
1095 return true;
1096 }
1097
1098 void foo(..)
1099 {
1100 int x;
1101
1102 if (!init_func(&x))
1103 return;
1104
1105 .. some_code ...
1106 use (x);
1107 }
1108
1109 Another possible use scenario is in the following trivial example:
1110
1111 Example 2:
1112 if (n > 0)
1113 x = 1;
1114 ...
1115 if (n > 0)
1116 {
1117 if (m < 2)
1118 .. = x;
1119 }
1120
1121 Predicate analysis needs to compute the composite predicate:
1122
1123 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1124 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1125 (the predicate chain for phi operand defs can be computed
1126 starting from a bb that is control equivalent to the phi's
1127 bb and is dominating the operand def.)
1128
1129 and check overlapping:
1130 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1131 <==> false
1132
1133 This implementation provides a framework that can handle these
1134 scenarios. (Note that many simple cases are handled properly
1135 without the predicate analysis -- this is due to the jump threading
1136 transformation, which eliminates the merge point and thus makes
1137 path-sensitive analysis unnecessary.)
1138
1139 NUM_PREDS is the number of predicate chains, PREDS is
1140 the array of chains, PHI is the phi node whose incoming (undefined)
1141 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
1142 uninit operand positions. VISITED_PHIS is the pointer set of phi
1143 stmts being checked. */
1144
1145
1146 static bool
1147 use_pred_not_overlap_with_undef_path_pred (
1148 size_t num_preds,
1149 vec<use_pred_info_t> *preds,
1150 gimple phi, unsigned uninit_opnds,
1151 struct pointer_set_t *visited_phis)
1152 {
1153 unsigned int i, n;
1154 gimple flag_def = 0;
1155 tree boundary_cst = 0;
1156 enum tree_code cmp_code;
1157 bool swap_cond = false;
1158 bool invert = false;
1159 vec<use_pred_info_t> the_pred_chain;
1160 bitmap visited_flag_phis = NULL;
1161 bool all_pruned = false;
1162
1163 gcc_assert (num_preds > 0);
1164 /* Find within the common prefix of multiple predicate chains
1165 a predicate that is a comparison of a flag variable against
1166 a constant. */
1167 the_pred_chain = preds[0];
1168 n = the_pred_chain.length ();
1169 for (i = 0; i < n; i++)
1170 {
1171 gimple cond;
1172 tree cond_lhs, cond_rhs, flag = 0;
1173
1174 use_pred_info_t the_pred
1175 = the_pred_chain[i];
1176
1177 cond = the_pred->cond;
1178 invert = the_pred->invert;
1179 cond_lhs = gimple_cond_lhs (cond);
1180 cond_rhs = gimple_cond_rhs (cond);
1181 cmp_code = gimple_cond_code (cond);
1182
1183 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1184 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1185 {
1186 boundary_cst = cond_rhs;
1187 flag = cond_lhs;
1188 }
1189 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1190 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1191 {
1192 boundary_cst = cond_lhs;
1193 flag = cond_rhs;
1194 swap_cond = true;
1195 }
1196
1197 if (!flag)
1198 continue;
1199
1200 flag_def = SSA_NAME_DEF_STMT (flag);
1201
1202 if (!flag_def)
1203 continue;
1204
1205 if ((gimple_code (flag_def) == GIMPLE_PHI)
1206 && (gimple_bb (flag_def) == gimple_bb (phi))
1207 && find_matching_predicate_in_rest_chains (
1208 the_pred, preds, num_preds))
1209 break;
1210
1211 flag_def = 0;
1212 }
1213
1214 if (!flag_def)
1215 return false;
1216
1217 /* Now check that every uninit incoming edge has a constant flag value
1218 that is in conflict with the use guard/predicate. */
1219 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1220
1221 if (cmp_code == ERROR_MARK)
1222 return false;
1223
1224 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1225 uninit_opnds,
1226 flag_def,
1227 boundary_cst,
1228 cmp_code,
1229 visited_phis,
1230 &visited_flag_phis);
1231
1232 if (visited_flag_phis)
1233 BITMAP_FREE (visited_flag_phis);
1234
1235 return all_pruned;
1236 }
1237
1238 /* Returns true if TC is AND or OR */
1239
1240 static inline bool
1241 is_and_or_or (enum tree_code tc, tree typ)
1242 {
1243 return (tc == BIT_IOR_EXPR
1244 || (tc == BIT_AND_EXPR
1245 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1246 }
1247
1248 typedef struct norm_cond
1249 {
1250 vec<gimple> conds;
1251 enum tree_code cond_code;
1252 bool invert;
1253 } *norm_cond_t;
1254
1255
1256 /* Normalizes gimple condition COND. The normalization follows
1257 UD chains to form larger condition expression trees. NORM_COND
1258 holds the normalized result. COND_CODE is the logical opcode
1259 (AND or OR) of the normalized tree. */
1260
1261 static void
1262 normalize_cond_1 (gimple cond,
1263 norm_cond_t norm_cond,
1264 enum tree_code cond_code)
1265 {
1266 enum gimple_code gc;
1267 enum tree_code cur_cond_code;
1268 tree rhs1, rhs2;
1269
1270 gc = gimple_code (cond);
1271 if (gc != GIMPLE_ASSIGN)
1272 {
1273 norm_cond->conds.safe_push (cond);
1274 return;
1275 }
1276
1277 cur_cond_code = gimple_assign_rhs_code (cond);
1278 rhs1 = gimple_assign_rhs1 (cond);
1279 rhs2 = gimple_assign_rhs2 (cond);
1280 if (cur_cond_code == NE_EXPR)
1281 {
1282 if (integer_zerop (rhs2)
1283 && (TREE_CODE (rhs1) == SSA_NAME))
1284 normalize_cond_1 (
1285 SSA_NAME_DEF_STMT (rhs1),
1286 norm_cond, cond_code);
1287 else if (integer_zerop (rhs1)
1288 && (TREE_CODE (rhs2) == SSA_NAME))
1289 normalize_cond_1 (
1290 SSA_NAME_DEF_STMT (rhs2),
1291 norm_cond, cond_code);
1292 else
1293 norm_cond->conds.safe_push (cond);
1294
1295 return;
1296 }
1297
1298 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1299 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1300 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1301 {
1302 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1303 norm_cond, cur_cond_code);
1304 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1305 norm_cond, cur_cond_code);
1306 norm_cond->cond_code = cur_cond_code;
1307 }
1308 else
1309 norm_cond->conds.safe_push (cond);
1310 }
1311
1312 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1313 if COND needs to be inverted or not. */
1314
1315 static void
1316 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1317 {
1318 enum tree_code cond_code;
1319
1320 norm_cond->cond_code = ERROR_MARK;
1321 norm_cond->invert = false;
1322 norm_cond->conds.create (0);
1323 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1324 cond_code = gimple_cond_code (cond);
1325 if (invert)
1326 cond_code = invert_tree_comparison (cond_code, false);
1327
1328 if (cond_code == NE_EXPR)
1329 {
1330 if (integer_zerop (gimple_cond_rhs (cond))
1331 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1332 normalize_cond_1 (
1333 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1334 norm_cond, ERROR_MARK);
1335 else if (integer_zerop (gimple_cond_lhs (cond))
1336 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1337 normalize_cond_1 (
1338 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1339 norm_cond, ERROR_MARK);
1340 else
1341 {
1342 norm_cond->conds.safe_push (cond);
1343 norm_cond->invert = invert;
1344 }
1345 }
1346 else
1347 {
1348 norm_cond->conds.safe_push (cond);
1349 norm_cond->invert = invert;
1350 }
1351
1352 gcc_assert (norm_cond->conds.length () == 1
1353 || is_and_or_or (norm_cond->cond_code, NULL));
1354 }
1355
1356 /* Returns true if the domain for condition COND1 is a subset of
1357 that of COND2. REVERSE is a flag; when it is true, the function checks
1358 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1359 to indicate if COND1 and COND2 need to be inverted or not. */
1360
1361 static bool
1362 is_gcond_subset_of (gimple cond1, bool invert1,
1363 gimple cond2, bool invert2,
1364 bool reverse)
1365 {
1366 enum gimple_code gc1, gc2;
1367 enum tree_code cond1_code, cond2_code;
1368 gimple tmp;
1369 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1370
1371 /* Take the short cut. */
1372 if (cond1 == cond2)
1373 return true;
1374
1375 if (reverse)
1376 {
1377 tmp = cond1;
1378 cond1 = cond2;
1379 cond2 = tmp;
1380 }
1381
1382 gc1 = gimple_code (cond1);
1383 gc2 = gimple_code (cond2);
1384
1385 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1386 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1387 return cond1 == cond2;
1388
1389 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1390 ? gimple_assign_rhs_code (cond1)
1391 : gimple_cond_code (cond1));
1392
1393 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1394 ? gimple_assign_rhs_code (cond2)
1395 : gimple_cond_code (cond2));
1396
1397 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1398 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1399 return false;
1400
1401 if (invert1)
1402 cond1_code = invert_tree_comparison (cond1_code, false);
1403 if (invert2)
1404 cond2_code = invert_tree_comparison (cond2_code, false);
1405
1406 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1407 ? gimple_assign_rhs1 (cond1)
1408 : gimple_cond_lhs (cond1));
1409 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1410 ? gimple_assign_rhs2 (cond1)
1411 : gimple_cond_rhs (cond1));
1412 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1413 ? gimple_assign_rhs1 (cond2)
1414 : gimple_cond_lhs (cond2));
1415 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1416 ? gimple_assign_rhs2 (cond2)
1417 : gimple_cond_rhs (cond2));
1418
1419 /* Assuming const operands have been swapped to the
1420 rhs at this point of the analysis. */
1421
1422 if (cond1_lhs != cond2_lhs)
1423 return false;
1424
1425 if (!is_gimple_constant (cond1_rhs)
1426 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1427 return (cond1_rhs == cond2_rhs);
1428
1429 if (!is_gimple_constant (cond2_rhs)
1430 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1431 return (cond1_rhs == cond2_rhs);
1432
1433 if (cond1_code == EQ_EXPR)
1434 return is_value_included_in (cond1_rhs,
1435 cond2_rhs, cond2_code);
1436 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1437 return ((cond2_code == cond1_code)
1438 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1439
1440 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1441 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1442 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1443 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1444 return false;
1445
1446 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1447 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1448 return false;
1449
1450 if (cond1_code == GT_EXPR)
1451 {
1452 cond1_code = GE_EXPR;
1453 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1454 cond1_rhs,
1455 fold_convert (TREE_TYPE (cond1_rhs),
1456 integer_one_node));
1457 }
1458 else if (cond1_code == LT_EXPR)
1459 {
1460 cond1_code = LE_EXPR;
1461 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1462 cond1_rhs,
1463 fold_convert (TREE_TYPE (cond1_rhs),
1464 integer_one_node));
1465 }
1466
1467 if (!cond1_rhs)
1468 return false;
1469
1470 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1471
1472 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1473 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1474 return is_value_included_in (cond1_rhs,
1475 cond2_rhs, cond2_code);
1476 else if (cond2_code == NE_EXPR)
1477 return
1478 (is_value_included_in (cond1_rhs,
1479 cond2_rhs, cond2_code)
1480 && !is_value_included_in (cond2_rhs,
1481 cond1_rhs, cond1_code));
1482 return false;
1483 }
1484
1485 /* Returns true if the domain of the condition expression
1486 in COND is a subset of any of the sub-conditions
1487 of the normalized condition NORM_COND. INVERT is a flag
1488 to indicate if COND needs to be inverted.
1489 REVERSE is a flag. When it is true, the check is reversed --
1490 it returns true if COND is a superset of any of the subconditions
1491 of NORM_COND. */
1492
1493 static bool
1494 is_subset_of_any (gimple cond, bool invert,
1495 norm_cond_t norm_cond, bool reverse)
1496 {
1497 size_t i;
1498 size_t len = norm_cond->conds.length ();
1499
1500 for (i = 0; i < len; i++)
1501 {
1502 if (is_gcond_subset_of (cond, invert,
1503 norm_cond->conds[i],
1504 false, reverse))
1505 return true;
1506 }
1507 return false;
1508 }
1509
1510 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1511 expressions (formed by following UD chains, not control
1512 dependence chains). The function returns true if the domain
1513 of OR expression NORM_COND1 is a subset of NORM_COND2's.
1514 The implementation is conservative, and it returns false if
1515 the inclusion relationship may not hold. */
1516
1517 static bool
1518 is_or_set_subset_of (norm_cond_t norm_cond1,
1519 norm_cond_t norm_cond2)
1520 {
1521 size_t i;
1522 size_t len = norm_cond1->conds.length ();
1523
1524 for (i = 0; i < len; i++)
1525 {
1526 if (!is_subset_of_any (norm_cond1->conds[i],
1527 false, norm_cond2, false))
1528 return false;
1529 }
1530 return true;
1531 }
1532
1533 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1534 expressions (formed by following UD chains, not control
1535 dependence chains). The function returns true if the domain
1536 of AND expression NORM_COND1 is a subset of NORM_COND2's. */
1537
1538 static bool
1539 is_and_set_subset_of (norm_cond_t norm_cond1,
1540 norm_cond_t norm_cond2)
1541 {
1542 size_t i;
1543 size_t len = norm_cond2->conds.length ();
1544
1545 for (i = 0; i < len; i++)
1546 {
1547 if (!is_subset_of_any (norm_cond2->conds[i],
1548 false, norm_cond1, true))
1549 return false;
1550 }
1551 return true;
1552 }
1553
1554 /* Returns true if the domain of NORM_COND1 is a subset
1555 of that of NORM_COND2. Returns false if it cannot be
1556 proved to be so. */
1557
1558 static bool
1559 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1560 norm_cond_t norm_cond2)
1561 {
1562 size_t i;
1563 enum tree_code code1, code2;
1564
1565 code1 = norm_cond1->cond_code;
1566 code2 = norm_cond2->cond_code;
1567
1568 if (code1 == BIT_AND_EXPR)
1569 {
1570 /* Both conditions are AND expressions. */
1571 if (code2 == BIT_AND_EXPR)
1572 return is_and_set_subset_of (norm_cond1, norm_cond2);
1573 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1574 expression. In this case, returns true if any subexpression
1575 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1576 else if (code2 == BIT_IOR_EXPR)
1577 {
1578 size_t len1;
1579 len1 = norm_cond1->conds.length ();
1580 for (i = 0; i < len1; i++)
1581 {
1582 gimple cond1 = norm_cond1->conds[i];
1583 if (is_subset_of_any (cond1, false, norm_cond2, false))
1584 return true;
1585 }
1586 return false;
1587 }
1588 else
1589 {
1590 gcc_assert (code2 == ERROR_MARK);
1591 gcc_assert (norm_cond2->conds.length () == 1);
1592 return is_subset_of_any (norm_cond2->conds[0],
1593 norm_cond2->invert, norm_cond1, true);
1594 }
1595 }
1596 /* NORM_COND1 is an OR expression */
1597 else if (code1 == BIT_IOR_EXPR)
1598 {
1599 if (code2 != code1)
1600 return false;
1601
1602 return is_or_set_subset_of (norm_cond1, norm_cond2);
1603 }
1604 else
1605 {
1606 gcc_assert (code1 == ERROR_MARK);
1607 gcc_assert (norm_cond1->conds.length () == 1);
1608 /* Conservatively returns false if NORM_COND1 is non-decomposable
1609 and NORM_COND2 is an AND expression. */
1610 if (code2 == BIT_AND_EXPR)
1611 return false;
1612
1613 if (code2 == BIT_IOR_EXPR)
1614 return is_subset_of_any (norm_cond1->conds[0],
1615 norm_cond1->invert, norm_cond2, false);
1616
1617 gcc_assert (code2 == ERROR_MARK);
1618 gcc_assert (norm_cond2->conds.length () == 1);
1619 return is_gcond_subset_of (norm_cond1->conds[0],
1620 norm_cond1->invert,
1621 norm_cond2->conds[0],
1622 norm_cond2->invert, false);
1623 }
1624 }
1625
1626 /* Returns true if the domain of the single predicate expression
1627 EXPR1 is a subset of that of EXPR2. Returns false if it
1628 cannot be proved. */
1629
1630 static bool
1631 is_pred_expr_subset_of (use_pred_info_t expr1,
1632 use_pred_info_t expr2)
1633 {
1634 gimple cond1, cond2;
1635 enum tree_code code1, code2;
1636 struct norm_cond norm_cond1, norm_cond2;
1637 bool is_subset = false;
1638
1639 cond1 = expr1->cond;
1640 cond2 = expr2->cond;
1641 code1 = gimple_cond_code (cond1);
1642 code2 = gimple_cond_code (cond2);
1643
1644 if (expr1->invert)
1645 code1 = invert_tree_comparison (code1, false);
1646 if (expr2->invert)
1647 code2 = invert_tree_comparison (code2, false);
1648
1649 /* Fast path -- match exactly */
1650 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1651 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1652 && (code1 == code2))
1653 return true;
1654
1655 /* Normalize conditions. To keep NE_EXPR, do not invert
1656 when both need inversion. */
1657 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1658 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1659
1660 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1661
1662 /* Free memory */
1663 norm_cond1.conds.release ();
1664 norm_cond2.conds.release ();
1665 return is_subset ;
1666 }
1667
1668 /* Returns true if the domain of PRED1 is a subset
1669 of that of PRED2. Returns false if it can not be proved so. */
1670
1671 static bool
1672 is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
1673 vec<use_pred_info_t> pred2)
1674 {
1675 size_t np1, np2, i1, i2;
1676
1677 np1 = pred1.length ();
1678 np2 = pred2.length ();
1679
1680 for (i2 = 0; i2 < np2; i2++)
1681 {
1682 bool found = false;
1683 use_pred_info_t info2
1684 = pred2[i2];
1685 for (i1 = 0; i1 < np1; i1++)
1686 {
1687 use_pred_info_t info1
1688 = pred1[i1];
1689 if (is_pred_expr_subset_of (info1, info2))
1690 {
1691 found = true;
1692 break;
1693 }
1694 }
1695 if (!found)
1696 return false;
1697 }
1698 return true;
1699 }
1700
1701 /* Returns true if the domain defined by
1702 one pred chain ONE_PRED is a subset of the domain
1703 of *PREDS. It returns false if ONE_PRED's domain is
1704 not a subset of any of the sub-domains of PREDS
1705 (corresponding to each individual chain in it), even
1706 though it may still be a subset of the whole domain
1707 of PREDS, which is the union (ORed) of all its subdomains.
1708 In other words, the result is conservative. */
1709
1710 static bool
1711 is_included_in (vec<use_pred_info_t> one_pred,
1712 vec<use_pred_info_t> *preds,
1713 size_t n)
1714 {
1715 size_t i;
1716
1717 for (i = 0; i < n; i++)
1718 {
1719 if (is_pred_chain_subset_of (one_pred, preds[i]))
1720 return true;
1721 }
1722
1723 return false;
1724 }
1725
1726 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1727 true if the domain defined by PREDS1 is a superset
1728 of PREDS2's domain. N1 and N2 are array sizes of PREDS1 and
1729 PREDS2 respectively. The implementation chooses not to build
1730 generic trees (and rely on the folding capability of the
1731 compiler), but instead performs brute force comparison of
1732 individual predicate chains (this won't be a compile time problem
1733 as the chains are pretty short). When the function returns
1734 false, it does not necessarily mean *PREDS1 is not a superset
1735 of *PREDS2, but only that it may not be so since the analysis can
1736 not prove it. In such cases, false warnings may still be
1737 emitted. */
1738
1739 static bool
1740 is_superset_of (vec<use_pred_info_t> *preds1,
1741 size_t n1,
1742 vec<use_pred_info_t> *preds2,
1743 size_t n2)
1744 {
1745 size_t i;
1746 vec<use_pred_info_t> one_pred_chain;
1747
1748 for (i = 0; i < n2; i++)
1749 {
1750 one_pred_chain = preds2[i];
1751 if (!is_included_in (one_pred_chain, preds1, n1))
1752 return false;
1753 }
1754
1755 return true;
1756 }
1757
1758 /* Comparison function used by qsort. It is used to
1759 sort predicate chains to allow predicate
1760 simplification. */
1761
1762 static int
1763 pred_chain_length_cmp (const void *p1, const void *p2)
1764 {
1765 use_pred_info_t i1, i2;
1766 vec<use_pred_info_t> const *chain1
1767 = (vec<use_pred_info_t> const *)p1;
1768 vec<use_pred_info_t> const *chain2
1769 = (vec<use_pred_info_t> const *)p2;
1770
1771 if (chain1->length () != chain2->length ())
1772 return (chain1->length () - chain2->length ());
1773
1774 i1 = (*chain1)[0];
1775 i2 = (*chain2)[0];
1776
1777 /* Allow predicates with similar prefixes to come together. */
1778 if (!i1->invert && i2->invert)
1779 return -1;
1780 else if (i1->invert && !i2->invert)
1781 return 1;
1782
1783 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1784 }
1785
1786 /* x OR (!x AND y) is equivalent to x OR y.
1787 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1788 into x1 OR x2 OR x3. PREDS is the predicate chains, and N is
1789 the number of chains. Returns true if normalization happens. */
1790
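/* Worked example (editorial addition, hypothetical predicates): the sorted
   chains

     chain 0: (a)
     chain 1: (!a) AND (b)
     chain 2: (!a) AND (!b) AND (c)

   have lengths 1, 2, 3; the loop below extracts x1 = a, x2 = b, x3 = c and
   rewrites the set as the equivalent (a) OR (b) OR (c), which makes the
   later superset check in is_superset_of more likely to succeed.  */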
1791 static bool
1792 normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
1793 {
1794 size_t i, j, ll;
1795 vec<use_pred_info_t> pred_chain;
1796 vec<use_pred_info_t> x = vNULL;
1797 use_pred_info_t xj = 0, nxj = 0;
1798
1799 if (*n < 2)
1800 return false;
1801
1802 /* First sort the chains in ascending order of lengths. */
1803 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1804 pred_chain = preds[0];
1805 ll = pred_chain.length ();
1806 if (ll != 1)
1807 {
1808 if (ll == 2)
1809 {
1810 use_pred_info_t xx, yy, xx2, nyy;
1811 vec<use_pred_info_t> pred_chain2 = preds[1];
1812 if (pred_chain2.length () != 2)
1813 return false;
1814
1815 /* See if simplification x AND y OR x AND !y is possible. */
1816 xx = pred_chain[0];
1817 yy = pred_chain[1];
1818 xx2 = pred_chain2[0];
1819 nyy = pred_chain2[1];
1820 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1821 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1822 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1823 || (xx->invert != xx2->invert))
1824 return false;
1825 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1826 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1827 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1828 || (yy->invert == nyy->invert))
1829 return false;
1830
1831 /* Now merge the first two chains. */
1832 free (yy);
1833 free (nyy);
1834 free (xx2);
1835 pred_chain.release ();
1836 pred_chain2.release ();
1837 pred_chain.safe_push (xx);
1838 preds[0] = pred_chain;
1839 for (i = 1; i < *n - 1; i++)
1840 preds[i] = preds[i + 1];
1841
1842 preds[*n - 1].create (0);
1843 *n = *n - 1;
1844 }
1845 else
1846 return false;
1847 }
1848
1849 x.safe_push (pred_chain[0]);
1850
1851 /* The loop extracts x1, x2, x3, etc from chains
1852 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1853 for (i = 1; i < *n; i++)
1854 {
1855 pred_chain = preds[i];
1856 if (pred_chain.length () != i + 1)
1857 return false;
1858
1859 for (j = 0; j < i; j++)
1860 {
1861 xj = x[j];
1862 nxj = pred_chain[j];
1863
1864 /* Check if nxj is !xj */
1865 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1866 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1867 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1868 || (xj->invert == nxj->invert))
1869 return false;
1870 }
1871
1872 x.safe_push (pred_chain[i]);
1873 }
1874
1875 /* Now normalize the pred chains using the extracted x1, x2, x3, etc. */
1876 for (j = 0; j < *n; j++)
1877 {
1878 use_pred_info_t t;
1879 xj = x[j];
1880
1881 t = XNEW (struct use_pred_info);
1882 *t = *xj;
1883
1884 x[j] = t;
1885 }
1886
1887 for (i = 0; i < *n; i++)
1888 {
1889 pred_chain = preds[i];
1890 for (j = 0; j < pred_chain.length (); j++)
1891 free (pred_chain[j]);
1892 pred_chain.release ();
1893 /* A new chain. */
1894 pred_chain.safe_push (x[i]);
1895 preds[i] = pred_chain;
1896 }
1897 return true;
1898 }
1899
1900
1901
1902 /* Computes the predicates that guard the use and checks
1903 if the incoming paths that have empty (or possibly
1904 empty) definition can be pruned/filtered. The function returns
1905 true if it can be determined that the use of PHI's def in
1906 USE_STMT is guarded with a predicate set not overlapping with
1907 predicate sets of all runtime paths that do not have a definition.
1908 Returns false if it is not or it can not be determined. USE_BB is
1909 the bb of the use (for phi operand use, the bb is not the bb of
1910 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1911 is a bit vector. If an operand of PHI is uninitialized, the
1912 corresponding bit in the vector is 1. VISITED_PHIS is a pointer
1913 set of phis being visited. */
1914
1915 static bool
1916 is_use_properly_guarded (gimple use_stmt,
1917 basic_block use_bb,
1918 gimple phi,
1919 unsigned uninit_opnds,
1920 struct pointer_set_t *visited_phis)
1921 {
1922 basic_block phi_bb;
1923 vec<use_pred_info_t> *preds = 0;
1924 vec<use_pred_info_t> *def_preds = 0;
1925 size_t num_preds = 0, num_def_preds = 0;
1926 bool has_valid_preds = false;
1927 bool is_properly_guarded = false;
1928
1929 if (pointer_set_insert (visited_phis, phi))
1930 return false;
1931
1932 phi_bb = gimple_bb (phi);
1933
1934 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1935 return false;
1936
1937 has_valid_preds = find_predicates (&preds, &num_preds,
1938 phi_bb, use_bb);
1939
1940 if (!has_valid_preds)
1941 {
1942 destroy_predicate_vecs (num_preds, preds);
1943 return false;
1944 }
1945
1946 if (dump_file)
1947 dump_predicates (use_stmt, num_preds, preds,
1948 "\nUse in stmt ");
1949
1950 has_valid_preds = find_def_preds (&def_preds,
1951 &num_def_preds, phi);
1952
1953 if (has_valid_preds)
1954 {
1955 bool normed;
1956 if (dump_file)
1957 dump_predicates (phi, num_def_preds, def_preds,
1958 "Operand defs of phi ");
1959
1960 normed = normalize_preds (def_preds, &num_def_preds);
1961 if (normed && dump_file)
1962 {
1963 fprintf (dump_file, "\nNormalized to\n");
1964 dump_predicates (phi, num_def_preds, def_preds,
1965 "Operand defs of phi ");
1966 }
1967 is_properly_guarded =
1968 is_superset_of (def_preds, num_def_preds,
1969 preds, num_preds);
1970 }
1971
1972 /* further prune the dead incoming phi edges. */
1973 if (!is_properly_guarded)
1974 is_properly_guarded
1975 = use_pred_not_overlap_with_undef_path_pred (
1976 num_preds, preds, phi, uninit_opnds, visited_phis);
1977
1978 destroy_predicate_vecs (num_preds, preds);
1979 destroy_predicate_vecs (num_def_preds, def_preds);
1980 return is_properly_guarded;
1981 }
1982
1983 /* Searches through all uses of a potentially
1984 uninitialized variable defined by PHI and returns a use
1985 statement if the use is not properly guarded. It returns
1986 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1987 holding the position(s) of uninit PHI operands. WORKLIST
1988 is the vector of candidate phis that may be updated by this
1989 function. ADDED_TO_WORKLIST is the pointer set tracking
1990 if the new phi is already in the worklist. */
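/* Note that when the unguarded use is itself another PHI, that PHI is
   not returned as the offending use; it is pushed onto WORKLIST
   instead, and PHI's result is added to possibly_undefined_names so
   that the queued PHI is later analyzed with this operand treated as
   possibly uninitialized. */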
1991
1992 static gimple
1993 find_uninit_use (gimple phi, unsigned uninit_opnds,
1994 vec<gimple> *worklist,
1995 struct pointer_set_t *added_to_worklist)
1996 {
1997 tree phi_result;
1998 use_operand_p use_p;
1999 gimple use_stmt;
2000 imm_use_iterator iter;
2001
2002 phi_result = gimple_phi_result (phi);
2003
2004 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
2005 {
2006 struct pointer_set_t *visited_phis;
2007 basic_block use_bb;
2008
2009 use_stmt = USE_STMT (use_p);
2010 if (is_gimple_debug (use_stmt))
2011 continue;
2012
2013 visited_phis = pointer_set_create ();
2014
2015 if (gimple_code (use_stmt) == GIMPLE_PHI)
2016 use_bb = gimple_phi_arg_edge (use_stmt,
2017 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2018 else
2019 use_bb = gimple_bb (use_stmt);
2020
2021 if (is_use_properly_guarded (use_stmt,
2022 use_bb,
2023 phi,
2024 uninit_opnds,
2025 visited_phis))
2026 {
2027 pointer_set_destroy (visited_phis);
2028 continue;
2029 }
2030 pointer_set_destroy (visited_phis);
2031
2032 if (dump_file && (dump_flags & TDF_DETAILS))
2033 {
2034 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2035 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2036 }
2037 /* Found one real use, return. */
2038 if (gimple_code (use_stmt) != GIMPLE_PHI)
2039 return use_stmt;
2040
2041 /* Found a phi use that is not guarded,
2042 add the phi to the worklist. */
2043 if (!pointer_set_insert (added_to_worklist,
2044 use_stmt))
2045 {
2046 if (dump_file && (dump_flags & TDF_DETAILS))
2047 {
2048 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2049 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2050 }
2051
2052 worklist->safe_push (use_stmt);
2053 pointer_set_insert (possibly_undefined_names, phi_result);
2054 }
2055 }
2056
2057 return NULL;
2058 }
2059
2060 /* Looks for inputs to PHI that are SSA_NAMEs with empty definitions
2061 and gives a warning if there exists a runtime path from the entry to a
2062 use of the PHI def that does not contain a definition. In other words,
2063 the warning is issued on the real use. The more dead paths the
2064 compiler can prune, the fewer false positives the warning produces. WORKLIST
2065 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
2066 a pointer set tracking if the new phi is added to the worklist or not. */
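/* For example (names are arbitrary), in

     int x;
     if (flag)
       x = foo ();
     bar (x);

   the path on which flag is false reaches bar (x) without defining x,
   so a "may be used uninitialized" warning is reported at that use. */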
2067
2068 static void
2069 warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
2070 struct pointer_set_t *added_to_worklist)
2071 {
2072 unsigned uninit_opnds;
2073 gimple uninit_use_stmt = 0;
2074 tree uninit_op;
2075
2076 /* Don't look at virtual operands. */
2077 if (virtual_operand_p (gimple_phi_result (phi)))
2078 return;
2079
2080 uninit_opnds = compute_uninit_opnds_pos (phi);
2081
2082 if (MASK_EMPTY (uninit_opnds))
2083 return;
2084
2085 if (dump_file && (dump_flags & TDF_DETAILS))
2086 {
2087 fprintf (dump_file, "[CHECK]: examining phi: ");
2088 print_gimple_stmt (dump_file, phi, 0, 0);
2089 }
2090
2091 /* Now check if we have any use of the value without proper guard. */
2092 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2093 worklist, added_to_worklist);
2094
2095 /* All uses are properly guarded. */
2096 if (!uninit_use_stmt)
2097 return;
2098
2099 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
2100 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2101 return;
2102 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2103 SSA_NAME_VAR (uninit_op),
2104 "%qD may be used uninitialized in this function",
2105 uninit_use_stmt);
2106
2107 }
2108
2109
2110 /* Entry point to the late uninitialized warning pass. */
2111
2112 static unsigned int
2113 execute_late_warn_uninitialized (void)
2114 {
2115 basic_block bb;
2116 gimple_stmt_iterator gsi;
2117 vec<gimple> worklist = vNULL;
2118 struct pointer_set_t *added_to_worklist;
2119
2120 calculate_dominance_info (CDI_DOMINATORS);
2121 calculate_dominance_info (CDI_POST_DOMINATORS);
2122 /* Re-do the plain uninitialized variable check, as optimization may have
2123 straightened control flow. Do this first so that we don't accidentally
2124 get a "may be" warning when we'd have seen an "is" warning later. */
2125 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
2126
2127 timevar_push (TV_TREE_UNINIT);
2128
2129 possibly_undefined_names = pointer_set_create ();
2130 added_to_worklist = pointer_set_create ();
2131
2132 /* Initialize the worklist. */
2133 FOR_EACH_BB (bb)
2134 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2135 {
2136 gimple phi = gsi_stmt (gsi);
2137 size_t n, i;
2138
2139 n = gimple_phi_num_args (phi);
2140
2141 /* Don't look at virtual operands. */
2142 if (virtual_operand_p (gimple_phi_result (phi)))
2143 continue;
2144
2145 for (i = 0; i < n; ++i)
2146 {
2147 tree op = gimple_phi_arg_def (phi, i);
2148 if (TREE_CODE (op) == SSA_NAME
2149 && uninit_undefined_value_p (op))
2150 {
2151 worklist.safe_push (phi);
2152 pointer_set_insert (added_to_worklist, phi);
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2154 {
2155 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2156 print_gimple_stmt (dump_file, phi, 0, 0);
2157 }
2158 break;
2159 }
2160 }
2161 }
2162
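  /* Process the worklist: each queued PHI has at least one operand
     that is undefined or possibly undefined, and examining it may
     queue further PHIs whose results become possibly undefined. */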
2163 while (worklist.length () != 0)
2164 {
2165 gimple cur_phi = 0;
2166 cur_phi = worklist.pop ();
2167 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2168 }
2169
2170 worklist.release ();
2171 pointer_set_destroy (added_to_worklist);
2172 pointer_set_destroy (possibly_undefined_names);
2173 possibly_undefined_names = NULL;
2174 free_dominance_info (CDI_POST_DOMINATORS);
2175 timevar_pop (TV_TREE_UNINIT);
2176 return 0;
2177 }
2178
2179 static bool
2180 gate_warn_uninitialized (void)
2181 {
2182 return warn_uninitialized != 0;
2183 }
2184
2185 namespace {
2186
2187 const pass_data pass_data_late_warn_uninitialized =
2188 {
2189 GIMPLE_PASS, /* type */
2190 "uninit", /* name */
2191 OPTGROUP_NONE, /* optinfo_flags */
2192 true, /* has_gate */
2193 true, /* has_execute */
2194 TV_NONE, /* tv_id */
2195 PROP_ssa, /* properties_required */
2196 0, /* properties_provided */
2197 0, /* properties_destroyed */
2198 0, /* todo_flags_start */
2199 0, /* todo_flags_finish */
2200 };
2201
2202 class pass_late_warn_uninitialized : public gimple_opt_pass
2203 {
2204 public:
2205 pass_late_warn_uninitialized (gcc::context *ctxt)
2206 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2207 {}
2208
2209 /* opt_pass methods: */
2210 opt_pass * clone () { return new pass_late_warn_uninitialized (m_ctxt); }
2211 bool gate () { return gate_warn_uninitialized (); }
2212 unsigned int execute () { return execute_late_warn_uninitialized (); }
2213
2214 }; // class pass_late_warn_uninitialized
2215
2216 } // anon namespace
2217
2218 gimple_opt_pass *
2219 make_pass_late_warn_uninitialized (gcc::context *ctxt)
2220 {
2221 return new pass_late_warn_uninitialized (ctxt);
2222 }
2223
2224
2225 static unsigned int
2226 execute_early_warn_uninitialized (void)
2227 {
2228 /* Currently, this pass always runs, but
2229 execute_late_warn_uninitialized only runs with optimization. With
2230 optimization we want to warn about possibly uninitialized variables
2231 as late as possible, so we don't do it here. However, without
2232 optimization we need to warn here about "may be uninitialized".
2233 */
2234 calculate_dominance_info (CDI_POST_DOMINATORS);
2235
2236 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
2237
2238 /* Post-dominator information cannot be reliably updated. Free it
2239 after use. */
2240
2241 free_dominance_info (CDI_POST_DOMINATORS);
2242 return 0;
2243 }
2244
2245
2246 namespace {
2247
2248 const pass_data pass_data_early_warn_uninitialized =
2249 {
2250 GIMPLE_PASS, /* type */
2251 "*early_warn_uninitialized", /* name */
2252 OPTGROUP_NONE, /* optinfo_flags */
2253 true, /* has_gate */
2254 true, /* has_execute */
2255 TV_TREE_UNINIT, /* tv_id */
2256 PROP_ssa, /* properties_required */
2257 0, /* properties_provided */
2258 0, /* properties_destroyed */
2259 0, /* todo_flags_start */
2260 0, /* todo_flags_finish */
2261 };
2262
2263 class pass_early_warn_uninitialized : public gimple_opt_pass
2264 {
2265 public:
2266 pass_early_warn_uninitialized (gcc::context *ctxt)
2267 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
2268 {}
2269
2270 /* opt_pass methods: */
2271 bool gate () { return gate_warn_uninitialized (); }
2272 unsigned int execute () { return execute_early_warn_uninitialized (); }
2273
2274 }; // class pass_early_warn_uninitialized
2275
2276 } // anon namespace
2277
2278 gimple_opt_pass *
2279 make_pass_early_warn_uninitialized (gcc::context *ctxt)
2280 {
2281 return new pass_early_warn_uninitialized (ctxt);
2282 }
2283
2284