gcc/tree-ssa-uninit.c
1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "gimple-pretty-print.h"
31 #include "bitmap.h"
32 #include "pointer-set.h"
33 #include "tree-ssa.h"
34 #include "gimple.h"
35 #include "tree-inline.h"
36 #include "hashtab.h"
37 #include "tree-pass.h"
38 #include "diagnostic-core.h"
39
40 /* This implements the pass that does predicate aware warning on uses of
41 possibly uninitialized variables. The pass first collects the set of
42 possibly uninitialized SSA names. For each such name, it walks through
43 all its immediate uses. For each immediate use, it rebuilds the condition
44 expression (the predicate) that guards the use. The predicate is then
45 examined to see if the variable is always defined under that same condition.
46 This is done either by pruning the unrealizable paths that lead to the
47 default definitions or by checking if the predicate set that guards the
48 defining paths is a superset of the use predicate. */
49
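/* As an illustrative sketch (the names below are made up, not taken
   from any testcase), the pass is meant to distinguish cases such as:

     int x;
     if (cond)
       x = init_x ();
     ...
     if (cond)
       foo (x);   // guarded use: the use predicate (cond) is implied
                  // by the definition predicate (cond), so no warning
     foo (x);     // unguarded use: may be uninitialized when !cond  */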
50
51 /* Pointer set of potentially undefined ssa names, i.e.,
52 ssa names that are defined by a phi with operands that
53 are undefined or potentially undefined. */
54 static struct pointer_set_t *possibly_undefined_names = 0;
55
56 /* Bit mask handling macros. */
57 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
58 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
59 #define MASK_EMPTY(mask) (mask == 0)
60
61 /* Returns the first bit position (starting from LSB)
62 in mask that is non zero. Returns -1 if the mask is empty. */
63 static int
64 get_mask_first_set_bit (unsigned mask)
65 {
66 int pos = 0;
67 if (mask == 0)
68 return -1;
69
70 while ((mask & (1 << pos)) == 0)
71 pos++;
72
73 return pos;
74 }
75 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
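/* A small worked example of the macros above: starting from
   mask == 0, MASK_SET_BIT (mask, 2) yields mask == 0x4,
   MASK_TEST_BIT (mask, 2) is then nonzero, MASK_EMPTY (mask) is
   false, and MASK_FIRST_SET_BIT (mask) returns 2.  */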
76
77 /* Return true if T, an SSA_NAME, has an undefined value. */
78 static bool
79 has_undefined_value_p (tree t)
80 {
81 return (ssa_undefined_value_p (t)
82 || (possibly_undefined_names
83 && pointer_set_contains (possibly_undefined_names, t)));
84 }
85
86
87
88 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
89 is set on SSA_NAME_VAR. */
90
91 static inline bool
92 uninit_undefined_value_p (tree t) {
93 if (!has_undefined_value_p (t))
94 return false;
95 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
96 return false;
97 return true;
98 }
99
100 /* Emit warnings for uninitialized variables. This is done in two passes.
101
102 The first pass notices real uses of SSA names with undefined values.
103 Such uses are unconditionally uninitialized, and we can be certain that
104 such a use is a mistake. This pass is run before most optimizations,
105 so that we catch as many as we can.
106
107 The second pass follows PHI nodes to find uses that are potentially
108 uninitialized. In this case we can't necessarily prove that the use
109 is really uninitialized. This pass is run after most optimizations,
110 so that we thread as many jumps as possible, and delete as much dead
111 code as possible, in order to reduce false positives. We also look
112 again for plain uninitialized variables, since optimization may have
113 changed conditionally uninitialized to unconditionally uninitialized. */
114
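/* For instance (hypothetical snippet, not from a testcase):

     int x, y;
     y = x + 1;       // always executed: "x is used uninitialized"
     if (cond)
       y = x + 1;     // conditional: "x may be used uninitialized"

   A use on an always-executed statement gets the -Wuninitialized form
   of the warning, while a conditional use gets the -Wmaybe-uninitialized
   form (see warn_uninitialized_vars below).  */
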
115 /* Emit a warning for EXPR based on variable VAR at the point in the
116 program where T, an SSA_NAME, is used while uninitialized. The
117 warning text is in GMSGID, DATA is the gimple statement at the use
118 site (it may be null), and WC is the warning code. */
119
120 static void
121 warn_uninit (enum opt_code wc, tree t,
122 tree expr, tree var, const char *gmsgid, void *data)
123 {
124 gimple context = (gimple) data;
125 location_t location, cfun_loc;
126 expanded_location xloc, floc;
127
128 if (!has_undefined_value_p (t))
129 return;
130
131 /* TREE_NO_WARNING either means we already warned, or the front end
132 wishes to suppress the warning. */
133 if ((context
134 && (gimple_no_warning_p (context)
135 || (gimple_assign_single_p (context)
136 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
137 || TREE_NO_WARNING (expr))
138 return;
139
140 location = (context != NULL && gimple_has_location (context))
141 ? gimple_location (context)
142 : DECL_SOURCE_LOCATION (var);
143 location = linemap_resolve_location (line_table, location,
144 LRK_SPELLING_LOCATION,
145 NULL);
146 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
147 xloc = expand_location (location);
148 floc = expand_location (cfun_loc);
149 if (warning_at (location, wc, gmsgid, expr))
150 {
151 TREE_NO_WARNING (expr) = 1;
152
153 if (location == DECL_SOURCE_LOCATION (var))
154 return;
155 if (xloc.file != floc.file
156 || linemap_location_before_p (line_table,
157 location, cfun_loc)
158 || linemap_location_before_p (line_table,
159 cfun->function_end_locus,
160 location))
161 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
162 }
163 }
164
165 static unsigned int
166 warn_uninitialized_vars (bool warn_possibly_uninitialized)
167 {
168 gimple_stmt_iterator gsi;
169 basic_block bb;
170
171 FOR_EACH_BB (bb)
172 {
173 bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
174 single_succ (ENTRY_BLOCK_PTR), bb);
175 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
176 {
177 gimple stmt = gsi_stmt (gsi);
178 use_operand_p use_p;
179 ssa_op_iter op_iter;
180 tree use;
181
182 if (is_gimple_debug (stmt))
183 continue;
184
185 /* We only do data flow with SSA_NAMEs, so that's all we
186 can warn about. */
187 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
188 {
189 use = USE_FROM_PTR (use_p);
190 if (always_executed)
191 warn_uninit (OPT_Wuninitialized, use,
192 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
193 "%qD is used uninitialized in this function",
194 stmt);
195 else if (warn_possibly_uninitialized)
196 warn_uninit (OPT_Wmaybe_uninitialized, use,
197 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
198 "%qD may be used uninitialized in this function",
199 stmt);
200 }
201
202 /* For memory the only cheap thing we can do is see if we
203 have a use of the default def of the virtual operand.
204 ??? Note that at -O0 we do not have virtual operands.
205 ??? Not so cheap would be to use the alias oracle via
206 walk_aliased_vdefs; if we don't find any aliasing vdef,
207 warn as is-used-uninitialized; if we don't find an aliasing
208 vdef that kills our use (stmt_kills_ref_p), warn as
209 may-be-used-uninitialized. But this walk is quadratic and
210 so must be limited, which means we would miss warning
211 opportunities. */
212 use = gimple_vuse (stmt);
213 if (use
214 && gimple_assign_single_p (stmt)
215 && !gimple_vdef (stmt)
216 && SSA_NAME_IS_DEFAULT_DEF (use))
217 {
218 tree rhs = gimple_assign_rhs1 (stmt);
219 tree base = get_base_address (rhs);
220
221 /* Do not warn if it can be initialized outside this function. */
222 if (TREE_CODE (base) != VAR_DECL
223 || DECL_HARD_REGISTER (base)
224 || is_global_var (base))
225 continue;
226
227 if (always_executed)
228 warn_uninit (OPT_Wuninitialized, use,
229 gimple_assign_rhs1 (stmt), base,
230 "%qE is used uninitialized in this function",
231 stmt);
232 else if (warn_possibly_uninitialized)
233 warn_uninit (OPT_Wmaybe_uninitialized, use,
234 gimple_assign_rhs1 (stmt), base,
235 "%qE may be used uninitialized in this function",
236 stmt);
237 }
238 }
239 }
240
241 return 0;
242 }
243
244 /* Checks if the operand OPND of PHI is defined by
245 another phi with one operand defined by this PHI,
246 while the rest of its operands are all defined. If so,
247 returns true to skip this operand as being
248 redundant. Can be enhanced to be more general. */
249
250 static bool
251 can_skip_redundant_opnd (tree opnd, gimple phi)
252 {
253 gimple op_def;
254 tree phi_def;
255 int i, n;
256
257 phi_def = gimple_phi_result (phi);
258 op_def = SSA_NAME_DEF_STMT (opnd);
259 if (gimple_code (op_def) != GIMPLE_PHI)
260 return false;
261 n = gimple_phi_num_args (op_def);
262 for (i = 0; i < n; ++i)
263 {
264 tree op = gimple_phi_arg_def (op_def, i);
265 if (TREE_CODE (op) != SSA_NAME)
266 continue;
267 if (op != phi_def && uninit_undefined_value_p (op))
268 return false;
269 }
270
271 return true;
272 }
273
274 /* Returns a bit mask holding the positions of arguments in PHI
275 that have empty (or possibly empty) definitions. */
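/* For example, for a phi node like

     x_3 = PHI <x_1(D), 5, x_2(D)>

   (hypothetical SSA names, where "(D)" marks a default definition),
   operands 0 and 2 are possibly undefined, so the returned mask is
   0b101, i.e. 5.  */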
276
277 static unsigned
278 compute_uninit_opnds_pos (gimple phi)
279 {
280 size_t i, n;
281 unsigned uninit_opnds = 0;
282
283 n = gimple_phi_num_args (phi);
284 /* Bail out for phi with too many args. */
285 if (n > 32)
286 return 0;
287
288 for (i = 0; i < n; ++i)
289 {
290 tree op = gimple_phi_arg_def (phi, i);
291 if (TREE_CODE (op) == SSA_NAME
292 && uninit_undefined_value_p (op)
293 && !can_skip_redundant_opnd (op, phi))
294 {
295 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
296 {
297 /* Ignore SSA_NAMEs that appear on abnormal edges
298 somewhere. */
299 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
300 continue;
301 }
302 MASK_SET_BIT (uninit_opnds, i);
303 }
304 }
305 return uninit_opnds;
306 }
307
308 /* Find the immediate postdominator PDOM of the specified
309 basic block BLOCK. */
310
311 static inline basic_block
312 find_pdom (basic_block block)
313 {
314 if (block == EXIT_BLOCK_PTR)
315 return EXIT_BLOCK_PTR;
316 else
317 {
318 basic_block bb
319 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
320 if (! bb)
321 return EXIT_BLOCK_PTR;
322 return bb;
323 }
324 }
325
326 /* Find the immediate DOM of the specified
327 basic block BLOCK. */
328
329 static inline basic_block
330 find_dom (basic_block block)
331 {
332 if (block == ENTRY_BLOCK_PTR)
333 return ENTRY_BLOCK_PTR;
334 else
335 {
336 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
337 if (! bb)
338 return ENTRY_BLOCK_PTR;
339 return bb;
340 }
341 }
342
343 /* Returns true if BB1 is postdominating BB2 and BB1 is
344 not a loop exit bb. The loop exit bb check is simple and does
345 not cover all cases. */
346
347 static bool
348 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
349 {
350 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
351 return false;
352
353 if (single_pred_p (bb1) && !single_succ_p (bb2))
354 return false;
355
356 return true;
357 }
358
359 /* Find the closest postdominator of a specified BB, which is control
360 equivalent to BB. */
361
362 static inline basic_block
363 find_control_equiv_block (basic_block bb)
364 {
365 basic_block pdom;
366
367 pdom = find_pdom (bb);
368
369 /* Skip the postdominating bb that is also loop exit. */
370 if (!is_non_loop_exit_postdominating (pdom, bb))
371 return NULL;
372
373 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
374 return pdom;
375
376 return NULL;
377 }
378
379 #define MAX_NUM_CHAINS 8
380 #define MAX_CHAIN_LEN 5
381 #define MAX_POSTDOM_CHECK 8
382
383 /* Computes the control dependence chains (paths of edges)
384 for DEP_BB up to the dominating basic block BB (the head node of a
385 chain should be dominated by it). CD_CHAINS is a pointer to a
386 dynamic array holding the result chains. CUR_CD_CHAIN is the current
387 chain being computed. *NUM_CHAINS is the total number of chains. The
388 function returns true if the information is successfully computed,
389 and false if there is no control dependence or it is not computed. */
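/* As a rough illustration (block and variable names are made up), for

     if (a)          <-- bb A
       {
         if (b)      <-- bb B
           foo ();   <-- bb C
       }

   the control dependence chain computed for C relative to A is the
   pair of edges (A's true edge, B's true edge); each such chain is
   later turned into one composite predicate, here roughly a && b.  */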
390
391 static bool
392 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
393 vec<edge> *cd_chains,
394 size_t *num_chains,
395 vec<edge> *cur_cd_chain)
396 {
397 edge_iterator ei;
398 edge e;
399 size_t i;
400 bool found_cd_chain = false;
401 size_t cur_chain_len = 0;
402
403 if (EDGE_COUNT (bb->succs) < 2)
404 return false;
405
406 /* Could use a set instead. */
407 cur_chain_len = cur_cd_chain->length ();
408 if (cur_chain_len > MAX_CHAIN_LEN)
409 return false;
410
411 for (i = 0; i < cur_chain_len; i++)
412 {
413 edge e = (*cur_cd_chain)[i];
414 /* cycle detected. */
415 if (e->src == bb)
416 return false;
417 }
418
419 FOR_EACH_EDGE (e, ei, bb->succs)
420 {
421 basic_block cd_bb;
422 int post_dom_check = 0;
423 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
424 continue;
425
426 cd_bb = e->dest;
427 cur_cd_chain->safe_push (e);
428 while (!is_non_loop_exit_postdominating (cd_bb, bb))
429 {
430 if (cd_bb == dep_bb)
431 {
432 /* Found a direct control dependence. */
433 if (*num_chains < MAX_NUM_CHAINS)
434 {
435 cd_chains[*num_chains] = cur_cd_chain->copy ();
436 (*num_chains)++;
437 }
438 found_cd_chain = true;
439 /* Check the path from the next edge. */
440 break;
441 }
442
443 /* Now check if DEP_BB is indirectly control dependent on BB. */
444 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
445 num_chains, cur_cd_chain))
446 {
447 found_cd_chain = true;
448 break;
449 }
450
451 cd_bb = find_pdom (cd_bb);
452 post_dom_check++;
453 if (cd_bb == EXIT_BLOCK_PTR || post_dom_check > MAX_POSTDOM_CHECK)
454 break;
455 }
456 cur_cd_chain->pop ();
457 gcc_assert (cur_cd_chain->length () == cur_chain_len);
458 }
459 gcc_assert (cur_cd_chain->length () == cur_chain_len);
460
461 return found_cd_chain;
462 }
463
464 typedef struct use_pred_info
465 {
466 gimple cond;
467 bool invert;
468 } *use_pred_info_t;
469
470
471
472 /* Converts the chains of control dependence edges into a set of
473 predicates. A control dependence chain is represented by a vector
474 of edges. DEP_CHAINS points to an array of dependence chains.
475 NUM_CHAINS is the size of the chain array. One edge in a dependence
476 chain is mapped to a predicate expression represented by the
477 use_pred_info_t type. One dependence chain is converted to a composite
478 predicate that is the AND of the use_pred_info_t mapped to each edge.
479 A composite predicate is represented by a vector of use_pred_info_t. On
480 return, *PREDS points to the resulting array of composite predicates.
481 *NUM_PREDS is the number of composite predicates. */
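/* Continuing the illustration above: a chain consisting of the true
   edges out of  if (a_1 != 0)  and  if (b_2 != 0)  (made-up names)
   becomes one composite predicate with two use_pred_info_t entries,
   both with invert == false; had an edge been the false edge of its
   condition, its entry would have invert == true.  */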
482
483 static bool
484 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
485 size_t num_chains,
486 vec<use_pred_info_t> **preds,
487 size_t *num_preds)
488 {
489 bool has_valid_pred = false;
490 size_t i, j;
491 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
492 return false;
493
494 /* Now convert the control dep chain into a set
495 of predicates. */
496 typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
497 *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
498 *num_preds = num_chains;
499
500 for (i = 0; i < num_chains; i++)
501 {
502 vec<edge> one_cd_chain = dep_chains[i];
503
504 has_valid_pred = false;
505 for (j = 0; j < one_cd_chain.length (); j++)
506 {
507 gimple cond_stmt;
508 gimple_stmt_iterator gsi;
509 basic_block guard_bb;
510 use_pred_info_t one_pred;
511 edge e;
512
513 e = one_cd_chain[j];
514 guard_bb = e->src;
515 gsi = gsi_last_bb (guard_bb);
516 if (gsi_end_p (gsi))
517 {
518 has_valid_pred = false;
519 break;
520 }
521 cond_stmt = gsi_stmt (gsi);
522 if (gimple_code (cond_stmt) == GIMPLE_CALL
523 && EDGE_COUNT (e->src->succs) >= 2)
524 {
525 /* Ignore EH edge. Can add assertion
526 on the other edge's flag. */
527 continue;
528 }
529 /* Skip if there is essentially one successor. */
530 if (EDGE_COUNT (e->src->succs) == 2)
531 {
532 edge e1;
533 edge_iterator ei1;
534 bool skip = false;
535
536 FOR_EACH_EDGE (e1, ei1, e->src->succs)
537 {
538 if (EDGE_COUNT (e1->dest->succs) == 0)
539 {
540 skip = true;
541 break;
542 }
543 }
544 if (skip)
545 continue;
546 }
547 if (gimple_code (cond_stmt) != GIMPLE_COND)
548 {
549 has_valid_pred = false;
550 break;
551 }
552 one_pred = XNEW (struct use_pred_info);
553 one_pred->cond = cond_stmt;
554 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
555 (*preds)[i].safe_push (one_pred);
556 has_valid_pred = true;
557 }
558
559 if (!has_valid_pred)
560 break;
561 }
562 return has_valid_pred;
563 }
564
565 /* Computes all control dependence chains for USE_BB. The control
566 dependence chains are then converted to an array of composite
567 predicates pointed to by PREDS. PHI_BB is the basic block of
568 the phi whose result is used in USE_BB. */
569
570 static bool
571 find_predicates (vec<use_pred_info_t> **preds,
572 size_t *num_preds,
573 basic_block phi_bb,
574 basic_block use_bb)
575 {
576 size_t num_chains = 0, i;
577 vec<edge> *dep_chains = 0;
578 vec<edge> cur_chain = vNULL;
579 bool has_valid_pred = false;
580 basic_block cd_root = 0;
581
582 typedef vec<edge> vec_edge_heap;
583 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
584
585 /* First find the closest bb that is control equivalent to PHI_BB
586 that also dominates USE_BB. */
587 cd_root = phi_bb;
588 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
589 {
590 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
591 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
592 cd_root = ctrl_eq_bb;
593 else
594 break;
595 }
596
597 compute_control_dep_chain (cd_root, use_bb,
598 dep_chains, &num_chains,
599 &cur_chain);
600
601 has_valid_pred
602 = convert_control_dep_chain_into_preds (dep_chains,
603 num_chains,
604 preds,
605 num_preds);
606 /* Free individual chain */
607 cur_chain.release ();
608 for (i = 0; i < num_chains; i++)
609 dep_chains[i].release ();
610 free (dep_chains);
611 return has_valid_pred;
612 }
613
614 /* Computes the set of incoming edges of PHI that have non-empty
615 definitions of a phi chain. The collection will be done
616 recursively on operands that are defined by phis. CD_ROOT
617 is the control dependence root. *EDGES holds the result, and
618 VISITED_PHIS is a pointer set for detecting cycles. */
619
620 static void
621 collect_phi_def_edges (gimple phi, basic_block cd_root,
622 vec<edge> *edges,
623 struct pointer_set_t *visited_phis)
624 {
625 size_t i, n;
626 edge opnd_edge;
627 tree opnd;
628
629 if (pointer_set_insert (visited_phis, phi))
630 return;
631
632 n = gimple_phi_num_args (phi);
633 for (i = 0; i < n; i++)
634 {
635 opnd_edge = gimple_phi_arg_edge (phi, i);
636 opnd = gimple_phi_arg_def (phi, i);
637
638 if (TREE_CODE (opnd) != SSA_NAME)
639 {
640 if (dump_file && (dump_flags & TDF_DETAILS))
641 {
642 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
643 print_gimple_stmt (dump_file, phi, 0, 0);
644 }
645 edges->safe_push (opnd_edge);
646 }
647 else
648 {
649 gimple def = SSA_NAME_DEF_STMT (opnd);
650
651 if (gimple_code (def) == GIMPLE_PHI
652 && dominated_by_p (CDI_DOMINATORS,
653 gimple_bb (def), cd_root))
654 collect_phi_def_edges (def, cd_root, edges,
655 visited_phis);
656 else if (!uninit_undefined_value_p (opnd))
657 {
658 if (dump_file && (dump_flags & TDF_DETAILS))
659 {
660 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
661 print_gimple_stmt (dump_file, phi, 0, 0);
662 }
663 edges->safe_push (opnd_edge);
664 }
665 }
666 }
667 }
668
669 /* For each use edge of PHI, computes all control dependence chains.
670 The control dependence chains are then converted to an array of
671 composite predicates pointed to by PREDS. */
672
673 static bool
674 find_def_preds (vec<use_pred_info_t> **preds,
675 size_t *num_preds, gimple phi)
676 {
677 size_t num_chains = 0, i, n;
678 vec<edge> *dep_chains = 0;
679 vec<edge> cur_chain = vNULL;
680 vec<edge> def_edges = vNULL;
681 bool has_valid_pred = false;
682 basic_block phi_bb, cd_root = 0;
683 struct pointer_set_t *visited_phis;
684
685 typedef vec<edge> vec_edge_heap;
686 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
687
688 phi_bb = gimple_bb (phi);
689 /* First find the closest dominating bb to be
690 the control dependence root */
691 cd_root = find_dom (phi_bb);
692 if (!cd_root)
693 return false;
694
695 visited_phis = pointer_set_create ();
696 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
697 pointer_set_destroy (visited_phis);
698
699 n = def_edges.length ();
700 if (n == 0)
701 return false;
702
703 for (i = 0; i < n; i++)
704 {
705 size_t prev_nc, j;
706 edge opnd_edge;
707
708 opnd_edge = def_edges[i];
709 prev_nc = num_chains;
710 compute_control_dep_chain (cd_root, opnd_edge->src,
711 dep_chains, &num_chains,
712 &cur_chain);
713 /* Free individual chain */
714 cur_chain.release ();
715
716 /* Now update the newly added chains with
717 the phi operand edge: */
718 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
719 {
720 if (prev_nc == num_chains
721 && num_chains < MAX_NUM_CHAINS)
722 num_chains++;
723 for (j = prev_nc; j < num_chains; j++)
724 {
725 dep_chains[j].safe_push (opnd_edge);
726 }
727 }
728 }
729
730 has_valid_pred
731 = convert_control_dep_chain_into_preds (dep_chains,
732 num_chains,
733 preds,
734 num_preds);
735 for (i = 0; i < num_chains; i++)
736 dep_chains[i].release ();
737 free (dep_chains);
738 return has_valid_pred;
739 }
740
741 /* Dumps the predicates (PREDS) for USESTMT. */
742
743 static void
744 dump_predicates (gimple usestmt, size_t num_preds,
745 vec<use_pred_info_t> *preds,
746 const char* msg)
747 {
748 size_t i, j;
749 vec<use_pred_info_t> one_pred_chain;
750 fprintf (dump_file, msg);
751 print_gimple_stmt (dump_file, usestmt, 0, 0);
752 fprintf (dump_file, "is guarded by :\n");
753 /* do some dumping here: */
754 for (i = 0; i < num_preds; i++)
755 {
756 size_t np;
757
758 one_pred_chain = preds[i];
759 np = one_pred_chain.length ();
760
761 for (j = 0; j < np; j++)
762 {
763 use_pred_info_t one_pred
764 = one_pred_chain[j];
765 if (one_pred->invert)
766 fprintf (dump_file, " (.NOT.) ");
767 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
768 if (j < np - 1)
769 fprintf (dump_file, "(.AND.)\n");
770 }
771 if (i < num_preds - 1)
772 fprintf (dump_file, "(.OR.)\n");
773 }
774 }
775
776 /* Destroys the predicate set *PREDS. */
777
778 static void
779 destroy_predicate_vecs (size_t n,
780 vec<use_pred_info_t> * preds)
781 {
782 size_t i, j;
783 for (i = 0; i < n; i++)
784 {
785 for (j = 0; j < preds[i].length (); j++)
786 free (preds[i][j]);
787 preds[i].release ();
788 }
789 free (preds);
790 }
791
792
793 /* Computes the 'normalized' conditional code with operand
794 swapping and condition inversion. */
795
796 static enum tree_code
797 get_cmp_code (enum tree_code orig_cmp_code,
798 bool swap_cond, bool invert)
799 {
800 enum tree_code tc = orig_cmp_code;
801
802 if (swap_cond)
803 tc = swap_tree_comparison (orig_cmp_code);
804 if (invert)
805 tc = invert_tree_comparison (tc, false);
806
807 switch (tc)
808 {
809 case LT_EXPR:
810 case LE_EXPR:
811 case GT_EXPR:
812 case GE_EXPR:
813 case EQ_EXPR:
814 case NE_EXPR:
815 break;
816 default:
817 return ERROR_MARK;
818 }
819 return tc;
820 }
821
822 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
823 all values in the range satisfy (x CMPC BOUNDARY) == true. */
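/* For example, with VAL == 3, BOUNDARY == 5 and CMPC == LT_EXPR the
   function returns true (3 < 5 holds), whereas with VAL == 7 it
   returns false.  */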
824
825 static bool
826 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
827 {
828 bool inverted = false;
829 bool is_unsigned;
830 bool result;
831
832 /* Only handle integer constant here. */
833 if (TREE_CODE (val) != INTEGER_CST
834 || TREE_CODE (boundary) != INTEGER_CST)
835 return true;
836
837 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
838
839 if (cmpc == GE_EXPR || cmpc == GT_EXPR
840 || cmpc == NE_EXPR)
841 {
842 cmpc = invert_tree_comparison (cmpc, false);
843 inverted = true;
844 }
845
846 if (is_unsigned)
847 {
848 if (cmpc == EQ_EXPR)
849 result = tree_int_cst_equal (val, boundary);
850 else if (cmpc == LT_EXPR)
851 result = INT_CST_LT_UNSIGNED (val, boundary);
852 else
853 {
854 gcc_assert (cmpc == LE_EXPR);
855 result = (tree_int_cst_equal (val, boundary)
856 || INT_CST_LT_UNSIGNED (val, boundary));
857 }
858 }
859 else
860 {
861 if (cmpc == EQ_EXPR)
862 result = tree_int_cst_equal (val, boundary);
863 else if (cmpc == LT_EXPR)
864 result = INT_CST_LT (val, boundary);
865 else
866 {
867 gcc_assert (cmpc == LE_EXPR);
868 result = (tree_int_cst_equal (val, boundary)
869 || INT_CST_LT (val, boundary));
870 }
871 }
872
873 if (inverted)
874 result ^= 1;
875
876 return result;
877 }
878
879 /* Returns true if PRED is common among all the predicate
880 chains (PREDS) (and therefore can be factored out).
881 NUM_PRED_CHAINS is the size of the array PREDS. */
882
883 static bool
884 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
885 vec<use_pred_info_t> *preds,
886 size_t num_pred_chains)
887 {
888 size_t i, j, n;
889
890 /* Trivial case. */
891 if (num_pred_chains == 1)
892 return true;
893
894 for (i = 1; i < num_pred_chains; i++)
895 {
896 bool found = false;
897 vec<use_pred_info_t> one_chain = preds[i];
898 n = one_chain.length ();
899 for (j = 0; j < n; j++)
900 {
901 use_pred_info_t pred2
902 = one_chain[j];
903 /* We could relax the condition comparison to not
904 use address comparison. However, the most common
905 case is that multiple control dependent paths share
906 a common path prefix, so address comparison should
907 be ok. */
908
909 if (pred2->cond == pred->cond
910 && pred2->invert == pred->invert)
911 {
912 found = true;
913 break;
914 }
915 }
916 if (!found)
917 return false;
918 }
919 return true;
920 }
921
922 /* Forward declaration. */
923 static bool
924 is_use_properly_guarded (gimple use_stmt,
925 basic_block use_bb,
926 gimple phi,
927 unsigned uninit_opnds,
928 struct pointer_set_t *visited_phis);
929
930 /* Returns true if all uninitialized opnds are pruned. Returns false
931 otherwise. PHI is the phi node with uninitialized operands,
932 UNINIT_OPNDS is the bit mask of the uninitialized operand positions,
933 FLAG_DEF is the statement defining the flag guarding the use of the
934 PHI output, BOUNDARY_CST is the const value used in the predicate
935 associated with the flag, CMP_CODE is the comparison code used in
936 the predicate, VISITED_PHIS is the pointer set of phis visited, and
937 VISITED_FLAG_PHIS is a pointer to the bitmap of flag definitions
938 that are also phis.
939
940 Example scenario:
941
942 BB1:
943 flag_1 = phi <0, 1> // (1)
944 var_1 = phi <undef, some_val>
945
946
947 BB2:
948 flag_2 = phi <0, flag_1, flag_1> // (2)
949 var_2 = phi <undef, var_1, var_1>
950 if (flag_2 == 1)
951 goto BB3;
952
953 BB3:
954 use of var_2 // (3)
955
956 Because some flag arg in (1) is not constant, if we do not look into the
957 flag phis recursively, it is conservatively treated as unknown and var_1
958 is thought to flow into the use at (3). Since var_1 is potentially uninitialized,
959 a false warning will be emitted. By checking (1) recursively, the compiler can
960 find out that only some_val (which is defined) can flow into (3), which is OK.
961
962 */
963
964 static bool
965 prune_uninit_phi_opnds_in_unrealizable_paths (
966 gimple phi, unsigned uninit_opnds,
967 gimple flag_def, tree boundary_cst,
968 enum tree_code cmp_code,
969 struct pointer_set_t *visited_phis,
970 bitmap *visited_flag_phis)
971 {
972 unsigned i;
973
974 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
975 {
976 tree flag_arg;
977
978 if (!MASK_TEST_BIT (uninit_opnds, i))
979 continue;
980
981 flag_arg = gimple_phi_arg_def (flag_def, i);
982 if (!is_gimple_constant (flag_arg))
983 {
984 gimple flag_arg_def, phi_arg_def;
985 tree phi_arg;
986 unsigned uninit_opnds_arg_phi;
987
988 if (TREE_CODE (flag_arg) != SSA_NAME)
989 return false;
990 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
991 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
992 return false;
993
994 phi_arg = gimple_phi_arg_def (phi, i);
995 if (TREE_CODE (phi_arg) != SSA_NAME)
996 return false;
997
998 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
999 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
1000 return false;
1001
1002 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1003 return false;
1004
1005 if (!*visited_flag_phis)
1006 *visited_flag_phis = BITMAP_ALLOC (NULL);
1007
1008 if (bitmap_bit_p (*visited_flag_phis,
1009 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
1010 return false;
1011
1012 bitmap_set_bit (*visited_flag_phis,
1013 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1014
1015 /* Now recursively prune the uninitialized phi args. */
1016 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1017 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
1018 phi_arg_def, uninit_opnds_arg_phi,
1019 flag_arg_def, boundary_cst, cmp_code,
1020 visited_phis, visited_flag_phis))
1021 return false;
1022
1023 bitmap_clear_bit (*visited_flag_phis,
1024 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1025 continue;
1026 }
1027
1028 /* Now check if the constant is in the guarded range. */
1029 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1030 {
1031 tree opnd;
1032 gimple opnd_def;
1033
1034 /* Now we know that this undefined edge is not
1035 pruned. If the operand is defined by another phi,
1036 we can further prune the incoming edges of that
1037 phi by checking the predicates of this operand. */
1038
1039 opnd = gimple_phi_arg_def (phi, i);
1040 opnd_def = SSA_NAME_DEF_STMT (opnd);
1041 if (gimple_code (opnd_def) == GIMPLE_PHI)
1042 {
1043 edge opnd_edge;
1044 unsigned uninit_opnds2
1045 = compute_uninit_opnds_pos (opnd_def);
1046 gcc_assert (!MASK_EMPTY (uninit_opnds2));
1047 opnd_edge = gimple_phi_arg_edge (phi, i);
1048 if (!is_use_properly_guarded (phi,
1049 opnd_edge->src,
1050 opnd_def,
1051 uninit_opnds2,
1052 visited_phis))
1053 return false;
1054 }
1055 else
1056 return false;
1057 }
1058 }
1059
1060 return true;
1061 }
1062
1063 /* A helper function that determines if the predicate set
1064 of the use is not overlapping with that of the uninit paths.
1065 The most common scenario of a guarded use is in Example 1:
1066 Example 1:
1067 if (some_cond)
1068 {
1069 x = ...;
1070 flag = true;
1071 }
1072
1073 ... some code ...
1074
1075 if (flag)
1076 use (x);
1077
1078 The real world examples are usually more complicated, but similar
1079 and usually result from inlining:
1080
1081 bool init_func (int * x)
1082 {
1083 if (some_cond)
1084 return false;
1085 *x = ..
1086 return true;
1087 }
1088
1089 void foo(..)
1090 {
1091 int x;
1092
1093 if (!init_func(&x))
1094 return;
1095
1096 .. some_code ...
1097 use (x);
1098 }
1099
1100 Another possible use scenario is in the following trivial example:
1101
1102 Example 2:
1103 if (n > 0)
1104 x = 1;
1105 ...
1106 if (n > 0)
1107 {
1108 if (m < 2)
1109 .. = x;
1110 }
1111
1112 Predicate analysis needs to compute the composite predicate:
1113
1114 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1115 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1116 (the predicate chain for phi operand defs can be computed
1117 starting from a bb that is control equivalent to the phi's
1118 bb and is dominating the operand def.)
1119
1120 and check overlapping:
1121 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1122 <==> false
1123
1124 This implementation provides a framework that can handle such
1125 scenarios. (Note that many simple cases are handled properly
1126 without the predicate analysis -- this is due to the jump threading
1127 transformation, which eliminates the merge point and thus makes
1128 path-sensitive analysis unnecessary.)
1129
1130 NUM_PREDS is the number of predicate chains, PREDS is
1131 the array of chains, PHI is the phi node whose incoming (undefined)
1132 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
1133 uninit operand positions. VISITED_PHIS is the pointer set of phi
1134 stmts being checked. */
1135
1136
1137 static bool
1138 use_pred_not_overlap_with_undef_path_pred (
1139 size_t num_preds,
1140 vec<use_pred_info_t> *preds,
1141 gimple phi, unsigned uninit_opnds,
1142 struct pointer_set_t *visited_phis)
1143 {
1144 unsigned int i, n;
1145 gimple flag_def = 0;
1146 tree boundary_cst = 0;
1147 enum tree_code cmp_code;
1148 bool swap_cond = false;
1149 bool invert = false;
1150 vec<use_pred_info_t> the_pred_chain;
1151 bitmap visited_flag_phis = NULL;
1152 bool all_pruned = false;
1153
1154 gcc_assert (num_preds > 0);
1155 /* Find within the common prefix of multiple predicate chains
1156 a predicate that is a comparison of a flag variable against
1157 a constant. */
1158 the_pred_chain = preds[0];
1159 n = the_pred_chain.length ();
1160 for (i = 0; i < n; i++)
1161 {
1162 gimple cond;
1163 tree cond_lhs, cond_rhs, flag = 0;
1164
1165 use_pred_info_t the_pred
1166 = the_pred_chain[i];
1167
1168 cond = the_pred->cond;
1169 invert = the_pred->invert;
1170 cond_lhs = gimple_cond_lhs (cond);
1171 cond_rhs = gimple_cond_rhs (cond);
1172 cmp_code = gimple_cond_code (cond);
1173
1174 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1175 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1176 {
1177 boundary_cst = cond_rhs;
1178 flag = cond_lhs;
1179 }
1180 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1181 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1182 {
1183 boundary_cst = cond_lhs;
1184 flag = cond_rhs;
1185 swap_cond = true;
1186 }
1187
1188 if (!flag)
1189 continue;
1190
1191 flag_def = SSA_NAME_DEF_STMT (flag);
1192
1193 if (!flag_def)
1194 continue;
1195
1196 if ((gimple_code (flag_def) == GIMPLE_PHI)
1197 && (gimple_bb (flag_def) == gimple_bb (phi))
1198 && find_matching_predicate_in_rest_chains (
1199 the_pred, preds, num_preds))
1200 break;
1201
1202 flag_def = 0;
1203 }
1204
1205 if (!flag_def)
1206 return false;
1207
1208 /* Now check that every uninit incoming edge has a constant flag value
1209 that is in conflict with the use guard/predicate. */
1210 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1211
1212 if (cmp_code == ERROR_MARK)
1213 return false;
1214
1215 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1216 uninit_opnds,
1217 flag_def,
1218 boundary_cst,
1219 cmp_code,
1220 visited_phis,
1221 &visited_flag_phis);
1222
1223 if (visited_flag_phis)
1224 BITMAP_FREE (visited_flag_phis);
1225
1226 return all_pruned;
1227 }
1228
1229 /* Returns true if TC is BIT_IOR_EXPR, or BIT_AND_EXPR on a boolean (or null) type. */
1230
1231 static inline bool
1232 is_and_or_or (enum tree_code tc, tree typ)
1233 {
1234 return (tc == BIT_IOR_EXPR
1235 || (tc == BIT_AND_EXPR
1236 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1237 }
1238
1239 typedef struct norm_cond
1240 {
1241 vec<gimple> conds;
1242 enum tree_code cond_code;
1243 bool invert;
1244 } *norm_cond_t;
1245
1246
1247 /* Normalizes gimple condition COND. The normalization follows
1248 UD chains to form larger condition expression trees. NORM_COND
1249 holds the normalized result. COND_CODE is the logical opcode
1250 (AND or OR) of the normalized tree. */
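/* E.g. (a sketch with made-up SSA names), for GIMPLE such as

     t1_1 = a_2 < 10;
     t2_3 = b_4 > 0;
     t3_5 = t1_1 & t2_3;
     if (t3_5 != 0) ...

   normalizing the GIMPLE_COND yields cond_code == BIT_AND_EXPR with
   the two comparison assignments collected in NORM_COND->conds.  */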
1251
1252 static void
1253 normalize_cond_1 (gimple cond,
1254 norm_cond_t norm_cond,
1255 enum tree_code cond_code)
1256 {
1257 enum gimple_code gc;
1258 enum tree_code cur_cond_code;
1259 tree rhs1, rhs2;
1260
1261 gc = gimple_code (cond);
1262 if (gc != GIMPLE_ASSIGN)
1263 {
1264 norm_cond->conds.safe_push (cond);
1265 return;
1266 }
1267
1268 cur_cond_code = gimple_assign_rhs_code (cond);
1269 rhs1 = gimple_assign_rhs1 (cond);
1270 rhs2 = gimple_assign_rhs2 (cond);
1271 if (cur_cond_code == NE_EXPR)
1272 {
1273 if (integer_zerop (rhs2)
1274 && (TREE_CODE (rhs1) == SSA_NAME))
1275 normalize_cond_1 (
1276 SSA_NAME_DEF_STMT (rhs1),
1277 norm_cond, cond_code);
1278 else if (integer_zerop (rhs1)
1279 && (TREE_CODE (rhs2) == SSA_NAME))
1280 normalize_cond_1 (
1281 SSA_NAME_DEF_STMT (rhs2),
1282 norm_cond, cond_code);
1283 else
1284 norm_cond->conds.safe_push (cond);
1285
1286 return;
1287 }
1288
1289 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1290 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1291 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1292 {
1293 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1294 norm_cond, cur_cond_code);
1295 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1296 norm_cond, cur_cond_code);
1297 norm_cond->cond_code = cur_cond_code;
1298 }
1299 else
1300 norm_cond->conds.safe_push (cond);
1301 }
1302
1303 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1304 if COND needs to be inverted or not. */
1305
1306 static void
1307 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1308 {
1309 enum tree_code cond_code;
1310
1311 norm_cond->cond_code = ERROR_MARK;
1312 norm_cond->invert = false;
1313 norm_cond->conds.create (0);
1314 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1315 cond_code = gimple_cond_code (cond);
1316 if (invert)
1317 cond_code = invert_tree_comparison (cond_code, false);
1318
1319 if (cond_code == NE_EXPR)
1320 {
1321 if (integer_zerop (gimple_cond_rhs (cond))
1322 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1323 normalize_cond_1 (
1324 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1325 norm_cond, ERROR_MARK);
1326 else if (integer_zerop (gimple_cond_lhs (cond))
1327 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1328 normalize_cond_1 (
1329 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1330 norm_cond, ERROR_MARK);
1331 else
1332 {
1333 norm_cond->conds.safe_push (cond);
1334 norm_cond->invert = invert;
1335 }
1336 }
1337 else
1338 {
1339 norm_cond->conds.safe_push (cond);
1340 norm_cond->invert = invert;
1341 }
1342
1343 gcc_assert (norm_cond->conds.length () == 1
1344 || is_and_or_or (norm_cond->cond_code, NULL));
1345 }
1346
1347 /* Returns true if the domain of condition COND1 is a subset of
1348 COND2's. REVERSE is a flag; when it is true the function checks
1349 whether COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1350 to indicate if COND1 and COND2 need to be inverted or not. */
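/* For instance (with a made-up name x_1), the domain of  x_1 < 5  is
   a subset of that of  x_1 <= 10, so the function returns true for
   that pair, while for  x_1 < 5  versus  x_1 > 2  it returns false.  */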
1351
1352 static bool
1353 is_gcond_subset_of (gimple cond1, bool invert1,
1354 gimple cond2, bool invert2,
1355 bool reverse)
1356 {
1357 enum gimple_code gc1, gc2;
1358 enum tree_code cond1_code, cond2_code;
1359 gimple tmp;
1360 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1361
1362 /* Take the short cut. */
1363 if (cond1 == cond2)
1364 return true;
1365
1366 if (reverse)
1367 {
1368 tmp = cond1;
1369 cond1 = cond2;
1370 cond2 = tmp;
1371 }
1372
1373 gc1 = gimple_code (cond1);
1374 gc2 = gimple_code (cond2);
1375
1376 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1377 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1378 return cond1 == cond2;
1379
1380 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1381 ? gimple_assign_rhs_code (cond1)
1382 : gimple_cond_code (cond1));
1383
1384 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1385 ? gimple_assign_rhs_code (cond2)
1386 : gimple_cond_code (cond2));
1387
1388 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1389 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1390 return false;
1391
1392 if (invert1)
1393 cond1_code = invert_tree_comparison (cond1_code, false);
1394 if (invert2)
1395 cond2_code = invert_tree_comparison (cond2_code, false);
1396
1397 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1398 ? gimple_assign_rhs1 (cond1)
1399 : gimple_cond_lhs (cond1));
1400 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1401 ? gimple_assign_rhs2 (cond1)
1402 : gimple_cond_rhs (cond1));
1403 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1404 ? gimple_assign_rhs1 (cond2)
1405 : gimple_cond_lhs (cond2));
1406 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1407 ? gimple_assign_rhs2 (cond2)
1408 : gimple_cond_rhs (cond2));
1409
1410 /* Assuming const operands have been swapped to the
1411 rhs at this point of the analysis. */
1412
1413 if (cond1_lhs != cond2_lhs)
1414 return false;
1415
1416 if (!is_gimple_constant (cond1_rhs)
1417 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1418 return (cond1_rhs == cond2_rhs);
1419
1420 if (!is_gimple_constant (cond2_rhs)
1421 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1422 return (cond1_rhs == cond2_rhs);
1423
1424 if (cond1_code == EQ_EXPR)
1425 return is_value_included_in (cond1_rhs,
1426 cond2_rhs, cond2_code);
1427 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1428 return ((cond2_code == cond1_code)
1429 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1430
1431 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1432 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1433 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1434 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1435 return false;
1436
1437 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1438 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1439 return false;
1440
1441 if (cond1_code == GT_EXPR)
1442 {
1443 cond1_code = GE_EXPR;
1444 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1445 cond1_rhs,
1446 fold_convert (TREE_TYPE (cond1_rhs),
1447 integer_one_node));
1448 }
1449 else if (cond1_code == LT_EXPR)
1450 {
1451 cond1_code = LE_EXPR;
1452 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1453 cond1_rhs,
1454 fold_convert (TREE_TYPE (cond1_rhs),
1455 integer_one_node));
1456 }
1457
1458 if (!cond1_rhs)
1459 return false;
1460
1461 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1462
1463 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1464 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1465 return is_value_included_in (cond1_rhs,
1466 cond2_rhs, cond2_code);
1467 else if (cond2_code == NE_EXPR)
1468 return
1469 (is_value_included_in (cond1_rhs,
1470 cond2_rhs, cond2_code)
1471 && !is_value_included_in (cond2_rhs,
1472 cond1_rhs, cond1_code));
1473 return false;
1474 }
1475
1476 /* Returns true if the domain of the condition expression
1477 in COND is a subset of any of the sub-conditions
1478 of the normalized condition NORM_COND. INVERT is a flag
1479 to indicate whether COND needs to be inverted.
1480 REVERSE is a flag. When it is true, the check is reversed --
1481 it returns true if COND is a superset of any of the subconditions
1482 of NORM_COND. */
1483
1484 static bool
1485 is_subset_of_any (gimple cond, bool invert,
1486 norm_cond_t norm_cond, bool reverse)
1487 {
1488 size_t i;
1489 size_t len = norm_cond->conds.length ();
1490
1491 for (i = 0; i < len; i++)
1492 {
1493 if (is_gcond_subset_of (cond, invert,
1494 norm_cond->conds[i],
1495 false, reverse))
1496 return true;
1497 }
1498 return false;
1499 }
1500
1501 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1502 expressions (formed by following UD chains, not control
1503 dependence chains). The function returns true if the domain
1504 of the OR expression NORM_COND1 is a subset of NORM_COND2's.
1505 The implementation is conservative, and it returns false if
1506 the inclusion relationship may not hold. */
1507
1508 static bool
1509 is_or_set_subset_of (norm_cond_t norm_cond1,
1510 norm_cond_t norm_cond2)
1511 {
1512 size_t i;
1513 size_t len = norm_cond1->conds.length ();
1514
1515 for (i = 0; i < len; i++)
1516 {
1517 if (!is_subset_of_any (norm_cond1->conds[i],
1518 false, norm_cond2, false))
1519 return false;
1520 }
1521 return true;
1522 }
1523
1524 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1525 expressions (formed by following UD chains, not control
1526 dependence chains). The function returns true if the domain
1527 of the AND expression NORM_COND1 is a subset of NORM_COND2's. */
1528
1529 static bool
1530 is_and_set_subset_of (norm_cond_t norm_cond1,
1531 norm_cond_t norm_cond2)
1532 {
1533 size_t i;
1534 size_t len = norm_cond2->conds.length ();
1535
1536 for (i = 0; i < len; i++)
1537 {
1538 if (!is_subset_of_any (norm_cond2->conds[i],
1539 false, norm_cond1, true))
1540 return false;
1541 }
1542 return true;
1543 }
1544
1545 /* Returns true if the domain of NORM_COND1 is a subset
1546 of that of NORM_COND2. Returns false if it can not be
1547 proved to be so. */
1548
1549 static bool
1550 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1551 norm_cond_t norm_cond2)
1552 {
1553 size_t i;
1554 enum tree_code code1, code2;
1555
1556 code1 = norm_cond1->cond_code;
1557 code2 = norm_cond2->cond_code;
1558
1559 if (code1 == BIT_AND_EXPR)
1560 {
1561 /* Both conditions are AND expressions. */
1562 if (code2 == BIT_AND_EXPR)
1563 return is_and_set_subset_of (norm_cond1, norm_cond2);
1564 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1565 expression. In this case, returns true if any subexpression
1566 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1567 else if (code2 == BIT_IOR_EXPR)
1568 {
1569 size_t len1;
1570 len1 = norm_cond1->conds.length ();
1571 for (i = 0; i < len1; i++)
1572 {
1573 gimple cond1 = norm_cond1->conds[i];
1574 if (is_subset_of_any (cond1, false, norm_cond2, false))
1575 return true;
1576 }
1577 return false;
1578 }
1579 else
1580 {
1581 gcc_assert (code2 == ERROR_MARK);
1582 gcc_assert (norm_cond2->conds.length () == 1);
1583 return is_subset_of_any (norm_cond2->conds[0],
1584 norm_cond2->invert, norm_cond1, true);
1585 }
1586 }
1587 /* NORM_COND1 is an OR expression */
1588 else if (code1 == BIT_IOR_EXPR)
1589 {
1590 if (code2 != code1)
1591 return false;
1592
1593 return is_or_set_subset_of (norm_cond1, norm_cond2);
1594 }
1595 else
1596 {
1597 gcc_assert (code1 == ERROR_MARK);
1598 gcc_assert (norm_cond1->conds.length () == 1);
1599 /* Conservatively returns false if NORM_COND1 is non-decomposable
1600 and NORM_COND2 is an AND expression. */
1601 if (code2 == BIT_AND_EXPR)
1602 return false;
1603
1604 if (code2 == BIT_IOR_EXPR)
1605 return is_subset_of_any (norm_cond1->conds[0],
1606 norm_cond1->invert, norm_cond2, false);
1607
1608 gcc_assert (code2 == ERROR_MARK);
1609 gcc_assert (norm_cond2->conds.length () == 1);
1610 return is_gcond_subset_of (norm_cond1->conds[0],
1611 norm_cond1->invert,
1612 norm_cond2->conds[0],
1613 norm_cond2->invert, false);
1614 }
1615 }
1616
1617 /* Returns true if the domain of a single predicate expression
1618 EXPR1 is a subset of that of EXPR2. Returns false if it
1619 can not be proved. */
1620
1621 static bool
1622 is_pred_expr_subset_of (use_pred_info_t expr1,
1623 use_pred_info_t expr2)
1624 {
1625 gimple cond1, cond2;
1626 enum tree_code code1, code2;
1627 struct norm_cond norm_cond1, norm_cond2;
1628 bool is_subset = false;
1629
1630 cond1 = expr1->cond;
1631 cond2 = expr2->cond;
1632 code1 = gimple_cond_code (cond1);
1633 code2 = gimple_cond_code (cond2);
1634
1635 if (expr1->invert)
1636 code1 = invert_tree_comparison (code1, false);
1637 if (expr2->invert)
1638 code2 = invert_tree_comparison (code2, false);
1639
1640 /* Fast path -- match exactly */
1641 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1642 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1643 && (code1 == code2))
1644 return true;
1645
1646 /* Normalize conditions. To keep NE_EXPR, do not invert
1647 when both need inversion. */
1648 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1649 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1650
1651 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1652
1653 /* Free memory */
1654 norm_cond1.conds.release ();
1655 norm_cond2.conds.release ();
1656 return is_subset;
1657 }
1658
1659 /* Returns true if the domain of PRED1 is a subset
1660 of that of PRED2. Returns false if it can not be proved so. */
1661
1662 static bool
1663 is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
1664 vec<use_pred_info_t> pred2)
1665 {
1666 size_t np1, np2, i1, i2;
1667
1668 np1 = pred1.length ();
1669 np2 = pred2.length ();
1670
1671 for (i2 = 0; i2 < np2; i2++)
1672 {
1673 bool found = false;
1674 use_pred_info_t info2
1675 = pred2[i2];
1676 for (i1 = 0; i1 < np1; i1++)
1677 {
1678 use_pred_info_t info1
1679 = pred1[i1];
1680 if (is_pred_expr_subset_of (info1, info2))
1681 {
1682 found = true;
1683 break;
1684 }
1685 }
1686 if (!found)
1687 return false;
1688 }
1689 return true;
1690 }
1691
1692 /* Returns true if the domain defined by
1693 one pred chain ONE_PRED is a subset of the domain
1694 of *PREDS. It returns false if ONE_PRED's domain is
1695 not a subset of any of the sub-domains of PREDS
1696 (corresponding to each individual chain in it), even
1697 though it may still be a subset of the whole domain
1698 of PREDS, which is the union (ORed) of all its subdomains.
1699 In other words, the result is conservative. */
1700
1701 static bool
1702 is_included_in (vec<use_pred_info_t> one_pred,
1703 vec<use_pred_info_t> *preds,
1704 size_t n)
1705 {
1706 size_t i;
1707
1708 for (i = 0; i < n; i++)
1709 {
1710 if (is_pred_chain_subset_of (one_pred, preds[i]))
1711 return true;
1712 }
1713
1714 return false;
1715 }
1716
1717 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1718 true if the domain defined by PREDS1 is a superset
1719 of PREDS2's domain. N1 and N2 are the array sizes of PREDS1 and
1720 PREDS2 respectively. The implementation chooses not to build
1721 generic trees (and rely on the folding capability of the
1722 compiler), but instead performs a brute force comparison of
1723 individual predicate chains (not a compile time problem
1724 as the chains are pretty short). When the function returns
1725 false, it does not necessarily mean *PREDS1 is not a superset
1726 of *PREDS2, but only that it may not be so since the analysis can
1727 not prove it. In such cases, false warnings may still be
1728 emitted. */
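/* For instance (predicates written symbolically), a def predicate set
   consisting of the single chain { (n > 0) } is a superset of a use
   predicate set consisting of the single chain
   { (n > 0) .AND. (m < 2) }, since the latter chain's domain is
   included in the former's.  */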
1729
1730 static bool
1731 is_superset_of (vec<use_pred_info_t> *preds1,
1732 size_t n1,
1733 vec<use_pred_info_t> *preds2,
1734 size_t n2)
1735 {
1736 size_t i;
1737 vec<use_pred_info_t> one_pred_chain;
1738
1739 for (i = 0; i < n2; i++)
1740 {
1741 one_pred_chain = preds2[i];
1742 if (!is_included_in (one_pred_chain, preds1, n1))
1743 return false;
1744 }
1745
1746 return true;
1747 }
1748
1749 /* Comparison function used by qsort. It is used to
1750 sort predicate chains to allow predicate
1751 simplification. */
1752
1753 static int
1754 pred_chain_length_cmp (const void *p1, const void *p2)
1755 {
1756 use_pred_info_t i1, i2;
1757 vec<use_pred_info_t> const *chain1
1758 = (vec<use_pred_info_t> const *)p1;
1759 vec<use_pred_info_t> const *chain2
1760 = (vec<use_pred_info_t> const *)p2;
1761
1762 if (chain1->length () != chain2->length ())
1763 return (chain1->length () - chain2->length ());
1764
1765 i1 = (*chain1)[0];
1766 i2 = (*chain2)[0];
1767
1768 /* Allow predicates with similar prefixes to come together. */
1769 if (!i1->invert && i2->invert)
1770 return -1;
1771 else if (i1->invert && !i2->invert)
1772 return 1;
1773
1774 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1775 }
1776
1777 /* x OR (!x AND y) is equivalent to x OR y.
1778 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1779 into x1 OR x2 OR x3. PREDS is the array of predicate chains, and *N is
1780 the number of chains. Returns true if normalization happens. */
1781
1782 static bool
1783 normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
1784 {
1785 size_t i, j, ll;
1786 vec<use_pred_info_t> pred_chain;
1787 vec<use_pred_info_t> x = vNULL;
1788 use_pred_info_t xj = 0, nxj = 0;
1789
1790 if (*n < 2)
1791 return false;
1792
1793 /* First sort the chains in ascending order of lengths. */
1794 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1795 pred_chain = preds[0];
1796 ll = pred_chain.length ();
1797 if (ll != 1)
1798 {
1799 if (ll == 2)
1800 {
1801 use_pred_info_t xx, yy, xx2, nyy;
1802 vec<use_pred_info_t> pred_chain2 = preds[1];
1803 if (pred_chain2.length () != 2)
1804 return false;
1805
1806 /* See if simplification x AND y OR x AND !y is possible. */
1807 xx = pred_chain[0];
1808 yy = pred_chain[1];
1809 xx2 = pred_chain2[0];
1810 nyy = pred_chain2[1];
1811 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1812 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1813 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1814 || (xx->invert != xx2->invert))
1815 return false;
1816 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1817 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1818 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1819 || (yy->invert == nyy->invert))
1820 return false;
1821
1822 /* Now merge the first two chains. */
1823 free (yy);
1824 free (nyy);
1825 free (xx2);
1826 pred_chain.release ();
1827 pred_chain2.release ();
1828 pred_chain.safe_push (xx);
1829 preds[0] = pred_chain;
1830 for (i = 1; i < *n - 1; i++)
1831 preds[i] = preds[i + 1];
1832
1833 preds[*n - 1].create (0);
1834 *n = *n - 1;
1835 }
1836 else
1837 return false;
1838 }
1839
1840 x.safe_push (pred_chain[0]);
1841
1842 /* The loop extracts x1, x2, x3, etc from chains
1843 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1844 for (i = 1; i < *n; i++)
1845 {
1846 pred_chain = preds[i];
1847 if (pred_chain.length () != i + 1)
1848 return false;
1849
1850 for (j = 0; j < i; j++)
1851 {
1852 xj = x[j];
1853 nxj = pred_chain[j];
1854
1855 /* Check if nxj is !xj */
1856 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1857 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1858 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1859 || (xj->invert == nxj->invert))
1860 return false;
1861 }
1862
1863 x.safe_push (pred_chain[i]);
1864 }
1865
1866 /* Now normalize the pred chains using the extracted x1, x2, x3, etc. */
1867 for (j = 0; j < *n; j++)
1868 {
1869 use_pred_info_t t;
1870 xj = x[j];
1871
1872 t = XNEW (struct use_pred_info);
1873 *t = *xj;
1874
1875 x[j] = t;
1876 }
1877
1878 for (i = 0; i < *n; i++)
1879 {
1880 pred_chain = preds[i];
1881 for (j = 0; j < pred_chain.length (); j++)
1882 free (pred_chain[j]);
1883 pred_chain.release ();
1884 /* A new chain. */
1885 pred_chain.safe_push (x[i]);
1886 preds[i] = pred_chain;
1887 }
1888 return true;
1889 }
1890
1891
1892
1893 /* Computes the predicates that guard the use and checks
1894 if the incoming paths that have empty (or possibly
1895 empty) definition can be pruned/filtered. The function returns
1896 true if it can be determined that the use of PHI's def in
1897 USE_STMT is guarded with a predicate set not overlapping with
1898 predicate sets of all runtime paths that do not have a definition.
1899 Returns false if it is not or it can not be determined. USE_BB is
1900 the bb of the use (for phi operand use, the bb is not the bb of
1901 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1902 is a bit vector. If an operand of PHI is uninitialized, the
1903 corresponding bit in the vector is 1. VISITED_PHIS is a pointer
1904 set of phis being visited. */
1905
1906 static bool
1907 is_use_properly_guarded (gimple use_stmt,
1908 basic_block use_bb,
1909 gimple phi,
1910 unsigned uninit_opnds,
1911 struct pointer_set_t *visited_phis)
1912 {
1913 basic_block phi_bb;
1914 vec<use_pred_info_t> *preds = 0;
1915 vec<use_pred_info_t> *def_preds = 0;
1916 size_t num_preds = 0, num_def_preds = 0;
1917 bool has_valid_preds = false;
1918 bool is_properly_guarded = false;
1919
1920 if (pointer_set_insert (visited_phis, phi))
1921 return false;
1922
1923 phi_bb = gimple_bb (phi);
1924
1925 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1926 return false;
1927
1928 has_valid_preds = find_predicates (&preds, &num_preds,
1929 phi_bb, use_bb);
1930
1931 if (!has_valid_preds)
1932 {
1933 destroy_predicate_vecs (num_preds, preds);
1934 return false;
1935 }
1936
1937 if (dump_file)
1938 dump_predicates (use_stmt, num_preds, preds,
1939 "\nUse in stmt ");
1940
1941 has_valid_preds = find_def_preds (&def_preds,
1942 &num_def_preds, phi);
1943
1944 if (has_valid_preds)
1945 {
1946 bool normed;
1947 if (dump_file)
1948 dump_predicates (phi, num_def_preds, def_preds,
1949 "Operand defs of phi ");
1950
1951 normed = normalize_preds (def_preds, &num_def_preds);
1952 if (normed && dump_file)
1953 {
1954 fprintf (dump_file, "\nNormalized to\n");
1955 dump_predicates (phi, num_def_preds, def_preds,
1956 "Operand defs of phi ");
1957 }
1958 is_properly_guarded =
1959 is_superset_of (def_preds, num_def_preds,
1960 preds, num_preds);
1961 }
1962
1963 /* Further prune the dead incoming phi edges. */
1964 if (!is_properly_guarded)
1965 is_properly_guarded
1966 = use_pred_not_overlap_with_undef_path_pred (
1967 num_preds, preds, phi, uninit_opnds, visited_phis);
1968
1969 destroy_predicate_vecs (num_preds, preds);
1970 destroy_predicate_vecs (num_def_preds, def_preds);
1971 return is_properly_guarded;
1972 }
1973
1974 /* Searches through all uses of a potentially
1975 uninitialized variable defined by PHI and returns a use
1976 statement if the use is not properly guarded. It returns
1977 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1978 holding the position(s) of uninit PHI operands. WORKLIST
1979 is the vector of candidate phis that may be updated by this
1980 function. ADDED_TO_WORKLIST is a pointer set tracking
1981 whether a newly found phi is already in the worklist. */
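/* Illustrative sketch (made-up SSA names): when the possibly undefined
   value only reaches a real use through another phi, e.g.

       x_3 = PHI <x_1(D), 1>
       ...
       x_5 = PHI <x_3, 2>
       use (x_5);

   an unguarded phi use of x_3 does not produce a warning by itself;
   the using phi is instead pushed onto WORKLIST and x_3 is recorded in
   possibly_undefined_names, so the real use of x_5 is examined when
   that phi is taken off the worklist.  */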
1982
1983 static gimple
1984 find_uninit_use (gimple phi, unsigned uninit_opnds,
1985 vec<gimple> *worklist,
1986 struct pointer_set_t *added_to_worklist)
1987 {
1988 tree phi_result;
1989 use_operand_p use_p;
1990 gimple use_stmt;
1991 imm_use_iterator iter;
1992
1993 phi_result = gimple_phi_result (phi);
1994
1995 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
1996 {
1997 struct pointer_set_t *visited_phis;
1998 basic_block use_bb;
1999
2000 use_stmt = USE_STMT (use_p);
2001 if (is_gimple_debug (use_stmt))
2002 continue;
2003
2004 visited_phis = pointer_set_create ();
2005
2006 if (gimple_code (use_stmt) == GIMPLE_PHI)
2007 use_bb = gimple_phi_arg_edge (use_stmt,
2008 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2009 else
2010 use_bb = gimple_bb (use_stmt);
2011
2012 if (is_use_properly_guarded (use_stmt,
2013 use_bb,
2014 phi,
2015 uninit_opnds,
2016 visited_phis))
2017 {
2018 pointer_set_destroy (visited_phis);
2019 continue;
2020 }
2021 pointer_set_destroy (visited_phis);
2022
2023 if (dump_file && (dump_flags & TDF_DETAILS))
2024 {
2025 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2026 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2027 }
2028 /* Found one real use, return. */
2029 if (gimple_code (use_stmt) != GIMPLE_PHI)
2030 return use_stmt;
2031
2032 /* Found a phi use that is not guarded,
2033 add the phi to the worklist. */
2034 if (!pointer_set_insert (added_to_worklist,
2035 use_stmt))
2036 {
2037 if (dump_file && (dump_flags & TDF_DETAILS))
2038 {
2039 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2040 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2041 }
2042
2043 worklist->safe_push (use_stmt);
2044 pointer_set_insert (possibly_undefined_names, phi_result);
2045 }
2046 }
2047
2048 return NULL;
2049 }
2050
2051 /* Looks for inputs to PHI that are SSA_NAMEs with empty definitions
2052 and gives a warning if there exists a runtime path from the entry to a
2053 use of the PHI def that does not contain a definition. In other words,
2054 the warning is issued on the real use. The more dead paths the
2055 compiler can prune, the fewer false positives the warning produces.
2056 WORKLIST is a vector of candidate phis to be examined. ADDED_TO_WORKLIST
2057 is a pointer set tracking whether a new phi has already been added. */
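/* Illustrative example (made-up identifiers):

       int x;
       if (c)
         x = 1;
       return x;

   The phi merging the two values of x has one possibly uninitialized
   operand (the value flowing in on the path where c is false).  The
   return is found by find_uninit_use as an unguarded real use, so a
   "may be used uninitialized" warning is reported for it.  */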
2058
2059 static void
2060 warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
2061 struct pointer_set_t *added_to_worklist)
2062 {
2063 unsigned uninit_opnds;
2064 gimple uninit_use_stmt = 0;
2065 tree uninit_op;
2066
2067 /* Don't look at virtual operands. */
2068 if (virtual_operand_p (gimple_phi_result (phi)))
2069 return;
2070
2071 uninit_opnds = compute_uninit_opnds_pos (phi);
2072
2073 if (MASK_EMPTY (uninit_opnds))
2074 return;
2075
2076 if (dump_file && (dump_flags & TDF_DETAILS))
2077 {
2078 fprintf (dump_file, "[CHECK]: examining phi: ");
2079 print_gimple_stmt (dump_file, phi, 0, 0);
2080 }
2081
2082 /* Now check if we have any use of the value without proper guard. */
2083 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2084 worklist, added_to_worklist);
2085
2086 /* All uses are properly guarded. */
2087 if (!uninit_use_stmt)
2088 return;
2089
2090 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
2091 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2092 return;
2093 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2094 SSA_NAME_VAR (uninit_op),
2095 "%qD may be used uninitialized in this function",
2096 uninit_use_stmt);
2097
2098 }
2099
2100
2101 /* Entry point to the late uninitialized warning pass. */
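/* The pass seeds a worklist with every phi that has at least one
   undefined or possibly undefined operand, then repeatedly pops a phi
   and checks whether its result has an unguarded use
   (warn_uninitialized_phi).  Unguarded phi uses found along the way are
   pushed back onto the worklist so that warnings end up on real uses.  */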
2102
2103 static unsigned int
2104 execute_late_warn_uninitialized (void)
2105 {
2106 basic_block bb;
2107 gimple_stmt_iterator gsi;
2108 vec<gimple> worklist = vNULL;
2109 struct pointer_set_t *added_to_worklist;
2110
2111 calculate_dominance_info (CDI_DOMINATORS);
2112 calculate_dominance_info (CDI_POST_DOMINATORS);
2113 /* Re-do the plain uninitialized variable check, as optimization may have
2114 straightened control flow. Do this first so that we don't accidentally
2115 get a "may be" warning when we'd have seen an "is" warning later. */
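/* For instance (an illustrative case, not from a specific test): if
   earlier optimization folds the condition guarding a use to a
   constant, a use that previously looked conditional becomes
   unconditional, and the plain check can then report the stronger
   "is used uninitialized" diagnostic instead of "may be".  */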
2116 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
2117
2118 timevar_push (TV_TREE_UNINIT);
2119
2120 possibly_undefined_names = pointer_set_create ();
2121 added_to_worklist = pointer_set_create ();
2122
2123 /* Initialize the worklist. */
2124 FOR_EACH_BB (bb)
2125 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2126 {
2127 gimple phi = gsi_stmt (gsi);
2128 size_t n, i;
2129
2130 n = gimple_phi_num_args (phi);
2131
2132 /* Don't look at virtual operands. */
2133 if (virtual_operand_p (gimple_phi_result (phi)))
2134 continue;
2135
2136 for (i = 0; i < n; ++i)
2137 {
2138 tree op = gimple_phi_arg_def (phi, i);
2139 if (TREE_CODE (op) == SSA_NAME
2140 && uninit_undefined_value_p (op))
2141 {
2142 worklist.safe_push (phi);
2143 pointer_set_insert (added_to_worklist, phi);
2144 if (dump_file && (dump_flags & TDF_DETAILS))
2145 {
2146 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2147 print_gimple_stmt (dump_file, phi, 0, 0);
2148 }
2149 break;
2150 }
2151 }
2152 }
2153
2154 while (worklist.length () != 0)
2155 {
2156 gimple cur_phi = 0;
2157 cur_phi = worklist.pop ();
2158 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2159 }
2160
2161 worklist.release ();
2162 pointer_set_destroy (added_to_worklist);
2163 pointer_set_destroy (possibly_undefined_names);
2164 possibly_undefined_names = NULL;
2165 free_dominance_info (CDI_POST_DOMINATORS);
2166 timevar_pop (TV_TREE_UNINIT);
2167 return 0;
2168 }
2169
2170 static bool
2171 gate_warn_uninitialized (void)
2172 {
2173 return warn_uninitialized != 0;
2174 }
2175
2176 namespace {
2177
2178 const pass_data pass_data_late_warn_uninitialized =
2179 {
2180 GIMPLE_PASS, /* type */
2181 "uninit", /* name */
2182 OPTGROUP_NONE, /* optinfo_flags */
2183 true, /* has_gate */
2184 true, /* has_execute */
2185 TV_NONE, /* tv_id */
2186 PROP_ssa, /* properties_required */
2187 0, /* properties_provided */
2188 0, /* properties_destroyed */
2189 0, /* todo_flags_start */
2190 0, /* todo_flags_finish */
2191 };
2192
2193 class pass_late_warn_uninitialized : public gimple_opt_pass
2194 {
2195 public:
2196 pass_late_warn_uninitialized (gcc::context *ctxt)
2197 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2198 {}
2199
2200 /* opt_pass methods: */
2201 opt_pass * clone () { return new pass_late_warn_uninitialized (ctxt_); }
2202 bool gate () { return gate_warn_uninitialized (); }
2203 unsigned int execute () { return execute_late_warn_uninitialized (); }
2204
2205 }; // class pass_late_warn_uninitialized
2206
2207 } // anon namespace
2208
2209 gimple_opt_pass *
2210 make_pass_late_warn_uninitialized (gcc::context *ctxt)
2211 {
2212 return new pass_late_warn_uninitialized (ctxt);
2213 }
2214
2215
2216 static unsigned int
2217 execute_early_warn_uninitialized (void)
2218 {
2219 /* Currently, this pass always runs, but
2220 execute_late_warn_uninitialized only runs with optimization. With
2221 optimization we want to warn about possibly uninitialized variables
2222 as late as possible, so we do not do it here. However, without
2223 optimization we need to warn here about "may be uninitialized"
2224 uses. */
2225 calculate_dominance_info (CDI_POST_DOMINATORS);
2226
2227 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
2228
2229 /* Post-dominator information cannot be reliably updated. Free it
2230 after the use. */
2231
2232 free_dominance_info (CDI_POST_DOMINATORS);
2233 return 0;
2234 }
2235
2236
2237 namespace {
2238
2239 const pass_data pass_data_early_warn_uninitialized =
2240 {
2241 GIMPLE_PASS, /* type */
2242 "*early_warn_uninitialized", /* name */
2243 OPTGROUP_NONE, /* optinfo_flags */
2244 true, /* has_gate */
2245 true, /* has_execute */
2246 TV_TREE_UNINIT, /* tv_id */
2247 PROP_ssa, /* properties_required */
2248 0, /* properties_provided */
2249 0, /* properties_destroyed */
2250 0, /* todo_flags_start */
2251 0, /* todo_flags_finish */
2252 };
2253
2254 class pass_early_warn_uninitialized : public gimple_opt_pass
2255 {
2256 public:
2257 pass_early_warn_uninitialized (gcc::context *ctxt)
2258 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
2259 {}
2260
2261 /* opt_pass methods: */
2262 bool gate () { return gate_warn_uninitialized (); }
2263 unsigned int execute () { return execute_early_warn_uninitialized (); }
2264
2265 }; // class pass_early_warn_uninitialized
2266
2267 } // anon namespace
2268
2269 gimple_opt_pass *
2270 make_pass_early_warn_uninitialized (gcc::context *ctxt)
2271 {
2272 return new pass_early_warn_uninitialized (ctxt);
2273 }
2274
2275