1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2020 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "gimple-iterator.h"
33 #include "tree-ssa.h"
34 #include "tree-cfg.h"
35 #include "cfghooks.h"
36 #include "attribs.h"
37 #include "builtins.h"
38 #include "calls.h"
39
40 /* This implements the pass that does predicate aware warning on uses of
41 possibly uninitialized variables. The pass first collects the set of
42 possibly uninitialized SSA names. For each such name, it walks through
43 all its immediate uses. For each immediate use, it rebuilds the condition
44 expression (the predicate) that guards the use. The predicate is then
45 examined to see if the variable is always defined under that same condition.
46 This is done either by pruning the unrealizable paths that lead to the
47 default definitions or by checking if the predicate set that guards the
48 defining paths is a superset of the use predicate. */
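/* For illustration only (not from the original sources), a sketch of the
   kind of code the analysis reasons about:

     int x;
     if (cond)
       x = foo ();    // defining path guarded by 'cond'
     ...
     if (cond)
       use (x);       // use guarded by the same predicate

   The predicate guarding the use (cond) is a subset of the predicate set
   guarding the definition, so the use is considered safe; if the two
   guards cannot be related, the use may be diagnosed as possibly
   uninitialized.  */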
49
50 /* Max PHI args we can handle in pass. */
51 const unsigned max_phi_args = 32;
52
53 /* Pointer set of potentially undefined ssa names, i.e.,
54 ssa names that are defined by phi with operands that
55 are not defined or potentially undefined. */
56 static hash_set<tree> *possibly_undefined_names = 0;
57
58 /* Bit mask handling macros. */
59 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
60 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
61 #define MASK_EMPTY(mask) (mask == 0)
62
63 /* Returns the first bit position (starting from LSB)
64 in mask that is non zero. Returns -1 if the mask is empty. */
65 static int
66 get_mask_first_set_bit (unsigned mask)
67 {
68 int pos = 0;
69 if (mask == 0)
70 return -1;
71
72 while ((mask & (1 << pos)) == 0)
73 pos++;
74
75 return pos;
76 }
77 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
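/* Illustrative use of the mask helpers (hypothetical values): for a phi
   like  x_3 = PHI <x_1(D), x_2, x_4(D)>  whose operands 0 and 2 are
   potentially undefined, compute_uninit_opnds_pos below builds

     unsigned m = 0;
     MASK_SET_BIT (m, 0);       // m == 0b001
     MASK_SET_BIT (m, 2);       // m == 0b101
     MASK_FIRST_SET_BIT (m);    // 0
     MASK_TEST_BIT (m, 1);      // 0 (operand 1 is defined)

   so each bit position corresponds to one phi argument.  */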
78
79 /* Return true if T, an SSA_NAME, has an undefined value. */
80 static bool
81 has_undefined_value_p (tree t)
82 {
83 return (ssa_undefined_value_p (t)
84 || (possibly_undefined_names
85 && possibly_undefined_names->contains (t)));
86 }
87
88 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
89 is set on SSA_NAME_VAR. */
90
91 static inline bool
92 uninit_undefined_value_p (tree t)
93 {
94 if (!has_undefined_value_p (t))
95 return false;
96 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
97 return false;
98 return true;
99 }
100
101 /* Emit warnings for uninitialized variables. This is done in two passes.
102
103 The first pass notices real uses of SSA names with undefined values.
104 Such uses are unconditionally uninitialized, and we can be certain that
105 such a use is a mistake. This pass is run before most optimizations,
106 so that we catch as many as we can.
107
108 The second pass follows PHI nodes to find uses that are potentially
109 uninitialized. In this case we can't necessarily prove that the use
110 is really uninitialized. This pass is run after most optimizations,
111    so that we thread as many jumps as possible, and delete as much dead
112 code as possible, in order to reduce false positives. We also look
113 again for plain uninitialized variables, since optimization may have
114 changed conditionally uninitialized to unconditionally uninitialized. */
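/* For illustration (not from the original sources), the two kinds of
   diagnostics correspond to code like:

     int x;
     return x;        // unconditionally uninitialized: -Wuninitialized

     int y;
     if (cond)
       y = 1;
     return y;        // uninitialized only when !cond:
                      // -Wmaybe-uninitialized (second pass)  */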
115
116 /* Emit a warning for EXPR based on variable VAR at the point in the
117    program where T, an SSA_NAME, is used uninitialized.  The exact
118    warning text is in GMSGID and DATA is the gimple stmt with info about
119    the location in source code.  When DATA is a GIMPLE_PHI, PHIARG_LOC
120    gives the location of the phi argument to use for the diagnostic.  WC
121    is the warning code.  */
122
123 static void
124 warn_uninit (enum opt_code wc, tree t, tree expr, tree var,
125 const char *gmsgid, void *data, location_t phiarg_loc)
126 {
127 gimple *context = (gimple *) data;
128 location_t location, cfun_loc;
129 expanded_location xloc, floc;
130
131   /* Ignore COMPLEX_EXPR, as initializing only one part of a complex
132      turns into a COMPLEX_EXPR with the uninitialized part being
133      set to its previous (undefined) value.  */
134 if (is_gimple_assign (context)
135 && gimple_assign_rhs_code (context) == COMPLEX_EXPR)
136 return;
137 if (!has_undefined_value_p (t))
138 return;
139
140 /* Anonymous SSA_NAMEs shouldn't be uninitialized, but ssa_undefined_value_p
141 can return true if the def stmt of anonymous SSA_NAME is COMPLEX_EXPR
142 created for conversion from scalar to complex. Use the underlying var of
143 the COMPLEX_EXPRs real part in that case. See PR71581. */
144 if (expr == NULL_TREE
145 && var == NULL_TREE
146 && SSA_NAME_VAR (t) == NULL_TREE
147 && is_gimple_assign (SSA_NAME_DEF_STMT (t))
148 && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (t)) == COMPLEX_EXPR)
149 {
150 tree v = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (t));
151 if (TREE_CODE (v) == SSA_NAME
152 && has_undefined_value_p (v)
153 && zerop (gimple_assign_rhs2 (SSA_NAME_DEF_STMT (t))))
154 {
155 expr = SSA_NAME_VAR (v);
156 var = expr;
157 }
158 }
159
160 if (expr == NULL_TREE)
161 return;
162
163 /* TREE_NO_WARNING either means we already warned, or the front end
164 wishes to suppress the warning. */
165 if ((context
166 && (gimple_no_warning_p (context)
167 || (gimple_assign_single_p (context)
168 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
169 || TREE_NO_WARNING (expr))
170 return;
171
172 if (context != NULL && gimple_has_location (context))
173 location = gimple_location (context);
174 else if (phiarg_loc != UNKNOWN_LOCATION)
175 location = phiarg_loc;
176 else
177 location = DECL_SOURCE_LOCATION (var);
178 location = linemap_resolve_location (line_table, location,
179 LRK_SPELLING_LOCATION, NULL);
180 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
181 xloc = expand_location (location);
182 floc = expand_location (cfun_loc);
183 auto_diagnostic_group d;
184 if (warning_at (location, wc, gmsgid, expr))
185 {
186 TREE_NO_WARNING (expr) = 1;
187
188 if (location == DECL_SOURCE_LOCATION (var))
189 return;
190 if (xloc.file != floc.file
191 || linemap_location_before_p (line_table, location, cfun_loc)
192 || linemap_location_before_p (line_table, cfun->function_end_locus,
193 location))
194 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
195 }
196 }
197
198 struct check_defs_data
199 {
200 /* If we found any may-defs besides must-def clobbers. */
201 bool found_may_defs;
202 };
203
204 /* Callback for walk_aliased_vdefs. */
205
206 static bool
207 check_defs (ao_ref *ref, tree vdef, void *data_)
208 {
209 check_defs_data *data = (check_defs_data *)data_;
210 gimple *def_stmt = SSA_NAME_DEF_STMT (vdef);
211   /* If this is a clobber, then walk past it if it is not a kill.  */
212 if (gimple_clobber_p (def_stmt))
213 {
214 if (stmt_kills_ref_p (def_stmt, ref))
215 return true;
216 return false;
217 }
218 /* Found a may-def on this path. */
219 data->found_may_defs = true;
220 return true;
221 }
222
223 /* Counters and limits controlling the depth of analysis and
224    strictness of the warning.  */
225 struct wlimits
226 {
227 /* Number of VDEFs encountered. */
228 unsigned int vdef_cnt;
229 /* Number of statements examined by walk_aliased_vdefs. */
230 unsigned int oracle_cnt;
231 /* Limit on the number of statements visited by walk_aliased_vdefs. */
232 unsigned limit;
233 /* Set when basic block with statement is executed unconditionally. */
234 bool always_executed;
235 /* Set to issue -Wmaybe-uninitialized. */
236 bool wmaybe_uninit;
237 };
238
239 /* Determine if REF references an uninitialized operand and diagnose
240 it if so. */
241
242 static tree
243 maybe_warn_operand (ao_ref &ref, gimple *stmt, tree lhs, tree rhs,
244 wlimits &wlims)
245 {
246 bool has_bit_insert = false;
247 use_operand_p luse_p;
248 imm_use_iterator liter;
249
250 if (TREE_NO_WARNING (rhs))
251 return NULL_TREE;
252
253 /* Do not warn if the base was marked so or this is a
254 hard register var. */
255 tree base = ao_ref_base (&ref);
256 if ((VAR_P (base)
257 && DECL_HARD_REGISTER (base))
258 || TREE_NO_WARNING (base))
259 return NULL_TREE;
260
261 /* Do not warn if the access is fully outside of the variable. */
262 poly_int64 decl_size;
263 if (DECL_P (base)
264 && ((known_size_p (ref.size)
265 && known_eq (ref.max_size, ref.size)
266 && known_le (ref.offset + ref.size, 0))
267 || (known_ge (ref.offset, 0)
268 && DECL_SIZE (base)
269 && poly_int_tree_p (DECL_SIZE (base), &decl_size)
270 && known_le (decl_size, ref.offset))))
271 return NULL_TREE;
272
273 /* Do not warn if the result of the access is then used for
274 a BIT_INSERT_EXPR. */
275 if (lhs && TREE_CODE (lhs) == SSA_NAME)
276 FOR_EACH_IMM_USE_FAST (luse_p, liter, lhs)
277 {
278 gimple *use_stmt = USE_STMT (luse_p);
279 /* BIT_INSERT_EXPR first operand should not be considered
280 a use for the purpose of uninit warnings. */
281 if (gassign *ass = dyn_cast <gassign *> (use_stmt))
282 {
283 if (gimple_assign_rhs_code (ass) == BIT_INSERT_EXPR
284 && luse_p->use == gimple_assign_rhs1_ptr (ass))
285 {
286 has_bit_insert = true;
287 break;
288 }
289 }
290 }
291
292 if (has_bit_insert)
293 return NULL_TREE;
294
295   /* Limit the walking to a constant number of stmts once we have
296      overcommitted both the quadratic budget for small functions and
297      the overall O(n) budget.  */
298 if (wlims.oracle_cnt > 128 * 128
299 && wlims.oracle_cnt > wlims.vdef_cnt * 2)
300 wlims.limit = 32;
301
302 check_defs_data data;
303 bool fentry_reached = false;
304 data.found_may_defs = false;
305 tree use = gimple_vuse (stmt);
306 if (!use)
307 return NULL_TREE;
308 int res = walk_aliased_vdefs (&ref, use,
309 check_defs, &data, NULL,
310 &fentry_reached, wlims.limit);
311 if (res == -1)
312 {
313 wlims.oracle_cnt += wlims.limit;
314 return NULL_TREE;
315 }
316
317 wlims.oracle_cnt += res;
318 if (data.found_may_defs)
319 return NULL_TREE;
320
321 bool found_alloc = false;
322
323 if (fentry_reached)
324 {
325 if (TREE_CODE (base) == MEM_REF)
326 base = TREE_OPERAND (base, 0);
327
328 /* Follow the chain of SSA_NAME assignments looking for an alloca
329 call (or VLA) or malloc/realloc, or for decls. If any is found
330 (and in the latter case, the operand is a local variable) issue
331 a warning. */
332 while (TREE_CODE (base) == SSA_NAME)
333 {
334 gimple *def_stmt = SSA_NAME_DEF_STMT (base);
335
336 if (is_gimple_call (def_stmt)
337 && gimple_call_builtin_p (def_stmt))
338 {
339 /* Detect uses of uninitialized alloca/VLAs. */
340 tree fndecl = gimple_call_fndecl (def_stmt);
341 const built_in_function fncode = DECL_FUNCTION_CODE (fndecl);
342 if (fncode == BUILT_IN_ALLOCA
343 || fncode == BUILT_IN_ALLOCA_WITH_ALIGN
344 || fncode == BUILT_IN_MALLOC)
345 found_alloc = true;
346 break;
347 }
348
349 if (!is_gimple_assign (def_stmt))
350 break;
351
352 tree_code code = gimple_assign_rhs_code (def_stmt);
353 if (code != ADDR_EXPR && code != POINTER_PLUS_EXPR)
354 break;
355
356 base = gimple_assign_rhs1 (def_stmt);
357 if (TREE_CODE (base) == ADDR_EXPR)
358 base = TREE_OPERAND (base, 0);
359
360 if (DECL_P (base)
361 || TREE_CODE (base) == COMPONENT_REF)
362 rhs = base;
363
364 if (TREE_CODE (base) == MEM_REF)
365 base = TREE_OPERAND (base, 0);
366
367 if (tree ba = get_base_address (base))
368 base = ba;
369 }
370
371       /* Replace the RHS expression with BASE so that
372          the diagnostic refers to it (instead of to
373          '<unknown>').  */
374 if (DECL_P (base)
375 && EXPR_P (rhs)
376 && TREE_CODE (rhs) != COMPONENT_REF)
377 rhs = base;
378 }
379
380 /* Do not warn if it can be initialized outside this function.
381 If we did not reach function entry then we found killing
382 clobbers on all paths to entry. */
383 if (!found_alloc
384 && fentry_reached
385 /* ??? We'd like to use ref_may_alias_global_p but that
386 excludes global readonly memory and thus we get bogus
387 warnings from p = cond ? "a" : "b" for example. */
388 && (!VAR_P (base)
389 || is_global_var (base)))
390 return NULL_TREE;
391
392 /* Strip the address-of expression from arrays passed to functions. */
393 if (TREE_CODE (rhs) == ADDR_EXPR)
394 rhs = TREE_OPERAND (rhs, 0);
395
396 /* Check again since RHS may have changed above. */
397 if (TREE_NO_WARNING (rhs))
398 return NULL_TREE;
399
400 /* Avoid warning about empty types such as structs with no members.
401 The first_field() test is important for C++ where the predicate
402 alone isn't always sufficient. */
403 tree rhstype = TREE_TYPE (rhs);
404 if (TYPE_EMPTY_P (rhstype)
405 || (RECORD_OR_UNION_TYPE_P (rhstype)
406 && (!first_field (rhstype)
407 || default_is_empty_record (rhstype))))
408 return NULL_TREE;
409
410 bool warned = false;
411   /* We didn't find any may-defs, so every path either reached
412      function entry or a killing clobber.  */
413 location_t location
414 = linemap_resolve_location (line_table, gimple_location (stmt),
415 LRK_SPELLING_LOCATION, NULL);
416 if (wlims.always_executed)
417 {
418 if (warning_at (location, OPT_Wuninitialized,
419 "%G%qE is used uninitialized", stmt, rhs))
420 {
421 /* ??? This is only effective for decls as in
422 gcc.dg/uninit-B-O0.c. Avoid doing this for maybe-uninit
423 uses or accesses by functions as it may hide important
424 locations. */
425 if (lhs)
426 TREE_NO_WARNING (rhs) = 1;
427 warned = true;
428 }
429 }
430 else if (wlims.wmaybe_uninit)
431 warned = warning_at (location, OPT_Wmaybe_uninitialized,
432 "%G%qE may be used uninitialized", stmt, rhs);
433
434 return warned ? base : NULL_TREE;
435 }
436
437
438 /* Diagnose passing addresses of uninitialized objects to either const
439 pointer arguments to functions, or to functions declared with attribute
440 access implying read access to those objects. */
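/* A sketch of the kind of call this diagnoses (illustrative only; the
   names are hypothetical):

     __attribute__ ((access (read_only, 1))) void consume (const int *);

     void f (void)
     {
       int x;           // never initialized
       consume (&x);    // read access implied by the attribute (or by
                        // the const-qualified pointer): diagnose here
     }
 */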
441
442 static void
443 maybe_warn_pass_by_reference (gimple *stmt, wlimits &wlims)
444 {
445 if (!wlims.wmaybe_uninit)
446 return;
447
448 unsigned nargs = gimple_call_num_args (stmt);
449 if (!nargs)
450 return;
451
452 tree fndecl = gimple_call_fndecl (stmt);
453 tree fntype = gimple_call_fntype (stmt);
454 if (!fntype)
455 return;
456
457 const built_in_function fncode
458 = (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
459 ? DECL_FUNCTION_CODE (fndecl) : (built_in_function)BUILT_IN_LAST);
460
461 if (fncode == BUILT_IN_MEMCPY || fncode == BUILT_IN_MEMMOVE)
462 /* Avoid diagnosing calls to raw memory functions (this is overly
463 permissive; consider tightening it up). */
464 return;
465
466   /* Save the current warning setting and replace it with either a "maybe"
467      when passing addresses of uninitialized variables to const-qualified
468      pointers or arguments declared with attribute read_write, or with
469      a "certain" when passing them to arguments declared with attribute
470      read_only.  */
471 const bool save_always_executed = wlims.always_executed;
472
473 /* Map of attribute access specifications for function arguments. */
474 rdwr_map rdwr_idx;
475 init_attr_rdwr_indices (&rdwr_idx, fntype);
476
477 tree argtype;
478 unsigned argno = 0;
479 function_args_iterator it;
480
481 FOREACH_FUNCTION_ARGS (fntype, argtype, it)
482 {
483 ++argno;
484
485 if (!POINTER_TYPE_P (argtype))
486 continue;
487
488 tree access_size = NULL_TREE;
489 attr_access *access = rdwr_idx.get (argno - 1);
490 if (access)
491 {
492 if (access->mode == attr_access::none
493 || access->mode == attr_access::write_only)
494 continue;
495 if (save_always_executed && access->mode == attr_access::read_only)
496 /* Attribute read_only arguments imply read access. */
497 wlims.always_executed = true;
498 else
499 /* Attribute read_write arguments are documented as requiring
500 initialized objects but it's expected that aggregates may
501 be only partially initialized regardless. */
502 wlims.always_executed = false;
503
504 if (access->sizarg < nargs)
505 access_size = gimple_call_arg (stmt, access->sizarg);
506 }
507 else if (!TYPE_READONLY (TREE_TYPE (argtype)))
508 continue;
509 else if (save_always_executed && fncode != BUILT_IN_LAST)
510 /* Const-qualified arguments to built-ins imply read access. */
511 wlims.always_executed = true;
512 else
513 /* Const-qualified arguments to ordinary functions imply a likely
514 (but not definitive) read access. */
515 wlims.always_executed = false;
516
517 tree arg = gimple_call_arg (stmt, argno - 1);
518
519 ao_ref ref;
520 ao_ref_init_from_ptr_and_size (&ref, arg, access_size);
521 tree argbase = maybe_warn_operand (ref, stmt, NULL_TREE, arg, wlims);
522 if (!argbase)
523 continue;
524
525 if (access)
526 {
527 const char* const mode = (access->mode == attr_access::read_only
528 ? "read_only" : "read_write");
529 char attrstr[80];
530 int n = sprintf (attrstr, "access (%s, %u", mode, argno);
531 if (access->sizarg < UINT_MAX)
532 sprintf (attrstr + n, ", %u)", access->sizarg);
533 else
534 strcpy (attrstr + n, ")");
535
536 if (fndecl)
537 {
538 location_t loc = DECL_SOURCE_LOCATION (fndecl);
539 inform (loc, "in a call to %qD declared "
540 "with attribute %<access (%s, %u)%> here",
541 fndecl, mode, argno);
542 }
543 else
544 {
545 /* Handle calls through function pointers. */
546 location_t loc = gimple_location (stmt);
547 inform (loc, "in a call to %qT declared with "
548 "attribute %<access (%s, %u)%>",
549 fntype, mode, argno);
550 }
551 }
552 else if (fndecl)
553 {
554 location_t loc = DECL_SOURCE_LOCATION (fndecl);
555 inform (loc, "by argument %u of type %qT to %qD declared here",
556 argno, argtype, fndecl);
557 }
558 else
559 {
560 /* Handle calls through function pointers. */
561 location_t loc = gimple_location (stmt);
562 inform (loc, "by argument %u of type %qT to %qT",
563 argno, argtype, fntype);
564 }
565
566 if (DECL_P (argbase))
567 {
568 location_t loc = DECL_SOURCE_LOCATION (argbase);
569 inform (loc, "%qD declared here", argbase);
570 }
571 }
572
573 wlims.always_executed = save_always_executed;
574 }
575
576
577 static unsigned int
578 warn_uninitialized_vars (bool wmaybe_uninit)
579 {
580   /* Counters and limits controlling the depth of the warning.  */
581 wlimits wlims = { };
582 wlims.wmaybe_uninit = wmaybe_uninit;
583
584 gimple_stmt_iterator gsi;
585 basic_block bb;
586 FOR_EACH_BB_FN (bb, cfun)
587 {
588 basic_block succ = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
589 wlims.always_executed = dominated_by_p (CDI_POST_DOMINATORS, succ, bb);
590 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
591 {
592 gimple *stmt = gsi_stmt (gsi);
593 use_operand_p use_p;
594 ssa_op_iter op_iter;
595 tree use;
596
597 if (is_gimple_debug (stmt))
598 continue;
599
600 /* We only do data flow with SSA_NAMEs, so that's all we
601 can warn about. */
602 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
603 {
604 /* BIT_INSERT_EXPR first operand should not be considered
605 a use for the purpose of uninit warnings. */
606 if (gassign *ass = dyn_cast <gassign *> (stmt))
607 {
608 if (gimple_assign_rhs_code (ass) == BIT_INSERT_EXPR
609 && use_p->use == gimple_assign_rhs1_ptr (ass))
610 continue;
611 }
612 use = USE_FROM_PTR (use_p);
613 if (wlims.always_executed)
614 warn_uninit (OPT_Wuninitialized, use, SSA_NAME_VAR (use),
615 SSA_NAME_VAR (use),
616 "%qD is used uninitialized", stmt,
617 UNKNOWN_LOCATION);
618 else if (wmaybe_uninit)
619 warn_uninit (OPT_Wmaybe_uninitialized, use, SSA_NAME_VAR (use),
620 SSA_NAME_VAR (use),
621 "%qD may be used uninitialized",
622 stmt, UNKNOWN_LOCATION);
623 }
624
625 /* For limiting the alias walk below we count all
626 vdefs in the function. */
627 if (gimple_vdef (stmt))
628 wlims.vdef_cnt++;
629
630 if (is_gimple_call (stmt))
631 maybe_warn_pass_by_reference (stmt, wlims);
632 else if (gimple_assign_load_p (stmt)
633 && gimple_has_location (stmt))
634 {
635 tree rhs = gimple_assign_rhs1 (stmt);
636 tree lhs = gimple_assign_lhs (stmt);
637
638 ao_ref ref;
639 ao_ref_init (&ref, rhs);
640 tree var = maybe_warn_operand (ref, stmt, lhs, rhs, wlims);
641 if (!var)
642 continue;
643
644 if (DECL_P (var))
645 {
646 location_t loc = DECL_SOURCE_LOCATION (var);
647 inform (loc, "%qD declared here", var);
648 }
649 }
650 }
651 }
652
653 return 0;
654 }
655
656 /* Checks if the operand OPND of PHI is defined by
657    another phi with one operand defined by this PHI,
658    but the rest of the operands are all defined.  If yes,
659    returns true to skip this operand as being
660    redundant.  Can be enhanced to be more general.  */
661
662 static bool
663 can_skip_redundant_opnd (tree opnd, gimple *phi)
664 {
665 gimple *op_def;
666 tree phi_def;
667 int i, n;
668
669 phi_def = gimple_phi_result (phi);
670 op_def = SSA_NAME_DEF_STMT (opnd);
671 if (gimple_code (op_def) != GIMPLE_PHI)
672 return false;
673 n = gimple_phi_num_args (op_def);
674 for (i = 0; i < n; ++i)
675 {
676 tree op = gimple_phi_arg_def (op_def, i);
677 if (TREE_CODE (op) != SSA_NAME)
678 continue;
679 if (op != phi_def && uninit_undefined_value_p (op))
680 return false;
681 }
682
683 return true;
684 }
685
686 /* Returns a bit mask holding the positions of arguments in PHI
687 that have empty (or possibly empty) definitions. */
688
689 static unsigned
690 compute_uninit_opnds_pos (gphi *phi)
691 {
692 size_t i, n;
693 unsigned uninit_opnds = 0;
694
695 n = gimple_phi_num_args (phi);
696 /* Bail out for phi with too many args. */
697 if (n > max_phi_args)
698 return 0;
699
700 for (i = 0; i < n; ++i)
701 {
702 tree op = gimple_phi_arg_def (phi, i);
703 if (TREE_CODE (op) == SSA_NAME
704 && uninit_undefined_value_p (op)
705 && !can_skip_redundant_opnd (op, phi))
706 {
707 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
708 {
709 /* Ignore SSA_NAMEs that appear on abnormal edges
710 somewhere. */
711 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
712 continue;
713 }
714 MASK_SET_BIT (uninit_opnds, i);
715 }
716 }
717 return uninit_opnds;
718 }
719
720 /* Find the immediate postdominator PDOM of the specified
721 basic block BLOCK. */
722
723 static inline basic_block
724 find_pdom (basic_block block)
725 {
726 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
727 return EXIT_BLOCK_PTR_FOR_FN (cfun);
728 else
729 {
730 basic_block bb = get_immediate_dominator (CDI_POST_DOMINATORS, block);
731 if (!bb)
732 return EXIT_BLOCK_PTR_FOR_FN (cfun);
733 return bb;
734 }
735 }
736
737 /* Find the immediate DOM of the specified basic block BLOCK. */
738
739 static inline basic_block
740 find_dom (basic_block block)
741 {
742 if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
743 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
744 else
745 {
746 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
747 if (!bb)
748 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
749 return bb;
750 }
751 }
752
753 /* Returns true if BB1 is postdominating BB2 and BB1 is
754 not a loop exit bb. The loop exit bb check is simple and does
755 not cover all cases. */
756
757 static bool
758 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
759 {
760 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
761 return false;
762
763 if (single_pred_p (bb1) && !single_succ_p (bb2))
764 return false;
765
766 return true;
767 }
768
769 /* Find the closest postdominator of a specified BB, which is control
770 equivalent to BB. */
771
772 static inline basic_block
773 find_control_equiv_block (basic_block bb)
774 {
775 basic_block pdom;
776
777 pdom = find_pdom (bb);
778
779 /* Skip the postdominating bb that is also loop exit. */
780 if (!is_non_loop_exit_postdominating (pdom, bb))
781 return NULL;
782
783 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
784 return pdom;
785
786 return NULL;
787 }
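/* For illustration: in

     if (c)        // bb2
       foo ();     // bb3
     bar ();       // bb4

   bb4 postdominates bb2 and is also dominated by it, so bb2 and bb4 are
   control equivalent, while bb3 is not control equivalent to bb2.  */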
788
789 #define MAX_NUM_CHAINS 8
790 #define MAX_CHAIN_LEN 5
791 #define MAX_POSTDOM_CHECK 8
792 #define MAX_SWITCH_CASES 40
793
794 /* Computes the control dependence chains (paths of edges)
795    for DEP_BB up to the dominating basic block BB (the head node of a
796    chain should be dominated by it).  CD_CHAINS is a pointer to an
797    array holding the result chains.  CUR_CD_CHAIN is the current
798    chain being computed.  *NUM_CHAINS is the total number of chains.
799    The function returns true if the information is successfully computed,
800    and false if there is no control dependence or it could not be computed.  */
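/* An illustrative chain (not from the original sources): for

     if (a)          // bb1
       if (b)        // bb2
         use ();     // bb3

   bb3 is control dependent on bb1 through the chain of edges
   bb1->bb2, bb2->bb3, which is later converted to the composite
   predicate (a != 0) .AND. (b != 0).  */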
801
802 static bool
803 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
804 vec<edge> *cd_chains,
805 size_t *num_chains,
806 vec<edge> *cur_cd_chain,
807 int *num_calls)
808 {
809 edge_iterator ei;
810 edge e;
811 size_t i;
812 bool found_cd_chain = false;
813 size_t cur_chain_len = 0;
814
815 if (*num_calls > param_uninit_control_dep_attempts)
816 return false;
817 ++*num_calls;
818
819 /* Could use a set instead. */
820 cur_chain_len = cur_cd_chain->length ();
821 if (cur_chain_len > MAX_CHAIN_LEN)
822 return false;
823
824 for (i = 0; i < cur_chain_len; i++)
825 {
826 edge e = (*cur_cd_chain)[i];
827 /* Cycle detected. */
828 if (e->src == bb)
829 return false;
830 }
831
832 FOR_EACH_EDGE (e, ei, bb->succs)
833 {
834 basic_block cd_bb;
835 int post_dom_check = 0;
836 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
837 continue;
838
839 cd_bb = e->dest;
840 cur_cd_chain->safe_push (e);
841 while (!is_non_loop_exit_postdominating (cd_bb, bb))
842 {
843 if (cd_bb == dep_bb)
844 {
845 /* Found a direct control dependence. */
846 if (*num_chains < MAX_NUM_CHAINS)
847 {
848 cd_chains[*num_chains] = cur_cd_chain->copy ();
849 (*num_chains)++;
850 }
851 found_cd_chain = true;
852 /* Check path from next edge. */
853 break;
854 }
855
856 /* Now check if DEP_BB is indirectly control dependent on BB. */
857 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains, num_chains,
858 cur_cd_chain, num_calls))
859 {
860 found_cd_chain = true;
861 break;
862 }
863
864 cd_bb = find_pdom (cd_bb);
865 post_dom_check++;
866 if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
867 || post_dom_check > MAX_POSTDOM_CHECK)
868 break;
869 }
870 cur_cd_chain->pop ();
871 gcc_assert (cur_cd_chain->length () == cur_chain_len);
872 }
873 gcc_assert (cur_cd_chain->length () == cur_chain_len);
874
875 return found_cd_chain;
876 }
877
878 /* The type to represent a simple predicate. */
879
880 struct pred_info
881 {
882 tree pred_lhs;
883 tree pred_rhs;
884 enum tree_code cond_code;
885 bool invert;
886 };
887
888 /* The type to represent a sequence of predicates grouped
889 with .AND. operation. */
890
891 typedef vec<pred_info, va_heap, vl_ptr> pred_chain;
892
893 /* The type to represent a sequence of pred_chains grouped
894 with .OR. operation. */
895
896 typedef vec<pred_chain, va_heap, vl_ptr> pred_chain_union;
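/* For illustration (not from the original sources), a guard such as

     if ((a > 0 && b != 0) || c == 1)

   would be represented as a pred_chain_union of two pred_chains:

     { {a, 0, GT_EXPR, false}, {b, 0, NE_EXPR, false} }  // a > 0 .AND. b != 0
     { {c, 1, EQ_EXPR, false} }                          // .OR. c == 1

   where each pred_info lists pred_lhs, pred_rhs, cond_code and invert.  */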
897
898 /* Converts the chains of control dependence edges into a set of
899    predicates.  A control dependence chain is represented by a vector
900    of edges.  DEP_CHAINS points to an array of dependence chains.
901    NUM_CHAINS is the size of the chain array.  One edge in a dependence
902    chain is mapped to a predicate expression represented by the
903    pred_info type.  One dependence chain is converted to a composite
904    predicate that is the result of ANDing the pred_info mapped to each
905    edge.  A composite predicate is represented by a vector of pred_info.
906    On return, *PREDS points to the resulting array of composite
907    predicates.  */
908
909 static bool
910 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
911 size_t num_chains,
912 pred_chain_union *preds)
913 {
914 bool has_valid_pred = false;
915 size_t i, j;
916 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
917 return false;
918
919 /* Now convert the control dep chain into a set
920 of predicates. */
921 preds->reserve (num_chains);
922
923 for (i = 0; i < num_chains; i++)
924 {
925 vec<edge> one_cd_chain = dep_chains[i];
926
927 has_valid_pred = false;
928 pred_chain t_chain = vNULL;
929 for (j = 0; j < one_cd_chain.length (); j++)
930 {
931 gimple *cond_stmt;
932 gimple_stmt_iterator gsi;
933 basic_block guard_bb;
934 pred_info one_pred;
935 edge e;
936
937 e = one_cd_chain[j];
938 guard_bb = e->src;
939 gsi = gsi_last_bb (guard_bb);
940 /* Ignore empty forwarder blocks. */
941 if (empty_block_p (guard_bb) && single_succ_p (guard_bb))
942 continue;
943 /* An empty basic block here is likely a PHI, and is not one
944 of the cases we handle below. */
945 if (gsi_end_p (gsi))
946 {
947 has_valid_pred = false;
948 break;
949 }
950 cond_stmt = gsi_stmt (gsi);
951 if (is_gimple_call (cond_stmt) && EDGE_COUNT (e->src->succs) >= 2)
952 /* Ignore EH edge. Can add assertion on the other edge's flag. */
953 continue;
954 	  /* Skip if there is essentially one successor.  */
955 if (EDGE_COUNT (e->src->succs) == 2)
956 {
957 edge e1;
958 edge_iterator ei1;
959 bool skip = false;
960
961 FOR_EACH_EDGE (e1, ei1, e->src->succs)
962 {
963 if (EDGE_COUNT (e1->dest->succs) == 0)
964 {
965 skip = true;
966 break;
967 }
968 }
969 if (skip)
970 continue;
971 }
972 if (gimple_code (cond_stmt) == GIMPLE_COND)
973 {
974 one_pred.pred_lhs = gimple_cond_lhs (cond_stmt);
975 one_pred.pred_rhs = gimple_cond_rhs (cond_stmt);
976 one_pred.cond_code = gimple_cond_code (cond_stmt);
977 one_pred.invert = !!(e->flags & EDGE_FALSE_VALUE);
978 t_chain.safe_push (one_pred);
979 has_valid_pred = true;
980 }
981 else if (gswitch *gs = dyn_cast<gswitch *> (cond_stmt))
982 {
983 /* Avoid quadratic behavior. */
984 if (gimple_switch_num_labels (gs) > MAX_SWITCH_CASES)
985 {
986 has_valid_pred = false;
987 break;
988 }
989 /* Find the case label. */
990 tree l = NULL_TREE;
991 unsigned idx;
992 for (idx = 0; idx < gimple_switch_num_labels (gs); ++idx)
993 {
994 tree tl = gimple_switch_label (gs, idx);
995 if (e->dest == label_to_block (cfun, CASE_LABEL (tl)))
996 {
997 if (!l)
998 l = tl;
999 else
1000 {
1001 l = NULL_TREE;
1002 break;
1003 }
1004 }
1005 }
1006 /* If more than one label reaches this block or the case
1007 label doesn't have a single value (like the default one)
1008 fail. */
1009 if (!l
1010 || !CASE_LOW (l)
1011 || (CASE_HIGH (l)
1012 && !operand_equal_p (CASE_LOW (l), CASE_HIGH (l), 0)))
1013 {
1014 has_valid_pred = false;
1015 break;
1016 }
1017 one_pred.pred_lhs = gimple_switch_index (gs);
1018 one_pred.pred_rhs = CASE_LOW (l);
1019 one_pred.cond_code = EQ_EXPR;
1020 one_pred.invert = false;
1021 t_chain.safe_push (one_pred);
1022 has_valid_pred = true;
1023 }
1024 else
1025 {
1026 has_valid_pred = false;
1027 break;
1028 }
1029 }
1030
1031 if (!has_valid_pred)
1032 break;
1033 else
1034 preds->safe_push (t_chain);
1035 }
1036 return has_valid_pred;
1037 }
1038
1039 /* Computes all control dependence chains for USE_BB. The control
1040 dependence chains are then converted to an array of composite
1041 predicates pointed to by PREDS. PHI_BB is the basic block of
1042 the phi whose result is used in USE_BB. */
1043
1044 static bool
1045 find_predicates (pred_chain_union *preds,
1046 basic_block phi_bb,
1047 basic_block use_bb)
1048 {
1049 size_t num_chains = 0, i;
1050 int num_calls = 0;
1051 vec<edge> dep_chains[MAX_NUM_CHAINS];
1052 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
1053 bool has_valid_pred = false;
1054 basic_block cd_root = 0;
1055
1056 /* First find the closest bb that is control equivalent to PHI_BB
1057 that also dominates USE_BB. */
1058 cd_root = phi_bb;
1059 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
1060 {
1061 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
1062 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
1063 cd_root = ctrl_eq_bb;
1064 else
1065 break;
1066 }
1067
1068 compute_control_dep_chain (cd_root, use_bb, dep_chains, &num_chains,
1069 &cur_chain, &num_calls);
1070
1071 has_valid_pred
1072 = convert_control_dep_chain_into_preds (dep_chains, num_chains, preds);
1073 for (i = 0; i < num_chains; i++)
1074 dep_chains[i].release ();
1075 return has_valid_pred;
1076 }
1077
1078 /* Computes the set of incoming edges of PHI that have non empty
1079 definitions of a phi chain. The collection will be done
1080 recursively on operands that are defined by phis. CD_ROOT
1081 is the control dependence root. *EDGES holds the result, and
1082 VISITED_PHIS is a pointer set for detecting cycles. */
1083
1084 static void
1085 collect_phi_def_edges (gphi *phi, basic_block cd_root,
1086 auto_vec<edge> *edges,
1087 hash_set<gimple *> *visited_phis)
1088 {
1089 size_t i, n;
1090 edge opnd_edge;
1091 tree opnd;
1092
1093 if (visited_phis->add (phi))
1094 return;
1095
1096 n = gimple_phi_num_args (phi);
1097 for (i = 0; i < n; i++)
1098 {
1099 opnd_edge = gimple_phi_arg_edge (phi, i);
1100 opnd = gimple_phi_arg_def (phi, i);
1101
1102 if (TREE_CODE (opnd) != SSA_NAME)
1103 {
1104 if (dump_file && (dump_flags & TDF_DETAILS))
1105 {
1106 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int) i);
1107 print_gimple_stmt (dump_file, phi, 0);
1108 }
1109 edges->safe_push (opnd_edge);
1110 }
1111 else
1112 {
1113 gimple *def = SSA_NAME_DEF_STMT (opnd);
1114
1115 if (gimple_code (def) == GIMPLE_PHI
1116 && dominated_by_p (CDI_DOMINATORS, gimple_bb (def), cd_root))
1117 collect_phi_def_edges (as_a<gphi *> (def), cd_root, edges,
1118 visited_phis);
1119 else if (!uninit_undefined_value_p (opnd))
1120 {
1121 if (dump_file && (dump_flags & TDF_DETAILS))
1122 {
1123 fprintf (dump_file, "\n[CHECK] Found def edge %d in ",
1124 (int) i);
1125 print_gimple_stmt (dump_file, phi, 0);
1126 }
1127 edges->safe_push (opnd_edge);
1128 }
1129 }
1130 }
1131 }
1132
1133 /* For each use edge of PHI, computes all control dependence chains.
1134 The control dependence chains are then converted to an array of
1135 composite predicates pointed to by PREDS. */
1136
1137 static bool
1138 find_def_preds (pred_chain_union *preds, gphi *phi)
1139 {
1140 size_t num_chains = 0, i, n;
1141 vec<edge> dep_chains[MAX_NUM_CHAINS];
1142 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
1143 auto_vec<edge> def_edges;
1144 bool has_valid_pred = false;
1145 basic_block phi_bb, cd_root = 0;
1146
1147 phi_bb = gimple_bb (phi);
1148 /* First find the closest dominating bb to be
1149 the control dependence root. */
1150 cd_root = find_dom (phi_bb);
1151 if (!cd_root)
1152 return false;
1153
1154 hash_set<gimple *> visited_phis;
1155 collect_phi_def_edges (phi, cd_root, &def_edges, &visited_phis);
1156
1157 n = def_edges.length ();
1158 if (n == 0)
1159 return false;
1160
1161 for (i = 0; i < n; i++)
1162 {
1163 size_t prev_nc, j;
1164 int num_calls = 0;
1165 edge opnd_edge;
1166
1167 opnd_edge = def_edges[i];
1168 prev_nc = num_chains;
1169 compute_control_dep_chain (cd_root, opnd_edge->src, dep_chains,
1170 &num_chains, &cur_chain, &num_calls);
1171
1172 /* Now update the newly added chains with
1173 the phi operand edge: */
1174 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
1175 {
1176 if (prev_nc == num_chains && num_chains < MAX_NUM_CHAINS)
1177 dep_chains[num_chains++] = vNULL;
1178 for (j = prev_nc; j < num_chains; j++)
1179 dep_chains[j].safe_push (opnd_edge);
1180 }
1181 }
1182
1183 has_valid_pred
1184 = convert_control_dep_chain_into_preds (dep_chains, num_chains, preds);
1185 for (i = 0; i < num_chains; i++)
1186 dep_chains[i].release ();
1187 return has_valid_pred;
1188 }
1189
1190 /* Dump a pred_info. */
1191
1192 static void
1193 dump_pred_info (pred_info one_pred)
1194 {
1195 if (one_pred.invert)
1196 fprintf (dump_file, " (.NOT.) ");
1197 print_generic_expr (dump_file, one_pred.pred_lhs);
1198 fprintf (dump_file, " %s ", op_symbol_code (one_pred.cond_code));
1199 print_generic_expr (dump_file, one_pred.pred_rhs);
1200 }
1201
1202 /* Dump a pred_chain. */
1203
1204 static void
1205 dump_pred_chain (pred_chain one_pred_chain)
1206 {
1207 size_t np = one_pred_chain.length ();
1208 for (size_t j = 0; j < np; j++)
1209 {
1210 dump_pred_info (one_pred_chain[j]);
1211 if (j < np - 1)
1212 fprintf (dump_file, " (.AND.) ");
1213 else
1214 fprintf (dump_file, "\n");
1215 }
1216 }
1217
1218 /* Dumps the predicates (PREDS) for USESTMT. */
1219
1220 static void
1221 dump_predicates (gimple *usestmt, pred_chain_union preds, const char *msg)
1222 {
1223 fprintf (dump_file, "%s", msg);
1224 if (usestmt)
1225 {
1226 print_gimple_stmt (dump_file, usestmt, 0);
1227 fprintf (dump_file, "is guarded by :\n\n");
1228 }
1229 size_t num_preds = preds.length ();
1230 for (size_t i = 0; i < num_preds; i++)
1231 {
1232 dump_pred_chain (preds[i]);
1233 if (i < num_preds - 1)
1234 fprintf (dump_file, "(.OR.)\n");
1235 else
1236 fprintf (dump_file, "\n\n");
1237 }
1238 }
1239
1240 /* Destroys the predicate set *PREDS. */
1241
1242 static void
1243 destroy_predicate_vecs (pred_chain_union *preds)
1244 {
1245 size_t i;
1246
1247 size_t n = preds->length ();
1248 for (i = 0; i < n; i++)
1249 (*preds)[i].release ();
1250 preds->release ();
1251 }
1252
1253 /* Computes the 'normalized' conditional code with operand
1254 swapping and condition inversion. */
1255
1256 static enum tree_code
1257 get_cmp_code (enum tree_code orig_cmp_code, bool swap_cond, bool invert)
1258 {
1259 enum tree_code tc = orig_cmp_code;
1260
1261 if (swap_cond)
1262 tc = swap_tree_comparison (orig_cmp_code);
1263 if (invert)
1264 tc = invert_tree_comparison (tc, false);
1265
1266 switch (tc)
1267 {
1268 case LT_EXPR:
1269 case LE_EXPR:
1270 case GT_EXPR:
1271 case GE_EXPR:
1272 case EQ_EXPR:
1273 case NE_EXPR:
1274 break;
1275 default:
1276 return ERROR_MARK;
1277 }
1278 return tc;
1279 }
1280
1281 /* Returns whether VAL CMPC BOUNDARY is true. */
1282
1283 static bool
1284 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
1285 {
1286 bool inverted = false;
1287 bool result;
1288
1289   /* Only handle integer constants here.  */
1290 if (TREE_CODE (val) != INTEGER_CST || TREE_CODE (boundary) != INTEGER_CST)
1291 return true;
1292
1293 if (cmpc == GE_EXPR || cmpc == GT_EXPR || cmpc == NE_EXPR)
1294 {
1295 cmpc = invert_tree_comparison (cmpc, false);
1296 inverted = true;
1297 }
1298
1299 if (cmpc == EQ_EXPR)
1300 result = tree_int_cst_equal (val, boundary);
1301 else if (cmpc == LT_EXPR)
1302 result = tree_int_cst_lt (val, boundary);
1303 else
1304 {
1305 gcc_assert (cmpc == LE_EXPR);
1306 result = tree_int_cst_le (val, boundary);
1307 }
1308
1309 if (inverted)
1310 result ^= 1;
1311
1312 return result;
1313 }
1314
1315 /* Returns whether VAL satisfies (x CMPC BOUNDARY) predicate. CMPC can be
1316 either one of the range comparison codes ({GE,LT,EQ,NE}_EXPR and the like),
1317 or BIT_AND_EXPR. EXACT_P is only meaningful for the latter. It modifies the
1318 question from whether VAL & BOUNDARY != 0 to whether VAL & BOUNDARY == VAL.
1319 For other values of CMPC, EXACT_P is ignored. */
1320
1321 static bool
1322 value_sat_pred_p (tree val, tree boundary, enum tree_code cmpc,
1323 bool exact_p = false)
1324 {
1325 if (cmpc != BIT_AND_EXPR)
1326 return is_value_included_in (val, boundary, cmpc);
1327
1328 wide_int andw = wi::to_wide (val) & wi::to_wide (boundary);
1329 if (exact_p)
1330 return andw == wi::to_wide (val);
1331 else
1332 return andw.to_uhwi ();
1333 }
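/* Worked illustration (hypothetical INTEGER_CST values): with VAL == 4
   and BOUNDARY == 6,

     CMPC == LT_EXPR               : 4 < 6                   -> true
     CMPC == BIT_AND_EXPR          : (4 & 6) == 4, nonzero   -> true
     CMPC == BIT_AND_EXPR, EXACT_P : (4 & 6) == 4 == VAL     -> true

   while with VAL == 1 the exact form gives (1 & 6) == 0 != 1 -> false.  */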
1334
1335 /* Returns true if PRED is common among all the predicate
1336 chains (PREDS) (and therefore can be factored out).
1337 NUM_PRED_CHAIN is the size of array PREDS. */
1338
1339 static bool
1340 find_matching_predicate_in_rest_chains (pred_info pred,
1341 pred_chain_union preds,
1342 size_t num_pred_chains)
1343 {
1344 size_t i, j, n;
1345
1346   /* Trivial case.  */
1347 if (num_pred_chains == 1)
1348 return true;
1349
1350 for (i = 1; i < num_pred_chains; i++)
1351 {
1352 bool found = false;
1353 pred_chain one_chain = preds[i];
1354 n = one_chain.length ();
1355 for (j = 0; j < n; j++)
1356 {
1357 pred_info pred2 = one_chain[j];
1358 /* Can relax the condition comparison to not
1359 use address comparison. However, the most common
1360 case is that multiple control dependent paths share
1361 a common path prefix, so address comparison should
1362 be ok. */
1363
1364 if (operand_equal_p (pred2.pred_lhs, pred.pred_lhs, 0)
1365 && operand_equal_p (pred2.pred_rhs, pred.pred_rhs, 0)
1366 && pred2.invert == pred.invert)
1367 {
1368 found = true;
1369 break;
1370 }
1371 }
1372 if (!found)
1373 return false;
1374 }
1375 return true;
1376 }
1377
1378 /* Forward declaration. */
1379 static bool is_use_properly_guarded (gimple *use_stmt,
1380 basic_block use_bb,
1381 gphi *phi,
1382 unsigned uninit_opnds,
1383 pred_chain_union *def_preds,
1384 hash_set<gphi *> *visited_phis);
1385
1386 /* Returns true if all uninitialized opnds are pruned. Returns false
1387 otherwise. PHI is the phi node with uninitialized operands,
1388    UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
1389 FLAG_DEF is the statement defining the flag guarding the use of the
1390 PHI output, BOUNDARY_CST is the const value used in the predicate
1391 associated with the flag, CMP_CODE is the comparison code used in
1392 the predicate, VISITED_PHIS is the pointer set of phis visited, and
1393 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
1394 that are also phis.
1395
1396 Example scenario:
1397
1398 BB1:
1399 flag_1 = phi <0, 1> // (1)
1400 var_1 = phi <undef, some_val>
1401
1402
1403 BB2:
1404 flag_2 = phi <0, flag_1, flag_1> // (2)
1405 var_2 = phi <undef, var_1, var_1>
1406 if (flag_2 == 1)
1407 goto BB3;
1408
1409 BB3:
1410 use of var_2 // (3)
1411
1412    Because some flag arg in (1) is not constant, if we do not look into the
1413    flag phis recursively, it is conservatively treated as unknown and var_1
1414    is thought to flow into the use at (3).  Since var_1 is potentially
1415    uninitialized, a false warning will be emitted.
1416    By checking recursively into (1), the compiler can find out that only
1417    some_val (which is defined) can flow into (3), which is OK.  */
1418
1419 static bool
1420 prune_uninit_phi_opnds (gphi *phi, unsigned uninit_opnds, gphi *flag_def,
1421 tree boundary_cst, enum tree_code cmp_code,
1422 hash_set<gphi *> *visited_phis,
1423 bitmap *visited_flag_phis)
1424 {
1425 unsigned i;
1426
1427 for (i = 0; i < MIN (max_phi_args, gimple_phi_num_args (flag_def)); i++)
1428 {
1429 tree flag_arg;
1430
1431 if (!MASK_TEST_BIT (uninit_opnds, i))
1432 continue;
1433
1434 flag_arg = gimple_phi_arg_def (flag_def, i);
1435 if (!is_gimple_constant (flag_arg))
1436 {
1437 gphi *flag_arg_def, *phi_arg_def;
1438 tree phi_arg;
1439 unsigned uninit_opnds_arg_phi;
1440
1441 if (TREE_CODE (flag_arg) != SSA_NAME)
1442 return false;
1443 flag_arg_def = dyn_cast<gphi *> (SSA_NAME_DEF_STMT (flag_arg));
1444 if (!flag_arg_def)
1445 return false;
1446
1447 phi_arg = gimple_phi_arg_def (phi, i);
1448 if (TREE_CODE (phi_arg) != SSA_NAME)
1449 return false;
1450
1451 phi_arg_def = dyn_cast<gphi *> (SSA_NAME_DEF_STMT (phi_arg));
1452 if (!phi_arg_def)
1453 return false;
1454
1455 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1456 return false;
1457
1458 if (!*visited_flag_phis)
1459 *visited_flag_phis = BITMAP_ALLOC (NULL);
1460
1461 tree phi_result = gimple_phi_result (flag_arg_def);
1462 if (bitmap_bit_p (*visited_flag_phis, SSA_NAME_VERSION (phi_result)))
1463 return false;
1464
1465 bitmap_set_bit (*visited_flag_phis,
1466 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1467
1468 /* Now recursively prune the uninitialized phi args. */
1469 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1470 if (!prune_uninit_phi_opnds
1471 (phi_arg_def, uninit_opnds_arg_phi, flag_arg_def, boundary_cst,
1472 cmp_code, visited_phis, visited_flag_phis))
1473 return false;
1474
1475 phi_result = gimple_phi_result (flag_arg_def);
1476 bitmap_clear_bit (*visited_flag_phis, SSA_NAME_VERSION (phi_result));
1477 continue;
1478 }
1479
1480 /* Now check if the constant is in the guarded range. */
1481 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1482 {
1483 tree opnd;
1484 gimple *opnd_def;
1485
1486 	  /* Now we know that this undefined edge is not
1487 	     pruned.  If the operand is defined by another phi,
1488 	     we can further prune the incoming edges of that
1489 	     phi by checking the predicates of this operand.  */
1490
1491 opnd = gimple_phi_arg_def (phi, i);
1492 opnd_def = SSA_NAME_DEF_STMT (opnd);
1493 if (gphi *opnd_def_phi = dyn_cast <gphi *> (opnd_def))
1494 {
1495 edge opnd_edge;
1496 unsigned uninit_opnds2 = compute_uninit_opnds_pos (opnd_def_phi);
1497 if (!MASK_EMPTY (uninit_opnds2))
1498 {
1499 pred_chain_union def_preds = vNULL;
1500 bool ok;
1501 opnd_edge = gimple_phi_arg_edge (phi, i);
1502 ok = is_use_properly_guarded (phi,
1503 opnd_edge->src,
1504 opnd_def_phi,
1505 uninit_opnds2,
1506 &def_preds,
1507 visited_phis);
1508 destroy_predicate_vecs (&def_preds);
1509 if (!ok)
1510 return false;
1511 }
1512 }
1513 else
1514 return false;
1515 }
1516 }
1517
1518 return true;
1519 }
1520
1521 /* A helper function that determines if the predicate set
1522 of the use is not overlapping with that of the uninit paths.
1523    The most common scenario of guarded use is in Example 1:
1524 Example 1:
1525 if (some_cond)
1526 {
1527 x = ...;
1528 flag = true;
1529 }
1530
1531 ... some code ...
1532
1533 if (flag)
1534 use (x);
1535
1536 The real world examples are usually more complicated, but similar
1537 and usually result from inlining:
1538
1539 bool init_func (int * x)
1540 {
1541 if (some_cond)
1542 return false;
1543 *x = ..
1544 return true;
1545 }
1546
1547 void foo (..)
1548 {
1549 int x;
1550
1551 if (!init_func (&x))
1552 return;
1553
1554 .. some_code ...
1555 use (x);
1556 }
1557
1558 Another possible use scenario is in the following trivial example:
1559
1560 Example 2:
1561 if (n > 0)
1562 x = 1;
1563 ...
1564 if (n > 0)
1565 {
1566 if (m < 2)
1567 .. = x;
1568 }
1569
1570 Predicate analysis needs to compute the composite predicate:
1571
1572 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1573 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1574 (the predicate chain for phi operand defs can be computed
1575 starting from a bb that is control equivalent to the phi's
1576 bb and is dominating the operand def.)
1577
1578 and check overlapping:
1579 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1580 <==> false
1581
1582    This implementation provides a framework that can handle such
1583    scenarios.  (Note that many simple cases are handled properly
1584    without the predicate analysis -- this is due to the jump threading
1585    transformation, which eliminates the merge point and thus makes
1586    path-sensitive analysis unnecessary.)
1587
1588 PHI is the phi node whose incoming (undefined) paths need to be
1589 pruned, and UNINIT_OPNDS is the bitmap holding uninit operand
1590 positions. VISITED_PHIS is the pointer set of phi stmts being
1591 checked. */
1592
1593 static bool
1594 use_pred_not_overlap_with_undef_path_pred (pred_chain_union preds,
1595 gphi *phi, unsigned uninit_opnds,
1596 hash_set<gphi *> *visited_phis)
1597 {
1598 unsigned int i, n;
1599 gimple *flag_def = 0;
1600 tree boundary_cst = 0;
1601 enum tree_code cmp_code;
1602 bool swap_cond = false;
1603 bool invert = false;
1604 pred_chain the_pred_chain = vNULL;
1605 bitmap visited_flag_phis = NULL;
1606 bool all_pruned = false;
1607 size_t num_preds = preds.length ();
1608
1609 gcc_assert (num_preds > 0);
1610 /* Find within the common prefix of multiple predicate chains
1611 a predicate that is a comparison of a flag variable against
1612 a constant. */
1613 the_pred_chain = preds[0];
1614 n = the_pred_chain.length ();
1615 for (i = 0; i < n; i++)
1616 {
1617 tree cond_lhs, cond_rhs, flag = 0;
1618
1619 pred_info the_pred = the_pred_chain[i];
1620
1621 invert = the_pred.invert;
1622 cond_lhs = the_pred.pred_lhs;
1623 cond_rhs = the_pred.pred_rhs;
1624 cmp_code = the_pred.cond_code;
1625
1626 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1627 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1628 {
1629 boundary_cst = cond_rhs;
1630 flag = cond_lhs;
1631 }
1632 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1633 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1634 {
1635 boundary_cst = cond_lhs;
1636 flag = cond_rhs;
1637 swap_cond = true;
1638 }
1639
1640 if (!flag)
1641 continue;
1642
1643 flag_def = SSA_NAME_DEF_STMT (flag);
1644
1645 if (!flag_def)
1646 continue;
1647
1648 if ((gimple_code (flag_def) == GIMPLE_PHI)
1649 && (gimple_bb (flag_def) == gimple_bb (phi))
1650 && find_matching_predicate_in_rest_chains (the_pred, preds,
1651 num_preds))
1652 break;
1653
1654 flag_def = 0;
1655 }
1656
1657 if (!flag_def)
1658 return false;
1659
1660   /* Now check that every uninit incoming edge has a constant flag value
1661      that conflicts with the use guard/predicate.  */
1662 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1663
1664 if (cmp_code == ERROR_MARK)
1665 return false;
1666
1667 all_pruned = prune_uninit_phi_opnds
1668 (phi, uninit_opnds, as_a<gphi *> (flag_def), boundary_cst, cmp_code,
1669 visited_phis, &visited_flag_phis);
1670
1671 if (visited_flag_phis)
1672 BITMAP_FREE (visited_flag_phis);
1673
1674 return all_pruned;
1675 }
1676
1677 /* The helper function returns true if two predicates X1 and X2
1678    are equivalent.  It assumes the expressions have already been
1679    properly re-associated.  */
1680
1681 static inline bool
1682 pred_equal_p (pred_info x1, pred_info x2)
1683 {
1684 enum tree_code c1, c2;
1685 if (!operand_equal_p (x1.pred_lhs, x2.pred_lhs, 0)
1686 || !operand_equal_p (x1.pred_rhs, x2.pred_rhs, 0))
1687 return false;
1688
1689 c1 = x1.cond_code;
1690 if (x1.invert != x2.invert
1691 && TREE_CODE_CLASS (x2.cond_code) == tcc_comparison)
1692 c2 = invert_tree_comparison (x2.cond_code, false);
1693 else
1694 c2 = x2.cond_code;
1695
1696 return c1 == c2;
1697 }
1698
1699 /* Returns true if the predication is testing !=. */
1700
1701 static inline bool
1702 is_neq_relop_p (pred_info pred)
1703 {
1704
1705 return ((pred.cond_code == NE_EXPR && !pred.invert)
1706 || (pred.cond_code == EQ_EXPR && pred.invert));
1707 }
1708
1709 /* Returns true if pred is of the form X != 0. */
1710
1711 static inline bool
1712 is_neq_zero_form_p (pred_info pred)
1713 {
1714 if (!is_neq_relop_p (pred) || !integer_zerop (pred.pred_rhs)
1715 || TREE_CODE (pred.pred_lhs) != SSA_NAME)
1716 return false;
1717 return true;
1718 }
1719
1720 /* The helper function returns true if predicate X1
1721    is equivalent to X2 != 0.  */
1722
1723 static inline bool
1724 pred_expr_equal_p (pred_info x1, tree x2)
1725 {
1726 if (!is_neq_zero_form_p (x1))
1727 return false;
1728
1729 return operand_equal_p (x1.pred_lhs, x2, 0);
1730 }
1731
1732 /* Returns true if the domain of the single predicate expression
1733    EXPR1 is a subset of that of EXPR2.  Returns false if it
1734    cannot be proved.  */
1735
1736 static bool
1737 is_pred_expr_subset_of (pred_info expr1, pred_info expr2)
1738 {
1739 enum tree_code code1, code2;
1740
1741 if (pred_equal_p (expr1, expr2))
1742 return true;
1743
1744 if ((TREE_CODE (expr1.pred_rhs) != INTEGER_CST)
1745 || (TREE_CODE (expr2.pred_rhs) != INTEGER_CST))
1746 return false;
1747
1748 if (!operand_equal_p (expr1.pred_lhs, expr2.pred_lhs, 0))
1749 return false;
1750
1751 code1 = expr1.cond_code;
1752 if (expr1.invert)
1753 code1 = invert_tree_comparison (code1, false);
1754 code2 = expr2.cond_code;
1755 if (expr2.invert)
1756 code2 = invert_tree_comparison (code2, false);
1757
1758 if (code2 == NE_EXPR && code1 == NE_EXPR)
1759 return false;
1760
1761 if (code2 == NE_EXPR)
1762 return !value_sat_pred_p (expr2.pred_rhs, expr1.pred_rhs, code1);
1763
1764 if (code1 == EQ_EXPR)
1765 return value_sat_pred_p (expr1.pred_rhs, expr2.pred_rhs, code2);
1766
1767 if (code1 == code2)
1768 return value_sat_pred_p (expr1.pred_rhs, expr2.pred_rhs, code2,
1769 code1 == BIT_AND_EXPR);
1770
1771 return false;
1772 }
1773
1774 /* Returns true if the domain of PRED1 is a subset
1775 of that of PRED2. Returns false if it cannot be proved so. */
1776
1777 static bool
1778 is_pred_chain_subset_of (pred_chain pred1, pred_chain pred2)
1779 {
1780 size_t np1, np2, i1, i2;
1781
1782 np1 = pred1.length ();
1783 np2 = pred2.length ();
1784
1785 for (i2 = 0; i2 < np2; i2++)
1786 {
1787 bool found = false;
1788 pred_info info2 = pred2[i2];
1789 for (i1 = 0; i1 < np1; i1++)
1790 {
1791 pred_info info1 = pred1[i1];
1792 if (is_pred_expr_subset_of (info1, info2))
1793 {
1794 found = true;
1795 break;
1796 }
1797 }
1798 if (!found)
1799 return false;
1800 }
1801 return true;
1802 }
1803
1804 /* Returns true if the domain defined by
1805 one pred chain ONE_PRED is a subset of the domain
1806 of *PREDS. It returns false if ONE_PRED's domain is
1807 not a subset of any of the sub-domains of PREDS
1808    (corresponding to each individual chain in it), even
1809    though it may still be a subset of the whole domain
1810    of PREDS, which is the union (ORed) of all its subdomains.
1811 In other words, the result is conservative. */
1812
1813 static bool
1814 is_included_in (pred_chain one_pred, pred_chain_union preds)
1815 {
1816 size_t i;
1817 size_t n = preds.length ();
1818
1819 for (i = 0; i < n; i++)
1820 {
1821 if (is_pred_chain_subset_of (one_pred, preds[i]))
1822 return true;
1823 }
1824
1825 return false;
1826 }
1827
1828 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1829 true if the domain defined by PREDS1 is a superset
1830 of PREDS2's domain. N1 and N2 are array sizes of PREDS1 and
1831 PREDS2 respectively. The implementation chooses not to build
1832    generic trees (and rely on the folding capability of the
1833 compiler), but instead performs brute force comparison of
1834 individual predicate chains (won't be a compile time problem
1835 as the chains are pretty short). When the function returns
1836 false, it does not necessarily mean *PREDS1 is not a superset
1837    of *PREDS2, but means it may not be so, since the analysis cannot
1838 prove it. In such cases, false warnings may still be
1839 emitted. */
1840
1841 static bool
1842 is_superset_of (pred_chain_union preds1, pred_chain_union preds2)
1843 {
1844 size_t i, n2;
1845 pred_chain one_pred_chain = vNULL;
1846
1847 n2 = preds2.length ();
1848
1849 for (i = 0; i < n2; i++)
1850 {
1851 one_pred_chain = preds2[i];
1852 if (!is_included_in (one_pred_chain, preds1))
1853 return false;
1854 }
1855
1856 return true;
1857 }
1858
1859 /* Returns true if X1 is the negate of X2. */
1860
1861 static inline bool
1862 pred_neg_p (pred_info x1, pred_info x2)
1863 {
1864 enum tree_code c1, c2;
1865 if (!operand_equal_p (x1.pred_lhs, x2.pred_lhs, 0)
1866 || !operand_equal_p (x1.pred_rhs, x2.pred_rhs, 0))
1867 return false;
1868
1869 c1 = x1.cond_code;
1870 if (x1.invert == x2.invert)
1871 c2 = invert_tree_comparison (x2.cond_code, false);
1872 else
1873 c2 = x2.cond_code;
1874
1875 return c1 == c2;
1876 }
1877
1878 /* 1) ((x IOR y) != 0) AND (x != 0) is equivalent to (x != 0);
1879 2) (X AND Y) OR (!X AND Y) is equivalent to Y;
1880 3) X OR (!X AND Y) is equivalent to (X OR Y);
1881    4) ((x IAND y) != 0) OR (x != 0 AND y != 0) is equivalent to
1882 (x != 0 AND y != 0)
1883 5) (X AND Y) OR (!X AND Z) OR (!Y AND Z) is equivalent to
1884 (X AND Y) OR Z
1885
1886 PREDS is the predicate chains, and N is the number of chains. */
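/* For illustration (not from the original sources), rule 1 typically
   arises from source like:

     flag = a | b;
     if (flag)          // (a IOR b) != 0
       if (a)           // a != 0
         use (x);

   where the use predicate ((a IOR b) != 0) .AND. (a != 0) simplifies
   to just (a != 0).  */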
1887
1888 /* Helper function to implement rule 1 above. ONE_CHAIN is
1889 the AND predication to be simplified. */
1890
1891 static void
1892 simplify_pred (pred_chain *one_chain)
1893 {
1894 size_t i, j, n;
1895 bool simplified = false;
1896 pred_chain s_chain = vNULL;
1897
1898 n = one_chain->length ();
1899
1900 for (i = 0; i < n; i++)
1901 {
1902 pred_info *a_pred = &(*one_chain)[i];
1903
1904 if (!a_pred->pred_lhs)
1905 continue;
1906 if (!is_neq_zero_form_p (*a_pred))
1907 continue;
1908
1909 gimple *def_stmt = SSA_NAME_DEF_STMT (a_pred->pred_lhs);
1910 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
1911 continue;
1912 if (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
1913 {
1914 for (j = 0; j < n; j++)
1915 {
1916 pred_info *b_pred = &(*one_chain)[j];
1917
1918 if (!b_pred->pred_lhs)
1919 continue;
1920 if (!is_neq_zero_form_p (*b_pred))
1921 continue;
1922
1923 if (pred_expr_equal_p (*b_pred, gimple_assign_rhs1 (def_stmt))
1924 || pred_expr_equal_p (*b_pred, gimple_assign_rhs2 (def_stmt)))
1925 {
1926 /* Mark a_pred for removal. */
1927 a_pred->pred_lhs = NULL;
1928 a_pred->pred_rhs = NULL;
1929 simplified = true;
1930 break;
1931 }
1932 }
1933 }
1934 }
1935
1936 if (!simplified)
1937 return;
1938
1939 for (i = 0; i < n; i++)
1940 {
1941 pred_info *a_pred = &(*one_chain)[i];
1942 if (!a_pred->pred_lhs)
1943 continue;
1944 s_chain.safe_push (*a_pred);
1945 }
1946
1947 one_chain->release ();
1948 *one_chain = s_chain;
1949 }
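
/* As an illustration of rule 1 (SSA names made up): given the chain
   (_t != 0) AND (x_1 != 0) where _t is defined by _t = x_1 | y_2,
   the (_t != 0) predicate is redundant and is dropped, leaving just
   (x_1 != 0).  */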
1950
1951 /* The helper function implements rule 2 above for the
1952 OR predicate PREDS.
1953
1954 2) (X AND Y) OR (!X AND Y) is equivalent to Y. */
1955
1956 static bool
1957 simplify_preds_2 (pred_chain_union *preds)
1958 {
1959 size_t i, j, n;
1960 bool simplified = false;
1961 pred_chain_union s_preds = vNULL;
1962
1963 /* (X AND Y) OR (!X AND Y) is equivalent to Y.
1964 (X AND Y) OR (X AND !Y) is equivalent to X. */
1965
1966 n = preds->length ();
1967 for (i = 0; i < n; i++)
1968 {
1969 pred_info x, y;
1970 pred_chain *a_chain = &(*preds)[i];
1971
1972 if (a_chain->length () != 2)
1973 continue;
1974
1975 x = (*a_chain)[0];
1976 y = (*a_chain)[1];
1977
1978 for (j = 0; j < n; j++)
1979 {
1980 pred_chain *b_chain;
1981 pred_info x2, y2;
1982
1983 if (j == i)
1984 continue;
1985
1986 b_chain = &(*preds)[j];
1987 if (b_chain->length () != 2)
1988 continue;
1989
1990 x2 = (*b_chain)[0];
1991 y2 = (*b_chain)[1];
1992
1993 if (pred_equal_p (x, x2) && pred_neg_p (y, y2))
1994 {
1995 /* Kill a_chain. */
1996 a_chain->release ();
1997 b_chain->release ();
1998 b_chain->safe_push (x);
1999 simplified = true;
2000 break;
2001 }
2002 if (pred_neg_p (x, x2) && pred_equal_p (y, y2))
2003 {
2004 /* Kill a_chain. */
2005 a_chain->release ();
2006 b_chain->release ();
2007 b_chain->safe_push (y);
2008 simplified = true;
2009 break;
2010 }
2011 }
2012 }
2013 /* Now clean up the chain. */
2014 if (simplified)
2015 {
2016 for (i = 0; i < n; i++)
2017 {
2018 if ((*preds)[i].is_empty ())
2019 continue;
2020 s_preds.safe_push ((*preds)[i]);
2021 }
2022 preds->release ();
2023 (*preds) = s_preds;
2024 s_preds = vNULL;
2025 }
2026
2027 return simplified;
2028 }
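
/* As an illustration of rule 2 (hypothetical chains): the union
   (x_1 > 0 AND y_2 != 0) OR (x_1 > 0 AND y_2 == 0) simplifies to
   (x_1 > 0), since the two chains agree on the first predicate and
   differ only in a predicate and its negation.  */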
2029
2030 /* The helper function implements rule 3 above for the
2031 OR predicate PREDS.
2032
2033 3) x OR (!x AND y) is equivalent to x OR y. */
2034
2035 static bool
2036 simplify_preds_3 (pred_chain_union *preds)
2037 {
2038 size_t i, j, n;
2039 bool simplified = false;
2040
2041 /* Now iteratively simplify X OR (!X AND Z ..)
2042 into X OR (Z ...). */
2043
2044 n = preds->length ();
2045 if (n < 2)
2046 return false;
2047
2048 for (i = 0; i < n; i++)
2049 {
2050 pred_info x;
2051 pred_chain *a_chain = &(*preds)[i];
2052
2053 if (a_chain->length () != 1)
2054 continue;
2055
2056 x = (*a_chain)[0];
2057
2058 for (j = 0; j < n; j++)
2059 {
2060 pred_chain *b_chain;
2061 pred_info x2;
2062 size_t k;
2063
2064 if (j == i)
2065 continue;
2066
2067 b_chain = &(*preds)[j];
2068 if (b_chain->length () < 2)
2069 continue;
2070
2071 for (k = 0; k < b_chain->length (); k++)
2072 {
2073 x2 = (*b_chain)[k];
2074 if (pred_neg_p (x, x2))
2075 {
2076 b_chain->unordered_remove (k);
2077 simplified = true;
2078 break;
2079 }
2080 }
2081 }
2082 }
2083 return simplified;
2084 }
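
/* As an illustration of rule 3 (hypothetical chains): the union
   (x_1 > 0) OR (x_1 <= 0 AND y_2 != 0) simplifies to
   (x_1 > 0) OR (y_2 != 0); the (x_1 <= 0) term is dropped from the
   second chain because it is the negation of the first chain.  */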
2085
2086 /* The helper function implements rule 4 above for the
2087 OR predicate PREDS.
2088
2089 4) ((x AND y) != 0) OR (x != 0 AND y != 0) is equivalent to
2090 (x != 0 AND y != 0). */
2091
2092 static bool
2093 simplify_preds_4 (pred_chain_union *preds)
2094 {
2095 size_t i, j, n;
2096 bool simplified = false;
2097 pred_chain_union s_preds = vNULL;
2098 gimple *def_stmt;
2099
2100 n = preds->length ();
2101 for (i = 0; i < n; i++)
2102 {
2103 pred_info z;
2104 pred_chain *a_chain = &(*preds)[i];
2105
2106 if (a_chain->length () != 1)
2107 continue;
2108
2109 z = (*a_chain)[0];
2110
2111 if (!is_neq_zero_form_p (z))
2112 continue;
2113
2114 def_stmt = SSA_NAME_DEF_STMT (z.pred_lhs);
2115 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2116 continue;
2117
2118 if (gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2119 continue;
2120
2121 for (j = 0; j < n; j++)
2122 {
2123 pred_chain *b_chain;
2124 pred_info x2, y2;
2125
2126 if (j == i)
2127 continue;
2128
2129 b_chain = &(*preds)[j];
2130 if (b_chain->length () != 2)
2131 continue;
2132
2133 x2 = (*b_chain)[0];
2134 y2 = (*b_chain)[1];
2135 if (!is_neq_zero_form_p (x2) || !is_neq_zero_form_p (y2))
2136 continue;
2137
2138 if ((pred_expr_equal_p (x2, gimple_assign_rhs1 (def_stmt))
2139 && pred_expr_equal_p (y2, gimple_assign_rhs2 (def_stmt)))
2140 || (pred_expr_equal_p (x2, gimple_assign_rhs2 (def_stmt))
2141 && pred_expr_equal_p (y2, gimple_assign_rhs1 (def_stmt))))
2142 {
2143 /* Kill a_chain. */
2144 a_chain->release ();
2145 simplified = true;
2146 break;
2147 }
2148 }
2149 }
2150 /* Now clean up the chain. */
2151 if (simplified)
2152 {
2153 for (i = 0; i < n; i++)
2154 {
2155 if ((*preds)[i].is_empty ())
2156 continue;
2157 s_preds.safe_push ((*preds)[i]);
2158 }
2159
2160 preds->release ();
2161 (*preds) = s_preds;
2162 s_preds = vNULL;
2163 }
2164
2165 return simplified;
2166 }
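
/* As an illustration of rule 4 (SSA names made up): given the union
   (_t != 0) OR (x_1 != 0 AND y_2 != 0) where _t is defined by
   _t = x_1 & y_2, the single-predicate chain (_t != 0) is redundant
   and is removed, leaving (x_1 != 0 AND y_2 != 0).  */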
2167
2168 /* This function simplifies predicates in PREDS. */
2169
2170 static void
2171 simplify_preds (pred_chain_union *preds, gimple *use_or_def, bool is_use)
2172 {
2173 size_t i, n;
2174 bool changed = false;
2175
2176 if (dump_file && dump_flags & TDF_DETAILS)
2177 {
2178 fprintf (dump_file, "[BEFORE SIMPLIFICATION -- ");
2179 dump_predicates (use_or_def, *preds, is_use ? "[USE]:\n" : "[DEF]:\n");
2180 }
2181
2182 for (i = 0; i < preds->length (); i++)
2183 simplify_pred (&(*preds)[i]);
2184
2185 n = preds->length ();
2186 if (n < 2)
2187 return;
2188
2189 do
2190 {
2191 changed = false;
2192 if (simplify_preds_2 (preds))
2193 changed = true;
2194
2195 /* Now iteratively simplify X OR (!X AND Z ..)
2196 into X OR (Z ...). */
2197 if (simplify_preds_3 (preds))
2198 changed = true;
2199
2200 if (simplify_preds_4 (preds))
2201 changed = true;
2202 }
2203 while (changed);
2204
2205 return;
2206 }
2207
2208 /* This is a helper function which attempts to normalize predicate chains
2209 by following UD chains. It basically builds up a big tree of either IOR
2210 operations or AND operations, and converts the IOR tree into a
2211 pred_chain_union and the BIT_AND tree into a pred_chain.
2212 Example:
2213
2214 _3 = _2 RELOP1 _1;
2215 _6 = _5 RELOP2 _4;
2216 _9 = _8 RELOP3 _7;
2217 _10 = _3 | _6;
2218 _12 = _9 | _0;
2219 _t = _10 | _12;
2220
2221 then _t != 0 will be normalized into a pred_chain_union
2222
2223 (_2 RELOP1 _1) OR (_5 RELOP2 _4) OR (_8 RELOP3 _7) OR (_0 != 0)
2224
2225 Similarly given,
2226
2227 _3 = _2 RELOP1 _1;
2228 _6 = _5 RELOP2 _4;
2229 _9 = _8 RELOP3 _7;
2230 _10 = _3 & _6;
2231 _12 = _9 & _0;
2232 _t = _10 & _12;
2233 then _t != 0 will be normalized into a pred_chain:
2234 (_2 RELOP1 _1) AND (_5 RELOP2 _4) AND (_8 RELOP3 _7) AND (_0 != 0)
2235
2236 */
2237
2238 /* This is a helper function that stores a PRED into NORM_PREDS. */
2239
2240 inline static void
2241 push_pred (pred_chain_union *norm_preds, pred_info pred)
2242 {
2243 pred_chain pred_chain = vNULL;
2244 pred_chain.safe_push (pred);
2245 norm_preds->safe_push (pred_chain);
2246 }
2247
2248 /* A helper function that creates a predicate of the form
2249 OP != 0 and pushes it onto WORK_LIST if OP is not already in MARK_SET. */
2250
2251 inline static void
2252 push_to_worklist (tree op, vec<pred_info, va_heap, vl_ptr> *work_list,
2253 hash_set<tree> *mark_set)
2254 {
2255 if (mark_set->contains (op))
2256 return;
2257 mark_set->add (op);
2258
2259 pred_info arg_pred;
2260 arg_pred.pred_lhs = op;
2261 arg_pred.pred_rhs = integer_zero_node;
2262 arg_pred.cond_code = NE_EXPR;
2263 arg_pred.invert = false;
2264 work_list->safe_push (arg_pred);
2265 }
2266
2267 /* A helper that generates a pred_info from a gimple assignment
2268 CMP_ASSIGN with comparison rhs. */
2269
2270 static pred_info
2271 get_pred_info_from_cmp (gimple *cmp_assign)
2272 {
2273 pred_info n_pred;
2274 n_pred.pred_lhs = gimple_assign_rhs1 (cmp_assign);
2275 n_pred.pred_rhs = gimple_assign_rhs2 (cmp_assign);
2276 n_pred.cond_code = gimple_assign_rhs_code (cmp_assign);
2277 n_pred.invert = false;
2278 return n_pred;
2279 }
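
/* For example, for the hypothetical assignment _5 = a_1 > b_2, the
   returned pred_info has pred_lhs = a_1, pred_rhs = b_2,
   cond_code = GT_EXPR and invert = false.  */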
2280
2281 /* Returns true if the PHI is a degenerate phi whose arguments
2282 are all defined by the same comparison (relop). In that case,
2283 *PRED_P is updated to that predicate. */
2284
2285 static bool
2286 is_degenerated_phi (gimple *phi, pred_info *pred_p)
2287 {
2288 int i, n;
2289 tree op0;
2290 gimple *def0;
2291 pred_info pred0;
2292
2293 n = gimple_phi_num_args (phi);
2294 op0 = gimple_phi_arg_def (phi, 0);
2295
2296 if (TREE_CODE (op0) != SSA_NAME)
2297 return false;
2298
2299 def0 = SSA_NAME_DEF_STMT (op0);
2300 if (gimple_code (def0) != GIMPLE_ASSIGN)
2301 return false;
2302 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def0)) != tcc_comparison)
2303 return false;
2304 pred0 = get_pred_info_from_cmp (def0);
2305
2306 for (i = 1; i < n; ++i)
2307 {
2308 gimple *def;
2309 pred_info pred;
2310 tree op = gimple_phi_arg_def (phi, i);
2311
2312 if (TREE_CODE (op) != SSA_NAME)
2313 return false;
2314
2315 def = SSA_NAME_DEF_STMT (op);
2316 if (gimple_code (def) != GIMPLE_ASSIGN)
2317 return false;
2318 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
2319 return false;
2320 pred = get_pred_info_from_cmp (def);
2321 if (!pred_equal_p (pred, pred0))
2322 return false;
2323 }
2324
2325 *pred_p = pred0;
2326 return true;
2327 }
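
/* For example (made-up IR): for the phi
     x_4 = PHI <_3(2), _6(3)>
   where _3 = a_1 > 0 and _6 = a_1 > 0, all arguments are defined by
   the same comparison, so the phi is degenerate and *PRED_P is set
   to (a_1 > 0).  */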
2328
2329 /* Normalize one predicate PRED
2330 1) if PRED can no longer be normalized, put it into NORM_PREDS.
2331 2) otherwise if PRED is of the form x != 0, follow x's definition
2332 and put normalized predicates into WORK_LIST. */
2333
2334 static void
2335 normalize_one_pred_1 (pred_chain_union *norm_preds,
2336 pred_chain *norm_chain,
2337 pred_info pred,
2338 enum tree_code and_or_code,
2339 vec<pred_info, va_heap, vl_ptr> *work_list,
2340 hash_set<tree> *mark_set)
2341 {
2342 if (!is_neq_zero_form_p (pred))
2343 {
2344 if (and_or_code == BIT_IOR_EXPR)
2345 push_pred (norm_preds, pred);
2346 else
2347 norm_chain->safe_push (pred);
2348 return;
2349 }
2350
2351 gimple *def_stmt = SSA_NAME_DEF_STMT (pred.pred_lhs);
2352
2353 if (gimple_code (def_stmt) == GIMPLE_PHI
2354 && is_degenerated_phi (def_stmt, &pred))
2355 work_list->safe_push (pred);
2356 else if (gimple_code (def_stmt) == GIMPLE_PHI && and_or_code == BIT_IOR_EXPR)
2357 {
2358 int i, n;
2359 n = gimple_phi_num_args (def_stmt);
2360
2361 /* If we see a non-zero constant, we should punt. The predicate
2362 should be the one guarding the phi edge. */
2363 for (i = 0; i < n; ++i)
2364 {
2365 tree op = gimple_phi_arg_def (def_stmt, i);
2366 if (TREE_CODE (op) == INTEGER_CST && !integer_zerop (op))
2367 {
2368 push_pred (norm_preds, pred);
2369 return;
2370 }
2371 }
2372
2373 for (i = 0; i < n; ++i)
2374 {
2375 tree op = gimple_phi_arg_def (def_stmt, i);
2376 if (integer_zerop (op))
2377 continue;
2378
2379 push_to_worklist (op, work_list, mark_set);
2380 }
2381 }
2382 else if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2383 {
2384 if (and_or_code == BIT_IOR_EXPR)
2385 push_pred (norm_preds, pred);
2386 else
2387 norm_chain->safe_push (pred);
2388 }
2389 else if (gimple_assign_rhs_code (def_stmt) == and_or_code)
2390 {
2391 /* Avoid splitting up bit manipulations like x & 3 or y | 1. */
2392 if (is_gimple_min_invariant (gimple_assign_rhs2 (def_stmt)))
2393 {
2394 /* But treat x & 3 as a condition. */
2395 if (and_or_code == BIT_AND_EXPR)
2396 {
2397 pred_info n_pred;
2398 n_pred.pred_lhs = gimple_assign_rhs1 (def_stmt);
2399 n_pred.pred_rhs = gimple_assign_rhs2 (def_stmt);
2400 n_pred.cond_code = and_or_code;
2401 n_pred.invert = false;
2402 norm_chain->safe_push (n_pred);
2403 }
2404 }
2405 else
2406 {
2407 push_to_worklist (gimple_assign_rhs1 (def_stmt), work_list, mark_set);
2408 push_to_worklist (gimple_assign_rhs2 (def_stmt), work_list, mark_set);
2409 }
2410 }
2411 else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2412 == tcc_comparison)
2413 {
2414 pred_info n_pred = get_pred_info_from_cmp (def_stmt);
2415 if (and_or_code == BIT_IOR_EXPR)
2416 push_pred (norm_preds, n_pred);
2417 else
2418 norm_chain->safe_push (n_pred);
2419 }
2420 else
2421 {
2422 if (and_or_code == BIT_IOR_EXPR)
2423 push_pred (norm_preds, pred);
2424 else
2425 norm_chain->safe_push (pred);
2426 }
2427 }
2428
2429 /* Normalize PRED and store the normalized predicates into NORM_PREDS. */
2430
2431 static void
2432 normalize_one_pred (pred_chain_union *norm_preds, pred_info pred)
2433 {
2434 vec<pred_info, va_heap, vl_ptr> work_list = vNULL;
2435 enum tree_code and_or_code = ERROR_MARK;
2436 pred_chain norm_chain = vNULL;
2437
2438 if (!is_neq_zero_form_p (pred))
2439 {
2440 push_pred (norm_preds, pred);
2441 return;
2442 }
2443
2444 gimple *def_stmt = SSA_NAME_DEF_STMT (pred.pred_lhs);
2445 if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
2446 and_or_code = gimple_assign_rhs_code (def_stmt);
2447 if (and_or_code != BIT_IOR_EXPR && and_or_code != BIT_AND_EXPR)
2448 {
2449 if (TREE_CODE_CLASS (and_or_code) == tcc_comparison)
2450 {
2451 pred_info n_pred = get_pred_info_from_cmp (def_stmt);
2452 push_pred (norm_preds, n_pred);
2453 }
2454 else
2455 push_pred (norm_preds, pred);
2456 return;
2457 }
2458
2459 work_list.safe_push (pred);
2460 hash_set<tree> mark_set;
2461
2462 while (!work_list.is_empty ())
2463 {
2464 pred_info a_pred = work_list.pop ();
2465 normalize_one_pred_1 (norm_preds, &norm_chain, a_pred, and_or_code,
2466 &work_list, &mark_set);
2467 }
2468 if (and_or_code == BIT_AND_EXPR)
2469 norm_preds->safe_push (norm_chain);
2470
2471 work_list.release ();
2472 }
2473
2474 static void
2475 normalize_one_pred_chain (pred_chain_union *norm_preds, pred_chain one_chain)
2476 {
2477 vec<pred_info, va_heap, vl_ptr> work_list = vNULL;
2478 hash_set<tree> mark_set;
2479 pred_chain norm_chain = vNULL;
2480 size_t i;
2481
2482 for (i = 0; i < one_chain.length (); i++)
2483 {
2484 work_list.safe_push (one_chain[i]);
2485 mark_set.add (one_chain[i].pred_lhs);
2486 }
2487
2488 while (!work_list.is_empty ())
2489 {
2490 pred_info a_pred = work_list.pop ();
2491 normalize_one_pred_1 (0, &norm_chain, a_pred, BIT_AND_EXPR, &work_list,
2492 &mark_set);
2493 }
2494
2495 norm_preds->safe_push (norm_chain);
2496 work_list.release ();
2497 }
2498
2499 /* Normalize predicate chains PREDS and returns the normalized one. */
2500
2501 static pred_chain_union
2502 normalize_preds (pred_chain_union preds, gimple *use_or_def, bool is_use)
2503 {
2504 pred_chain_union norm_preds = vNULL;
2505 size_t n = preds.length ();
2506 size_t i;
2507
2508 if (dump_file && dump_flags & TDF_DETAILS)
2509 {
2510 fprintf (dump_file, "[BEFORE NORMALIZATION --");
2511 dump_predicates (use_or_def, preds, is_use ? "[USE]:\n" : "[DEF]:\n");
2512 }
2513
2514 for (i = 0; i < n; i++)
2515 {
2516 if (preds[i].length () != 1)
2517 normalize_one_pred_chain (&norm_preds, preds[i]);
2518 else
2519 {
2520 normalize_one_pred (&norm_preds, preds[i][0]);
2521 preds[i].release ();
2522 }
2523 }
2524
2525 if (dump_file)
2526 {
2527 fprintf (dump_file, "[AFTER NORMALIZATION -- ");
2528 dump_predicates (use_or_def, norm_preds,
2529 is_use ? "[USE]:\n" : "[DEF]:\n");
2530 }
2531
2532 destroy_predicate_vecs (&preds);
2533 return norm_preds;
2534 }
2535
2536 /* Return TRUE if PREDICATE can be invalidated by any individual
2537 predicate in USE_GUARD. */
2538
2539 static bool
2540 can_one_predicate_be_invalidated_p (pred_info predicate,
2541 pred_chain use_guard)
2542 {
2543 if (dump_file && dump_flags & TDF_DETAILS)
2544 {
2545 fprintf (dump_file, "Testing if this predicate: ");
2546 dump_pred_info (predicate);
2547 fprintf (dump_file, "\n...can be invalidated by a USE guard of: ");
2548 dump_pred_chain (use_guard);
2549 }
2550 for (size_t i = 0; i < use_guard.length (); ++i)
2551 {
2552 /* NOTE: This is a very simple check, and only understands an
2553 exact opposite. So, [i == 0] is currently only invalidated
2554 by [.NOT. i == 0] or [i != 0]. Ideally we should also
2555 invalidate with say [i > 5] or [i == 8]. There is certainly
2556 room for improvement here. */
2557 if (pred_neg_p (predicate, use_guard[i]))
2558 {
2559 if (dump_file && dump_flags & TDF_DETAILS)
2560 {
2561 fprintf (dump_file, " Predicate was invalidated by: ");
2562 dump_pred_info (use_guard[i]);
2563 fputc ('\n', dump_file);
2564 }
2565 return true;
2566 }
2567 }
2568 return false;
2569 }
2570
2571 /* Return TRUE if all predicates in UNINIT_PRED are invalidated by
2572 USE_GUARD being true. */
2573
2574 static bool
2575 can_chain_union_be_invalidated_p (pred_chain_union uninit_pred,
2576 pred_chain use_guard)
2577 {
2578 if (uninit_pred.is_empty ())
2579 return false;
2580 if (dump_file && dump_flags & TDF_DETAILS)
2581 dump_predicates (NULL, uninit_pred,
2582 "Testing if anything here can be invalidated: ");
2583 for (size_t i = 0; i < uninit_pred.length (); ++i)
2584 {
2585 pred_chain c = uninit_pred[i];
2586 size_t j;
2587 for (j = 0; j < c.length (); ++j)
2588 if (can_one_predicate_be_invalidated_p (c[j], use_guard))
2589 break;
2590
2591 /* If we were unable to invalidate any predicate in C, then there
2592 is a viable path from entry to the PHI where the PHI takes
2593 an uninitialized value and continues to a use of the PHI. */
2594 if (j == c.length ())
2595 return false;
2596 }
2597 return true;
2598 }
2599
2600 /* Return TRUE if none of the uninitialized operands in UNINIT_OPNDS
2601 can actually happen if we arrived at a use for PHI.
2602
2603 PHI_USE_GUARDS are the guard conditions for the use of the PHI. */
2604
2605 static bool
2606 uninit_uses_cannot_happen (gphi *phi, unsigned uninit_opnds,
2607 pred_chain_union phi_use_guards)
2608 {
2609 unsigned phi_args = gimple_phi_num_args (phi);
2610 if (phi_args > max_phi_args)
2611 return false;
2612
2613 /* PHI_USE_GUARDS are OR'ed together. If we have more than one
2614 possible guard, there's no way of knowing which guard was true.
2615 Since we need to be absolutely sure that the uninitialized
2616 operands will be invalidated, bail. */
2617 if (phi_use_guards.length () != 1)
2618 return false;
2619
2620 /* Look for the control dependencies of all the uninitialized
2621 operands and build guard predicates describing them. */
2622 pred_chain_union uninit_preds;
2623 bool ret = true;
2624 for (unsigned i = 0; i < phi_args; ++i)
2625 {
2626 if (!MASK_TEST_BIT (uninit_opnds, i))
2627 continue;
2628
2629 edge e = gimple_phi_arg_edge (phi, i);
2630 vec<edge> dep_chains[MAX_NUM_CHAINS];
2631 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
2632 size_t num_chains = 0;
2633 int num_calls = 0;
2634
2635 /* Build the control dependency chain for uninit operand `i'... */
2636 uninit_preds = vNULL;
2637 if (!compute_control_dep_chain (ENTRY_BLOCK_PTR_FOR_FN (cfun),
2638 e->src, dep_chains, &num_chains,
2639 &cur_chain, &num_calls))
2640 {
2641 ret = false;
2642 break;
2643 }
2644 /* ...and convert it into a set of predicates. */
2645 bool has_valid_preds
2646 = convert_control_dep_chain_into_preds (dep_chains, num_chains,
2647 &uninit_preds);
2648 for (size_t j = 0; j < num_chains; ++j)
2649 dep_chains[j].release ();
2650 if (!has_valid_preds)
2651 {
2652 ret = false;
2653 break;
2654 }
2655 simplify_preds (&uninit_preds, NULL, false);
2656 uninit_preds = normalize_preds (uninit_preds, NULL, false);
2657
2658 /* Can the guard for this uninitialized operand be invalidated
2659 by the PHI use? */
2660 if (!can_chain_union_be_invalidated_p (uninit_preds, phi_use_guards[0]))
2661 {
2662 ret = false;
2663 break;
2664 }
2665 }
2666 destroy_predicate_vecs (&uninit_preds);
2667 return ret;
2668 }
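
/* A minimal source-level sketch of the situation handled above
   (names are made up):

     int x;
     if (n_3 > 0)
       x = 1;
     ...
     if (n_3 > 0)
       use (x);

   The control dependence guarding the uninitialized phi operand is
   the negation of (n_3 > 0), while the single use guard is
   (n_3 > 0) itself; the former is invalidated by the latter, so an
   uninitialized value cannot reach the use and no warning is
   issued.  */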
2669
2670 /* Computes the predicates that guard the use and checks
2671 if the incoming paths that have empty (or possibly
2672 empty) definition can be pruned/filtered. The function returns
2673 true if it can be determined that the use of PHI's def in
2674 USE_STMT is guarded with a predicate set not overlapping with
2675 predicate sets of all runtime paths that do not have a definition.
2676
2677 Returns false if it is not or it cannot be determined. USE_BB is
2678 the bb of the use (for phi operand use, the bb is not the bb of
2679 the phi stmt, but the src bb of the operand edge).
2680
2681 UNINIT_OPNDS is a bit vector. If an operand of PHI is uninitialized, the
2682 corresponding bit in the vector is 1. VISITED_PHIS is a pointer
2683 set of phis being visited.
2684
2685 *DEF_PREDS contains the (memoized) defining predicate chains of PHI.
2686 If *DEF_PREDS is the empty vector, the defining predicate chains of
2687 PHI will be computed and stored into *DEF_PREDS as needed. */
2690
2691 static bool
2692 is_use_properly_guarded (gimple *use_stmt,
2693 basic_block use_bb,
2694 gphi *phi,
2695 unsigned uninit_opnds,
2696 pred_chain_union *def_preds,
2697 hash_set<gphi *> *visited_phis)
2698 {
2699 basic_block phi_bb;
2700 pred_chain_union preds = vNULL;
2701 bool has_valid_preds = false;
2702 bool is_properly_guarded = false;
2703
2704 if (visited_phis->add (phi))
2705 return false;
2706
2707 phi_bb = gimple_bb (phi);
2708
2709 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
2710 return false;
2711
2712 has_valid_preds = find_predicates (&preds, phi_bb, use_bb);
2713
2714 if (!has_valid_preds)
2715 {
2716 destroy_predicate_vecs (&preds);
2717 return false;
2718 }
2719
2720 /* Try to prune the dead incoming phi edges. */
2721 is_properly_guarded
2722 = use_pred_not_overlap_with_undef_path_pred (preds, phi, uninit_opnds,
2723 visited_phis);
2724
2725 /* We might be able to prove that if the control dependencies
2726 for UNINIT_OPNDS are true, that the control dependencies for
2727 USE_STMT can never be true. */
2728 if (!is_properly_guarded)
2729 is_properly_guarded |= uninit_uses_cannot_happen (phi, uninit_opnds,
2730 preds);
2731
2732 if (is_properly_guarded)
2733 {
2734 destroy_predicate_vecs (&preds);
2735 return true;
2736 }
2737
2738 if (def_preds->is_empty ())
2739 {
2740 has_valid_preds = find_def_preds (def_preds, phi);
2741
2742 if (!has_valid_preds)
2743 {
2744 destroy_predicate_vecs (&preds);
2745 return false;
2746 }
2747
2748 simplify_preds (def_preds, phi, false);
2749 *def_preds = normalize_preds (*def_preds, phi, false);
2750 }
2751
2752 simplify_preds (&preds, use_stmt, true);
2753 preds = normalize_preds (preds, use_stmt, true);
2754
2755 is_properly_guarded = is_superset_of (*def_preds, preds);
2756
2757 destroy_predicate_vecs (&preds);
2758 return is_properly_guarded;
2759 }
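
/* A minimal source-level sketch of a properly guarded use (names
   are made up):

     int x;
     if (flag_2)
       x = foo ();
     if (flag_2 && other_5)
       use (x);

   The defining predicate of x's phi is (flag_2 != 0) and the use
   predicate is (flag_2 != 0 AND other_5 != 0); the former is a
   superset of the latter, so the use is considered properly
   guarded.  */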
2760
2761 /* Searches through all uses of a potentially
2762 uninitialized variable defined by PHI and returns a use
2763 statement if the use is not properly guarded. It returns
2764 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
2765 holding the position(s) of uninit PHI operands. WORKLIST
2766 is the vector of candidate phis that may be updated by this
2767 function. ADDED_TO_WORKLIST is the pointer set tracking
2768 if the new phi is already in the worklist. */
2769
2770 static gimple *
2771 find_uninit_use (gphi *phi, unsigned uninit_opnds,
2772 vec<gphi *> *worklist,
2773 hash_set<gphi *> *added_to_worklist)
2774 {
2775 tree phi_result;
2776 use_operand_p use_p;
2777 gimple *use_stmt;
2778 imm_use_iterator iter;
2779 pred_chain_union def_preds = vNULL;
2780 gimple *ret = NULL;
2781
2782 phi_result = gimple_phi_result (phi);
2783
2784 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
2785 {
2786 basic_block use_bb;
2787
2788 use_stmt = USE_STMT (use_p);
2789 if (is_gimple_debug (use_stmt))
2790 continue;
2791
2792 if (gphi *use_phi = dyn_cast<gphi *> (use_stmt))
2793 use_bb = gimple_phi_arg_edge (use_phi,
2794 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2795 else
2796 use_bb = gimple_bb (use_stmt);
2797
2798 hash_set<gphi *> visited_phis;
2799 if (is_use_properly_guarded (use_stmt, use_bb, phi, uninit_opnds,
2800 &def_preds, &visited_phis))
2801 continue;
2802
2803 if (dump_file && (dump_flags & TDF_DETAILS))
2804 {
2805 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2806 print_gimple_stmt (dump_file, use_stmt, 0);
2807 }
2808 /* Found one real use, return. */
2809 if (gimple_code (use_stmt) != GIMPLE_PHI)
2810 {
2811 ret = use_stmt;
2812 break;
2813 }
2814
2815 /* Found a phi use that is not guarded,
2816 add the phi to the worklist. */
2817 if (!added_to_worklist->add (as_a<gphi *> (use_stmt)))
2818 {
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 {
2821 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2822 print_gimple_stmt (dump_file, use_stmt, 0);
2823 }
2824
2825 worklist->safe_push (as_a<gphi *> (use_stmt));
2826 possibly_undefined_names->add (phi_result);
2827 }
2828 }
2829
2830 destroy_predicate_vecs (&def_preds);
2831 return ret;
2832 }
2833
2834 /* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
2835 and give a warning if there exists a runtime path from the entry to a
2836 use of the PHI def that does not contain a definition. In other words,
2837 the warning is on the real use. The more dead paths the compiler can
2838 prune, the fewer false positives the warning produces. WORKLIST
2839 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
2840 a pointer set tracking if the new phi is added to the worklist or not. */
2841
2842 static void
2843 warn_uninitialized_phi (gphi *phi, vec<gphi *> *worklist,
2844 hash_set<gphi *> *added_to_worklist)
2845 {
2846 unsigned uninit_opnds;
2847 gimple *uninit_use_stmt = 0;
2848 tree uninit_op;
2849 int phiarg_index;
2850 location_t loc;
2851
2852 /* Don't look at virtual operands. */
2853 if (virtual_operand_p (gimple_phi_result (phi)))
2854 return;
2855
2856 uninit_opnds = compute_uninit_opnds_pos (phi);
2857
2858 if (MASK_EMPTY (uninit_opnds))
2859 return;
2860
2861 if (dump_file && (dump_flags & TDF_DETAILS))
2862 {
2863 fprintf (dump_file, "[CHECK]: examining phi: ");
2864 print_gimple_stmt (dump_file, phi, 0);
2865 }
2866
2867 /* Now check if we have any use of the value without proper guard. */
2868 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2869 worklist, added_to_worklist);
2870
2871 /* All uses are properly guarded. */
2872 if (!uninit_use_stmt)
2873 return;
2874
2875 phiarg_index = MASK_FIRST_SET_BIT (uninit_opnds);
2876 uninit_op = gimple_phi_arg_def (phi, phiarg_index);
2877 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2878 return;
2879 if (gimple_phi_arg_has_location (phi, phiarg_index))
2880 loc = gimple_phi_arg_location (phi, phiarg_index);
2881 else
2882 loc = UNKNOWN_LOCATION;
2883 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2884 SSA_NAME_VAR (uninit_op),
2885 "%qD may be used uninitialized in this function",
2886 uninit_use_stmt, loc);
2887 }
2888
2889 static bool
2890 gate_warn_uninitialized (void)
2891 {
2892 return warn_uninitialized || warn_maybe_uninitialized;
2893 }
2894
2895 namespace {
2896
2897 const pass_data pass_data_late_warn_uninitialized =
2898 {
2899 GIMPLE_PASS, /* type */
2900 "uninit", /* name */
2901 OPTGROUP_NONE, /* optinfo_flags */
2902 TV_NONE, /* tv_id */
2903 PROP_ssa, /* properties_required */
2904 0, /* properties_provided */
2905 0, /* properties_destroyed */
2906 0, /* todo_flags_start */
2907 0, /* todo_flags_finish */
2908 };
2909
2910 class pass_late_warn_uninitialized : public gimple_opt_pass
2911 {
2912 public:
2913 pass_late_warn_uninitialized (gcc::context *ctxt)
2914 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2915 {}
2916
2917 /* opt_pass methods: */
2918 opt_pass *clone () { return new pass_late_warn_uninitialized (m_ctxt); }
2919 virtual bool gate (function *) { return gate_warn_uninitialized (); }
2920 virtual unsigned int execute (function *);
2921
2922 }; // class pass_late_warn_uninitialized
2923
2924 unsigned int
2925 pass_late_warn_uninitialized::execute (function *fun)
2926 {
2927 basic_block bb;
2928 gphi_iterator gsi;
2929 vec<gphi *> worklist = vNULL;
2930
2931 calculate_dominance_info (CDI_DOMINATORS);
2932 calculate_dominance_info (CDI_POST_DOMINATORS);
2933 /* Re-do the plain uninitialized variable check, as optimization may have
2934 straightened control flow. Do this first so that we don't accidentally
2935 get a "may be" warning when we'd have seen an "is" warning later. */
2936 warn_uninitialized_vars (/*warn_maybe_uninitialized=*/1);
2937
2938 timevar_push (TV_TREE_UNINIT);
2939
2940 possibly_undefined_names = new hash_set<tree>;
2941 hash_set<gphi *> added_to_worklist;
2942
2943 /* Initialize the worklist.  */
2944 FOR_EACH_BB_FN (bb, fun)
2945 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2946 {
2947 gphi *phi = gsi.phi ();
2948 size_t n, i;
2949
2950 n = gimple_phi_num_args (phi);
2951
2952 /* Don't look at virtual operands. */
2953 if (virtual_operand_p (gimple_phi_result (phi)))
2954 continue;
2955
2956 for (i = 0; i < n; ++i)
2957 {
2958 tree op = gimple_phi_arg_def (phi, i);
2959 if (TREE_CODE (op) == SSA_NAME && uninit_undefined_value_p (op))
2960 {
2961 worklist.safe_push (phi);
2962 added_to_worklist.add (phi);
2963 if (dump_file && (dump_flags & TDF_DETAILS))
2964 {
2965 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2966 print_gimple_stmt (dump_file, phi, 0);
2967 }
2968 break;
2969 }
2970 }
2971 }
2972
2973 while (worklist.length () != 0)
2974 {
2975 gphi *cur_phi = 0;
2976 cur_phi = worklist.pop ();
2977 warn_uninitialized_phi (cur_phi, &worklist, &added_to_worklist);
2978 }
2979
2980 worklist.release ();
2981 delete possibly_undefined_names;
2982 possibly_undefined_names = NULL;
2983 free_dominance_info (CDI_POST_DOMINATORS);
2984 timevar_pop (TV_TREE_UNINIT);
2985 return 0;
2986 }
2987
2988 } // anon namespace
2989
2990 gimple_opt_pass *
2991 make_pass_late_warn_uninitialized (gcc::context *ctxt)
2992 {
2993 return new pass_late_warn_uninitialized (ctxt);
2994 }
2995
2996 static unsigned int
2997 execute_early_warn_uninitialized (void)
2998 {
2999 /* Currently, this pass always runs, but
3000 execute_late_warn_uninitialized only runs with optimization. With
3001 optimization we want to warn about possible uninitialized as late
3002 as possible, thus don't do it here. However, without
3003 optimization we need to warn here about "may be uninitialized". */
3004 calculate_dominance_info (CDI_POST_DOMINATORS);
3005
3006 warn_uninitialized_vars (/*warn_maybe_uninitialized=*/!optimize);
3007
3008 /* Post-dominator information cannot be reliably updated. Free it
3009 after the use. */
3010
3011 free_dominance_info (CDI_POST_DOMINATORS);
3012 return 0;
3013 }
3014
3015 namespace {
3016
3017 const pass_data pass_data_early_warn_uninitialized =
3018 {
3019 GIMPLE_PASS, /* type */
3020 "*early_warn_uninitialized", /* name */
3021 OPTGROUP_NONE, /* optinfo_flags */
3022 TV_TREE_UNINIT, /* tv_id */
3023 PROP_ssa, /* properties_required */
3024 0, /* properties_provided */
3025 0, /* properties_destroyed */
3026 0, /* todo_flags_start */
3027 0, /* todo_flags_finish */
3028 };
3029
3030 class pass_early_warn_uninitialized : public gimple_opt_pass
3031 {
3032 public:
3033 pass_early_warn_uninitialized (gcc::context *ctxt)
3034 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
3035 {}
3036
3037 /* opt_pass methods: */
3038 virtual bool gate (function *) { return gate_warn_uninitialized (); }
3039 virtual unsigned int execute (function *)
3040 {
3041 return execute_early_warn_uninitialized ();
3042 }
3043
3044 }; // class pass_early_warn_uninitialized
3045
3046 } // anon namespace
3047
3048 gimple_opt_pass *
3049 make_pass_early_warn_uninitialized (gcc::context *ctxt)
3050 {
3051 return new pass_early_warn_uninitialized (ctxt);
3052 }