]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/analyzer/engine.cc
Update copyright years.
[thirdparty/gcc.git] / gcc / analyzer / engine.cc
1 /* The analysis "engine".
2 Copyright (C) 2019-2022 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "fold-const.h"
27 #include "gcc-rich-location.h"
28 #include "alloc-pool.h"
29 #include "fibonacci_heap.h"
30 #include "shortest-paths.h"
31 #include "diagnostic-core.h"
32 #include "diagnostic-event-id.h"
33 #include "diagnostic-path.h"
34 #include "function.h"
35 #include "pretty-print.h"
36 #include "sbitmap.h"
37 #include "bitmap.h"
38 #include "tristate.h"
39 #include "ordered-hash-map.h"
40 #include "selftest.h"
41 #include "json.h"
42 #include "analyzer/analyzer.h"
43 #include "analyzer/analyzer-logging.h"
44 #include "analyzer/call-string.h"
45 #include "analyzer/program-point.h"
46 #include "analyzer/store.h"
47 #include "analyzer/region-model.h"
48 #include "analyzer/constraint-manager.h"
49 #include "analyzer/sm.h"
50 #include "analyzer/pending-diagnostic.h"
51 #include "analyzer/diagnostic-manager.h"
52 #include "cfg.h"
53 #include "basic-block.h"
54 #include "gimple.h"
55 #include "gimple-iterator.h"
56 #include "gimple-pretty-print.h"
57 #include "cgraph.h"
58 #include "digraph.h"
59 #include "analyzer/supergraph.h"
60 #include "analyzer/program-state.h"
61 #include "analyzer/exploded-graph.h"
62 #include "analyzer/analysis-plan.h"
63 #include "analyzer/checker-path.h"
64 #include "analyzer/state-purge.h"
65 #include "analyzer/bar-chart.h"
66 #include "analyzer/call-info.h"
67 #include <zlib.h>
68 #include "plugin.h"
69 #include "target.h"
70 #include <memory>
71
72 /* For an overview, see gcc/doc/analyzer.texi. */
73
74 #if ENABLE_ANALYZER
75
76 namespace ana {
77
78 /* class impl_region_model_context : public region_model_context. */
79
/* ctor for use when exploring EG.
   ENODE_FOR_DIAG is the exploded_node to blame in any diagnostics.
   OLD_STATE and NEW_STATE are the program states before/after the
   change being modelled (either may be NULL for some callers).
   UNCERTAINTY, PATH_CTXT, STMT and STMT_FINDER may each be NULL;
   all pointers are borrowed, not owned.  */

impl_region_model_context::
impl_region_model_context (exploded_graph &eg,
			   exploded_node *enode_for_diag,
			   const program_state *old_state,
			   program_state *new_state,
			   uncertainty_t *uncertainty,
			   path_context *path_ctxt,
			   const gimple *stmt,
			   stmt_finder *stmt_finder)
: m_eg (&eg), m_logger (eg.get_logger ()),
  m_enode_for_diag (enode_for_diag),
  m_old_state (old_state),
  m_new_state (new_state),
  m_stmt (stmt),
  m_stmt_finder (stmt_finder),
  m_ext_state (eg.get_ext_state ()),
  m_uncertainty (uncertainty),
  m_path_ctxt (path_ctxt)
{
}
100
/* ctor for use when no exploded_graph is available (m_eg is NULL):
   operate on STATE directly, using EXT_STATE for sm metadata.
   NOTE: with m_eg NULL, members such as warn () reject diagnostics
   rather than queuing them.  */

impl_region_model_context::
impl_region_model_context (program_state *state,
			   const extrinsic_state &ext_state,
			   uncertainty_t *uncertainty,
			   logger *logger)
: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
  m_old_state (NULL),
  m_new_state (state),
  m_stmt (NULL),
  m_stmt_finder (NULL),
  m_ext_state (ext_state),
  m_uncertainty (uncertainty),
  m_path_ctxt (NULL)
{
}
116
117 bool
118 impl_region_model_context::warn (pending_diagnostic *d)
119 {
120 LOG_FUNC (get_logger ());
121 if (m_stmt == NULL && m_stmt_finder == NULL)
122 {
123 if (get_logger ())
124 get_logger ()->log ("rejecting diagnostic: no stmt");
125 delete d;
126 return false;
127 }
128 if (m_eg)
129 {
130 m_eg->get_diagnostic_manager ().add_diagnostic
131 (m_enode_for_diag, m_enode_for_diag->get_supernode (),
132 m_stmt, m_stmt_finder, d);
133 return true;
134 }
135 else
136 {
137 delete d;
138 return false;
139 }
140 }
141
142 void
143 impl_region_model_context::on_svalue_leak (const svalue *sval)
144
145 {
146 for (sm_state_map *smap : m_new_state->m_checker_states)
147 smap->on_svalue_leak (sval, this);
148 }
149
150 void
151 impl_region_model_context::
152 on_liveness_change (const svalue_set &live_svalues,
153 const region_model *model)
154 {
155 for (sm_state_map *smap : m_new_state->m_checker_states)
156 smap->on_liveness_change (live_svalues, model, this);
157 }
158
159 void
160 impl_region_model_context::on_unknown_change (const svalue *sval,
161 bool is_mutable)
162 {
163 for (sm_state_map *smap : m_new_state->m_checker_states)
164 smap->on_unknown_change (sval, is_mutable, m_ext_state);
165 }
166
167 void
168 impl_region_model_context::on_escaped_function (tree fndecl)
169 {
170 m_eg->on_escaped_function (fndecl);
171 }
172
/* Implementation of region_model_context::get_uncertainty vfunc.
   Accessor; may return NULL (the ctors accept a NULL uncertainty).  */

uncertainty_t *
impl_region_model_context::get_uncertainty ()
{
  return m_uncertainty;
}
178
179 /* Purge state involving SVAL. The region_model has already been purged,
180 so we only need to purge other state in the program_state:
181 the sm-state. */
182
183 void
184 impl_region_model_context::purge_state_involving (const svalue *sval)
185 {
186 int i;
187 sm_state_map *smap;
188 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, i, smap)
189 smap->purge_state_involving (sval, m_ext_state);
190 }
191
192 void
193 impl_region_model_context::bifurcate (custom_edge_info *info)
194 {
195 if (m_path_ctxt)
196 m_path_ctxt->bifurcate (info);
197 else
198 delete info;
199 }
200
201 void
202 impl_region_model_context::terminate_path ()
203 {
204 if (m_path_ctxt)
205 return m_path_ctxt->terminate_path ();
206 }
207
208 bool
209 impl_region_model_context::get_malloc_map (sm_state_map **out_smap,
210 const state_machine **out_sm,
211 unsigned *out_sm_idx)
212 {
213 unsigned malloc_sm_idx;
214 if (!m_ext_state.get_sm_idx_by_name ("malloc", &malloc_sm_idx))
215 return false;
216
217 *out_smap = m_new_state->m_checker_states[malloc_sm_idx];
218 *out_sm = &m_ext_state.get_sm (malloc_sm_idx);
219 *out_sm_idx = malloc_sm_idx;
220 return true;
221 }
222
223 bool
224 impl_region_model_context::get_taint_map (sm_state_map **out_smap,
225 const state_machine **out_sm,
226 unsigned *out_sm_idx)
227 {
228 if (!m_new_state)
229 return false;
230
231 unsigned taint_sm_idx;
232 if (!m_ext_state.get_sm_idx_by_name ("taint", &taint_sm_idx))
233 return false;
234
235 *out_smap = m_new_state->m_checker_states[taint_sm_idx];
236 *out_sm = &m_ext_state.get_sm (taint_sm_idx);
237 *out_sm_idx = taint_sm_idx;
238 return true;
239 }
240
241 /* struct setjmp_record. */
242
243 int
244 setjmp_record::cmp (const setjmp_record &rec1, const setjmp_record &rec2)
245 {
246 if (int cmp_enode = rec1.m_enode->m_index - rec2.m_enode->m_index)
247 return cmp_enode;
248 gcc_assert (&rec1 == &rec2);
249 return 0;
250 }
251
/* class setjmp_svalue : public svalue.  */

/* Implementation of svalue::accept vfunc for setjmp_svalue:
   double-dispatch to the visitor.  */

void
setjmp_svalue::accept (visitor *v) const
{
  v->visit_setjmp_svalue (this);
}
261
262 /* Implementation of svalue::dump_to_pp vfunc for setjmp_svalue. */
263
264 void
265 setjmp_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
266 {
267 if (simple)
268 pp_printf (pp, "SETJMP(EN: %i)", get_enode_index ());
269 else
270 pp_printf (pp, "setjmp_svalue(EN%i)", get_enode_index ());
271 }
272
/* Get the index of the stored exploded_node (the node at which
   the setjmp call was modelled).  */

int
setjmp_svalue::get_enode_index () const
{
  return m_setjmp_record.m_enode->m_index;
}
280
281 /* Concrete implementation of sm_context, wiring it up to the rest of this
282 file. */
283
284 class impl_sm_context : public sm_context
285 {
286 public:
287 impl_sm_context (exploded_graph &eg,
288 int sm_idx,
289 const state_machine &sm,
290 exploded_node *enode_for_diag,
291 const program_state *old_state,
292 program_state *new_state,
293 const sm_state_map *old_smap,
294 sm_state_map *new_smap,
295 path_context *path_ctxt,
296 stmt_finder *stmt_finder = NULL)
297 : sm_context (sm_idx, sm),
298 m_logger (eg.get_logger ()),
299 m_eg (eg), m_enode_for_diag (enode_for_diag),
300 m_old_state (old_state), m_new_state (new_state),
301 m_old_smap (old_smap), m_new_smap (new_smap),
302 m_path_ctxt (path_ctxt),
303 m_stmt_finder (stmt_finder)
304 {
305 }
306
307 logger *get_logger () const { return m_logger.get_logger (); }
308
309 tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
310 {
311 impl_region_model_context old_ctxt
312 (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
313 NULL, call);
314 region_model *model = m_new_state->m_region_model;
315 return model->get_fndecl_for_call (call, &old_ctxt);
316 }
317
318 state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
319 tree var)
320 {
321 logger * const logger = get_logger ();
322 LOG_FUNC (logger);
323 /* Use NULL ctxt on this get_rvalue call to avoid triggering
324 uninitialized value warnings. */
325 const svalue *var_old_sval
326 = m_old_state->m_region_model->get_rvalue (var, NULL);
327
328 state_machine::state_t current
329 = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
330 return current;
331 }
332 state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
333 const svalue *sval)
334 {
335 logger * const logger = get_logger ();
336 LOG_FUNC (logger);
337 state_machine::state_t current
338 = m_old_smap->get_state (sval, m_eg.get_ext_state ());
339 return current;
340 }
341
342
343 void set_next_state (const gimple *stmt,
344 tree var,
345 state_machine::state_t to,
346 tree origin)
347 {
348 logger * const logger = get_logger ();
349 LOG_FUNC (logger);
350 impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
351 m_old_state, m_new_state,
352 NULL, NULL,
353 stmt);
354 const svalue *var_new_sval
355 = m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
356 const svalue *origin_new_sval
357 = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
358
359 /* We use the new sval here to avoid issues with uninitialized values. */
360 state_machine::state_t current
361 = m_old_smap->get_state (var_new_sval, m_eg.get_ext_state ());
362 if (logger)
363 logger->log ("%s: state transition of %qE: %s -> %s",
364 m_sm.get_name (),
365 var,
366 current->get_name (),
367 to->get_name ());
368 m_new_smap->set_state (m_new_state->m_region_model, var_new_sval,
369 to, origin_new_sval, m_eg.get_ext_state ());
370 }
371
372 void set_next_state (const gimple *stmt,
373 const svalue *sval,
374 state_machine::state_t to,
375 tree origin)
376 {
377 logger * const logger = get_logger ();
378 LOG_FUNC (logger);
379 impl_region_model_context old_ctxt
380 (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
381 NULL, stmt);
382
383 impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
384 m_old_state, m_new_state,
385 NULL, NULL,
386 stmt);
387 const svalue *origin_new_sval
388 = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
389
390 state_machine::state_t current
391 = m_old_smap->get_state (sval, m_eg.get_ext_state ());
392 if (logger)
393 {
394 logger->start_log_line ();
395 logger->log_partial ("%s: state transition of ",
396 m_sm.get_name ());
397 sval->dump_to_pp (logger->get_printer (), true);
398 logger->log_partial (": %s -> %s",
399 current->get_name (),
400 to->get_name ());
401 logger->end_log_line ();
402 }
403 m_new_smap->set_state (m_new_state->m_region_model, sval,
404 to, origin_new_sval, m_eg.get_ext_state ());
405 }
406
407 void warn (const supernode *snode, const gimple *stmt,
408 tree var, pending_diagnostic *d) FINAL OVERRIDE
409 {
410 LOG_FUNC (get_logger ());
411 gcc_assert (d); // take ownership
412 impl_region_model_context old_ctxt
413 (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, NULL);
414
415 const svalue *var_old_sval
416 = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
417 state_machine::state_t current
418 = (var
419 ? m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ())
420 : m_old_smap->get_global_state ());
421 m_eg.get_diagnostic_manager ().add_diagnostic
422 (&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
423 var, var_old_sval, current, d);
424 }
425
426 /* Hook for picking more readable trees for SSA names of temporaries,
427 so that rather than e.g.
428 "double-free of '<unknown>'"
429 we can print:
430 "double-free of 'inbuf.data'". */
431
432 tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
433 {
434 /* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
435 likely to be the least surprising tree to report. */
436 if (TREE_CODE (expr) != SSA_NAME)
437 return expr;
438 if (SSA_NAME_VAR (expr) != NULL)
439 return expr;
440
441 gcc_assert (m_new_state);
442 const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
443 /* Find trees for all regions storing the value. */
444 if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
445 return t;
446 else
447 return expr;
448 }
449
450 tree get_diagnostic_tree (const svalue *sval) FINAL OVERRIDE
451 {
452 return m_new_state->m_region_model->get_representative_tree (sval);
453 }
454
455 state_machine::state_t get_global_state () const FINAL OVERRIDE
456 {
457 return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
458 }
459
460 void set_global_state (state_machine::state_t state) FINAL OVERRIDE
461 {
462 m_new_state->m_checker_states[m_sm_idx]->set_global_state (state);
463 }
464
465 void on_custom_transition (custom_transition *transition) FINAL OVERRIDE
466 {
467 transition->impl_transition (&m_eg,
468 const_cast<exploded_node *> (m_enode_for_diag),
469 m_sm_idx);
470 }
471
472 tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
473 {
474 const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
475 if (!assign_stmt)
476 return NULL_TREE;
477 impl_region_model_context old_ctxt
478 (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, stmt);
479 if (const svalue *sval
480 = m_new_state->m_region_model->get_gassign_result (assign_stmt,
481 &old_ctxt))
482 if (tree cst = sval->maybe_get_constant ())
483 if (::zerop(cst))
484 return gimple_assign_lhs (assign_stmt);
485 return NULL_TREE;
486 }
487
488 path_context *get_path_context () const FINAL OVERRIDE
489 {
490 return m_path_ctxt;
491 }
492
493 log_user m_logger;
494 exploded_graph &m_eg;
495 exploded_node *m_enode_for_diag;
496 const program_state *m_old_state;
497 program_state *m_new_state;
498 const sm_state_map *m_old_smap;
499 sm_state_map *m_new_smap;
500 path_context *m_path_ctxt;
501 stmt_finder *m_stmt_finder;
502 };
503
/* Subclass of stmt_finder for finding the best stmt to report the leak at,
   given the emission path.  */

class leak_stmt_finder : public stmt_finder
{
public:
  /* EG must outlive this finder; VAR is the leaked tree and may be
     NULL_TREE.  */
  leak_stmt_finder (const exploded_graph &eg, tree var)
  : m_eg (eg), m_var (var) {}

  stmt_finder *clone () const FINAL OVERRIDE
  {
    return new leak_stmt_finder (m_eg, m_var);
  }

  /* Pick the best stmt in EPATH at which to report the leak:
     preferably the next write to the leaked SSA name's underlying var
     after its defining stmt; failing that, the last stmt in the path
     that has a usable location.  */
  const gimple *find_stmt (const exploded_path &epath)
    FINAL OVERRIDE
  {
    logger * const logger = m_eg.get_logger ();
    LOG_FUNC (logger);

    if (m_var && TREE_CODE (m_var) == SSA_NAME)
      {
	/* Locate the final write to this SSA name in the path.  */
	const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);

	int idx_of_def_stmt;
	bool found = epath.find_stmt_backwards (def_stmt, &idx_of_def_stmt);
	if (!found)
	  goto not_found;

	/* What was the next write to the underlying var
	   after the SSA name was set? (if any).  */

	for (unsigned idx = idx_of_def_stmt + 1;
	     idx < epath.m_edges.length ();
	     ++idx)
	  {
	    const exploded_edge *eedge = epath.m_edges[idx];
	    if (logger)
	      logger->log ("eedge[%i]: EN %i -> EN %i",
			   idx,
			   eedge->m_src->m_index,
			   eedge->m_dest->m_index);
	    const exploded_node *dst_node = eedge->m_dest;
	    const program_point &dst_point = dst_node->get_point ();
	    const gimple *stmt = dst_point.get_stmt ();
	    if (!stmt)
	      continue;
	    /* An assignment to another SSA name of the same underlying
	       var marks the point at which the leaked name went dead.  */
	    if (const gassign *assign = dyn_cast <const gassign *> (stmt))
	      {
		tree lhs = gimple_assign_lhs (assign);
		if (TREE_CODE (lhs) == SSA_NAME
		    && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (m_var))
		  return assign;
	      }
	  }
      }

  not_found:

    /* Look backwards for the first statement with a location.  */
    int i;
    const exploded_edge *eedge;
    FOR_EACH_VEC_ELT_REVERSE (epath.m_edges, i, eedge)
      {
	if (logger)
	  logger->log ("eedge[%i]: EN %i -> EN %i",
		       i,
		       eedge->m_src->m_index,
		       eedge->m_dest->m_index);
	const exploded_node *dst_node = eedge->m_dest;
	const program_point &dst_point = dst_node->get_point ();
	const gimple *stmt = dst_point.get_stmt ();
	if (stmt)
	  if (get_pure_location (stmt->location) != UNKNOWN_LOCATION)
	    return stmt;
      }

    /* A nonempty path is expected to contain at least one located stmt.  */
    gcc_unreachable ();
    return NULL;
  }

private:
  const exploded_graph &m_eg;  /* Borrowed reference.  */
  tree m_var;                  /* The leaked tree, possibly NULL_TREE.  */
};
590
591 /* A measurement of how good EXPR is for presenting to the user, so
592 that e.g. we can say prefer printing
593 "leak of 'tmp.m_ptr'"
594 over:
595 "leak of '<unknown>'". */
596
597 static int
598 readability (const_tree expr)
599 {
600 /* Arbitrarily-chosen "high readability" value. */
601 const int HIGH_READABILITY = 65536;
602
603 gcc_assert (expr);
604 switch (TREE_CODE (expr))
605 {
606 case COMPONENT_REF:
607 case MEM_REF:
608 /* Impose a slight readability penalty relative to that of
609 operand 0. */
610 return readability (TREE_OPERAND (expr, 0)) - 16;
611
612 case SSA_NAME:
613 {
614 if (tree var = SSA_NAME_VAR (expr))
615 {
616 if (DECL_ARTIFICIAL (var))
617 {
618 /* If we have an SSA name for an artificial var,
619 only use it if it has a debug expr associated with
620 it that fixup_tree_for_diagnostic can use. */
621 if (VAR_P (var) && DECL_HAS_DEBUG_EXPR_P (var))
622 return readability (DECL_DEBUG_EXPR (var)) - 1;
623 }
624 else
625 {
626 /* Slightly favor the underlying var over the SSA name to
627 avoid having them compare equal. */
628 return readability (var) - 1;
629 }
630 }
631 /* Avoid printing '<unknown>' for SSA names for temporaries. */
632 return -1;
633 }
634 break;
635
636 case PARM_DECL:
637 case VAR_DECL:
638 if (DECL_NAME (expr))
639 return HIGH_READABILITY;
640 else
641 /* We don't want to print temporaries. For example, the C FE
642 prints them as e.g. "<Uxxxx>" where "xxxx" is the low 16 bits
643 of the tree pointer (see pp_c_tree_decl_identifier). */
644 return -1;
645
646 case RESULT_DECL:
647 /* Printing "<return-value>" isn't ideal, but is less awful than
648 trying to print a temporary. */
649 return HIGH_READABILITY / 2;
650
651 case NOP_EXPR:
652 {
653 /* Impose a moderate readability penalty for casts. */
654 const int CAST_PENALTY = 32;
655 return readability (TREE_OPERAND (expr, 0)) - CAST_PENALTY;
656 }
657
658 case INTEGER_CST:
659 return HIGH_READABILITY;
660
661 default:
662 return 0;
663 }
664
665 return 0;
666 }
667
668 /* A qsort comparator for trees to sort them into most user-readable to
669 least user-readable. */
670
671 int
672 readability_comparator (const void *p1, const void *p2)
673 {
674 path_var pv1 = *(path_var const *)p1;
675 path_var pv2 = *(path_var const *)p2;
676
677 const int tree_r1 = readability (pv1.m_tree);
678 const int tree_r2 = readability (pv2.m_tree);
679
680 /* Favor items that are deeper on the stack and hence more recent;
681 this also favors locals over globals. */
682 const int COST_PER_FRAME = 64;
683 const int depth_r1 = pv1.m_stack_depth * COST_PER_FRAME;
684 const int depth_r2 = pv2.m_stack_depth * COST_PER_FRAME;
685
686 /* Combine the scores from the tree and from the stack depth.
687 This e.g. lets us have a slightly penalized cast in the most
688 recent stack frame "beat" an uncast value in an older stack frame. */
689 const int sum_r1 = tree_r1 + depth_r1;
690 const int sum_r2 = tree_r2 + depth_r2;
691 if (int cmp = sum_r2 - sum_r1)
692 return cmp;
693
694 /* Otherwise, more readable trees win. */
695 if (int cmp = tree_r2 - tree_r1)
696 return cmp;
697
698 /* Otherwise, if they have the same readability, then impose an
699 arbitrary deterministic ordering on them. */
700
701 if (int cmp = TREE_CODE (pv1.m_tree) - TREE_CODE (pv2.m_tree))
702 return cmp;
703
704 switch (TREE_CODE (pv1.m_tree))
705 {
706 default:
707 break;
708 case SSA_NAME:
709 if (int cmp = (SSA_NAME_VERSION (pv1.m_tree)
710 - SSA_NAME_VERSION (pv2.m_tree)))
711 return cmp;
712 break;
713 case PARM_DECL:
714 case VAR_DECL:
715 case RESULT_DECL:
716 if (int cmp = DECL_UID (pv1.m_tree) - DECL_UID (pv2.m_tree))
717 return cmp;
718 break;
719 }
720
721 /* TODO: We ought to find ways of sorting such cases. */
722 return 0;
723 }
724
/* Find the best tree for SVAL and call SM's on_leak vfunc with it.
   If on_leak returns a pending_diagnostic, queue it up to be reported,
   so that we potentially complain about a leak of SVAL in the given STATE.
   No-op if this context has no exploded_graph (m_eg NULL).  */

void
impl_region_model_context::on_state_leak (const state_machine &sm,
					  const svalue *sval,
					  state_machine::state_t state)
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  if (logger)
    {
      logger->start_log_line ();
      logger->log_partial ("considering leak of ");
      sval->dump_to_pp (logger->get_printer (), true);
      logger->end_log_line ();
    }

  /* Without an exploded_graph there is no diagnostic_manager to report
     through.  */
  if (!m_eg)
    return;

  /* m_old_state also needs to be non-NULL so that the sm_ctxt can look
     up the old state of SVAL.  */
  gcc_assert (m_old_state);

  /* SVAL has leaked within the new state: it is not used by any reachable
     regions.
     We need to convert it back to a tree, but since it's likely no regions
     use it, we have to find the "best" tree for it in the old_state.  */
  svalue_set visited;
  path_var leaked_pv
    = m_old_state->m_region_model->get_representative_path_var (sval,
								&visited);

  /* Strip off top-level casts  */
  if (leaked_pv.m_tree && TREE_CODE (leaked_pv.m_tree) == NOP_EXPR)
    leaked_pv.m_tree = TREE_OPERAND (leaked_pv.m_tree, 0);

  /* This might be NULL; the pending_diagnostic subclasses need to cope
     with this.  */
  tree leaked_tree = leaked_pv.m_tree;
  if (logger)
    {
      if (leaked_tree)
	logger->log ("best leaked_tree: %qE", leaked_tree);
      else
	logger->log ("best leaked_tree: NULL");
    }

  leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
  gcc_assert (m_enode_for_diag);

  /* Don't complain about leaks when returning from "main".  */
  if (m_enode_for_diag->get_supernode ()
      && m_enode_for_diag->get_supernode ()->return_p ())
    {
      tree fndecl = m_enode_for_diag->get_function ()->decl;
      if (id_equal (DECL_NAME (fndecl), "main"))
	{
	  if (logger)
	    logger->log ("not reporting leak from main");
	  return;
	}
    }

  /* Let the state machine produce the diagnostic (or decline by
     returning NULL).  */
  tree leaked_tree_for_diag = fixup_tree_for_diagnostic (leaked_tree);
  pending_diagnostic *pd = sm.on_leak (leaked_tree_for_diag);
  if (pd)
    m_eg->get_diagnostic_manager ().add_diagnostic
      (&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
       m_stmt, &stmt_finder,
       leaked_tree_for_diag, sval, state, pd);
}
799
800 /* Implementation of region_model_context::on_condition vfunc.
801 Notify all state machines about the condition, which could lead to
802 state transitions. */
803
804 void
805 impl_region_model_context::on_condition (const svalue *lhs,
806 enum tree_code op,
807 const svalue *rhs)
808 {
809 int sm_idx;
810 sm_state_map *smap;
811 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
812 {
813 const state_machine &sm = m_ext_state.get_sm (sm_idx);
814 impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
815 m_old_state, m_new_state,
816 m_old_state->m_checker_states[sm_idx],
817 m_new_state->m_checker_states[sm_idx],
818 m_path_ctxt);
819 sm.on_condition (&sm_ctxt,
820 (m_enode_for_diag
821 ? m_enode_for_diag->get_supernode ()
822 : NULL),
823 m_stmt,
824 lhs, op, rhs);
825 }
826 }
827
828 /* Implementation of region_model_context::on_phi vfunc.
829 Notify all state machines about the phi, which could lead to
830 state transitions. */
831
832 void
833 impl_region_model_context::on_phi (const gphi *phi, tree rhs)
834 {
835 int sm_idx;
836 sm_state_map *smap;
837 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
838 {
839 const state_machine &sm = m_ext_state.get_sm (sm_idx);
840 impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
841 m_old_state, m_new_state,
842 m_old_state->m_checker_states[sm_idx],
843 m_new_state->m_checker_states[sm_idx],
844 m_path_ctxt);
845 sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
846 }
847 }
848
849 /* Implementation of region_model_context::on_unexpected_tree_code vfunc.
850 Mark the new state as being invalid for further exploration.
851 TODO(stage1): introduce a warning for when this occurs. */
852
853 void
854 impl_region_model_context::on_unexpected_tree_code (tree t,
855 const dump_location_t &loc)
856 {
857 logger * const logger = get_logger ();
858 if (logger)
859 logger->log ("unhandled tree code: %qs in %qs at %s:%i",
860 get_tree_code_name (TREE_CODE (t)),
861 loc.get_impl_location ().m_function,
862 loc.get_impl_location ().m_file,
863 loc.get_impl_location ().m_line);
864 if (m_new_state)
865 m_new_state->m_valid = false;
866 }
867
/* struct point_and_state.  */

/* Assert that this object is sane.
   (Compiled to a no-op in release builds via the early return below.)  */

void
point_and_state::validate (const extrinsic_state &ext_state) const
{
  /* Skip this in a release build.  */
#if !CHECKING_P
  return;
#endif

  m_point.validate ();

  m_state.validate (ext_state);

  /* Verify that the callstring's model of the stack corresponds to that
     of the region_model.  */
  /* They should have the same depth.  */
  gcc_assert (m_point.get_stack_depth ()
	      == m_state.m_region_model->get_stack_depth ());
  /* Check the functions in the callstring vs those in the frames
     at each depth.  */
  for (const frame_region *iter_frame
	 = m_state.m_region_model->get_current_frame ();
       iter_frame; iter_frame = iter_frame->get_calling_frame ())
    {
      int index = iter_frame->get_index ();
      gcc_assert (m_point.get_function_at_depth (index)
		  == iter_frame->get_function ());
    }
}
900
901 /* Subroutine of print_enode_indices: print a run of indices from START_IDX
902 to END_IDX to PP, using and updating *FIRST_RUN. */
903
904 static void
905 print_run (pretty_printer *pp, int start_idx, int end_idx,
906 bool *first_run)
907 {
908 if (!(*first_run))
909 pp_string (pp, ", ");
910 *first_run = false;
911 if (start_idx == end_idx)
912 pp_printf (pp, "EN: %i", start_idx);
913 else
914 pp_printf (pp, "EN: %i-%i", start_idx, end_idx);
915 }
916
917 /* Print the indices within ENODES to PP, collecting them as
918 runs/singletons e.g. "EN: 4-7, EN: 20-23, EN: 42". */
919
920 static void
921 print_enode_indices (pretty_printer *pp,
922 const auto_vec<exploded_node *> &enodes)
923 {
924 int cur_start_idx = -1;
925 int cur_finish_idx = -1;
926 bool first_run = true;
927 unsigned i;
928 exploded_node *enode;
929 FOR_EACH_VEC_ELT (enodes, i, enode)
930 {
931 if (cur_start_idx == -1)
932 {
933 gcc_assert (cur_finish_idx == -1);
934 cur_start_idx = cur_finish_idx = enode->m_index;
935 }
936 else
937 {
938 if (enode->m_index == cur_finish_idx + 1)
939 /* Continuation of a run. */
940 cur_finish_idx = enode->m_index;
941 else
942 {
943 /* Finish existing run, start a new one. */
944 gcc_assert (cur_start_idx >= 0);
945 gcc_assert (cur_finish_idx >= 0);
946 print_run (pp, cur_start_idx, cur_finish_idx,
947 &first_run);
948 cur_start_idx = cur_finish_idx = enode->m_index;
949 }
950 }
951 }
952 /* Finish any existing run. */
953 if (cur_start_idx >= 0)
954 {
955 gcc_assert (cur_finish_idx >= 0);
956 print_run (pp, cur_start_idx, cur_finish_idx,
957 &first_run);
958 }
959 }
960
961 /* struct eg_traits::dump_args_t. */
962
963 /* The <FILENAME>.eg.dot output can quickly become unwieldy if we show
964 full details for all enodes (both in terms of CPU time to render it,
965 and in terms of being meaningful to a human viewing it).
966
967 If we show just the IDs then the resulting graph is usually viewable,
968 but then we have to keep switching back and forth between the .dot
969 view and other dumps.
970
971 This function implements a heuristic for showing detail at the enodes
972 that (we hope) matter, and just the ID at other enodes, fixing the CPU
973 usage of the .dot viewer, and drawing the attention of the viewer
974 to these enodes.
975
976 Return true if ENODE should be shown in detail in .dot output.
977 Return false if no detail should be shown for ENODE. */
978
979 bool
980 eg_traits::dump_args_t::show_enode_details_p (const exploded_node &enode) const
981 {
982 /* If the number of exploded nodes isn't too large, we may as well show
983 all enodes in full detail in the .dot output. */
984 if (m_eg.m_nodes.length ()
985 <= (unsigned) param_analyzer_max_enodes_for_full_dump)
986 return true;
987
988 /* Otherwise, assume that what's most interesting are state explosions,
989 and thus the places where this happened.
990 Expand enodes at program points where we hit the per-enode limit, so we
991 can investigate what exploded. */
992 const per_program_point_data *per_point_data
993 = m_eg.get_per_program_point_data (enode.get_point ());
994 return per_point_data->m_excess_enodes > 0;
995 }
996
997 /* class exploded_node : public dnode<eg_traits>. */
998
999 const char *
1000 exploded_node::status_to_str (enum status s)
1001 {
1002 switch (s)
1003 {
1004 default: gcc_unreachable ();
1005 case STATUS_WORKLIST: return "WORKLIST";
1006 case STATUS_PROCESSED: return "PROCESSED";
1007 case STATUS_MERGER: return "MERGER";
1008 case STATUS_BULK_MERGED: return "BULK_MERGED";
1009 }
1010 }
1011
/* exploded_node's ctor.  PS is the point/state pair; INDEX is this
   node's unique index within the exploded_graph.  New nodes start out
   in the worklist, with no stmts processed yet.  */

exploded_node::exploded_node (const point_and_state &ps,
			      int index)
: m_ps (ps), m_status (STATUS_WORKLIST), m_index (index),
  m_num_processed_stmts (0)
{
  /* The state is required to already be in canonical form.  */
  gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
}
1021
1022 /* Get the stmt that was processed in this enode at index IDX.
1023 IDX is an index within the stmts processed at this enode, rather
1024 than within those of the supernode. */
1025
1026 const gimple *
1027 exploded_node::get_processed_stmt (unsigned idx) const
1028 {
1029 gcc_assert (idx < m_num_processed_stmts);
1030 const program_point &point = get_point ();
1031 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
1032 const supernode *snode = get_supernode ();
1033 const unsigned int point_stmt_idx = point.get_stmt_idx ();
1034 const unsigned int idx_within_snode = point_stmt_idx + idx;
1035 const gimple *stmt = snode->m_stmts[idx_within_snode];
1036 return stmt;
1037 }
1038
1039 /* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
1040 Colorize by sm-state, to make it easier to see how sm-state propagates
1041 through the exploded_graph. */
1042
1043 const char *
1044 exploded_node::get_dot_fillcolor () const
1045 {
1046 const program_state &state = get_state ();
1047
1048 /* We want to be able to easily distinguish the no-sm-state case,
1049 and to be able to distinguish cases where there's a single state
1050 from each other.
1051
1052 Sum the sm_states, and use the result to choose from a table,
1053 modulo table-size, special-casing the "no sm-state" case. */
1054 int total_sm_state = 0;
1055 int i;
1056 sm_state_map *smap;
1057 FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
1058 {
1059 for (sm_state_map::iterator_t iter = smap->begin ();
1060 iter != smap->end ();
1061 ++iter)
1062 total_sm_state += (*iter).second.m_state->get_id ();
1063 total_sm_state += smap->get_global_state ()->get_id ();
1064 }
1065
1066 if (total_sm_state > 0)
1067 {
1068 /* An arbitrarily-picked collection of light colors. */
1069 const char * const colors[]
1070 = {"azure", "coral", "cornsilk", "lightblue", "yellow",
1071 "honeydew", "lightpink", "lightsalmon", "palegreen1",
1072 "wheat", "seashell"};
1073 const int num_colors = sizeof (colors) / sizeof (colors[0]);
1074 return colors[total_sm_state % num_colors];
1075 }
1076 else
1077 /* No sm-state. */
1078 return "lightgrey";
1079 }
1080
/* Implementation of dnode::dump_dot vfunc for exploded_node.
   Emit a .dot node definition for this enode: a filled shape whose
   color comes from get_dot_fillcolor, labelled with the enode index,
   merger status, optional point/state details, any saved diagnostics,
   and any extra info the caller supplies via ARGS.  */

void
exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
{
  pretty_printer *pp = gv->get_pp ();

  dump_dot_id (pp);
  pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
	     get_dot_fillcolor ());
  /* Flush the attribute prefix before accumulating label text.  */
  pp_write_text_to_stream (pp);

  pp_printf (pp, "EN: %i", m_index);
  if (m_status == STATUS_MERGER)
    pp_string (pp, " (merger)");
  else if (m_status == STATUS_BULK_MERGED)
    pp_string (pp, " (bulk merged)");
  pp_newline (pp);

  if (args.show_enode_details_p (*this))
    {
      format f (true);
      m_ps.get_point ().print (pp, f);
      pp_newline (pp);

      const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
      const program_state &state = m_ps.get_state ();
      state.dump_to_pp (ext_state, false, true, pp);
      pp_newline (pp);

      dump_processed_stmts (pp);
    }

  dump_saved_diagnostics (pp);

  args.dump_extra_info (this, pp);

  /* Escape the accumulated text so that it is a valid .dot label.  */
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);

  pp_string (pp, "\"];\n\n");
  pp_flush (pp);
}
1123
1124 /* Show any stmts that were processed within this enode,
1125 and their index within the supernode. */
1126 void
1127 exploded_node::dump_processed_stmts (pretty_printer *pp) const
1128 {
1129 if (m_num_processed_stmts > 0)
1130 {
1131 const program_point &point = get_point ();
1132 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
1133 const supernode *snode = get_supernode ();
1134 const unsigned int point_stmt_idx = point.get_stmt_idx ();
1135
1136 pp_printf (pp, "stmts: %i", m_num_processed_stmts);
1137 pp_newline (pp);
1138 for (unsigned i = 0; i < m_num_processed_stmts; i++)
1139 {
1140 const unsigned int idx_within_snode = point_stmt_idx + i;
1141 const gimple *stmt = snode->m_stmts[idx_within_snode];
1142 pp_printf (pp, " %i: ", idx_within_snode);
1143 pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
1144 pp_newline (pp);
1145 }
1146 }
1147 }
1148
/* Dump any saved_diagnostics at this enode to PP, one per line,
   showing each diagnostic's kind and its index within the
   diagnostic manager.  */

void
exploded_node::dump_saved_diagnostics (pretty_printer *pp) const
{
  unsigned i;
  const saved_diagnostic *sd;
  FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
    {
      pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)",
		 sd->m_d->get_kind (), sd->get_index ());
      pp_newline (pp);
    }
}
1163
/* Dump this to PP in a form suitable for use as an id in .dot output
   (e.g. "exploded_node_42"); must stay in sync with the edge output
   in exploded_edge::dump_dot.  */

void
exploded_node::dump_dot_id (pretty_printer *pp) const
{
  pp_printf (pp, "exploded_node_%i", m_index);
}
1171
/* Dump a multiline representation of this node to PP: the enode
   index, followed by the program point, followed by the program
   state (interpreted using EXT_STATE).  */

void
exploded_node::dump_to_pp (pretty_printer *pp,
			   const extrinsic_state &ext_state) const
{
  pp_printf (pp, "EN: %i", m_index);
  pp_newline (pp);

  format f (true);
  m_ps.get_point ().print (pp, f);
  pp_newline (pp);

  m_ps.get_state ().dump_to_pp (ext_state, false, true, pp);
  pp_newline (pp);
}
1188
/* Dump a multiline representation of this node to FILE.  */

void
exploded_node::dump (FILE *fp,
		     const extrinsic_state &ext_state) const
{
  pretty_printer pp;
  /* Use the tree-aware format decoder, and inherit colorization
     settings from the global diagnostic context.  */
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = fp;
  dump_to_pp (&pp, ext_state);
  pp_flush (&pp);
}
1202
/* Dump a multiline representation of this node to stderr,
   for use from the debugger.  */

DEBUG_FUNCTION void
exploded_node::dump (const extrinsic_state &ext_state) const
{
  dump (stderr, ext_state);
}
1210
1211 /* Return a new json::object of the form
1212 {"point" : object for program_point,
1213 "state" : object for program_state,
1214 "status" : str,
1215 "idx" : int,
1216 "processed_stmts" : int}. */
1217
1218 json::object *
1219 exploded_node::to_json (const extrinsic_state &ext_state) const
1220 {
1221 json::object *enode_obj = new json::object ();
1222
1223 enode_obj->set ("point", get_point ().to_json ());
1224 enode_obj->set ("state", get_state ().to_json (ext_state));
1225 enode_obj->set ("status", new json::string (status_to_str (m_status)));
1226 enode_obj->set ("idx", new json::integer_number (m_index));
1227 enode_obj->set ("processed_stmts",
1228 new json::integer_number (m_num_processed_stmts));
1229
1230 return enode_obj;
1231 }
1232
1233 } // namespace ana
1234
1235 /* Return true if FNDECL has a gimple body. */
1236 // TODO: is there a pre-canned way to do this?
1237
1238 bool
1239 fndecl_has_gimple_body_p (tree fndecl)
1240 {
1241 if (fndecl == NULL_TREE)
1242 return false;
1243
1244 cgraph_node *n = cgraph_node::get (fndecl);
1245 if (!n)
1246 return false;
1247
1248 return n->has_gimple_body_p ();
1249 }
1250
1251 namespace ana {
1252
/* Modify STATE in place, applying the effects of the stmt at this node's
   point.

   EG is the graph being built; SNODE is the supernode containing STMT.
   UNCERTAINTY and PATH_CTXT are threaded through to the contexts
   created here.

   Return flags indicating whether to continue along this path, or to
   terminate it.  */

exploded_node::on_stmt_flags
exploded_node::on_stmt (exploded_graph &eg,
			const supernode *snode,
			const gimple *stmt,
			program_state *state,
			uncertainty_t *uncertainty,
			path_context *path_ctxt)
{
  logger *logger = eg.get_logger ();
  LOG_SCOPE (logger);
  if (logger)
    {
      logger->start_log_line ();
      pp_gimple_stmt_1 (logger->get_printer (), stmt, 0, (dump_flags_t)0);
      logger->end_log_line ();
    }

  /* Update input_location in case of ICE: make it easier to track down which
     source construct we're failing to handle.  */
  input_location = stmt->location;

  gcc_assert (state->m_region_model);

  /* Preserve the old state.  It is used here for looking
     up old checker states, for determining state transitions, and
     also within impl_region_model_context and impl_sm_context for
     going from tree to svalue_id.  */
  const program_state old_state (*state);

  impl_region_model_context ctxt (eg, this,
				  &old_state, state, uncertainty,
				  path_ctxt, stmt);

  bool unknown_side_effects = false;
  bool terminate_path = false;

  /* Apply the non-sm-state effects of the stmt first (region model
     updates and special-cased calls).  */
  on_stmt_pre (eg, stmt, state, &terminate_path,
	       &unknown_side_effects, &ctxt);

  if (terminate_path)
    return on_stmt_flags::terminate_path ();

  /* Then give each state machine a chance to respond to the stmt;
     a state machine that handles the stmt vetoes the "unknown side
     effects" determination above.  */
  int sm_idx;
  sm_state_map *smap;
  FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
    {
      const state_machine &sm = eg.get_ext_state ().get_sm (sm_idx);
      const sm_state_map *old_smap
	= old_state.m_checker_states[sm_idx];
      sm_state_map *new_smap = state->m_checker_states[sm_idx];
      impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
			       old_smap, new_smap, path_ctxt);

      /* Allow the state_machine to handle the stmt.  */
      if (sm.on_stmt (&sm_ctxt, snode, stmt))
	unknown_side_effects = false;
    }

  if (path_ctxt->terminate_path_p ())
    return on_stmt_flags::terminate_path ();

  on_stmt_post (stmt, state, unknown_side_effects, &ctxt);

  return on_stmt_flags ();
}
1321
/* Handle the pre-sm-state part of STMT, modifying STATE in-place.
   Write true to *OUT_TERMINATE_PATH if the path should be terminated.
   Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
   side effects.

   Special-cased calls that need the full program_state (rather than
   just the region model) are handled here; everything else is
   delegated to the region model.  */

void
exploded_node::on_stmt_pre (exploded_graph &eg,
			    const gimple *stmt,
			    program_state *state,
			    bool *out_terminate_path,
			    bool *out_unknown_side_effects,
			    region_model_context *ctxt)
{
  /* Handle special-case calls that require the full program_state.  */
  if (const gcall *call = dyn_cast <const gcall *> (stmt))
    {
      if (is_special_named_call_p (call, "__analyzer_dump", 0))
	{
	  /* Handle the builtin "__analyzer_dump" by dumping state
	     to stderr.  */
	  state->dump (eg.get_ext_state (), true);
	  return;
	}
      else if (is_special_named_call_p (call, "__analyzer_dump_state", 2))
	{
	  state->impl_call_analyzer_dump_state (call, eg.get_ext_state (),
						ctxt);
	  return;
	}
      else if (is_setjmp_call_p (call))
	{
	  state->m_region_model->on_setjmp (call, this, ctxt);
	  return;
	}
      else if (is_longjmp_call_p (call))
	{
	  /* on_longjmp builds the rewind enode/eedge itself, so no
	     further exploration happens along this path.  */
	  on_longjmp (eg, call, state, ctxt);
	  *out_terminate_path = true;
	  return;
	}
    }

  /* Otherwise, defer to m_region_model.  */
  state->m_region_model->on_stmt_pre (stmt,
				      out_terminate_path,
				      out_unknown_side_effects,
				      ctxt);
}
1370
1371 /* Handle the post-sm-state part of STMT, modifying STATE in-place. */
1372
1373 void
1374 exploded_node::on_stmt_post (const gimple *stmt,
1375 program_state *state,
1376 bool unknown_side_effects,
1377 region_model_context *ctxt)
1378 {
1379 if (const gcall *call = dyn_cast <const gcall *> (stmt))
1380 state->m_region_model->on_call_post (call, unknown_side_effects, ctxt);
1381 }
1382
1383 /* Consider the effect of following superedge SUCC from this node.
1384
1385 Return true if it's feasible to follow the edge, or false
1386 if it's infeasible.
1387
1388 Examples: if it's the "true" branch within
1389 a CFG and we know the conditional is false, we know it's infeasible.
1390 If it's one of multiple interprocedual "return" edges, then only
1391 the edge back to the most recent callsite is feasible.
1392
1393 Update NEXT_STATE accordingly (e.g. to record that a condition was
1394 true or false, or that the NULL-ness of a pointer has been checked,
1395 pushing/popping stack frames, etc).
1396
1397 Update NEXT_POINT accordingly (updating the call string). */
1398
1399 bool
1400 exploded_node::on_edge (exploded_graph &eg,
1401 const superedge *succ,
1402 program_point *next_point,
1403 program_state *next_state,
1404 uncertainty_t *uncertainty)
1405 {
1406 LOG_FUNC (eg.get_logger ());
1407
1408 if (!next_point->on_edge (eg, succ))
1409 return false;
1410
1411 if (!next_state->on_edge (eg, this, succ, uncertainty))
1412 return false;
1413
1414 return true;
1415 }
1416
1417 /* Verify that the stack at LONGJMP_POINT is still valid, given a call
1418 to "setjmp" at SETJMP_POINT - the stack frame that "setjmp" was
1419 called in must still be valid.
1420
1421 Caveat: this merely checks the call_strings in the points; it doesn't
1422 detect the case where a frame returns and is then called again. */
1423
1424 static bool
1425 valid_longjmp_stack_p (const program_point &longjmp_point,
1426 const program_point &setjmp_point)
1427 {
1428 const call_string &cs_at_longjmp = longjmp_point.get_call_string ();
1429 const call_string &cs_at_setjmp = setjmp_point.get_call_string ();
1430
1431 if (cs_at_longjmp.length () < cs_at_setjmp.length ())
1432 return false;
1433
1434 /* Check that the call strings match, up to the depth of the
1435 setjmp point. */
1436 for (unsigned depth = 0; depth < cs_at_setjmp.length (); depth++)
1437 if (cs_at_longjmp[depth] != cs_at_setjmp[depth])
1438 return false;
1439
1440 return true;
1441 }
1442
1443 /* A pending_diagnostic subclass for complaining about bad longjmps,
1444 where the enclosing function of the "setjmp" has returned (and thus
1445 the stack frame no longer exists). */
1446
1447 class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
1448 {
1449 public:
1450 stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call,
1451 const program_point &setjmp_point)
1452 : m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call),
1453 m_setjmp_point (setjmp_point), m_stack_pop_event (NULL)
1454 {}
1455
1456 bool emit (rich_location *richloc) FINAL OVERRIDE
1457 {
1458 return warning_at
1459 (richloc, OPT_Wanalyzer_stale_setjmp_buffer,
1460 "%qs called after enclosing function of %qs has returned",
1461 get_user_facing_name (m_longjmp_call),
1462 get_user_facing_name (m_setjmp_call));
1463 }
1464
1465 const char *get_kind () const FINAL OVERRIDE
1466 { return "stale_jmp_buf"; }
1467
1468 bool operator== (const stale_jmp_buf &other) const
1469 {
1470 return (m_setjmp_call == other.m_setjmp_call
1471 && m_longjmp_call == other.m_longjmp_call);
1472 }
1473
1474 bool
1475 maybe_add_custom_events_for_superedge (const exploded_edge &eedge,
1476 checker_path *emission_path)
1477 FINAL OVERRIDE
1478 {
1479 /* Detect exactly when the stack first becomes invalid,
1480 and issue an event then. */
1481 if (m_stack_pop_event)
1482 return false;
1483 const exploded_node *src_node = eedge.m_src;
1484 const program_point &src_point = src_node->get_point ();
1485 const exploded_node *dst_node = eedge.m_dest;
1486 const program_point &dst_point = dst_node->get_point ();
1487 if (valid_longjmp_stack_p (src_point, m_setjmp_point)
1488 && !valid_longjmp_stack_p (dst_point, m_setjmp_point))
1489 {
1490 /* Compare with diagnostic_manager::add_events_for_superedge. */
1491 const int src_stack_depth = src_point.get_stack_depth ();
1492 m_stack_pop_event = new precanned_custom_event
1493 (src_point.get_location (),
1494 src_point.get_fndecl (),
1495 src_stack_depth,
1496 "stack frame is popped here, invalidating saved environment");
1497 emission_path->add_event (m_stack_pop_event);
1498 return false;
1499 }
1500 return false;
1501 }
1502
1503 label_text describe_final_event (const evdesc::final_event &ev)
1504 {
1505 if (m_stack_pop_event)
1506 return ev.formatted_print
1507 ("%qs called after enclosing function of %qs returned at %@",
1508 get_user_facing_name (m_longjmp_call),
1509 get_user_facing_name (m_setjmp_call),
1510 m_stack_pop_event->get_id_ptr ());
1511 else
1512 return ev.formatted_print
1513 ("%qs called after enclosing function of %qs has returned",
1514 get_user_facing_name (m_longjmp_call),
1515 get_user_facing_name (m_setjmp_call));;
1516 }
1517
1518
1519 private:
1520 const gcall *m_setjmp_call;
1521 const gcall *m_longjmp_call;
1522 program_point m_setjmp_point;
1523 custom_event *m_stack_pop_event;
1524 };
1525
/* Handle LONGJMP_CALL, a call to longjmp or siglongjmp.

   Attempt to locate where setjmp/sigsetjmp was called on the jmp_buf and build
   an exploded_node and exploded_edge to it representing a rewind to that frame,
   handling the various kinds of failure that can occur.  */

void
exploded_node::on_longjmp (exploded_graph &eg,
			   const gcall *longjmp_call,
			   program_state *new_state,
			   region_model_context *ctxt)
{
  tree buf_ptr = gimple_call_arg (longjmp_call, 0);
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (buf_ptr)));

  region_model *new_region_model = new_state->m_region_model;
  const svalue *buf_ptr_sval = new_region_model->get_rvalue (buf_ptr, ctxt);
  const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
						      ctxt);

  /* If the buffer doesn't hold a setjmp_svalue, we don't know where
     to rewind to; give up.  */
  const svalue *buf_content_sval
    = new_region_model->get_store_value (buf, ctxt);
  const setjmp_svalue *setjmp_sval
    = buf_content_sval->dyn_cast_setjmp_svalue ();
  if (!setjmp_sval)
    return;

  const setjmp_record tmp_setjmp_record = setjmp_sval->get_setjmp_record ();

  /* Build a custom enode and eedge for rewinding from the longjmp/siglongjmp
     call back to the setjmp/sigsetjmp.  */
  rewind_info_t rewind_info (tmp_setjmp_record, longjmp_call);

  const gcall *setjmp_call = rewind_info.get_setjmp_call ();
  const program_point &setjmp_point = rewind_info.get_setjmp_point ();

  const program_point &longjmp_point = get_point ();

  /* Verify that the setjmp's call_stack hasn't been popped.  */
  if (!valid_longjmp_stack_p (longjmp_point, setjmp_point))
    {
      ctxt->warn (new stale_jmp_buf (setjmp_call, longjmp_call, setjmp_point));
      return;
    }

  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  /* Update the state for use by the destination node.  */

  /* Stash the current number of diagnostics so that we can update
     any that this adds to show where the longjmp is rewinding to.  */

  diagnostic_manager *dm = &eg.get_diagnostic_manager ();
  unsigned prev_num_diagnostics = dm->get_num_diagnostics ();

  new_region_model->on_longjmp (longjmp_call, setjmp_call,
				setjmp_point.get_stack_depth (), ctxt);

  /* Detect leaks in the new state relative to the old state.  */
  program_state::detect_leaks (get_state (), *new_state, NULL,
			       eg.get_ext_state (), ctxt);

  /* The rewind lands just after the supernode containing the setjmp.  */
  program_point next_point
    = program_point::after_supernode (setjmp_point.get_supernode (),
				      setjmp_point.get_call_string ());

  exploded_node *next
    = eg.get_or_create_node (next_point, *new_state, this);

  /* Create custom exploded_edge for a longjmp.  */
  if (next)
    {
      exploded_edge *eedge
	= eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
		       new rewind_info_t (tmp_setjmp_record, longjmp_call));

      /* For any diagnostics that were queued here (such as leaks) we want
	 the checker_path to show the rewinding events after the "final event"
	 so that the user sees where the longjmp is rewinding to (otherwise the
	 path is meaningless).

	 For example, we want to emit something like:
                    |   NN |
                    |   NN |   longjmp (env, 1);
                    |      |   ~~~~~~~~~~~~~~~~
                    |      |   |
                    |      |   (10) 'ptr' leaks here; was allocated at (7)
                    |      |   (11) rewinding from 'longjmp' in 'inner'...
                    |
                      <-------------+
                    |
                   'outer': event 12
                    |
                    |   NN |   i = setjmp(env);
                    |      |       ^~~~~~
                    |      |       |
                    |      |       (12) ...to 'setjmp' in 'outer' (saved at (2))

	 where the "final" event above is event (10), but we want to append
	 events (11) and (12) afterwards.

	 Do this by setting m_trailing_eedge on any diagnostics that were
	 just saved.  */
      unsigned num_diagnostics = dm->get_num_diagnostics ();
      for (unsigned i = prev_num_diagnostics; i < num_diagnostics; i++)
	{
	  saved_diagnostic *sd = dm->get_saved_diagnostic (i);
	  sd->m_trailing_eedge = eedge;
	}
    }
}
1638
/* Subroutine of exploded_graph::process_node for finding the successors
   of the supernode for a function exit basic block.

   Ensure that pop_frame is called, potentially queuing diagnostics about
   leaks.  Only does anything for "top-level" frames (stack depth 1);
   deeper frames are popped when their return superedge is handled.  */

void
exploded_node::detect_leaks (exploded_graph &eg)
{
  LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);

  gcc_assert (get_point ().get_supernode ()->return_p ());

  /* If we're not a "top-level" function, do nothing; pop_frame
     will be called when handling the return superedge.  */
  if (get_point ().get_stack_depth () > 1)
    return;

  /* We have a "top-level" function.  */
  gcc_assert (get_point ().get_stack_depth () == 1);

  const program_state &old_state = get_state ();

  /* Work with a temporary copy of the state: pop the frame, and see
     what leaks (via purge_unused_svalues).  */
  program_state new_state (old_state);

  gcc_assert (new_state.m_region_model);

  uncertainty_t uncertainty;
  impl_region_model_context ctxt (eg, this,
				  &old_state, &new_state, &uncertainty, NULL,
				  get_stmt ());
  /* Capture the frame's return value (if any) so that it isn't
     reported as leaked.  */
  const svalue *result = NULL;
  new_state.m_region_model->pop_frame (NULL, &result, &ctxt);
  program_state::detect_leaks (old_state, new_state, result,
			       eg.get_ext_state (), &ctxt);
}
1677
/* Dump the successors and predecessors of this enode to OUTF,
   as two lines of enode indices ("preds: ..." and "succs: ...").  */

void
exploded_node::dump_succs_and_preds (FILE *outf) const
{
  unsigned i;
  exploded_edge *e;
  {
    /* Collect the source enodes of the in-edges.  */
    auto_vec<exploded_node *> preds (m_preds.length ());
    FOR_EACH_VEC_ELT (m_preds, i, e)
      preds.quick_push (e->m_src);
    pretty_printer pp;
    print_enode_indices (&pp, preds);
    fprintf (outf, "preds: %s\n",
	     pp_formatted_text (&pp));
  }
  {
    /* Collect the destination enodes of the out-edges.  */
    auto_vec<exploded_node *> succs (m_succs.length ());
    FOR_EACH_VEC_ELT (m_succs, i, e)
      succs.quick_push (e->m_dest);
    pretty_printer pp;
    print_enode_indices (&pp, succs);
    fprintf (outf, "succs: %s\n",
	     pp_formatted_text (&pp));
  }
}
1704
1705 /* class dynamic_call_info_t : public custom_edge_info. */
1706
/* Implementation of custom_edge_info::update_model vfunc
   for dynamic_call_info_t.

   Update state for dynamically discovered calls: the destination
   enode's state was already computed when the edge was created, so
   simply copy its region model into MODEL.  */

bool
dynamic_call_info_t::update_model (region_model *model,
				   const exploded_edge *eedge,
				   region_model_context *) const
{
  gcc_assert (eedge);
  const program_state &dest_state = eedge->m_dest->get_state ();
  *model = *dest_state.m_region_model;
  return true;
}
1722
1723 /* Implementation of custom_edge_info::add_events_to_path vfunc
1724 for dynamic_call_info_t. */
1725
1726 void
1727 dynamic_call_info_t::add_events_to_path (checker_path *emission_path,
1728 const exploded_edge &eedge) const
1729 {
1730 const exploded_node *src_node = eedge.m_src;
1731 const program_point &src_point = src_node->get_point ();
1732 const int src_stack_depth = src_point.get_stack_depth ();
1733 const exploded_node *dest_node = eedge.m_dest;
1734 const program_point &dest_point = dest_node->get_point ();
1735 const int dest_stack_depth = dest_point.get_stack_depth ();
1736
1737 if (m_is_returning_call)
1738 emission_path->add_event (new return_event (eedge, (m_dynamic_call
1739 ? m_dynamic_call->location
1740 : UNKNOWN_LOCATION),
1741 dest_point.get_fndecl (),
1742 dest_stack_depth));
1743 else
1744 emission_path->add_event (new call_event (eedge, (m_dynamic_call
1745 ? m_dynamic_call->location
1746 : UNKNOWN_LOCATION),
1747 src_point.get_fndecl (),
1748 src_stack_depth));
1749
1750 }
1751
1752 /* class rewind_info_t : public custom_edge_info. */
1753
/* Implementation of custom_edge_info::update_model vfunc
   for rewind_info_t.

   Update state for the special-case of a rewind of a longjmp
   to a setjmp (which doesn't have a superedge, but does affect
   state).  */

bool
rewind_info_t::update_model (region_model *model,
			     const exploded_edge *eedge,
			     region_model_context *) const
{
  gcc_assert (eedge);
  const program_point &longjmp_point = eedge->m_src->get_point ();
  const program_point &setjmp_point = eedge->m_dest->get_point ();

  /* A longjmp can only pop frames, never push them.  */
  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  model->on_longjmp (get_longjmp_call (),
		     get_setjmp_call (),
		     setjmp_point.get_stack_depth (), NULL);
  return true;
}
1778
1779 /* Implementation of custom_edge_info::add_events_to_path vfunc
1780 for rewind_info_t. */
1781
1782 void
1783 rewind_info_t::add_events_to_path (checker_path *emission_path,
1784 const exploded_edge &eedge) const
1785 {
1786 const exploded_node *src_node = eedge.m_src;
1787 const program_point &src_point = src_node->get_point ();
1788 const int src_stack_depth = src_point.get_stack_depth ();
1789 const exploded_node *dst_node = eedge.m_dest;
1790 const program_point &dst_point = dst_node->get_point ();
1791 const int dst_stack_depth = dst_point.get_stack_depth ();
1792
1793 emission_path->add_event
1794 (new rewind_from_longjmp_event
1795 (&eedge, get_longjmp_call ()->location,
1796 src_point.get_fndecl (),
1797 src_stack_depth, this));
1798 emission_path->add_event
1799 (new rewind_to_setjmp_event
1800 (&eedge, get_setjmp_call ()->location,
1801 dst_point.get_fndecl (),
1802 dst_stack_depth, this));
1803 }
1804
1805 /* class exploded_edge : public dedge<eg_traits>. */
1806
/* exploded_edge's ctor.
   SRC and DEST are the endpoints.  SEDGE is the underlying superedge,
   if any.  CUSTOM_INFO, if non-NULL, is taken over by this edge and
   deleted in its dtor.  */

exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
			      const superedge *sedge,
			      custom_edge_info *custom_info)
: dedge<eg_traits> (src, dest), m_sedge (sedge),
  m_custom_info (custom_info)
{
}
1816
/* exploded_edge's dtor.  Releases the custom edge info, which this
   edge owns.  */

exploded_edge::~exploded_edge ()
{
  delete m_custom_info;
}
1823
/* Implementation of dedge::dump_dot vfunc for exploded_edge.
   Emit "src -> dest" (using the enodes' dot ids), followed by the
   edge's attributes/label.  Use the label of the underlying
   superedge, if any.  */

void
exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &) const
{
  pretty_printer *pp = gv->get_pp ();

  m_src->dump_dot_id (pp);
  pp_string (pp, " -> ");
  m_dest->dump_dot_id (pp);
  dump_dot_label (pp);
}
1837
/* Second half of exploded_edge::dump_dot.  This is split out
   for use by trimmed_graph::dump_dot and base_feasible_edge::dump_dot.

   Emit the .dot attribute list for this edge, choosing style/color
   based on the kind of the underlying superedge (or on the presence
   of custom edge info), with the edge's label as "headlabel".  */

void
exploded_edge::dump_dot_label (pretty_printer *pp) const
{
  /* Defaults, used for plain CFG edges.  */
  const char *style = "\"solid,bold\"";
  const char *color = "black";
  int weight = 10;
  const char *constraint = "true";

  if (m_sedge)
    switch (m_sedge->m_kind)
      {
      default:
	gcc_unreachable ();
      case SUPEREDGE_CFG_EDGE:
	break;
      case SUPEREDGE_CALL:
	color = "red";
	//constraint = "false";
	break;
      case SUPEREDGE_RETURN:
	color = "green";
	//constraint = "false";
	break;
      case SUPEREDGE_INTRAPROCEDURAL_CALL:
	style = "\"dotted\"";
	break;
      }
  /* Custom edges (e.g. longjmp rewinds, dynamic calls) override the
     superedge-based styling.  */
  if (m_custom_info)
    {
      color = "red";
      style = "\"dotted\"";
    }

  pp_printf (pp,
	     (" [style=%s, color=%s, weight=%d, constraint=%s,"
	      " headlabel=\""),
	     style, color, weight, constraint);

  if (m_sedge)
    m_sedge->dump_label_to_pp (pp, false);
  else if (m_custom_info)
    m_custom_info->print (pp);

  //pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);

  pp_printf (pp, "\"];\n");
}
1888
1889 /* Return a new json::object of the form
1890 {"src_idx": int, the index of the source exploded edge,
1891 "dst_idx": int, the index of the destination exploded edge,
1892 "sedge": (optional) object for the superedge, if any,
1893 "custom": (optional) str, a description, if this is a custom edge}. */
1894
1895 json::object *
1896 exploded_edge::to_json () const
1897 {
1898 json::object *eedge_obj = new json::object ();
1899 eedge_obj->set ("src_idx", new json::integer_number (m_src->m_index));
1900 eedge_obj->set ("dst_idx", new json::integer_number (m_dest->m_index));
1901 if (m_sedge)
1902 eedge_obj->set ("sedge", m_sedge->to_json ());
1903 if (m_custom_info)
1904 {
1905 pretty_printer pp;
1906 pp_format_decoder (&pp) = default_tree_printer;
1907 m_custom_info->print (&pp);
1908 eedge_obj->set ("custom", new json::string (pp_formatted_text (&pp)));
1909 }
1910 return eedge_obj;
1911 }
1912
1913 /* struct stats. */
1914
/* stats' ctor.
   NUM_SUPERNODES is the size of the supergraph, used by dump for
   computing per-supernode averages.  All per-point-kind counts start
   at zero.  */

stats::stats (int num_supernodes)
: m_node_reuse_count (0),
  m_node_reuse_after_merge_count (0),
  m_num_supernodes (num_supernodes)
{
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    m_num_nodes[i] = 0;
}
1925
/* Log these stats in multiline form to LOGGER: the per-point-kind
   enode counts (skipping zero entries), followed by the reuse
   counters.  LOGGER must be non-NULL.  */

void
stats::log (logger *logger) const
{
  gcc_assert (logger);
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    if (m_num_nodes[i] > 0)
      logger->log ("m_num_nodes[%s]: %i",
		   point_kind_to_string (static_cast <enum point_kind> (i)),
		   m_num_nodes[i]);
  logger->log ("m_node_reuse_count: %i", m_node_reuse_count);
  logger->log ("m_node_reuse_after_merge_count: %i",
	       m_node_reuse_after_merge_count);
}
1941
/* Dump these stats in multiline form to OUT: per-point-kind enode
   counts (skipping zero entries), the reuse counters, and - if the
   supergraph is non-empty - the average number of PK_AFTER_SUPERNODE
   enodes per supernode.  */

void
stats::dump (FILE *out) const
{
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    if (m_num_nodes[i] > 0)
      fprintf (out, "m_num_nodes[%s]: %i\n",
	       point_kind_to_string (static_cast <enum point_kind> (i)),
	       m_num_nodes[i]);
  fprintf (out, "m_node_reuse_count: %i\n", m_node_reuse_count);
  fprintf (out, "m_node_reuse_after_merge_count: %i\n",
	   m_node_reuse_after_merge_count);

  if (m_num_supernodes > 0)
    fprintf (out, "PK_AFTER_SUPERNODE nodes per supernode: %.2f\n",
	     (float)m_num_nodes[PK_AFTER_SUPERNODE] / (float)m_num_supernodes);
}
1960
1961 /* Return the total number of enodes recorded within this object. */
1962
1963 int
1964 stats::get_total_enodes () const
1965 {
1966 int result = 0;
1967 for (int i = 0; i < NUM_POINT_KINDS; i++)
1968 result += m_num_nodes[i];
1969 return result;
1970 }
1971
/* strongly_connected_components's ctor.  Tarjan's SCC algorithm.

   Compute the SCC membership of every node in SG (considering only
   intraprocedural edges; see strong_connect).  */

strongly_connected_components::
strongly_connected_components (const supergraph &sg, logger *logger)
: m_sg (sg), m_per_node (m_sg.num_nodes ())
{
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_SCC);

  /* Start with default-constructed (unvisited) per-node data;
     presumably per_node_data's default m_index is -1 - see the
     "unvisited" check below.  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    m_per_node.quick_push (per_node_data ());

  /* Visit every not-yet-visited node.  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    if (m_per_node[i].m_index == -1)
      strong_connect (i);

  /* Debugging hook: change to "if (1)" to dump the result.  */
  if (0)
    dump ();
}
1991
1992 /* Dump this object to stderr. */
1993
1994 DEBUG_FUNCTION void
1995 strongly_connected_components::dump () const
1996 {
1997 for (int i = 0; i < m_sg.num_nodes (); i++)
1998 {
1999 const per_node_data &v = m_per_node[i];
2000 fprintf (stderr, "SN %i: index: %i lowlink: %i on_stack: %i\n",
2001 i, v.m_index, v.m_lowlink, v.m_on_stack);
2002 }
2003 }
2004
2005 /* Return a new json::array of per-snode SCC ids. */
2006
2007 json::array *
2008 strongly_connected_components::to_json () const
2009 {
2010 json::array *scc_arr = new json::array ();
2011 for (int i = 0; i < m_sg.num_nodes (); i++)
2012 scc_arr->append (new json::integer_number (get_scc_id (i)));
2013 return scc_arr;
2014 }
2015
2016 /* Subroutine of strongly_connected_components's ctor, part of Tarjan's
2017 SCC algorithm. */
2018
2019 void
2020 strongly_connected_components::strong_connect (unsigned index)
2021 {
2022 supernode *v_snode = m_sg.get_node_by_index (index);
2023
2024 /* Set the depth index for v to the smallest unused index. */
2025 per_node_data *v = &m_per_node[index];
2026 v->m_index = index;
2027 v->m_lowlink = index;
2028 m_stack.safe_push (index);
2029 v->m_on_stack = true;
2030 index++;
2031
2032 /* Consider successors of v. */
2033 unsigned i;
2034 superedge *sedge;
2035 FOR_EACH_VEC_ELT (v_snode->m_succs, i, sedge)
2036 {
2037 if (sedge->get_kind () != SUPEREDGE_CFG_EDGE
2038 && sedge->get_kind () != SUPEREDGE_INTRAPROCEDURAL_CALL)
2039 continue;
2040 supernode *w_snode = sedge->m_dest;
2041 per_node_data *w = &m_per_node[w_snode->m_index];
2042 if (w->m_index == -1)
2043 {
2044 /* Successor w has not yet been visited; recurse on it. */
2045 strong_connect (w_snode->m_index);
2046 v->m_lowlink = MIN (v->m_lowlink, w->m_lowlink);
2047 }
2048 else if (w->m_on_stack)
2049 {
2050 /* Successor w is in stack S and hence in the current SCC
2051 If w is not on stack, then (v, w) is a cross-edge in the DFS
2052 tree and must be ignored. */
2053 v->m_lowlink = MIN (v->m_lowlink, w->m_index);
2054 }
2055 }
2056
2057 /* If v is a root node, pop the stack and generate an SCC. */
2058
2059 if (v->m_lowlink == v->m_index)
2060 {
2061 per_node_data *w;
2062 do {
2063 int idx = m_stack.pop ();
2064 w = &m_per_node[idx];
2065 w->m_on_stack = false;
2066 } while (w != v);
2067 }
2068 }
2069
/* worklist's ctor.
   Computes the strongly-connected components of EG's supergraph
   (used when ordering keys), stores the analysis PLAN, and sets up
   an empty priority queue.  */

worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
: m_scc (eg.get_supergraph (), eg.get_logger ()),
  m_plan (plan),
  m_queue (key_t (*this, NULL))
{
}
2078
/* Return the number of nodes in the worklist.  */

unsigned
worklist::length () const
{
  return m_queue.nodes ();
}
2086
2087 /* Return the next node in the worklist, removing it. */
2088
2089 exploded_node *
2090 worklist::take_next ()
2091 {
2092 return m_queue.extract_min ();
2093 }
2094
2095 /* Return the next node in the worklist without removing it. */
2096
2097 exploded_node *
2098 worklist::peek_next ()
2099 {
2100 return m_queue.min ();
2101 }
2102
2103 /* Add ENODE to the worklist. */
2104
2105 void
2106 worklist::add_node (exploded_node *enode)
2107 {
2108 gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
2109 m_queue.insert (key_t (*this, enode), enode);
2110 }
2111
/* Comparator for implementing worklist::key_t comparison operators.
   Return negative if KA is before KB
   Return positive if KA is after KB
   Return 0 if they are equal.

   The ordering of the worklist is critical for performance and for
   avoiding node explosions.  Ideally we want all enodes at a CFG join-point
   with the same callstring to be sorted next to each other in the worklist
   so that a run of consecutive enodes can be merged and processed "in bulk"
   rather than individually or pairwise, minimizing the number of new enodes
   created.

   The comparison criteria are applied in this order:
     1. analysis-plan order of functions (summaries first)
     2. call string depth/content
     3. SCC id of the supernode
     4. supernode index
     5. position within the supernode
     6. sm-state
     7. enode index (tie-breaker for a stable sort).  */

int
worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
{
  const program_point &point_a = ka.m_enode->get_point ();
  const program_point &point_b = kb.m_enode->get_point ();
  const call_string &call_string_a = point_a.get_call_string ();
  const call_string &call_string_b = point_b.get_call_string ();

  /* Order empty-callstring points with different functions based on the
     analysis_plan, so that we generate summaries before they are used.  */
  if (flag_analyzer_call_summaries
      && call_string_a.empty_p ()
      && call_string_b.empty_p ()
      && point_a.get_function () != NULL
      && point_b.get_function () != NULL
      && point_a.get_function () != point_b.get_function ())
    {
      if (int cmp = ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
                                                       point_b.get_function ()))
        return cmp;
    }

  /* Sort by callstring, so that nodes with deeper call strings are processed
     before those with shallower call strings.
     If we have
         splitting BB
             /  \
            /    \
       fn call   no fn call
            \    /
             \  /
            join BB
     then we want the path inside the function call to be fully explored up
     to the return to the join BB before we explore on the "no fn call" path,
     so that both enodes at the join BB reach the front of the worklist at
     the same time and thus have a chance of being merged.  */
  int cs_cmp = call_string::cmp (call_string_a, call_string_b);
  if (cs_cmp)
    return cs_cmp;

  /* Order by SCC.  */
  int scc_id_a = ka.get_scc_id (ka.m_enode);
  int scc_id_b = kb.get_scc_id (kb.m_enode);
  if (scc_id_a != scc_id_b)
    return scc_id_a - scc_id_b;

  /* If in same SCC, order by supernode index (an arbitrary but stable
     ordering).  */
  const supernode *snode_a = ka.m_enode->get_supernode ();
  const supernode *snode_b = kb.m_enode->get_supernode ();
  /* A NULL supernode sorts before any non-NULL one; this can happen
     e.g. for points without a supernode (presumably the origin) — TODO
     confirm which points have no supernode.  */
  if (snode_a == NULL)
    {
      if (snode_b != NULL)
        /* One is NULL.  */
        return -1;
      else
        /* Both are NULL.  */
        return 0;
    }
  if (snode_b == NULL)
    /* One is NULL.  */
    return 1;
  /* Neither are NULL.  */
  gcc_assert (snode_a && snode_b);
  if (snode_a->m_index != snode_b->m_index)
    return snode_a->m_index - snode_b->m_index;

  gcc_assert (snode_a == snode_b);

  /* Order within supernode via program point.  */
  int within_snode_cmp
    = function_point::cmp_within_supernode (point_a.get_function_point (),
                                            point_b.get_function_point ());
  if (within_snode_cmp)
    return within_snode_cmp;

  /* Otherwise, we ought to have the same program_point.  */
  gcc_assert (point_a == point_b);

  const program_state &state_a = ka.m_enode->get_state ();
  const program_state &state_b = kb.m_enode->get_state ();

  /* Sort by sm-state, so that identical sm-states are grouped
     together in the worklist.  */
  for (unsigned sm_idx = 0; sm_idx < state_a.m_checker_states.length ();
       ++sm_idx)
    {
      sm_state_map *smap_a = state_a.m_checker_states[sm_idx];
      sm_state_map *smap_b = state_b.m_checker_states[sm_idx];

      if (int smap_cmp = sm_state_map::cmp (*smap_a, *smap_b))
        return smap_cmp;
    }

  /* Otherwise, we have two enodes at the same program point but with
     different states.  We don't have a good total ordering on states,
     so order them by enode index, so that we have at least have a
     stable sort.  */
  return ka.m_enode->m_index - kb.m_enode->m_index;
}
2224
2225 /* Return a new json::object of the form
2226 {"scc" : [per-snode-IDs]}, */
2227
2228 json::object *
2229 worklist::to_json () const
2230 {
2231 json::object *worklist_obj = new json::object ();
2232
2233 worklist_obj->set ("scc", m_scc.to_json ());
2234
2235 /* The following field isn't yet being JSONified:
2236 queue_t m_queue; */
2237
2238 return worklist_obj;
2239 }
2240
2241 /* exploded_graph's ctor. */
2242
2243 exploded_graph::exploded_graph (const supergraph &sg, logger *logger,
2244 const extrinsic_state &ext_state,
2245 const state_purge_map *purge_map,
2246 const analysis_plan &plan,
2247 int verbosity)
2248 : m_sg (sg), m_logger (logger),
2249 m_worklist (*this, plan),
2250 m_ext_state (ext_state),
2251 m_purge_map (purge_map),
2252 m_plan (plan),
2253 m_diagnostic_manager (logger, ext_state.get_engine (), verbosity),
2254 m_global_stats (m_sg.num_nodes ()),
2255 m_functionless_stats (m_sg.num_nodes ()),
2256 m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
2257 {
2258 m_origin = get_or_create_node (program_point::origin (),
2259 program_state (ext_state), NULL);
2260 for (int i = 0; i < m_sg.num_nodes (); i++)
2261 m_PK_AFTER_SUPERNODE_per_snode.quick_push (i);
2262 }
2263
2264 /* exploded_graph's dtor. */
2265
2266 exploded_graph::~exploded_graph ()
2267 {
2268 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
2269 iter != m_per_function_stats.end ();
2270 ++iter)
2271 delete (*iter).second;
2272
2273 for (point_map_t::iterator iter = m_per_point_data.begin ();
2274 iter != m_per_point_data.end ();
2275 ++iter)
2276 delete (*iter).second;
2277 }
2278
2279 /* Ensure that there is an exploded_node representing an external call to
2280 FUN, adding it to the worklist if creating it.
2281
2282 Add an edge from the origin exploded_node to the function entrypoint
2283 exploded_node.
2284
2285 Return the exploded_node for the entrypoint to the function. */
2286
2287 exploded_node *
2288 exploded_graph::add_function_entry (function *fun)
2289 {
2290 gcc_assert (gimple_has_body_p (fun->decl));
2291
2292 /* Be idempotent. */
2293 if (m_functions_with_enodes.contains (fun))
2294 {
2295 logger * const logger = get_logger ();
2296 if (logger)
2297 logger->log ("entrypoint for %qE already exists", fun->decl);
2298 return NULL;
2299 }
2300
2301 program_point point = program_point::from_function_entry (m_sg, fun);
2302 program_state state (m_ext_state);
2303 state.push_frame (m_ext_state, fun);
2304
2305 if (!state.m_valid)
2306 return NULL;
2307
2308 exploded_node *enode = get_or_create_node (point, state, NULL);
2309 if (!enode)
2310 return NULL;
2311
2312 add_edge (m_origin, enode, NULL);
2313
2314 m_functions_with_enodes.add (fun);
2315
2316 return enode;
2317 }
2318
/* Get or create an exploded_node for (POINT, STATE).
   If a new node is created, it is added to the worklist.

   Use ENODE_FOR_DIAG, a pre-existing enode, for any diagnostics
   that need to be emitted (e.g. when purging state *before* we have
   a new enode).

   Returns NULL if STATE (or its pruned form) is invalid, or if the
   per-program-point enode limit has been hit.  */

exploded_node *
exploded_graph::get_or_create_node (const program_point &point,
                                    const program_state &state,
                                    exploded_node *enode_for_diag)
{
  logger * const logger = get_logger ();
  LOG_FUNC (logger);
  if (logger)
    {
      format f (false);
      pretty_printer *pp = logger->get_printer ();
      logger->start_log_line ();
      pp_string (pp, "point: ");
      point.print (pp, f);
      logger->end_log_line ();
      logger->start_log_line ();
      pp_string (pp, "state: ");
      state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
    }

  /* Stop exploring paths for which we don't know how to effectively
     model the state.  */
  if (!state.m_valid)
    {
      if (logger)
        logger->log ("invalid state; not creating node");
      return NULL;
    }

  /* Temporarily switch "cfun" to POINT's function for the rest of this
     call.  */
  auto_cfun sentinel (point.get_function ());

  state.validate (get_ext_state ());

  //state.dump (get_ext_state ());

  /* Prune state to try to improve the chances of a cache hit,
     avoiding generating redundant nodes.  */
  uncertainty_t uncertainty;
  program_state pruned_state
    = state.prune_for_point (*this, point, enode_for_diag, &uncertainty);

  pruned_state.validate (get_ext_state ());

  //pruned_state.dump (get_ext_state ());

  if (logger)
    {
      pretty_printer *pp = logger->get_printer ();
      logger->start_log_line ();
      pp_string (pp, "pruned_state: ");
      pruned_state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
      pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true,
                                               false);
    }

  stats *per_fn_stats = get_or_create_function_stats (point.get_function ());

  stats *per_cs_stats
    = &get_or_create_per_call_string_data (point.get_call_string ())->m_stats;

  /* First lookup: exact (point, pruned_state) match.  */
  point_and_state ps (point, pruned_state);
  ps.validate (m_ext_state);
  if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
    {
      /* An exploded_node for PS already exists.  */
      if (logger)
        logger->log ("reused EN: %i", (*slot)->m_index);
      m_global_stats.m_node_reuse_count++;
      per_fn_stats->m_node_reuse_count++;
      per_cs_stats->m_node_reuse_count++;
      return *slot;
    }

  per_program_point_data *per_point_data
    = get_or_create_per_program_point_data (point);

  /* Consider merging state with another enode at this program_point.  */
  if (flag_analyzer_state_merge)
    {
      exploded_node *existing_enode;
      unsigned i;
      FOR_EACH_VEC_ELT (per_point_data->m_enodes, i, existing_enode)
        {
          if (logger)
            logger->log ("considering merging with existing EN: %i for point",
                         existing_enode->m_index);
          gcc_assert (existing_enode->get_point () == point);
          const program_state &existing_state = existing_enode->get_state ();

          /* Attempt to merge the new (pruned) state with this existing
             enode's state; on success, retry the cache lookup with the
             merged state.  */

          program_state merged_state (m_ext_state);
          if (pruned_state.can_merge_with_p (existing_state, m_ext_state, point,
                                             &merged_state))
            {
              merged_state.validate (m_ext_state);
              if (logger)
                logger->log ("merging new state with that of EN: %i",
                             existing_enode->m_index);

              /* Try again for a cache hit, this time against the merged
                 state.  */
              ps.set_state (merged_state);

              if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
                {
                  /* An exploded_node for PS already exists.  */
                  if (logger)
                    logger->log ("reused EN: %i", (*slot)->m_index);
                  m_global_stats.m_node_reuse_after_merge_count++;
                  per_fn_stats->m_node_reuse_after_merge_count++;
                  per_cs_stats->m_node_reuse_after_merge_count++;
                  return *slot;
                }
            }
          else
            if (logger)
              logger->log ("not merging new state with that of EN: %i",
                           existing_enode->m_index);
        }
    }

  /* Impose a limit on the number of enodes per program point, and
     simply stop if we exceed it.  */
  if ((int)per_point_data->m_enodes.length ()
      >= param_analyzer_max_enodes_per_program_point)
    {
      pretty_printer pp;
      point.print (&pp, format (false));
      print_enode_indices (&pp, per_point_data->m_enodes);
      if (logger)
        logger->log ("not creating enode; too many at program point: %s",
                     pp_formatted_text (&pp));
      warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
                  "terminating analysis for this program point: %s",
                  pp_formatted_text (&pp));
      per_point_data->m_excess_enodes++;
      return NULL;
    }

  ps.validate (m_ext_state);

  /* An exploded_node for "ps" doesn't already exist; create one.
     Note that "ps" may hold the merged state at this point, if a merge
     above succeeded but did not hit the cache.  */
  exploded_node *node = new exploded_node (ps, m_nodes.length ());
  add_node (node);
  m_point_and_state_to_node.put (node->get_ps_key (), node);

  /* Update per-program_point data.  */
  per_point_data->m_enodes.safe_push (node);

  /* Update the stats counters, globally, per-function and
     per-call-string.  */
  const enum point_kind node_pk = node->get_point ().get_kind ();
  m_global_stats.m_num_nodes[node_pk]++;
  per_fn_stats->m_num_nodes[node_pk]++;
  per_cs_stats->m_num_nodes[node_pk]++;

  if (node_pk == PK_AFTER_SUPERNODE)
    m_PK_AFTER_SUPERNODE_per_snode[point.get_supernode ()->m_index]++;

  if (logger)
    {
      format f (false);
      pretty_printer *pp = logger->get_printer ();
      logger->log ("created EN: %i", node->m_index);
      logger->start_log_line ();
      pp_string (pp, "point: ");
      point.print (pp, f);
      logger->end_log_line ();
      logger->start_log_line ();
      pp_string (pp, "pruned_state: ");
      pruned_state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
    }

  /* Add the new node to the worklist.  */
  m_worklist.add_node (node);
  return node;
}
2507
/* Add an exploded_edge from SRC to DEST, recording its association
   with SEDGE (which may be NULL), and, if non-NULL, taking ownership
   of CUSTOM_INFO.
   Return the newly-created eedge, which is owned by this graph.  */

exploded_edge *
exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
                          const superedge *sedge,
                          custom_edge_info *custom_info)
{
  if (get_logger ())
    get_logger ()->log ("creating edge EN: %i -> EN: %i",
                        src->m_index, dest->m_index);
  exploded_edge *e = new exploded_edge (src, dest, sedge, custom_info);
  /* Register the edge with the underlying digraph, which takes
     ownership of it.  */
  digraph<eg_traits>::add_edge (e);
  return e;
}
2525
2526 /* Ensure that this graph has per-program_point-data for POINT;
2527 borrow a pointer to it. */
2528
2529 per_program_point_data *
2530 exploded_graph::
2531 get_or_create_per_program_point_data (const program_point &point)
2532 {
2533 if (per_program_point_data **slot = m_per_point_data.get (&point))
2534 return *slot;
2535
2536 per_program_point_data *per_point_data = new per_program_point_data (point);
2537 m_per_point_data.put (&per_point_data->m_key, per_point_data);
2538 return per_point_data;
2539 }
2540
2541 /* Get this graph's per-program-point-data for POINT if there is any,
2542 otherwise NULL. */
2543
2544 per_program_point_data *
2545 exploded_graph::get_per_program_point_data (const program_point &point) const
2546 {
2547 if (per_program_point_data **slot
2548 = const_cast <point_map_t &> (m_per_point_data).get (&point))
2549 return *slot;
2550
2551 return NULL;
2552 }
2553
2554 /* Ensure that this graph has per-call_string-data for CS;
2555 borrow a pointer to it. */
2556
2557 per_call_string_data *
2558 exploded_graph::get_or_create_per_call_string_data (const call_string &cs)
2559 {
2560 if (per_call_string_data **slot = m_per_call_string_data.get (&cs))
2561 return *slot;
2562
2563 per_call_string_data *data = new per_call_string_data (cs, m_sg.num_nodes ());
2564 m_per_call_string_data.put (&data->m_key,
2565 data);
2566 return data;
2567 }
2568
2569 /* Ensure that this graph has per-function-data for FUN;
2570 borrow a pointer to it. */
2571
2572 per_function_data *
2573 exploded_graph::get_or_create_per_function_data (function *fun)
2574 {
2575 if (per_function_data **slot = m_per_function_data.get (fun))
2576 return *slot;
2577
2578 per_function_data *data = new per_function_data ();
2579 m_per_function_data.put (fun, data);
2580 return data;
2581 }
2582
2583 /* Get this graph's per-function-data for FUN if there is any,
2584 otherwise NULL. */
2585
2586 per_function_data *
2587 exploded_graph::get_per_function_data (function *fun) const
2588 {
2589 if (per_function_data **slot
2590 = const_cast <per_function_data_t &> (m_per_function_data).get (fun))
2591 return *slot;
2592
2593 return NULL;
2594 }
2595
2596 /* Return true if FUN should be traversed directly, rather than only as
2597 called via other functions. */
2598
2599 static bool
2600 toplevel_function_p (function *fun, logger *logger)
2601 {
2602 /* Don't directly traverse into functions that have an "__analyzer_"
2603 prefix. Doing so is useful for the analyzer test suite, allowing
2604 us to have functions that are called in traversals, but not directly
2605 explored, thus testing how the analyzer handles calls and returns.
2606 With this, we can have DejaGnu directives that cover just the case
2607 of where a function is called by another function, without generating
2608 excess messages from the case of the first function being traversed
2609 directly. */
2610 #define ANALYZER_PREFIX "__analyzer_"
2611 if (!strncmp (IDENTIFIER_POINTER (DECL_NAME (fun->decl)), ANALYZER_PREFIX,
2612 strlen (ANALYZER_PREFIX)))
2613 {
2614 if (logger)
2615 logger->log ("not traversing %qE (starts with %qs)",
2616 fun->decl, ANALYZER_PREFIX);
2617 return false;
2618 }
2619
2620 if (logger)
2621 logger->log ("traversing %qE (all checks passed)", fun->decl);
2622
2623 return true;
2624 }
2625
2626 /* Add initial nodes to EG, with entrypoints for externally-callable
2627 functions. */
2628
2629 void
2630 exploded_graph::build_initial_worklist ()
2631 {
2632 logger * const logger = get_logger ();
2633 LOG_SCOPE (logger);
2634
2635 cgraph_node *node;
2636 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
2637 {
2638 function *fun = node->get_fun ();
2639 if (!toplevel_function_p (fun, logger))
2640 continue;
2641 exploded_node *enode = add_function_entry (fun);
2642 if (logger)
2643 {
2644 if (enode)
2645 logger->log ("created EN %i for %qE entrypoint",
2646 enode->m_index, fun->decl);
2647 else
2648 logger->log ("did not create enode for %qE entrypoint", fun->decl);
2649 }
2650 }
2651 }
2652
/* The main loop of the analysis.
   Take freshly-created exploded_nodes from the worklist, calling
   process_node on them to explore the <point, state> graph.
   Add edges to their successors, potentially creating new successors
   (which are also added to the worklist).
   Terminates either when the worklist is drained, or early when the
   hard limit on PK_AFTER_SUPERNODE enodes is exceeded.  */

void
exploded_graph::process_worklist ()
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_WORKLIST);

  while (m_worklist.length () > 0)
    {
      exploded_node *node = m_worklist.take_next ();
      gcc_assert (node->get_status () == exploded_node::STATUS_WORKLIST);
      /* Only the origin node may already have successors when taken from
         the worklist.  */
      gcc_assert (node->m_succs.length () == 0
                  || node == m_origin);

      if (logger)
        logger->log ("next to process: EN: %i", node->m_index);

      /* If we have a run of nodes that are before-supernode, try merging and
         processing them together, rather than pairwise or individually.  */
      if (flag_analyzer_state_merge && node != m_origin)
        if (maybe_process_run_of_before_supernode_enodes (node))
          goto handle_limit;

      /* Avoid exponential explosions of nodes by attempting to merge
         nodes that are at the same program point and which have
         sufficiently similar state.  */
      if (flag_analyzer_state_merge && node != m_origin)
        if (exploded_node *node_2 = m_worklist.peek_next ())
          {
            gcc_assert (node_2->get_status ()
                        == exploded_node::STATUS_WORKLIST);
            gcc_assert (node->m_succs.length () == 0);
            gcc_assert (node_2->m_succs.length () == 0);

            gcc_assert (node != node_2);

            if (logger)
              logger->log ("peek worklist: EN: %i", node_2->m_index);

            /* The worklist ordering (see key_t::cmp) tries to place
               merge candidates adjacently, so only the front two nodes
               need to be considered here.  */
            if (node->get_point () == node_2->get_point ())
              {
                const program_point &point = node->get_point ();
                if (logger)
                  {
                    format f (false);
                    pretty_printer *pp = logger->get_printer ();
                    logger->start_log_line ();
                    logger->log_partial
                      ("got potential merge EN: %i and EN: %i at ",
                       node->m_index, node_2->m_index);
                    point.print (pp, f);
                    logger->end_log_line ();
                  }
                const program_state &state = node->get_state ();
                const program_state &state_2 = node_2->get_state ();

                /* They shouldn't be equal, or we wouldn't have two
                   separate nodes.  */
                gcc_assert (state != state_2);

                program_state merged_state (m_ext_state);
                if (state.can_merge_with_p (state_2, m_ext_state,
                                            point, &merged_state))
                  {
                    if (logger)
                      logger->log ("merging EN: %i and EN: %i",
                                   node->m_index, node_2->m_index);

                    /* Three cases: the merge result equals one of the two
                       input states (fold the other into it), or it is a
                       genuinely new state (needing its own enode).  */
                    if (merged_state == state)
                      {
                        /* Then merge node_2 into node by adding an edge.  */
                        add_edge (node_2, node, NULL);

                        /* Remove node_2 from the worklist.  */
                        m_worklist.take_next ();
                        node_2->set_status (exploded_node::STATUS_MERGER);

                        /* Continue processing "node" below.  */
                      }
                    else if (merged_state == state_2)
                      {
                        /* Then merge node into node_2, and leave node_2
                           in the worklist, to be processed on the next
                           iteration.  */
                        add_edge (node, node_2, NULL);
                        node->set_status (exploded_node::STATUS_MERGER);
                        continue;
                      }
                    else
                      {
                        /* We have a merged state that differs from
                           both state and state_2.  */

                        /* Remove node_2 from the worklist.  */
                        m_worklist.take_next ();

                        /* Create (or get) an exploded node for the merged
                           states, adding to the worklist.  */
                        exploded_node *merged_enode
                          = get_or_create_node (node->get_point (),
                                                merged_state, node);
                        if (merged_enode == NULL)
                          continue;

                        if (logger)
                          logger->log ("merged EN: %i and EN: %i into EN: %i",
                                       node->m_index, node_2->m_index,
                                       merged_enode->m_index);

                        /* "node" and "node_2" have both now been removed
                           from the worklist; we should not process them.

                           "merged_enode" may be a new node; if so it will be
                           processed in a subsequent iteration.
                           Alternatively, "merged_enode" could be an existing
                           node; one way the latter can
                           happen is if we end up merging a succession of
                           similar nodes into one.  */

                        /* If merged_node is one of the two we were merging,
                           add it back to the worklist to ensure it gets
                           processed.

                           Add edges from the merged nodes to it (but not a
                           self-edge).  */
                        if (merged_enode == node)
                          m_worklist.add_node (merged_enode);
                        else
                          {
                            add_edge (node, merged_enode, NULL);
                            node->set_status (exploded_node::STATUS_MERGER);
                          }

                        if (merged_enode == node_2)
                          m_worklist.add_node (merged_enode);
                        else
                          {
                            add_edge (node_2, merged_enode, NULL);
                            node_2->set_status (exploded_node::STATUS_MERGER);
                          }

                        continue;
                      }
                  }

                /* TODO: should we attempt more than two nodes,
                   or just do pairs of nodes? (and hope that we get
                   a cascade of mergers).  */
              }
          }

      process_node (node);

    handle_limit:
      /* Impose a hard limit on the number of exploded nodes, to ensure
         that the analysis terminates in the face of pathological state
         explosion (or bugs).

         Specifically, the limit is on the number of PK_AFTER_SUPERNODE
         exploded nodes, looking at supernode exit events.

         We use exit rather than entry since there can be multiple
         entry ENs, one per phi; the number of PK_AFTER_SUPERNODE ought
         to be equivalent to the number of supernodes multiplied by the
         number of states.  */
      const int limit = m_sg.num_nodes () * param_analyzer_bb_explosion_factor;
      if (m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE] > limit)
        {
          if (logger)
            logger->log ("bailing out; too many nodes");
          warning_at (node->get_point ().get_location (),
                      OPT_Wanalyzer_too_complex,
                      "analysis bailed out early"
                      " (%i 'after-snode' enodes; %i enodes)",
                      m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE],
                      m_nodes.length ());
          return;
        }
    }
}
2839
/* Attempt to process a consecutive run of sufficiently-similar nodes in
   the worklist at a CFG join-point (having already popped ENODE from the
   head of the worklist).

   If ENODE's point is of the form (before-supernode, SNODE) and the next
   nodes in the worklist are a consecutive run of enodes of the same form,
   for the same supernode as ENODE (but potentially from different in-edges),
   process them all together, setting their status to STATUS_BULK_MERGED,
   and return true.
   Otherwise, return false, in which case ENODE must be processed in the
   normal way.

   When processing them all together, generate successor states based
   on phi nodes for the appropriate CFG edges, and then attempt to merge
   these states into a minimal set of merged successor states, partitioning
   the inputs by merged successor state.

   Create new exploded nodes for all of the merged states, and add edges
   connecting the input enodes to the corresponding merger exploded nodes.

   We hope we have a much smaller number of merged successor states
   compared to the number of input enodes - ideally just one,
   if all successor states can be merged.

   Processing and merging many together as one operation rather than as
   pairs avoids scaling issues where per-pair mergers could bloat the
   graph with merger nodes (especially so after switch statements).  */

bool
exploded_graph::
maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
{
  /* A struct for tracking per-input state.  */
  struct item
  {
    item (exploded_node *input_enode)
    : m_input_enode (input_enode),
      m_processed_state (input_enode->get_state ()),
      m_merger_idx (-1)
    {}

    exploded_node *m_input_enode;
    /* The input enode's state after phi-node processing (below).  */
    program_state m_processed_state;
    /* Index into merged_states of the state this item was folded into;
       -1 until assigned.  */
    int m_merger_idx;
  };

  gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
  gcc_assert (enode->m_succs.length () == 0);

  const program_point &point = enode->get_point ();

  if (point.get_kind () != PK_BEFORE_SUPERNODE)
    return false;

  const supernode *snode = point.get_supernode ();

  logger * const logger = get_logger ();
  LOG_SCOPE (logger);

  /* Find a run of enodes in the worklist that are before the same supernode,
     but potentially from different in-edges.
     The worklist ordering (key_t::cmp) is what makes such runs
     consecutive; we only need to peek/pop from the front.  */
  auto_vec <exploded_node *> enodes;
  enodes.safe_push (enode);
  while (exploded_node *enode_2 = m_worklist.peek_next ())
    {
      gcc_assert (enode_2->get_status ()
                  == exploded_node::STATUS_WORKLIST);
      gcc_assert (enode_2->m_succs.length () == 0);

      const program_point &point_2 = enode_2->get_point ();

      if (point_2.get_kind () == PK_BEFORE_SUPERNODE
          && point_2.get_supernode () == snode
          && point_2.get_call_string () == point.get_call_string ())
        {
          enodes.safe_push (enode_2);
          m_worklist.take_next ();
        }
      else
        break;
    }

  /* If the only node is ENODE, then give up.  */
  if (enodes.length () == 1)
    return false;

  if (logger)
    logger->log ("got run of %i enodes for SN: %i",
                 enodes.length (), snode->m_index);

  /* All of these enodes have a shared successor point (even if they
     were for different in-edges).  */
  program_point next_point (point.get_next ());

  /* Calculate the successor state for each enode in enodes,
     applying any phi-node updates for the CFG edge it arrived on.  */
  auto_delete_vec<item> items (enodes.length ());
  unsigned i;
  exploded_node *iter_enode;
  FOR_EACH_VEC_ELT (enodes, i, iter_enode)
    {
      item *it = new item (iter_enode);
      items.quick_push (it);
      const program_state &state = iter_enode->get_state ();
      program_state *next_state = &it->m_processed_state;
      next_state->validate (m_ext_state);
      const program_point &iter_point = iter_enode->get_point ();
      if (const superedge *iter_sedge = iter_point.get_from_edge ())
        {
          uncertainty_t uncertainty;
          impl_region_model_context ctxt (*this, iter_enode,
                                          &state, next_state,
                                          &uncertainty, NULL, NULL);
          const cfg_superedge *last_cfg_superedge
            = iter_sedge->dyn_cast_cfg_superedge ();
          if (last_cfg_superedge)
            next_state->m_region_model->update_for_phis
              (snode, last_cfg_superedge, &ctxt);
        }
      next_state->validate (m_ext_state);
    }

  /* Attempt to partition the items into a set of merged states.
     We hope we have a much smaller number of merged states
     compared to the number of input enodes - ideally just one,
     if all can be merged.
     Greedy: each item is folded into the first existing merged state
     it can merge with, else starts a new one.  */
  auto_delete_vec <program_state> merged_states;
  auto_vec<item *> first_item_for_each_merged_state;
  item *it;
  FOR_EACH_VEC_ELT (items, i, it)
    {
      const program_state &it_state = it->m_processed_state;
      program_state *merged_state;
      unsigned iter_merger_idx;
      FOR_EACH_VEC_ELT (merged_states, iter_merger_idx, merged_state)
        {
          merged_state->validate (m_ext_state);
          program_state merge (m_ext_state);
          if (it_state.can_merge_with_p (*merged_state, m_ext_state,
                                         next_point, &merge))
            {
              *merged_state = merge;
              merged_state->validate (m_ext_state);
              it->m_merger_idx = iter_merger_idx;
              if (logger)
                logger->log ("reusing merger state %i for item %i (EN: %i)",
                             it->m_merger_idx, i, it->m_input_enode->m_index);
              goto got_merger;
            }
        }
      /* If it couldn't be merged with any existing merged_states,
         create a new one.  */
      if (it->m_merger_idx == -1)
        {
          it->m_merger_idx = merged_states.length ();
          merged_states.safe_push (new program_state (it_state));
          first_item_for_each_merged_state.safe_push (it);
          if (logger)
            logger->log ("using new merger state %i for item %i (EN: %i)",
                         it->m_merger_idx, i, it->m_input_enode->m_index);
        }
    got_merger:
      gcc_assert (it->m_merger_idx >= 0);
      gcc_assert ((unsigned)it->m_merger_idx < merged_states.length ());
    }

  /* Create merger nodes.  */
  auto_vec<exploded_node *> next_enodes (merged_states.length ());
  program_state *merged_state;
  FOR_EACH_VEC_ELT (merged_states, i, merged_state)
    {
      exploded_node *src_enode
        = first_item_for_each_merged_state[i]->m_input_enode;
      exploded_node *next
        = get_or_create_node (next_point, *merged_state, src_enode);
      /* "next" could be NULL; we handle that when adding the edges below.  */
      next_enodes.quick_push (next);
      if (logger)
        {
          if (next)
            logger->log ("using EN: %i for merger state %i", next->m_index, i);
          else
            logger->log ("using NULL enode for merger state %i", i);
        }
    }

  /* Create edges from each input enode to the appropriate successor enode.
     Update the status of the now-processed input enodes.  */
  FOR_EACH_VEC_ELT (items, i, it)
    {
      exploded_node *next = next_enodes[it->m_merger_idx];
      if (next)
        add_edge (it->m_input_enode, next, NULL);
      it->m_input_enode->set_status (exploded_node::STATUS_BULK_MERGED);
    }

  if (logger)
    logger->log ("merged %i in-enodes into %i out-enode(s) at SN: %i",
                 items.length (), merged_states.length (), snode->m_index);

  return true;
}
3041
3042 /* Return true if STMT must appear at the start of its exploded node, and
3043 thus we can't consolidate its effects within a run of other statements,
3044 where PREV_STMT was the previous statement. */
3045
3046 static bool
3047 stmt_requires_new_enode_p (const gimple *stmt,
3048 const gimple *prev_stmt)
3049 {
3050 if (const gcall *call = dyn_cast <const gcall *> (stmt))
3051 {
3052 /* Stop consolidating at calls to
3053 "__analyzer_dump_exploded_nodes", so they always appear at the
3054 start of an exploded_node. */
3055 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
3056 1))
3057 return true;
3058
3059 /* sm-signal.cc injects an additional custom eedge at "signal" calls
3060 from the registration enode to the handler enode, separate from the
3061 regular next state, which defeats the "detect state change" logic
3062 in process_node. Work around this via special-casing, to ensure
3063 we split the enode immediately before any "signal" call. */
3064 if (is_special_named_call_p (call, "signal", 2))
3065 return true;
3066 }
3067
3068 /* If we had a PREV_STMT with an unknown location, and this stmt
3069 has a known location, then if a state change happens here, it
3070 could be consolidated into PREV_STMT, giving us an event with
3071 no location. Ensure that STMT gets its own exploded_node to
3072 avoid this. */
3073 if (get_pure_location (prev_stmt->location) == UNKNOWN_LOCATION
3074 && get_pure_location (stmt->location) != UNKNOWN_LOCATION)
3075 return true;
3076
3077 return false;
3078 }
3079
/* Return true if OLD_STATE and NEW_STATE are sufficiently different that
   we should split enodes and create an exploded_edge separating them
   (which makes it easier to identify state changes of interest when
   constructing checker_paths).  */
3084
3085 static bool
3086 state_change_requires_new_enode_p (const program_state &old_state,
3087 const program_state &new_state)
3088 {
3089 /* Changes in dynamic extents signify creations of heap/alloca regions
3090 and resizings of heap regions; likely to be of interest in
3091 diagnostic paths. */
3092 if (old_state.m_region_model->get_dynamic_extents ()
3093 != new_state.m_region_model->get_dynamic_extents ())
3094 return true;
3095
3096 /* Changes in sm-state are of interest. */
3097 int sm_idx;
3098 sm_state_map *smap;
3099 FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
3100 {
3101 const sm_state_map *old_smap = old_state.m_checker_states[sm_idx];
3102 const sm_state_map *new_smap = new_state.m_checker_states[sm_idx];
3103 if (*old_smap != *new_smap)
3104 return true;
3105 }
3106
3107 return false;
3108 }
3109
/* Create enodes and eedges for the function calls that don't have an
   underlying call superedge.

   Such a case occurs when GCC's middle end didn't know which function
   to call, but the analyzer does (with the help of the current state).

   Some examples of such calls are dynamically dispatched calls to
   virtual functions, or calls that happen via a function pointer.  */
3118
3119 bool
3120 exploded_graph::maybe_create_dynamic_call (const gcall *call,
3121 tree fn_decl,
3122 exploded_node *node,
3123 program_state next_state,
3124 program_point &next_point,
3125 uncertainty_t *uncertainty,
3126 logger *logger)
3127 {
3128 LOG_FUNC (logger);
3129
3130 const program_point *this_point = &node->get_point ();
3131 function *fun = DECL_STRUCT_FUNCTION (fn_decl);
3132 if (fun)
3133 {
3134 const supergraph &sg = this->get_supergraph ();
3135 supernode *sn_entry = sg.get_node_for_function_entry (fun);
3136 supernode *sn_exit = sg.get_node_for_function_exit (fun);
3137
3138 program_point new_point
3139 = program_point::before_supernode (sn_entry,
3140 NULL,
3141 this_point->get_call_string ());
3142
3143 new_point.push_to_call_stack (sn_exit,
3144 next_point.get_supernode());
3145
3146 /* Impose a maximum recursion depth and don't analyze paths
3147 that exceed it further.
3148 This is something of a blunt workaround, but it only
3149 applies to recursion (and mutual recursion), not to
3150 general call stacks. */
3151 if (new_point.get_call_string ().calc_recursion_depth ()
3152 > param_analyzer_max_recursion_depth)
3153 {
3154 if (logger)
3155 logger->log ("rejecting call edge: recursion limit exceeded");
3156 return false;
3157 }
3158
3159 next_state.push_call (*this, node, call, uncertainty);
3160
3161 if (next_state.m_valid)
3162 {
3163 if (logger)
3164 logger->log ("Discovered call to %s [SN: %i -> SN: %i]",
3165 function_name(fun),
3166 this_point->get_supernode ()->m_index,
3167 sn_entry->m_index);
3168
3169 exploded_node *enode = get_or_create_node (new_point,
3170 next_state,
3171 node);
3172 if (enode)
3173 add_edge (node,enode, NULL,
3174 new dynamic_call_info_t (call));
3175 return true;
3176 }
3177 }
3178 return false;
3179 }
3180
3181 /* Subclass of path_context for use within exploded_graph::process_node,
3182 so that we can split states e.g. at "realloc" calls. */
3183
3184 class impl_path_context : public path_context
3185 {
3186 public:
3187 impl_path_context (const program_state *cur_state)
3188 : m_cur_state (cur_state),
3189 m_terminate_path (false)
3190 {
3191 }
3192
3193 bool bifurcation_p () const
3194 {
3195 return m_custom_eedge_infos.length () > 0;
3196 }
3197
3198 const program_state &get_state_at_bifurcation () const
3199 {
3200 gcc_assert (m_state_at_bifurcation);
3201 return *m_state_at_bifurcation;
3202 }
3203
3204 void
3205 bifurcate (custom_edge_info *info) FINAL OVERRIDE
3206 {
3207 if (m_state_at_bifurcation)
3208 /* Verify that the state at bifurcation is consistent when we
3209 split into multiple out-edges. */
3210 gcc_assert (*m_state_at_bifurcation == *m_cur_state);
3211 else
3212 /* Take a copy of the cur_state at the moment when bifurcation
3213 happens. */
3214 m_state_at_bifurcation
3215 = std::unique_ptr<program_state> (new program_state (*m_cur_state));
3216
3217 /* Take ownership of INFO. */
3218 m_custom_eedge_infos.safe_push (info);
3219 }
3220
3221 void terminate_path () FINAL OVERRIDE
3222 {
3223 m_terminate_path = true;
3224 }
3225
3226 bool terminate_path_p () const FINAL OVERRIDE
3227 {
3228 return m_terminate_path;
3229 }
3230
3231 const vec<custom_edge_info *> & get_custom_eedge_infos ()
3232 {
3233 return m_custom_eedge_infos;
3234 }
3235
3236 private:
3237 const program_state *m_cur_state;
3238
3239 /* Lazily-created copy of the state before the split. */
3240 std::unique_ptr<program_state> m_state_at_bifurcation;
3241
3242 auto_vec <custom_edge_info *> m_custom_eedge_infos;
3243
3244 bool m_terminate_path;
3245 };
3246
3247 /* The core of exploded_graph::process_worklist (the main analysis loop),
3248 handling one node in the worklist.
3249
3250 Get successor <point, state> pairs for NODE, calling get_or_create on
3251 them, and adding an exploded_edge to each successors.
3252
3253 Freshly-created nodes will be added to the worklist. */
3254
void
exploded_graph::process_node (exploded_node *node)
{
  logger * const logger = get_logger ();
  LOG_FUNC_1 (logger, "EN: %i", node->m_index);

  node->set_status (exploded_node::STATUS_PROCESSED);

  const program_point &point = node->get_point ();

  /* Update cfun and input_location in case of an ICE: make it easier to
     track down which source construct we're failing to handle.  */
  auto_cfun sentinel (node->get_function ());
  const gimple *stmt = point.get_stmt ();
  if (stmt)
    input_location = stmt->location;

  const program_state &state = node->get_state ();
  if (logger)
    {
      pretty_printer *pp = logger->get_printer ();
      logger->start_log_line ();
      pp_string (pp, "point: ");
      point.print (pp, format (false));
      pp_string (pp, ", state: ");
      state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
    }

  /* Dispatch on the kind of program point; each case computes the
     successor <point, state> pairs and wires up enodes/eedges.  */
  switch (point.get_kind ())
    {
    default:
      gcc_unreachable ();
    case PK_ORIGIN:
      /* This node exists to simplify finding the shortest path
	 to an exploded_node.  */
      break;

    case PK_BEFORE_SUPERNODE:
      {
	program_state next_state (state);
	uncertainty_t uncertainty;

	if (point.get_from_edge ())
	  {
	    impl_region_model_context ctxt (*this, node,
					    &state, &next_state,
					    &uncertainty, NULL, NULL);
	    /* If we arrived via a CFG edge, apply the effect of any phi
	       nodes at the head of this supernode.  */
	    const cfg_superedge *last_cfg_superedge
	      = point.get_from_edge ()->dyn_cast_cfg_superedge ();
	    if (last_cfg_superedge)
	      next_state.m_region_model->update_for_phis
		(node->get_supernode (),
		 last_cfg_superedge,
		 &ctxt);
	    program_state::detect_leaks (state, next_state, NULL,
					 get_ext_state (), &ctxt);
	  }

	program_point next_point (point.get_next ());
	exploded_node *next = get_or_create_node (next_point, next_state, node);
	if (next)
	  add_edge (node, next, NULL);
      }
      break;
    case PK_BEFORE_STMT:
      {
	/* Determine the effect of a run of one or more statements
	   within one supernode, generating an edge to the program_point
	   after the last statement that's processed.

	   Stop iterating statements and thus consolidating into one enode
	   when:
	   - reaching the end of the statements in the supernode
	   - if an sm-state-change occurs (so that it gets its own
	     exploded_node)
	   - if "-fanalyzer-fine-grained" is active
	   - encountering certain statements that must appear at the start
	     of their enode (for which stmt_requires_new_enode_p returns
	     true)

	   Update next_state in-place, to get the result of the one
	   or more stmts that are processed.

	   Split the node in-place if an sm-state-change occurs, so that
	   the sm-state-change occurs on an edge where the src enode has
	   exactly one stmt, the one that caused the change.  */
	program_state next_state (state);

	impl_path_context path_ctxt (&next_state);

	uncertainty_t uncertainty;
	const supernode *snode = point.get_supernode ();
	unsigned stmt_idx;
	const gimple *prev_stmt = NULL;
	for (stmt_idx = point.get_stmt_idx ();
	     stmt_idx < snode->m_stmts.length ();
	     stmt_idx++)
	  {
	    const gimple *stmt = snode->m_stmts[stmt_idx];

	    /* After the first stmt of the run, stop before any stmt that
	       must start its own enode; back up so that stmt is
	       reprocessed as the head of the next enode.  */
	    if (stmt_idx > point.get_stmt_idx ())
	      if (stmt_requires_new_enode_p (stmt, prev_stmt))
		{
		  stmt_idx--;
		  break;
		}
	    prev_stmt = stmt;

	    /* Keep a copy so we can compare before/after and, if needed,
	       split the enode at this stmt.  */
	    program_state old_state (next_state);

	    /* Process the stmt.  */
	    exploded_node::on_stmt_flags flags
	      = node->on_stmt (*this, snode, stmt, &next_state, &uncertainty,
			       &path_ctxt);
	    node->m_num_processed_stmts++;

	    /* If flags.m_terminate_path, stop analyzing; any nodes/edges
	       will have been added by on_stmt (e.g. for handling longjmp).  */
	    if (flags.m_terminate_path)
	      return;

	    if (next_state.m_region_model)
	      {
		impl_region_model_context ctxt (*this, node,
						&old_state, &next_state,
						&uncertainty, NULL, stmt);
		program_state::detect_leaks (old_state, next_state, NULL,
					     get_ext_state (), &ctxt);
	      }

	    unsigned next_idx = stmt_idx + 1;
	    program_point next_point
	      = (next_idx < point.get_supernode ()->m_stmts.length ()
		 ? program_point::before_stmt (point.get_supernode (), next_idx,
					       point.get_call_string ())
		 : program_point::after_supernode (point.get_supernode (),
						   point.get_call_string ()));
	    next_state = next_state.prune_for_point (*this, next_point, node,
						     &uncertainty);

	    if (flag_analyzer_fine_grained
		|| state_change_requires_new_enode_p (old_state, next_state)
		|| path_ctxt.bifurcation_p ()
		|| path_ctxt.terminate_path_p ())
	      {
		program_point split_point
		  = program_point::before_stmt (point.get_supernode (),
						stmt_idx,
						point.get_call_string ());
		if (split_point != node->get_point ())
		  {
		    /* If we're not at the start of NODE, split the enode at
		       this stmt, so we have:
			 node -> split_enode
		       so that when split_enode is processed the next edge
		       we add will be:
			 split_enode -> next
		       and any state change will effectively occur on that
		       latter edge, and split_enode will contain just stmt.  */
		    if (logger)
		      logger->log ("getting split_enode");
		    exploded_node *split_enode
		      = get_or_create_node (split_point, old_state, node);
		    if (!split_enode)
		      return;
		    /* "stmt" will be reprocessed when split_enode is
		       processed.  */
		    node->m_num_processed_stmts--;
		    if (logger)
		      logger->log ("creating edge to split_enode");
		    add_edge (node, split_enode, NULL);
		    return;
		  }
		else
		  /* If we're at the start of NODE, stop iterating,
		     so that an edge will be created from NODE to
		     (next_point, next_state) below.  */
		  break;
	      }
	  }
	unsigned next_idx = stmt_idx + 1;
	program_point next_point
	  = (next_idx < point.get_supernode ()->m_stmts.length ()
	     ? program_point::before_stmt (point.get_supernode (), next_idx,
					   point.get_call_string ())
	     : program_point::after_supernode (point.get_supernode (),
					       point.get_call_string ()));
	if (path_ctxt.terminate_path_p ())
	  {
	    if (logger)
	      logger->log ("not adding node: terminating path");
	  }
	else
	  {
	    exploded_node *next
	      = get_or_create_node (next_point, next_state, node);
	    if (next)
	      add_edge (node, next, NULL);
	  }

	/* If we have custom edge infos, "bifurcate" the state
	   accordingly, potentially creating new state/enode/eedge
	   instances.  For example, to handle a "realloc" call, we
	   might split into 3 states, for the "failure",
	   "resizing in place", and "moving to a new buffer" cases.  */
	for (auto edge_info : path_ctxt.get_custom_eedge_infos ())
	  {
	    if (logger)
	      {
		logger->start_log_line ();
		logger->log_partial ("bifurcating for edge: ");
		edge_info->print (logger->get_printer ());
		logger->end_log_line ();
	      }
	    program_state bifurcated_new_state
	      (path_ctxt.get_state_at_bifurcation ());

	    /* Apply edge_info to state.  */
	    impl_region_model_context
	      bifurcation_ctxt (*this,
				NULL, // enode_for_diag
				&path_ctxt.get_state_at_bifurcation (),
				&bifurcated_new_state,
				NULL, // uncertainty_t *uncertainty
				NULL, // path_context *path_ctxt
				stmt);
	    if (edge_info->update_model (bifurcated_new_state.m_region_model,
					 NULL, /* no exploded_edge yet.  */
					 &bifurcation_ctxt))
	      {
		exploded_node *next2
		  = get_or_create_node (next_point, bifurcated_new_state, node);
		if (next2)
		  {
		    /* Take ownership of edge_info.  */
		    add_edge (node, next2, NULL, edge_info);
		  }
		else
		  delete edge_info;
	      }
	    else
	      {
		/* The edge's constraint was incompatible with the state.  */
		if (logger)
		  logger->log ("infeasible state, not adding node");
		delete edge_info;
	      }
	  }
      }
      break;
    case PK_AFTER_SUPERNODE:
      {
	bool found_a_superedge = false;
	bool is_an_exit_block = false;
	/* If this is an EXIT BB, detect leaks, and potentially
	   create a function summary.  */
	if (point.get_supernode ()->return_p ())
	  {
	    is_an_exit_block = true;
	    node->detect_leaks (*this);
	    if (flag_analyzer_call_summaries
		&& point.get_call_string ().empty_p ())
	      {
		/* TODO: create function summary
		   There can be more than one; each corresponds to a different
		   final enode in the function.  */
		if (logger)
		  {
		    pretty_printer *pp = logger->get_printer ();
		    logger->start_log_line ();
		    logger->log_partial
		      ("would create function summary for %qE; state: ",
		       point.get_fndecl ());
		    state.dump_to_pp (m_ext_state, true, false, pp);
		    logger->end_log_line ();
		  }
		per_function_data *per_fn_data
		  = get_or_create_per_function_data (point.get_function ());
		per_fn_data->add_call_summary (node);
	      }
	  }
	/* Traverse into successors of the supernode.  */
	int i;
	superedge *succ;
	FOR_EACH_VEC_ELT (point.get_supernode ()->m_succs, i, succ)
	  {
	    found_a_superedge = true;
	    if (logger)
	      logger->log ("considering SN: %i -> SN: %i",
			   succ->m_src->m_index, succ->m_dest->m_index);

	    program_point next_point
	      = program_point::before_supernode (succ->m_dest, succ,
						 point.get_call_string ());
	    program_state next_state (state);
	    uncertainty_t uncertainty;

	    /* Make use of the current state and try to discover and analyse
	       indirect function calls (a call that doesn't have an underlying
	       cgraph edge representing the call).

	       Some examples of such calls are virtual function calls
	       and calls that happen via a function pointer.  */
	    if (succ->m_kind == SUPEREDGE_INTRAPROCEDURAL_CALL
		&& !(succ->get_any_callgraph_edge ()))
	      {
		const gcall *call
		  = point.get_supernode ()->get_final_call ();

		impl_region_model_context ctxt (*this,
						node,
						&state,
						&next_state,
						&uncertainty,
						NULL,
						point.get_stmt());

		region_model *model = state.m_region_model;
		bool call_discovered = false;

		/* Only if the state pins down the callee can we traverse
		   into it.  */
		if (tree fn_decl = model->get_fndecl_for_call(call,&ctxt))
		  call_discovered = maybe_create_dynamic_call (call,
							       fn_decl,
							       node,
							       next_state,
							       next_point,
							       &uncertainty,
							       logger);
		if (!call_discovered)
		  {
		    /* An unknown function or a special function was called
		       at this point; in such a case, don't terminate the
		       analysis of the current function.

		       The analyzer handles calls to such functions while
		       analysing the stmt itself, so the function call
		       must already have been handled by the analyzer.  */
		    exploded_node *next
		      = get_or_create_node (next_point,
					    next_state,
					    node);
		    if (next)
		      add_edge (node, next, succ);
		  }
	      }

	    if (!node->on_edge (*this, succ, &next_point, &next_state,
				&uncertainty))
	      {
		/* The edge's condition is incompatible with the state.  */
		if (logger)
		  logger->log ("skipping impossible edge to SN: %i",
			       succ->m_dest->m_index);
		continue;
	      }
	    exploded_node *next = get_or_create_node (next_point, next_state,
						      node);
	    if (next)
	      add_edge (node, next, succ);
	  }

	/* Return from calls which don't have a return superedge.
	   Such a case occurs when GCC's middle end didn't know which
	   function to call, but the analyzer did.  */
	if((is_an_exit_block && !found_a_superedge)
	   && (!point.get_call_string ().empty_p ()))
	  {
	    const call_string cs = point.get_call_string ();
	    program_point next_point
	      = program_point::before_supernode (cs.get_caller_node (),
						 NULL,
						 cs);
	    program_state next_state (state);
	    uncertainty_t uncertainty;

	    const gcall *call
	      = next_point.get_supernode ()->get_returning_call ();

	    if(call)
	      next_state.returning_call (*this, node, call, &uncertainty);

	    if (next_state.m_valid)
	      {
		next_point.pop_from_call_stack ();
		exploded_node *enode = get_or_create_node (next_point,
							   next_state,
							   node);
		if (enode)
		  add_edge (node, enode, NULL,
			    new dynamic_call_info_t (call, true));
	      }
	  }
      }
      break;
    }
}
3649
3650 /* Ensure that this graph has a stats instance for FN, return it.
3651 FN can be NULL, in which case a stats instances is returned covering
3652 "functionless" parts of the graph (the origin node). */
3653
3654 stats *
3655 exploded_graph::get_or_create_function_stats (function *fn)
3656 {
3657 if (!fn)
3658 return &m_functionless_stats;
3659
3660 if (stats **slot = m_per_function_stats.get (fn))
3661 return *slot;
3662 else
3663 {
3664 int num_supernodes = fn ? n_basic_blocks_for_fn (fn) : 0;
3665 /* not quite the num supernodes, but nearly. */
3666 stats *new_stats = new stats (num_supernodes);
3667 m_per_function_stats.put (fn, new_stats);
3668 return new_stats;
3669 }
3670 }
3671
3672 /* Print bar charts to PP showing:
3673 - the number of enodes per function, and
3674 - for each function:
3675 - the number of enodes per supernode/BB
3676 - the number of excess enodes per supernode/BB beyond the
3677 per-program-point limit, if there were any. */
3678
void
exploded_graph::print_bar_charts (pretty_printer *pp) const
{
  cgraph_node *cgnode;

  /* First chart: total enodes per function with a gimple body.  */
  pp_string (pp, "enodes per function:");
  pp_newline (pp);
  bar_chart enodes_per_function;
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
    {
      function *fn = cgnode->get_fun ();
      /* const_cast since hash_map::get is non-const.  */
      const stats * const *s_ptr
	= const_cast <function_stat_map_t &> (m_per_function_stats).get (fn);
      /* Functions with no recorded stats chart as zero.  */
      enodes_per_function.add_item (function_name (fn),
				    s_ptr ? (*s_ptr)->get_total_enodes () : 0);
    }
  enodes_per_function.print (pp);

  /* Accumulate number of enodes per supernode.  */
  auto_vec<unsigned> enodes_per_supernode (m_sg.num_nodes ());
  for (int i = 0; i < m_sg.num_nodes (); i++)
    enodes_per_supernode.quick_push (0);
  int i;
  exploded_node *enode;
  FOR_EACH_VEC_ELT (m_nodes, i, enode)
    {
      /* The origin enode has no supernode; skip it.  */
      const supernode *iter_snode = enode->get_supernode ();
      if (!iter_snode)
	continue;
      enodes_per_supernode[iter_snode->m_index]++;
    }

  /* Accumulate excess enodes per supernode (enodes that were not
     created due to the per-program-point limit).  */
  auto_vec<unsigned> excess_enodes_per_supernode (m_sg.num_nodes ());
  for (int i = 0; i < m_sg.num_nodes (); i++)
    excess_enodes_per_supernode.quick_push (0);
  for (point_map_t::iterator iter = m_per_point_data.begin ();
       iter != m_per_point_data.end (); ++iter)
    {
      const program_point *point = (*iter).first;
      /* Points without a supernode (e.g. the origin) contribute
	 nothing.  */
      const supernode *iter_snode = point->get_supernode ();
      if (!iter_snode)
	continue;
      const per_program_point_data *point_data = (*iter).second;
      excess_enodes_per_supernode[iter_snode->m_index]
	+= point_data->m_excess_enodes;
    }

  /* Show per-function bar_charts of enodes per supernode/BB.  */
  pp_string (pp, "per-function enodes per supernode/BB:");
  pp_newline (pp);
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
    {
      function *fn = cgnode->get_fun ();
      pp_printf (pp, "function: %qs", function_name (fn));
      pp_newline (pp);

      bar_chart enodes_per_snode;
      bar_chart excess_enodes_per_snode;
      bool have_excess_enodes = false;
      for (int i = 0; i < m_sg.num_nodes (); i++)
	{
	  const supernode *iter_snode = m_sg.get_node_by_index (i);
	  /* Only chart supernodes belonging to this function.  */
	  if (iter_snode->get_function () != fn)
	    continue;
	  pretty_printer tmp_pp;
	  pp_printf (&tmp_pp, "sn %i (bb %i)",
		     iter_snode->m_index, iter_snode->m_bb->index);
	  enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
				     enodes_per_supernode[iter_snode->m_index]);
	  const int num_excess
	    = excess_enodes_per_supernode[iter_snode->m_index];
	  excess_enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
					    num_excess);
	  if (num_excess)
	    have_excess_enodes = true;
	}
      enodes_per_snode.print (pp);
      /* Only print the excess chart if there were any excess enodes.  */
      if (have_excess_enodes)
	{
	  pp_printf (pp, "EXCESS ENODES:");
	  pp_newline (pp);
	  excess_enodes_per_snode.print (pp);
	}
    }
}
3765
3766 /* Write all stats information to this graph's logger, if any. */
3767
3768 void
3769 exploded_graph::log_stats () const
3770 {
3771 logger * const logger = get_logger ();
3772 if (!logger)
3773 return;
3774
3775 LOG_SCOPE (logger);
3776
3777 m_ext_state.get_engine ()->log_stats (logger);
3778
3779 logger->log ("m_sg.num_nodes (): %i", m_sg.num_nodes ());
3780 logger->log ("m_nodes.length (): %i", m_nodes.length ());
3781 logger->log ("m_edges.length (): %i", m_edges.length ());
3782 logger->log ("remaining enodes in worklist: %i", m_worklist.length ());
3783
3784 logger->log ("global stats:");
3785 m_global_stats.log (logger);
3786
3787 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
3788 iter != m_per_function_stats.end ();
3789 ++iter)
3790 {
3791 function *fn = (*iter).first;
3792 log_scope s (logger, function_name (fn));
3793 (*iter).second->log (logger);
3794 }
3795
3796 print_bar_charts (logger->get_printer ());
3797 }
3798
3799 /* Dump all stats information to OUT. */
3800
3801 void
3802 exploded_graph::dump_stats (FILE *out) const
3803 {
3804 fprintf (out, "m_sg.num_nodes (): %i\n", m_sg.num_nodes ());
3805 fprintf (out, "m_nodes.length (): %i\n", m_nodes.length ());
3806 fprintf (out, "m_edges.length (): %i\n", m_edges.length ());
3807 fprintf (out, "remaining enodes in worklist: %i", m_worklist.length ());
3808
3809 fprintf (out, "global stats:\n");
3810 m_global_stats.dump (out);
3811
3812 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
3813 iter != m_per_function_stats.end ();
3814 ++iter)
3815 {
3816 function *fn = (*iter).first;
3817 fprintf (out, "function: %s\n", function_name (fn));
3818 (*iter).second->dump (out);
3819 }
3820
3821 fprintf (out, "PK_AFTER_SUPERNODE per supernode:\n");
3822 for (unsigned i = 0; i < m_PK_AFTER_SUPERNODE_per_snode.length (); i++)
3823 fprintf (out, " SN %i: %3i\n", i, m_PK_AFTER_SUPERNODE_per_snode[i]);
3824 }
3825
3826 void
3827 exploded_graph::dump_states_for_supernode (FILE *out,
3828 const supernode *snode) const
3829 {
3830 fprintf (out, "PK_AFTER_SUPERNODE nodes for SN: %i\n", snode->m_index);
3831 int i;
3832 exploded_node *enode;
3833 int state_idx = 0;
3834 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3835 {
3836 const supernode *iter_snode = enode->get_supernode ();
3837 if (enode->get_point ().get_kind () == PK_AFTER_SUPERNODE
3838 && iter_snode == snode)
3839 {
3840 pretty_printer pp;
3841 pp_format_decoder (&pp) = default_tree_printer;
3842 enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
3843 fprintf (out, "state %i: EN: %i\n %s\n",
3844 state_idx++, enode->m_index,
3845 pp_formatted_text (&pp));
3846 }
3847 }
3848 fprintf (out, "#exploded_node for PK_AFTER_SUPERNODE for SN: %i = %i\n",
3849 snode->m_index, state_idx);
3850 }
3851
3852 /* Return a new json::object of the form
3853 {"nodes" : [objs for enodes],
3854 "edges" : [objs for eedges],
3855 "ext_state": object for extrinsic_state,
3856 "diagnostic_manager": object for diagnostic_manager}. */
3857
3858 json::object *
3859 exploded_graph::to_json () const
3860 {
3861 json::object *egraph_obj = new json::object ();
3862
3863 /* Nodes. */
3864 {
3865 json::array *nodes_arr = new json::array ();
3866 unsigned i;
3867 exploded_node *n;
3868 FOR_EACH_VEC_ELT (m_nodes, i, n)
3869 nodes_arr->append (n->to_json (m_ext_state));
3870 egraph_obj->set ("nodes", nodes_arr);
3871 }
3872
3873 /* Edges. */
3874 {
3875 json::array *edges_arr = new json::array ();
3876 unsigned i;
3877 exploded_edge *n;
3878 FOR_EACH_VEC_ELT (m_edges, i, n)
3879 edges_arr->append (n->to_json ());
3880 egraph_obj->set ("edges", edges_arr);
3881 }
3882
3883 /* m_sg is JSONified at the top-level. */
3884
3885 egraph_obj->set ("ext_state", m_ext_state.to_json ());
3886 egraph_obj->set ("worklist", m_worklist.to_json ());
3887 egraph_obj->set ("diagnostic_manager", m_diagnostic_manager.to_json ());
3888
3889 /* The following fields aren't yet being JSONified:
3890 const state_purge_map *const m_purge_map;
3891 const analysis_plan &m_plan;
3892 stats m_global_stats;
3893 function_stat_map_t m_per_function_stats;
3894 stats m_functionless_stats;
3895 call_string_data_map_t m_per_call_string_data;
3896 auto_vec<int> m_PK_AFTER_SUPERNODE_per_snode; */
3897
3898 return egraph_obj;
3899 }
3900
3901 /* class exploded_path. */
3902
3903 /* Copy ctor. */
3904
3905 exploded_path::exploded_path (const exploded_path &other)
3906 : m_edges (other.m_edges.length ())
3907 {
3908 int i;
3909 const exploded_edge *eedge;
3910 FOR_EACH_VEC_ELT (other.m_edges, i, eedge)
3911 m_edges.quick_push (eedge);
3912 }
3913
3914 /* Look for the last use of SEARCH_STMT within this path.
3915 If found write the edge's index to *OUT_IDX and return true, otherwise
3916 return false. */
3917
3918 bool
3919 exploded_path::find_stmt_backwards (const gimple *search_stmt,
3920 int *out_idx) const
3921 {
3922 int i;
3923 const exploded_edge *eedge;
3924 FOR_EACH_VEC_ELT_REVERSE (m_edges, i, eedge)
3925 {
3926 const exploded_node *dst_node = eedge->m_dest;
3927 const program_point &dst_point = dst_node->get_point ();
3928 const gimple *stmt = dst_point.get_stmt ();
3929 if (stmt == search_stmt)
3930 {
3931 *out_idx = i;
3932 return true;
3933 }
3934 }
3935 return false;
3936 }
3937
3938 /* Get the final exploded_node in this path, which must be non-empty. */
3939
3940 exploded_node *
3941 exploded_path::get_final_enode () const
3942 {
3943 gcc_assert (m_edges.length () > 0);
3944 return m_edges[m_edges.length () - 1]->m_dest;
3945 }
3946
3947 /* Check state along this path, returning true if it is feasible.
3948 If OUT is non-NULL, and the path is infeasible, write a new
3949 feasibility_problem to *OUT. */
3950
bool
exploded_path::feasible_p (logger *logger, feasibility_problem **out,
			   engine *eng, const exploded_graph *eg) const
{
  LOG_SCOPE (logger);

  /* Start from a fresh model/visited-set and replay the path edge by
     edge.  */
  feasibility_state state (eng->get_model_manager (),
			   eg->get_supergraph ());

  /* Traverse the path, updating this state.  */
  for (unsigned edge_idx = 0; edge_idx < m_edges.length (); edge_idx++)
    {
      const exploded_edge *eedge = m_edges[edge_idx];
      if (logger)
	logger->log ("considering edge %i: EN:%i -> EN:%i",
		     edge_idx,
		     eedge->m_src->m_index,
		     eedge->m_dest->m_index);

      rejected_constraint *rc = NULL;
      if (!state.maybe_update_for_edge (logger, eedge, &rc))
	{
	  /* maybe_update_for_edge wrote the rejected constraint to RC;
	     either hand it to a new feasibility_problem (which takes
	     ownership) or free it.  */
	  gcc_assert (rc);
	  if (out)
	    {
	      const exploded_node &src_enode = *eedge->m_src;
	      const program_point &src_point = src_enode.get_point ();
	      const gimple *last_stmt
		= src_point.get_supernode ()->get_last_stmt ();
	      *out = new feasibility_problem (edge_idx, *eedge,
					      last_stmt, rc);
	    }
	  else
	    delete rc;
	  return false;
	}

      if (logger)
	{
	  logger->log ("state after edge %i: EN:%i -> EN:%i",
		       edge_idx,
		       eedge->m_src->m_index,
		       eedge->m_dest->m_index);
	  logger->start_log_line ();
	  state.get_model ().dump_to_pp (logger->get_printer (), true, false);
	  logger->end_log_line ();
	}
    }

  /* Every edge along the path was compatible with the state.  */
  return true;
}
4002
4003 /* Dump this path in multiline form to PP.
4004 If EXT_STATE is non-NULL, then show the nodes. */
4005
4006 void
4007 exploded_path::dump_to_pp (pretty_printer *pp,
4008 const extrinsic_state *ext_state) const
4009 {
4010 for (unsigned i = 0; i < m_edges.length (); i++)
4011 {
4012 const exploded_edge *eedge = m_edges[i];
4013 pp_printf (pp, "m_edges[%i]: EN %i -> EN %i",
4014 i,
4015 eedge->m_src->m_index,
4016 eedge->m_dest->m_index);
4017 pp_newline (pp);
4018
4019 if (ext_state)
4020 eedge->m_dest->dump_to_pp (pp, *ext_state);
4021 }
4022 }
4023
4024 /* Dump this path in multiline form to FP. */
4025
void
exploded_path::dump (FILE *fp, const extrinsic_state *ext_state) const
{
  /* Build a pretty_printer that streams directly to FP, inheriting the
     colorization setting from the global diagnostic context.  */
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = fp;
  dump_to_pp (&pp, ext_state);
  pp_flush (&pp);
}
4036
4037 /* Dump this path in multiline form to stderr. */
4038
DEBUG_FUNCTION void
exploded_path::dump (const extrinsic_state *ext_state) const
{
  /* Convenience wrapper around the FILE * overload.  */
  dump (stderr, ext_state);
}
4044
4045 /* Dump this path verbosely to FILENAME. */
4046
void
exploded_path::dump_to_file (const char *filename,
			     const extrinsic_state &ext_state) const
{
  FILE *fp = fopen (filename, "w");
  /* Silently give up if the file can't be opened (best-effort dump
     helper, only used for debugging).  */
  if (!fp)
    return;
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp.buffer->stream = fp;
  dump_to_pp (&pp, &ext_state);
  pp_flush (&pp);
  fclose (fp);
}
4061
4062 /* class feasibility_problem. */
4063
4064 void
4065 feasibility_problem::dump_to_pp (pretty_printer *pp) const
4066 {
4067 pp_printf (pp, "edge from EN: %i to EN: %i",
4068 m_eedge.m_src->m_index, m_eedge.m_dest->m_index);
4069 if (m_rc)
4070 {
4071 pp_string (pp, "; rejected constraint: ");
4072 m_rc->dump_to_pp (pp);
4073 pp_string (pp, "; rmodel: ");
4074 m_rc->get_model ().dump_to_pp (pp, true, false);
4075 }
4076 }
4077
4078 /* class feasibility_state. */
4079
4080 /* Ctor for feasibility_state, at the beginning of a path. */
4081
feasibility_state::feasibility_state (region_model_manager *manager,
				      const supergraph &sg)
: m_model (manager),
  m_snodes_visited (sg.m_nodes.length ())
{
  /* The bitmap ctor above only sizes it (one bit per supernode in SG);
     start with no supernodes marked as visited.  */
  bitmap_clear (m_snodes_visited);
}
4089
4090 /* Copy ctor for feasibility_state, for extending a path. */
4091
feasibility_state::feasibility_state (const feasibility_state &other)
: m_model (other.m_model),
  m_snodes_visited (const_sbitmap (other.m_snodes_visited)->n_bits)
{
  /* The member init above only sizes the bitmap to match OTHER's;
     copy over which supernodes have been visited.  */
  bitmap_copy (m_snodes_visited, other.m_snodes_visited);
}
4098
4099 /* The heart of feasibility-checking.
4100
4101 Attempt to update this state in-place based on traversing EEDGE
4102 in a path.
4103 Update the model for the stmts in the src enode.
4104 Attempt to add constraints for EEDGE.
4105
4106 If feasible, return true.
4107 Otherwise, return false and write to *OUT_RC. */
4108
bool
feasibility_state::maybe_update_for_edge (logger *logger,
					  const exploded_edge *eedge,
					  rejected_constraint **out_rc)
{
  const exploded_node &src_enode = *eedge->m_src;
  const program_point &src_point = src_enode.get_point ();
  if (logger)
    {
      logger->start_log_line ();
      src_point.print (logger->get_printer (), format (false));
      logger->end_log_line ();
    }

  /* Update state for the stmts that were processed in each enode.  */
  for (unsigned stmt_idx = 0; stmt_idx < src_enode.m_num_processed_stmts;
       stmt_idx++)
    {
      const gimple *stmt = src_enode.get_processed_stmt (stmt_idx);

      /* Update cfun and input_location in case of ICE: make it easier to
	 track down which source construct we're failing to handle.  */
      auto_cfun sentinel (src_point.get_function ());
      input_location = stmt->location;

      /* Replay the effects of each kind of stmt on m_model.
	 NULL is passed for the region_model_context in each case: we only
	 care about the resulting state, not about any diagnostics.  */
      if (const gassign *assign = dyn_cast <const gassign *> (stmt))
	m_model.on_assignment (assign, NULL);
      else if (const gasm *asm_stmt = dyn_cast <const gasm *> (stmt))
	m_model.on_asm_stmt (asm_stmt, NULL);
      else if (const gcall *call = dyn_cast <const gcall *> (stmt))
	{
	  bool terminate_path;
	  bool unknown_side_effects
	    = m_model.on_call_pre (call, NULL, &terminate_path);
	  m_model.on_call_post (call, unknown_side_effects, NULL);
	}
      else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
	m_model.on_return (return_, NULL);
    }

  const superedge *sedge = eedge->m_sedge;
  if (sedge)
    {
      if (logger)
	logger->log ("  sedge: SN:%i -> SN:%i %s",
		     sedge->m_src->m_index,
		     sedge->m_dest->m_index,
		     sedge->get_description (false));

      /* Attempt to add the constraints for this superedge; this is
	 where infeasible paths are rejected (writing to *OUT_RC).  */
      const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();
      if (!m_model.maybe_update_for_edge (*sedge, last_stmt, NULL, out_rc))
	{
	  if (logger)
	    {
	      logger->log ("rejecting due to region model");
	      m_model.dump_to_pp (logger->get_printer (), true, false);
	    }
	  return false;
	}
    }
  else
    {
      /* Special-case the initial eedge from the origin node to the
	 initial function by pushing a frame for it.  */
      if (src_point.get_kind () == PK_ORIGIN)
	{
	  gcc_assert (eedge->m_src->m_index == 0);
	  gcc_assert (eedge->m_dest->get_point ().get_kind ()
		      == PK_BEFORE_SUPERNODE);
	  function *fun = eedge->m_dest->get_function ();
	  gcc_assert (fun);
	  m_model.push_frame (fun, NULL, NULL);
	  if (logger)
	    logger->log ("  pushing frame for %qD", fun->decl);
	}
      else if (eedge->m_custom_info)
	{
	  /* e.g. rewinds for setjmp/longjmp: let the custom edge info
	     apply its own effects to the model.  */
	  eedge->m_custom_info->update_model (&m_model, eedge, NULL);
	}
    }

  /* Handle phi nodes on an edge leaving a PK_BEFORE_SUPERNODE (to
     a PK_BEFORE_STMT, or a PK_AFTER_SUPERNODE if no stmts).
     This will typically not be associated with a superedge.  */
  if (src_point.get_from_edge ())
    {
      const cfg_superedge *last_cfg_superedge
	= src_point.get_from_edge ()->dyn_cast_cfg_superedge ();
      const exploded_node &dst_enode = *eedge->m_dest;
      const unsigned dst_snode_idx = dst_enode.get_supernode ()->m_index;
      if (last_cfg_superedge)
	{
	  if (logger)
	    logger->log ("  update for phis");
	  m_model.update_for_phis (src_enode.get_supernode (),
				  last_cfg_superedge,
				  NULL);
	  /* If we're entering an snode that we've already visited on this
	     epath, then we need to fix things up for loops; see the
	     comment for store::loop_replay_fixup.
	     Perhaps we should also verify the callstring,
	     and track program_points, but hopefully doing it by supernode
	     is good enough.  */
	  if (bitmap_bit_p (m_snodes_visited, dst_snode_idx))
	    m_model.loop_replay_fixup (dst_enode.get_state ().m_region_model);
	}
      bitmap_set_bit (m_snodes_visited, dst_snode_idx);
    }
  return true;
}
4219
4220 /* A family of cluster subclasses for use when generating .dot output for
4221 exploded graphs (-fdump-analyzer-exploded-graph), for grouping the
4222 enodes into hierarchical boxes.
4223
4224 All functionless enodes appear in the top-level graph.
4225 Every (function, call_string) pair gets its own cluster. Within that
4226 cluster, each supernode gets its own cluster.
4227
4228 Hence all enodes relating to a particular function with a particular
4229 callstring will be in a cluster together; all enodes for the same
4230 function but with a different callstring will be in a different
4231 cluster. */
4232
4233 /* Base class of cluster for clustering exploded_node instances in .dot
4234 output, based on various subclass-specific criteria. */
4235
class exploded_cluster : public cluster<eg_traits>
{
  /* Currently just a common base class; see the concrete subclasses
     below (supernode_cluster, function_call_string_cluster,
     root_cluster).  */
};
4239
4240 /* Cluster containing all exploded_node instances for one supernode. */
4241
class supernode_cluster : public exploded_cluster
{
public:
  supernode_cluster (const supernode *supernode) : m_supernode (supernode) {}

  // TODO: dtor?

  /* Emit a dashed .dot subgraph labelled with the supernode's index,
     basic block index, and SCC id, containing all of its enodes.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    gv->println ("subgraph \"cluster_supernode_%i\" {", m_supernode->m_index);
    gv->indent ();
    gv->println ("style=\"dashed\";");
    gv->println ("label=\"SN: %i (bb: %i; scc: %i)\";",
		 m_supernode->m_index, m_supernode->m_bb->index,
		 args.m_eg.get_scc_id (*m_supernode));

    int i;
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_enodes, i, enode)
      enode->dump_dot (gv, args);

    /* Terminate subgraph.  */
    gv->outdent ();
    gv->println ("}");
  }

  /* Record EN as belonging to this supernode's cluster.  */
  void add_node (exploded_node *en) FINAL OVERRIDE
  {
    m_enodes.safe_push (en);
  }

  /* Comparator for use by auto_vec<supernode_cluster *>::qsort;
     orders clusters by supernode index.  */

  static int cmp_ptr_ptr (const void *p1, const void *p2)
  {
    const supernode_cluster *c1
      = *(const supernode_cluster * const *)p1;
    const supernode_cluster *c2
      = *(const supernode_cluster * const *)p2;
    return c1->m_supernode->m_index - c2->m_supernode->m_index;
  }

private:
  const supernode *m_supernode;

  /* The enodes in this cluster; borrowed pointers (not owned).  */
  auto_vec <exploded_node *> m_enodes;
};
4288
4289 /* Cluster containing all supernode_cluster instances for one
4290 (function, call_string) pair. */
4291
class function_call_string_cluster : public exploded_cluster
{
public:
  function_call_string_cluster (function *fun, call_string cs)
  : m_fun (fun), m_cs (cs) {}

  /* This cluster owns its child supernode_clusters.  */
  ~function_call_string_cluster ()
  {
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      delete (*iter).second;
  }

  /* Emit a .dot subgraph for this (function, call string) pair,
     containing one child subgraph per supernode.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    const char *funcname = function_name (m_fun);

    gv->println ("subgraph \"cluster_function_%s\" {",
		 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (m_fun->decl)));
    gv->indent ();
    gv->write_indent ();
    gv->print ("label=\"call string: ");
    m_cs.print (gv->get_pp ());
    gv->print (" function: %s \";", funcname);
    gv->print ("\n");

    /* Dump m_map, sorting it to avoid churn when comparing dumps.  */
    auto_vec<supernode_cluster *> child_clusters (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      child_clusters.quick_push ((*iter).second);

    child_clusters.qsort (supernode_cluster::cmp_ptr_ptr);

    unsigned i;
    supernode_cluster *child_cluster;
    FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
      child_cluster->dump_dot (gv, args);

    /* Terminate subgraph.  */
    gv->outdent ();
    gv->println ("}");
  }

  /* Route EN to the supernode_cluster for its supernode, creating that
     child cluster on first use.  */
  void add_node (exploded_node *en) FINAL OVERRIDE
  {
    const supernode *supernode = en->get_supernode ();
    gcc_assert (supernode);
    supernode_cluster **slot = m_map.get (supernode);
    if (slot)
      (*slot)->add_node (en);
    else
      {
	supernode_cluster *child = new supernode_cluster (supernode);
	m_map.put (supernode, child);
	child->add_node (en);
      }
  }

  /* Comparator for use by auto_vec<function_call_string_cluster *>;
     orders by function assembler name, then by call string.  */

  static int cmp_ptr_ptr (const void *p1, const void *p2)
  {
    const function_call_string_cluster *c1
      = *(const function_call_string_cluster * const *)p1;
    const function_call_string_cluster *c2
      = *(const function_call_string_cluster * const *)p2;
    if (int cmp_names
	= strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c1->m_fun->decl)),
		  IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c2->m_fun->decl))))
      return cmp_names;
    return call_string::cmp (c1->m_cs, c2->m_cs);
  }

private:
  function *m_fun;
  call_string m_cs;

  /* Owned child clusters, one per supernode seen via add_node.  */
  typedef ordered_hash_map<const supernode *, supernode_cluster *> map_t;
  map_t m_map;
};
4374
4375 /* Keys for root_cluster. */
4376
struct function_call_string
{
  function_call_string (function *fun, call_string cs)
  : m_fun (fun), m_cs (cs)
  {
    gcc_assert (fun);
  }

  /* Never NULL for a real entry; NULL and (function *)1 are reserved
     as the "empty" and "deleted" markers by the hash traits below.  */
  function *m_fun;
  call_string m_cs;
};
4388
4389 } // namespace ana
4390
template <> struct default_hash_traits<function_call_string>
: public pod_hash_traits<function_call_string>
{
  /* NOTE(review): presumably an all-zero bit pattern can't be relied on
     as the "empty" value here (mark_empty only NULLs m_fun) — confirm.  */
  static const bool empty_zero_p = false;
};
4396
template <>
inline hashval_t
pod_hash_traits<function_call_string>::hash (value_type v)
{
  /* Combine the hash of the function pointer with that of the
     call string.  */
  return pointer_hash <function>::hash (v.m_fun) ^ v.m_cs.hash ();
}
4403
template <>
inline bool
pod_hash_traits<function_call_string>::equal (const value_type &existing,
					      const value_type &candidate)
{
  /* Equal iff both the function and the call string match.  */
  return existing.m_fun == candidate.m_fun && existing.m_cs == candidate.m_cs;
}
template <>
inline void
pod_hash_traits<function_call_string>::mark_deleted (value_type &v)
{
  /* Use address 1 as the "deleted" marker, distinct from NULL ("empty").  */
  v.m_fun = reinterpret_cast<function *> (1);
}
template <>
inline void
pod_hash_traits<function_call_string>::mark_empty (value_type &v)
{
  /* NULL m_fun marks an empty slot (real entries assert m_fun != NULL).  */
  v.m_fun = NULL;
}
template <>
inline bool
pod_hash_traits<function_call_string>::is_deleted (value_type v)
{
  /* Matches the sentinel written by mark_deleted above.  */
  return v.m_fun == reinterpret_cast<function *> (1);
}
template <>
inline bool
pod_hash_traits<function_call_string>::is_empty (value_type v)
{
  /* Matches the sentinel written by mark_empty above.  */
  return v.m_fun == NULL;
}
4435
4436 namespace ana {
4437
4438 /* Top-level cluster for generating .dot output for exploded graphs,
4439 handling the functionless nodes, and grouping the remaining nodes by
4440 callstring. */
4441
class root_cluster : public exploded_cluster
{
public:
  /* This cluster owns its child function_call_string_clusters.  */
  ~root_cluster ()
  {
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      delete (*iter).second;
  }

  /* Emit the functionless enodes directly, then each child cluster
     as its own subgraph.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    int i;
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_functionless_enodes, i, enode)
      enode->dump_dot (gv, args);

    /* Dump m_map, sorting it to avoid churn when comparing dumps.  */
    auto_vec<function_call_string_cluster *> child_clusters (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      child_clusters.quick_push ((*iter).second);

    child_clusters.qsort (function_call_string_cluster::cmp_ptr_ptr);

    function_call_string_cluster *child_cluster;
    FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
      child_cluster->dump_dot (gv, args);
  }

  /* Route EN to the cluster for its (function, call string) pair,
     creating that child cluster on first use; enodes with no function
     are kept at the top level.  */
  void add_node (exploded_node *en) FINAL OVERRIDE
  {
    function *fun = en->get_function ();
    if (!fun)
      {
	m_functionless_enodes.safe_push (en);
	return;
      }

    const call_string &cs = en->get_point ().get_call_string ();
    function_call_string key (fun, cs);
    function_call_string_cluster **slot = m_map.get (key);
    if (slot)
      (*slot)->add_node (en);
    else
      {
	function_call_string_cluster *child
	  = new function_call_string_cluster (fun, cs);
	m_map.put (key, child);
	child->add_node (en);
      }
  }

private:
  /* This can't be an ordered_hash_map, as we can't store vec<call_string>,
     since it's not a POD; vec<>::quick_push has:
     *slot = obj;
     and the slot isn't initialized, so the assignment op dies when cleaning up
     un-inited *slot (within the truncate call).  */
  typedef hash_map<function_call_string, function_call_string_cluster *> map_t;
  map_t m_map;

  /* This should just be the origin exploded_node.  */
  auto_vec <exploded_node *> m_functionless_enodes;
};
4509
4510 /* Subclass of range_label for use within
4511 exploded_graph::dump_exploded_nodes for implementing
4512 -fdump-analyzer-exploded-nodes: a label for a specific
4513 exploded_node. */
4514
class enode_label : public range_label
{
public:
  enode_label (const extrinsic_state &ext_state,
	       exploded_node *enode)
  : m_ext_state (ext_state), m_enode (enode) {}

  /* Build the label text: "EN: INDEX: STATE-DUMP".  */
  label_text get_text (unsigned) const FINAL OVERRIDE
  {
    pretty_printer pp;
    pp_format_decoder (&pp) = default_tree_printer;
    m_enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
    return make_label_text (false, "EN: %i: %s",
			    m_enode->m_index, pp_formatted_text (&pp));
  }

private:
  const extrinsic_state &m_ext_state;
  exploded_node *m_enode;
};
4535
4536 /* Postprocessing support for dumping the exploded nodes.
4537 Handle -fdump-analyzer-exploded-nodes,
4538 -fdump-analyzer-exploded-nodes-2, and the
4539 "__analyzer_dump_exploded_nodes" builtin. */
4540
4541 void
4542 exploded_graph::dump_exploded_nodes () const
4543 {
4544 // TODO
4545 /* Locate calls to __analyzer_dump_exploded_nodes. */
4546 // Print how many egs there are for them?
4547 /* Better: log them as we go, and record the exploded nodes
4548 in question. */
4549
4550 /* Show every enode. */
4551
4552 /* Gather them by stmt, so that we can more clearly see the
4553 "hotspots" requiring numerous exploded nodes. */
4554
4555 /* Alternatively, simply throw them all into one big rich_location
4556 and see if the label-printing will sort it out...
4557 This requires them all to be in the same source file. */
4558
4559 if (flag_dump_analyzer_exploded_nodes)
4560 {
4561 auto_timevar tv (TV_ANALYZER_DUMP);
4562 gcc_rich_location richloc (UNKNOWN_LOCATION);
4563 unsigned i;
4564 exploded_node *enode;
4565 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4566 {
4567 if (const gimple *stmt = enode->get_stmt ())
4568 {
4569 if (get_pure_location (richloc.get_loc ()) == UNKNOWN_LOCATION)
4570 richloc.set_range (0, stmt->location, SHOW_RANGE_WITH_CARET);
4571 else
4572 richloc.add_range (stmt->location,
4573 SHOW_RANGE_WITHOUT_CARET,
4574 new enode_label (m_ext_state, enode));
4575 }
4576 }
4577 warning_at (&richloc, 0, "%i exploded nodes", m_nodes.length ());
4578
4579 /* Repeat the warning without all the labels, so that message is visible
4580 (the other one may well have scrolled past the terminal limit). */
4581 warning_at (richloc.get_loc (), 0,
4582 "%i exploded nodes", m_nodes.length ());
4583
4584 if (m_worklist.length () > 0)
4585 warning_at (richloc.get_loc (), 0,
4586 "worklist still contains %i nodes", m_worklist.length ());
4587 }
4588
4589 /* Dump the egraph in textual form to a dump file. */
4590 if (flag_dump_analyzer_exploded_nodes_2)
4591 {
4592 auto_timevar tv (TV_ANALYZER_DUMP);
4593 char *filename
4594 = concat (dump_base_name, ".eg.txt", NULL);
4595 FILE *outf = fopen (filename, "w");
4596 if (!outf)
4597 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
4598 free (filename);
4599
4600 fprintf (outf, "exploded graph for %s\n", dump_base_name);
4601 fprintf (outf, " nodes: %i\n", m_nodes.length ());
4602 fprintf (outf, " edges: %i\n", m_edges.length ());
4603
4604 unsigned i;
4605 exploded_node *enode;
4606 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4607 {
4608 fprintf (outf, "\nEN %i:\n", enode->m_index);
4609 enode->dump_succs_and_preds (outf);
4610 pretty_printer pp;
4611 enode->get_point ().print (&pp, format (true));
4612 fprintf (outf, "%s\n", pp_formatted_text (&pp));
4613 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
4614 }
4615
4616 fclose (outf);
4617 }
4618
4619 /* Dump the egraph in textual form to multiple dump files, one per enode. */
4620 if (flag_dump_analyzer_exploded_nodes_3)
4621 {
4622 auto_timevar tv (TV_ANALYZER_DUMP);
4623
4624 unsigned i;
4625 exploded_node *enode;
4626 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4627 {
4628 char *filename
4629 = xasprintf ("%s.en-%i.txt", dump_base_name, i);
4630 FILE *outf = fopen (filename, "w");
4631 if (!outf)
4632 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
4633 free (filename);
4634
4635 fprintf (outf, "EN %i:\n", enode->m_index);
4636 enode->dump_succs_and_preds (outf);
4637 pretty_printer pp;
4638 enode->get_point ().print (&pp, format (true));
4639 fprintf (outf, "%s\n", pp_formatted_text (&pp));
4640 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
4641
4642 fclose (outf);
4643 }
4644 }
4645
4646 /* Emit a warning at any call to "__analyzer_dump_exploded_nodes",
4647 giving the number of processed exploded nodes for "before-stmt",
4648 and the IDs of processed, merger, and worklist enodes.
4649
4650 We highlight the count of *processed* enodes since this is of most
4651 interest in DejaGnu tests for ensuring that state merger has
4652 happened.
4653
4654 We don't show the count of merger and worklist enodes, as this is
4655 more of an implementation detail of the merging/worklist that we
4656 don't want to bake into our expected DejaGnu messages. */
4657
4658 unsigned i;
4659 exploded_node *enode;
4660 hash_set<const gimple *> seen;
4661 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4662 {
4663 if (enode->get_point ().get_kind () != PK_BEFORE_STMT)
4664 continue;
4665
4666 if (const gimple *stmt = enode->get_stmt ())
4667 if (const gcall *call = dyn_cast <const gcall *> (stmt))
4668 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
4669 1))
4670 {
4671 if (seen.contains (stmt))
4672 continue;
4673
4674 auto_vec<exploded_node *> processed_enodes;
4675 auto_vec<exploded_node *> merger_enodes;
4676 auto_vec<exploded_node *> worklist_enodes;
4677 /* This is O(N^2). */
4678 unsigned j;
4679 exploded_node *other_enode;
4680 FOR_EACH_VEC_ELT (m_nodes, j, other_enode)
4681 {
4682 if (other_enode->get_point ().get_kind () != PK_BEFORE_STMT)
4683 continue;
4684 if (other_enode->get_stmt () == stmt)
4685 switch (other_enode->get_status ())
4686 {
4687 default:
4688 gcc_unreachable ();
4689 case exploded_node::STATUS_WORKLIST:
4690 worklist_enodes.safe_push (other_enode);
4691 break;
4692 case exploded_node::STATUS_PROCESSED:
4693 processed_enodes.safe_push (other_enode);
4694 break;
4695 case exploded_node::STATUS_MERGER:
4696 merger_enodes.safe_push (other_enode);
4697 break;
4698 }
4699 }
4700
4701 pretty_printer pp;
4702 pp_character (&pp, '[');
4703 print_enode_indices (&pp, processed_enodes);
4704 if (merger_enodes.length () > 0)
4705 {
4706 pp_string (&pp, "] merger(s): [");
4707 print_enode_indices (&pp, merger_enodes);
4708 }
4709 if (worklist_enodes.length () > 0)
4710 {
4711 pp_string (&pp, "] worklist: [");
4712 print_enode_indices (&pp, worklist_enodes);
4713 }
4714 pp_character (&pp, ']');
4715
4716 warning_n (stmt->location, 0, processed_enodes.length (),
4717 "%i processed enode: %s",
4718 "%i processed enodes: %s",
4719 processed_enodes.length (), pp_formatted_text (&pp));
4720 seen.add (stmt);
4721
4722 /* If the argument is non-zero, then print all of the states
4723 of the various enodes. */
4724 tree t_arg = fold (gimple_call_arg (call, 0));
4725 if (TREE_CODE (t_arg) != INTEGER_CST)
4726 {
4727 error_at (call->location,
4728 "integer constant required for arg 1");
4729 return;
4730 }
4731 int i_arg = TREE_INT_CST_LOW (t_arg);
4732 if (i_arg)
4733 {
4734 exploded_node *other_enode;
4735 FOR_EACH_VEC_ELT (processed_enodes, j, other_enode)
4736 {
4737 fprintf (stderr, "%i of %i: EN %i:\n",
4738 j + 1, processed_enodes.length (),
4739 other_enode->m_index);
4740 other_enode->dump_succs_and_preds (stderr);
4741 /* Dump state. */
4742 other_enode->get_state ().dump (m_ext_state, false);
4743 }
4744 }
4745 }
4746 }
4747 }
4748
4749 DEBUG_FUNCTION exploded_node *
4750 exploded_graph::get_node_by_index (int idx) const
4751 {
4752 exploded_node *enode = m_nodes[idx];
4753 gcc_assert (enode->m_index == idx);
4754 return enode;
4755 }
4756
4757 /* Ensure that there is an exploded_node for a top-level call to FNDECL. */
4758
4759 void
4760 exploded_graph::on_escaped_function (tree fndecl)
4761 {
4762 logger * const logger = get_logger ();
4763 LOG_FUNC_1 (logger, "%qE", fndecl);
4764
4765 cgraph_node *cgnode = cgraph_node::get (fndecl);
4766 if (!cgnode)
4767 return;
4768
4769 function *fun = cgnode->get_fun ();
4770 if (!fun)
4771 return;
4772
4773 if (!gimple_has_body_p (fndecl))
4774 return;
4775
4776 exploded_node *enode = add_function_entry (fun);
4777 if (logger)
4778 {
4779 if (enode)
4780 logger->log ("created EN %i for %qE entrypoint",
4781 enode->m_index, fun->decl);
4782 else
4783 logger->log ("did not create enode for %qE entrypoint", fun->decl);
4784 }
4785 }
4786
4787 /* A collection of classes for visualizing the callgraph in .dot form
4788 (as represented in the supergraph). */
4789
4790 /* Forward decls. */
4791 class viz_callgraph_node;
4792 class viz_callgraph_edge;
4793 class viz_callgraph;
4794 class viz_callgraph_cluster;
4795
4796 /* Traits for using "digraph.h" to visualize the callgraph. */
4797
struct viz_callgraph_traits
{
  typedef viz_callgraph_node node_t;
  typedef viz_callgraph_edge edge_t;
  typedef viz_callgraph graph_t;
  /* Extra arguments passed through to each dump_dot call; the egraph
     pointer may be NULL.  */
  struct dump_args_t
  {
    dump_args_t (const exploded_graph *eg) : m_eg (eg) {}
    const exploded_graph *m_eg;
  };
  typedef viz_callgraph_cluster cluster_t;
};
4810
4811 /* Subclass of dnode representing a function within the callgraph. */
4812
class viz_callgraph_node : public dnode<viz_callgraph_traits>
{
  friend class viz_callgraph;

public:
  viz_callgraph_node (function *fun, int index)
  : m_fun (fun), m_index (index), m_num_supernodes (0), m_num_superedges (0)
  {
    gcc_assert (fun);
  }

  /* Emit this function as an HTML-table .dot node, showing its name,
     supernode/superedge counts, and (if ARGS.m_eg is non-NULL)
     per-call-string enode counts and summary counts.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    pretty_printer *pp = gv->get_pp ();

    dump_dot_id (pp);
    pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=<",
	       "lightgrey");
    pp_string (pp, "<TABLE BORDER=\"0\">");
    pp_write_text_to_stream (pp);

    gv->begin_trtd ();
    pp_printf (pp, "VCG: %i: %s", m_index, function_name (m_fun));
    gv->end_tdtr ();
    pp_newline (pp);

    gv->begin_trtd ();
    pp_printf (pp, "supernodes: %i\n", m_num_supernodes);
    gv->end_tdtr ();
    pp_newline (pp);

    gv->begin_trtd ();
    pp_printf (pp, "superedges: %i\n", m_num_superedges);
    gv->end_tdtr ();
    pp_newline (pp);

    if (args.m_eg)
      {
	/* Count the enodes within this function.  */
	unsigned i;
	exploded_node *enode;
	unsigned num_enodes = 0;
	FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
	  {
	    if (enode->get_point ().get_function () == m_fun)
	      num_enodes++;
	  }
	gv->begin_trtd ();
	pp_printf (pp, "enodes: %i\n", num_enodes);
	gv->end_tdtr ();
	pp_newline (pp);

	// TODO: also show the per-callstring breakdown
	const exploded_graph::call_string_data_map_t *per_cs_data
	  = args.m_eg->get_per_call_string_data ();
	for (exploded_graph::call_string_data_map_t::iterator iter
	       = per_cs_data->begin ();
	     iter != per_cs_data->end ();
	     ++iter)
	  {
	    const call_string *cs = (*iter).first;
	    //per_call_string_data *data = (*iter).second;
	    /* Re-count, restricted to this call string; only emit a row
	       if this function has enodes for it.  */
	    num_enodes = 0;
	    FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
	      {
		if (enode->get_point ().get_function () == m_fun
		    && enode->get_point ().get_call_string () == *cs)
		  num_enodes++;
	      }
	    if (num_enodes > 0)
	      {
		gv->begin_trtd ();
		cs->print (pp);
		pp_printf (pp, ": %i\n", num_enodes);
		pp_write_text_as_html_like_dot_to_stream (pp);
		gv->end_tdtr ();
	      }
	  }

	/* Show any summaries.  */
	per_function_data *data = args.m_eg->get_per_function_data (m_fun);
	if (data)
	  {
	    pp_newline (pp);
	    gv->begin_trtd ();
	    pp_printf (pp, "summaries: %i\n", data->m_summaries.length ());
	    pp_write_text_as_html_like_dot_to_stream (pp);
	    gv->end_tdtr ();
	  }
      }

    pp_string (pp, "</TABLE>>];\n\n");
    pp_flush (pp);
  }

  /* Print this node's .dot identifier ("vcg_INDEX") to PP.  */
  void dump_dot_id (pretty_printer *pp) const
  {
    pp_printf (pp, "vcg_%i", m_index);
  }

private:
  function *m_fun;
  int m_index;
  /* Counts populated by viz_callgraph's ctor.  */
  int m_num_supernodes;
  int m_num_superedges;
};
4918
4919 /* Subclass of dedge representing a callgraph edge. */
4920
class viz_callgraph_edge : public dedge<viz_callgraph_traits>
{
public:
  viz_callgraph_edge (viz_callgraph_node *src, viz_callgraph_node *dest)
  : dedge<viz_callgraph_traits> (src, dest)
  {}

  /* Emit "SRC -> DST [...]" with fixed styling and an empty headlabel.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &) const
    FINAL OVERRIDE
  {
    pretty_printer *pp = gv->get_pp ();

    const char *style = "\"solid,bold\"";
    const char *color = "black";
    int weight = 10;
    const char *constraint = "true";

    m_src->dump_dot_id (pp);
    pp_string (pp, " -> ");
    m_dest->dump_dot_id (pp);
    pp_printf (pp,
	       (" [style=%s, color=%s, weight=%d, constraint=%s,"
		" headlabel=\""),
	       style, color, weight, constraint);
    pp_printf (pp, "\"];\n");
  }
};
4948
4949 /* Subclass of digraph representing the callgraph. */
4950
class viz_callgraph : public digraph<viz_callgraph_traits>
{
public:
  viz_callgraph (const supergraph &sg);

  /* Get the node for FUN.  Assumes FUN was registered by the ctor:
     the result of m_map.get is dereferenced without a NULL check.  */
  viz_callgraph_node *get_vcg_node_for_function (function *fun)
  {
    return *m_map.get (fun);
  }

  /* Get the node for the function containing SNODE.  */
  viz_callgraph_node *get_vcg_node_for_snode (supernode *snode)
  {
    return get_vcg_node_for_function (snode->m_fun);
  }

private:
  hash_map<function *, viz_callgraph_node *> m_map;
};
4969
4970 /* Placeholder subclass of cluster. */
4971
class viz_callgraph_cluster : public cluster<viz_callgraph_traits>
{
  /* No clustering is used for the callgraph visualization.  */
};
4975
4976 /* viz_callgraph's ctor. */
4977
viz_callgraph::viz_callgraph (const supergraph &sg)
{
  /* Create one node per function with a GIMPLE body.  */
  cgraph_node *node;
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    {
      function *fun = node->get_fun ();
      viz_callgraph_node *vcg_node
	= new viz_callgraph_node (fun, m_nodes.length ());
      m_map.put (fun, vcg_node);
      add_node (vcg_node);
    }

  /* Tally superedges per function, and add a graph edge for each
     interprocedural call superedge.  */
  unsigned i;
  superedge *sedge;
  FOR_EACH_VEC_ELT (sg.m_edges, i, sedge)
    {
      viz_callgraph_node *vcg_src = get_vcg_node_for_snode (sedge->m_src);
      if (vcg_src->m_fun)
	get_vcg_node_for_function (vcg_src->m_fun)->m_num_superedges++;
      if (sedge->dyn_cast_call_superedge ())
	{
	  viz_callgraph_node *vcg_dest = get_vcg_node_for_snode (sedge->m_dest);
	  viz_callgraph_edge *vcg_edge
	    = new viz_callgraph_edge (vcg_src, vcg_dest);
	  add_edge (vcg_edge);
	}
    }

  /* Tally supernodes per function.  */
  supernode *snode;
  FOR_EACH_VEC_ELT (sg.m_nodes, i, snode)
    {
      if (snode->m_fun)
	get_vcg_node_for_function (snode->m_fun)->m_num_supernodes++;
    }
}
5013
5014 /* Dump the callgraph to FILENAME. */
5015
static void
dump_callgraph (const supergraph &sg, const char *filename,
		const exploded_graph *eg)
{
  /* NOTE(review): OUTF is never written to here — dump_dot below opens
     FILENAME itself; this fopen only acts as a writability check (and
     truncates the file).  Consider simplifying — confirm dump_dot's
     behavior on an unwritable path first.  */
  FILE *outf = fopen (filename, "w");
  if (!outf)
    return;

  // TODO
  viz_callgraph vcg (sg);
  vcg.dump_dot (filename, NULL, viz_callgraph_traits::dump_args_t (eg));

  fclose (outf);
}
5030
5031 /* Dump the callgraph to "<srcfile>.callgraph.dot". */
5032
static void
dump_callgraph (const supergraph &sg, const exploded_graph *eg)
{
  auto_timevar tv (TV_ANALYZER_DUMP);
  /* Build "<dump_base_name>.callgraph.dot" and delegate to the
     filename-taking overload above.  */
  char *filename = concat (dump_base_name, ".callgraph.dot", NULL);
  dump_callgraph (sg, filename, eg);
  free (filename);
}
5041
5042 /* Subclass of dot_annotator for implementing
5043 DUMP_BASE_NAME.supergraph-eg.dot, a post-analysis dump of the supergraph.
5044
5045 Annotate the supergraph nodes by printing the exploded nodes in concise
5046 form within them, next to their pertinent statements where appropriate,
5047 colorizing the exploded nodes based on sm-state.
5048 Also show saved diagnostics within the exploded nodes, giving information
5049 on whether they were feasible, and, if infeasible, where the problem
5050 was. */
5051
5052 class exploded_graph_annotator : public dot_annotator
5053 {
5054 public:
  exploded_graph_annotator (const exploded_graph &eg)
  : m_eg (eg)
  {
    /* Avoid O(N^2) by prepopulating m_enodes_per_snodes.  */
    unsigned i;
    supernode *snode;
    FOR_EACH_VEC_ELT (eg.get_supergraph ().m_nodes, i, snode)
      m_enodes_per_snodes.safe_push (new auto_vec <exploded_node *> ());
    /* Bucket each enode by the index of its supernode (enodes without
       a supernode, e.g. the origin, are skipped).  */
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_eg.m_nodes, i, enode)
      if (enode->get_supernode ())
	m_enodes_per_snodes[enode->get_supernode ()->m_index]->safe_push (enode);
  }
5068
5069 /* Show exploded nodes for BEFORE_SUPERNODE points before N. */
  bool add_node_annotations (graphviz_out *gv, const supernode &n,
			     bool within_table)
    const FINAL OVERRIDE
  {
    if (!within_table)
      return false;
    gv->begin_tr ();
    pretty_printer *pp = gv->get_pp ();

    gv->begin_td ();
    pp_string (pp, "BEFORE");
    pp_printf (pp, " (scc: %i)", m_eg.get_scc_id (n));
    gv->end_td ();

    /* Emit a cell per enode at a PK_BEFORE_SUPERNODE point for N;
       if there are none, N was never reached — flag it in red.  */
    unsigned i;
    exploded_node *enode;
    bool had_enode = false;
    FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
      {
	gcc_assert (enode->get_supernode () == &n);
	const program_point &point = enode->get_point ();
	if (point.get_kind () != PK_BEFORE_SUPERNODE)
	  continue;
	print_enode (gv, enode);
	had_enode = true;
      }
    if (!had_enode)
      pp_string (pp, "<TD BGCOLOR=\"red\">UNREACHED</TD>");
    pp_flush (pp);
    gv->end_tr ();
    return true;
  }
5102
5103 /* Show exploded nodes for STMT. */
  void add_stmt_annotations (graphviz_out *gv, const gimple *stmt,
			     bool within_row)
    const FINAL OVERRIDE
  {
    if (!within_row)
      return;
    pretty_printer *pp = gv->get_pp ();

    const supernode *snode
      = m_eg.get_supergraph ().get_supernode_for_stmt (stmt);
    /* Emit a cell per enode at a PK_BEFORE_STMT point for STMT.  */
    unsigned i;
    exploded_node *enode;
    bool had_td = false;
    FOR_EACH_VEC_ELT (*m_enodes_per_snodes[snode->m_index], i, enode)
      {
	const program_point &point = enode->get_point ();
	if (point.get_kind () != PK_BEFORE_STMT)
	  continue;
	if (point.get_stmt () != stmt)
	  continue;
	print_enode (gv, enode);
	had_td = true;
      }
    pp_flush (pp);
    /* Keep the table well-formed: emit an empty cell if no enode
       matched.  */
    if (!had_td)
      {
	gv->begin_td ();
	gv->end_td ();
      }
  }
5134
5135 /* Show exploded nodes for AFTER_SUPERNODE points after N. */
5136 bool add_after_node_annotations (graphviz_out *gv, const supernode &n)
5137 const FINAL OVERRIDE
5138 {
5139 gv->begin_tr ();
5140 pretty_printer *pp = gv->get_pp ();
5141
5142 gv->begin_td ();
5143 pp_string (pp, "AFTER");
5144 gv->end_td ();
5145
5146 unsigned i;
5147 exploded_node *enode;
5148 FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
5149 {
5150 gcc_assert (enode->get_supernode () == &n);
5151 const program_point &point = enode->get_point ();
5152 if (point.get_kind () != PK_AFTER_SUPERNODE)
5153 continue;
5154 print_enode (gv, enode);
5155 }
5156 pp_flush (pp);
5157 gv->end_tr ();
5158 return true;
5159 }
5160
5161 private:
5162 /* Concisely print a TD element for ENODE, showing the index, status,
5163 and any saved_diagnostics at the enode. Colorize it to show sm-state.
5164
5165 Ideally we'd dump ENODE's state here, hidden behind some kind of
5166 interactive disclosure method like a tooltip, so that the states
5167 can be explored without overwhelming the graph.
5168 However, I wasn't able to get graphviz/xdot to show tooltips on
5169 individual elements within a HTML-like label. */
5170 void print_enode (graphviz_out *gv, const exploded_node *enode) const
5171 {
5172 pretty_printer *pp = gv->get_pp ();
5173 pp_printf (pp, "<TD BGCOLOR=\"%s\">",
5174 enode->get_dot_fillcolor ());
5175 pp_printf (pp, "<TABLE BORDER=\"0\">");
5176 gv->begin_trtd ();
5177 pp_printf (pp, "EN: %i", enode->m_index);
5178 switch (enode->get_status ())
5179 {
5180 default:
5181 gcc_unreachable ();
5182 case exploded_node::STATUS_WORKLIST:
5183 pp_string (pp, "(W)");
5184 break;
5185 case exploded_node::STATUS_PROCESSED:
5186 break;
5187 case exploded_node::STATUS_MERGER:
5188 pp_string (pp, "(M)");
5189 break;
5190 case exploded_node::STATUS_BULK_MERGED:
5191 pp_string (pp, "(BM)");
5192 break;
5193 }
5194 gv->end_tdtr ();
5195
5196 /* Dump any saved_diagnostics at this enode. */
5197 for (unsigned i = 0; i < enode->get_num_diagnostics (); i++)
5198 {
5199 const saved_diagnostic *sd = enode->get_saved_diagnostic (i);
5200 print_saved_diagnostic (gv, sd);
5201 }
5202 pp_printf (pp, "</TABLE>");
5203 pp_printf (pp, "</TD>");
5204 }
5205
5206 /* Print a TABLE element for SD, showing the kind, the length of the
5207 exploded_path, whether the path was feasible, and if infeasible,
5208 what the problem was. */
5209 void print_saved_diagnostic (graphviz_out *gv,
5210 const saved_diagnostic *sd) const
5211 {
5212 pretty_printer *pp = gv->get_pp ();
5213 gv->begin_trtd ();
5214 pp_printf (pp, "<TABLE BORDER=\"0\">");
5215 gv->begin_tr ();
5216 pp_string (pp, "<TD BGCOLOR=\"green\">");
5217 pp_printf (pp, "DIAGNOSTIC: %s", sd->m_d->get_kind ());
5218 gv->end_tdtr ();
5219 gv->begin_trtd ();
5220 if (sd->get_best_epath ())
5221 pp_printf (pp, "epath length: %i", sd->get_epath_length ());
5222 else
5223 pp_printf (pp, "no best epath");
5224 gv->end_tdtr ();
5225 if (const feasibility_problem *p = sd->get_feasibility_problem ())
5226 {
5227 gv->begin_trtd ();
5228 pp_printf (pp, "INFEASIBLE at eedge %i: EN:%i -> EN:%i",
5229 p->m_eedge_idx,
5230 p->m_eedge.m_src->m_index,
5231 p->m_eedge.m_dest->m_index);
5232 pp_write_text_as_html_like_dot_to_stream (pp);
5233 gv->end_tdtr ();
5234 gv->begin_trtd ();
5235 p->m_eedge.m_sedge->dump (pp);
5236 pp_write_text_as_html_like_dot_to_stream (pp);
5237 gv->end_tdtr ();
5238 gv->begin_trtd ();
5239 pp_gimple_stmt_1 (pp, p->m_last_stmt, 0, (dump_flags_t)0);
5240 pp_write_text_as_html_like_dot_to_stream (pp);
5241 gv->end_tdtr ();
5242 /* Ideally we'd print p->m_model here; see the notes above about
5243 tooltips. */
5244 }
5245 pp_printf (pp, "</TABLE>");
5246 gv->end_tdtr ();
5247 }
5248
5249 const exploded_graph &m_eg;
5250 auto_delete_vec<auto_vec <exploded_node *> > m_enodes_per_snodes;
5251 };
5252
5253 /* Implement -fdump-analyzer-json. */
5254
5255 static void
5256 dump_analyzer_json (const supergraph &sg,
5257 const exploded_graph &eg)
5258 {
5259 auto_timevar tv (TV_ANALYZER_DUMP);
5260 char *filename = concat (dump_base_name, ".analyzer.json.gz", NULL);
5261 gzFile output = gzopen (filename, "w");
5262 if (!output)
5263 {
5264 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
5265 free (filename);
5266 return;
5267 }
5268
5269 json::object *toplev_obj = new json::object ();
5270 toplev_obj->set ("sgraph", sg.to_json ());
5271 toplev_obj->set ("egraph", eg.to_json ());
5272
5273 pretty_printer pp;
5274 toplev_obj->print (&pp);
5275 pp_formatted_text (&pp);
5276
5277 delete toplev_obj;
5278
5279 if (gzputs (output, pp_formatted_text (&pp)) == EOF
5280 || gzclose (output))
5281 error_at (UNKNOWN_LOCATION, "error writing %qs", filename);
5282
5283 free (filename);
5284 }
5285
5286 /* Concrete subclass of plugin_analyzer_init_iface, allowing plugins
5287 to register new state machines. */
5288
5289 class plugin_analyzer_init_impl : public plugin_analyzer_init_iface
5290 {
5291 public:
5292 plugin_analyzer_init_impl (auto_delete_vec <state_machine> *checkers,
5293 logger *logger)
5294 : m_checkers (checkers),
5295 m_logger (logger)
5296 {}
5297
5298 void register_state_machine (state_machine *sm) FINAL OVERRIDE
5299 {
5300 m_checkers->safe_push (sm);
5301 }
5302
5303 logger *get_logger () const FINAL OVERRIDE
5304 {
5305 return m_logger;
5306 }
5307
5308 private:
5309 auto_delete_vec <state_machine> *m_checkers;
5310 logger *m_logger;
5311 };
5312
/* Run the analysis "engine": build the supergraph and exploded graph for
   the whole translation unit, emit any diagnostics found, and honor the
   various -fdump-analyzer-* dump flags along the way.  */

void
impl_run_checkers (logger *logger)
{
  LOG_SCOPE (logger);

  /* Record target endianness in the log, which is useful when debugging
     target-dependent differences in analyzer behavior.  */
  if (logger)
    {
      logger->log ("BITS_BIG_ENDIAN: %i", BITS_BIG_ENDIAN ? 1 : 0);
      logger->log ("BYTES_BIG_ENDIAN: %i", BYTES_BIG_ENDIAN ? 1 : 0);
      logger->log ("WORDS_BIG_ENDIAN: %i", WORDS_BIG_ENDIAN ? 1 : 0);
    }

  /* If using LTO, ensure that the cgraph nodes have function bodies.  */
  cgraph_node *node;
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    node->get_untransformed_body ();

  engine eng;

  /* Create the supergraph.  */
  supergraph sg (logger);

  /* Optional map used for purging no-longer-needed state as the exploded
     graph is built; stays NULL when -fanalyzer-state-purge is off.  */
  state_purge_map *purge_map = NULL;

  if (flag_analyzer_state_purge)
    purge_map = new state_purge_map (sg, logger);

  if (flag_dump_analyzer_supergraph)
    {
      /* Dump supergraph pre-analysis.  */
      auto_timevar tv (TV_ANALYZER_DUMP);
      char *filename = concat (dump_base_name, ".supergraph.dot", NULL);
      supergraph::dump_args_t args ((enum supergraph_dot_flags)0, NULL);
      sg.dump_dot (filename, args);
      free (filename);
    }

  /* Implement -fdump-analyzer-state-purge.  */
  if (flag_dump_analyzer_state_purge)
    {
      auto_timevar tv (TV_ANALYZER_DUMP);
      state_purge_annotator a (purge_map);
      char *filename = concat (dump_base_name, ".state-purge.dot", NULL);
      supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
      sg.dump_dot (filename, args);
      free (filename);
    }

  /* Create the built-in checkers, then give plugins the chance to
     register additional state machines.  */
  auto_delete_vec <state_machine> checkers;
  make_checkers (checkers, logger);

  plugin_analyzer_init_impl data (&checkers, logger);
  invoke_plugin_callbacks (PLUGIN_ANALYZER_INIT, &data);

  /* Log the final set of checkers (built-in plus plugin-registered).  */
  if (logger)
    {
      int i;
      state_machine *sm;
      FOR_EACH_VEC_ELT (checkers, i, sm)
	logger->log ("checkers[%i]: %s", i, sm->get_name ());
    }

  /* Extrinsic state shared by nodes in the graph.  */
  const extrinsic_state ext_state (checkers, &eng, logger);

  const analysis_plan plan (sg, logger);

  /* The exploded graph.  */
  exploded_graph eg (sg, logger, ext_state, purge_map, plan,
		     analyzer_verbosity);

  /* Add entrypoints to the graph for externally-callable functions.  */
  eg.build_initial_worklist ();

  /* Now process the worklist, exploring the <point, state> graph.  */
  eg.process_worklist ();

  /* Implement -fdump-analyzer-exploded-graph.  */
  if (flag_dump_analyzer_exploded_graph)
    {
      auto_timevar tv (TV_ANALYZER_DUMP);
      char *filename
	= concat (dump_base_name, ".eg.dot", NULL);
      exploded_graph::dump_args_t args (eg);
      root_cluster c;
      eg.dump_dot (filename, &c, args);
      free (filename);
    }

  /* Now emit any saved diagnostics.  */
  eg.get_diagnostic_manager ().emit_saved_diagnostics (eg);

  eg.dump_exploded_nodes ();

  eg.log_stats ();

  if (flag_dump_analyzer_callgraph)
    dump_callgraph (sg, &eg);

  if (flag_dump_analyzer_supergraph)
    {
      /* Dump post-analysis form of supergraph, annotated with the
         exploded nodes (and saved diagnostics) built on top of it.  */
      auto_timevar tv (TV_ANALYZER_DUMP);
      char *filename = concat (dump_base_name, ".supergraph-eg.dot", NULL);
      exploded_graph_annotator a (eg);
      supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
      sg.dump_dot (filename, args);
      free (filename);
    }

  if (flag_dump_analyzer_json)
    dump_analyzer_json (sg, eg);

  /* purge_map must outlive EG (which borrows it); deleting NULL is a
     no-op when state purging was disabled.  */
  delete purge_map;
}
5428
5429 /* External entrypoint to the analysis "engine".
5430 Set up any dumps, then call impl_run_checkers. */
5431
5432 void
5433 run_checkers ()
5434 {
5435 /* Save input_location. */
5436 location_t saved_input_location = input_location;
5437
5438 /* Handle -fdump-analyzer and -fdump-analyzer-stderr. */
5439 FILE *dump_fout = NULL;
5440 /* Track if we're responsible for closing dump_fout. */
5441 bool owns_dump_fout = false;
5442 if (flag_dump_analyzer_stderr)
5443 dump_fout = stderr;
5444 else if (flag_dump_analyzer)
5445 {
5446 char *dump_filename = concat (dump_base_name, ".analyzer.txt", NULL);
5447 dump_fout = fopen (dump_filename, "w");
5448 free (dump_filename);
5449 if (dump_fout)
5450 owns_dump_fout = true;
5451 }
5452
5453 {
5454 log_user the_logger (NULL);
5455 if (dump_fout)
5456 the_logger.set_logger (new logger (dump_fout, 0, 0,
5457 *global_dc->printer));
5458 LOG_SCOPE (the_logger.get_logger ());
5459
5460 impl_run_checkers (the_logger.get_logger ());
5461
5462 /* end of lifetime of the_logger (so that dump file is closed after the
5463 various dtors run). */
5464 }
5465
5466 if (owns_dump_fout)
5467 fclose (dump_fout);
5468
5469 /* Restore input_location. Subsequent passes may assume that input_location
5470 is some arbitrary value *not* in the block tree, which might be violated
5471 if we didn't restore it. */
5472 input_location = saved_input_location;
5473 }
5474
5475 } // namespace ana
5476
5477 #endif /* #if ENABLE_ANALYZER */