analyzer: fix ICE on non-lvalue in prune_for_sm_diagnostic [PR93993]
gcc/analyzer/diagnostic-manager.cc

/* Classes for saving, deduplicating, and emitting analyzer diagnostics.
   Copyright (C) 2019-2020 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "pretty-print.h"
#include "gcc-rich-location.h"
#include "gimple-pretty-print.h"
#include "function.h"
#include "diagnostic-core.h"
#include "diagnostic-event-id.h"
#include "diagnostic-path.h"
#include "alloc-pool.h"
#include "fibonacci_heap.h"
#include "shortest-paths.h"
#include "sbitmap.h"
#include "tristate.h"
#include "selftest.h"
#include "ordered-hash-map.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/diagnostic-manager.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "cgraph.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/program-state.h"
#include "analyzer/exploded-graph.h"
#include "analyzer/checker-path.h"
#include "analyzer/reachability.h"

#if ENABLE_ANALYZER

namespace ana {

/* class saved_diagnostic.  */

/* saved_diagnostic's ctor.
   Take ownership of D and STMT_FINDER.  */

saved_diagnostic::saved_diagnostic (const state_machine *sm,
                                    const exploded_node *enode,
                                    const supernode *snode, const gimple *stmt,
                                    stmt_finder *stmt_finder,
                                    tree var, state_machine::state_t state,
                                    pending_diagnostic *d)
: m_sm (sm), m_enode (enode), m_snode (snode), m_stmt (stmt),
  /* stmt_finder could be on-stack; we want our own copy that can
     outlive that.  */
  m_stmt_finder (stmt_finder ? stmt_finder->clone () : NULL),
  m_var (var), m_state (state),
  m_d (d), m_trailing_eedge (NULL)
{
  gcc_assert (m_stmt || m_stmt_finder);

  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to this diagnostic.  */
  gcc_assert (m_enode);
}

/* saved_diagnostic's dtor.  */

saved_diagnostic::~saved_diagnostic ()
{
  delete m_stmt_finder;
  delete m_d;
}

bool
saved_diagnostic::operator== (const saved_diagnostic &other) const
{
  return (m_sm == other.m_sm
          /* We don't compare m_enode.  */
          && m_snode == other.m_snode
          && m_stmt == other.m_stmt
          /* We don't compare m_stmt_finder.  */
          && pending_diagnostic::same_tree_p (m_var, other.m_var)
          && m_state == other.m_state
          && m_d->equal_p (*other.m_d)
          && m_trailing_eedge == other.m_trailing_eedge);
}

/* State for building a checker_path from a particular exploded_path.
   In particular, this precomputes reachability information: the set of
   source enodes for which a path can be found to the diagnostic enode.  */

class path_builder
{
public:
  path_builder (const exploded_graph &eg,
                const exploded_path &epath)
  : m_eg (eg),
    m_diag_enode (epath.get_final_enode ()),
    m_reachability (eg, m_diag_enode)
  {}

  const exploded_node *get_diag_node () const { return m_diag_enode; }

  bool reachable_from_p (const exploded_node *src_enode) const
  {
    return m_reachability.reachable_from_p (src_enode);
  }

  const extrinsic_state &get_ext_state () const { return m_eg.get_ext_state (); }

private:
  typedef reachability<eg_traits> enode_reachability;

  const exploded_graph &m_eg;

  /* The enode where the diagnostic occurs.  */
  const exploded_node *m_diag_enode;

  /* Precompute all enodes from which the diagnostic is reachable.  */
  enode_reachability m_reachability;
};

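/* Note: the reachability data precomputed here is what significant_edge_p
   (further below) consults via reachable_from_p when deciding which
   CFG-edge events are worth keeping, and get_ext_state is used when
   walking the state changes along each exploded_edge.  */
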
/* class diagnostic_manager.  */

/* diagnostic_manager's ctor.  */

diagnostic_manager::diagnostic_manager (logger *logger, int verbosity)
: log_user (logger), m_verbosity (verbosity)
{
}

/* Queue pending_diagnostic D at ENODE for later emission.  */

void
diagnostic_manager::add_diagnostic (const state_machine *sm,
                                    const exploded_node *enode,
                                    const supernode *snode, const gimple *stmt,
                                    stmt_finder *finder,
                                    tree var, state_machine::state_t state,
                                    pending_diagnostic *d)
{
  LOG_FUNC (get_logger ());

  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to the diagnostic.  */
  gcc_assert (enode);

  saved_diagnostic *sd
    = new saved_diagnostic (sm, enode, snode, stmt, finder, var, state, d);
  m_saved_diagnostics.safe_push (sd);
  if (get_logger ())
    log ("adding saved diagnostic %i at SN %i: %qs",
         m_saved_diagnostics.length () - 1,
         snode->m_index, d->get_kind ());
}

/* Queue pending_diagnostic D at ENODE for later emission.  */

void
diagnostic_manager::add_diagnostic (const exploded_node *enode,
                                    const supernode *snode, const gimple *stmt,
                                    stmt_finder *finder,
                                    pending_diagnostic *d)
{
  gcc_assert (enode);
  add_diagnostic (NULL, enode, snode, stmt, finder, NULL_TREE, 0, d);
}

/* A class for identifying sets of duplicated pending_diagnostic.

   We want to find the simplest dedupe_candidate amongst those that share a
   dedupe_key.  */

class dedupe_key
{
public:
  dedupe_key (const saved_diagnostic &sd,
              const exploded_path &epath)
  : m_sd (sd), m_stmt (sd.m_stmt)
  {
    /* Support deferring the choice of stmt until after an emission path has
       been built, using an optional stmt_finder.  */
    if (m_stmt == NULL)
      {
        gcc_assert (sd.m_stmt_finder);
        m_stmt = sd.m_stmt_finder->find_stmt (epath);
      }
    gcc_assert (m_stmt);
  }

  hashval_t hash () const
  {
    inchash::hash hstate;
    hstate.add_ptr (m_stmt);
    // TODO: m_sd
    return hstate.end ();
  }
  bool operator== (const dedupe_key &other) const
  {
    return (m_sd == other.m_sd
            && m_stmt == other.m_stmt);
  }

  location_t get_location () const
  {
    return m_stmt->location;
  }

  /* A qsort comparator for use by dedupe_winners::emit_best
     to sort them into location_t order.  */

  static int
  comparator (const void *p1, const void *p2)
  {
    const dedupe_key *pk1 = *(const dedupe_key * const *)p1;
    const dedupe_key *pk2 = *(const dedupe_key * const *)p2;

    location_t loc1 = pk1->get_location ();
    location_t loc2 = pk2->get_location ();

    return linemap_compare_locations (line_table, loc2, loc1);
  }

  const saved_diagnostic &m_sd;
  const gimple *m_stmt;
};

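/* Two saved_diagnostics that compare equal (via saved_diagnostic's
   operator==) and that resolve to the same statement produce equal
   dedupe_keys, and so land in the same partition within dedupe_winners
   below: only the partition's "best" candidate is emitted, while the
   others merely increment its duplicate count.  */
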
/* The value of a slot for a dedupe_key within dedupe_winners:
   the exploded_path for the best candidate for that key, and the
   number of duplicates seen so far.  */

class dedupe_candidate
{
public:
  // has the exploded_path
  dedupe_candidate (const shortest_exploded_paths &sp,
                    const saved_diagnostic &sd)
  : m_epath (sp.get_shortest_path (sd.m_enode)),
    m_num_dupes (0)
  {
  }

  unsigned length () const { return m_epath.length (); }
  const exploded_path &get_path () const { return m_epath; }

  void add_duplicate () { m_num_dupes++; }
  int get_num_dupes () const { return m_num_dupes; }

private:
  exploded_path m_epath;
public:
  int m_num_dupes;
};

/* Traits for use by dedupe_winners.  */

class dedupe_hash_map_traits
{
public:
  typedef const dedupe_key *key_type;
  typedef dedupe_candidate *value_type;
  typedef dedupe_candidate *compare_type;

  static inline hashval_t hash (const key_type &v)
  {
    return v->hash ();
  }
  static inline bool equal_keys (const key_type &k1, const key_type &k2)
  {
    return *k1 == *k2;
  }
  template <typename T>
  static inline void remove (T &)
  {
    // TODO
  }
  template <typename T>
  static inline void mark_deleted (T &entry)
  {
    entry.m_key = reinterpret_cast<key_type> (1);
  }
  template <typename T>
  static inline void mark_empty (T &entry)
  {
    entry.m_key = NULL;
  }
  template <typename T>
  static inline bool is_deleted (const T &entry)
  {
    return entry.m_key == reinterpret_cast<key_type> (1);
  }
  template <typename T>
  static inline bool is_empty (const T &entry)
  {
    return entry.m_key == NULL;
  }
  static const bool empty_zero_p = true;
};

/* A class for deduplicating diagnostics and finding (and emitting) the
   best diagnostic within each partition.  */

class dedupe_winners
{
public:
  ~dedupe_winners ()
  {
    /* Delete all keys and candidates.  */
    for (map_t::iterator iter = m_map.begin ();
         iter != m_map.end ();
         ++iter)
      {
        delete (*iter).first;
        delete (*iter).second;
      }
  }

  /* Determine an exploded_path for SD using SP and, if it's feasible,
     determine if it's the best seen so far for its dedupe_key.
     Retain the winner for each dedupe_key, and discard the rest.  */

  void add (logger *logger,
            const shortest_exploded_paths &sp,
            const saved_diagnostic &sd)
  {
    /* Build a dedupe_candidate for SD.
       This uses SP to build an exploded_path.  */
    dedupe_candidate *dc = new dedupe_candidate (sp, sd);

    /* Verify that the epath is feasible.
       State-merging means that not every epath within the egraph
       corresponds to a feasible execution w.r.t. states.
       Here we simply check each duplicate saved_diagnostic's
       shortest_path, and reject any that aren't feasible.
       This could introduce false negatives, as there could be longer
       feasible paths within the egraph.  */
    if (logger)
      logger->log ("considering %qs at SN: %i",
                   sd.m_d->get_kind (), sd.m_snode->m_index);
    if (!dc->get_path ().feasible_p (logger))
      {
        if (logger)
          logger->log ("rejecting %qs at SN: %i"
                       " due to infeasible path",
                       sd.m_d->get_kind (), sd.m_snode->m_index);
        delete dc;
        return;
      }
    else
      if (logger)
        logger->log ("accepting %qs at SN: %i with feasible path",
                     sd.m_d->get_kind (), sd.m_snode->m_index);

    dedupe_key *key = new dedupe_key (sd, dc->get_path ());
    if (dedupe_candidate **slot = m_map.get (key))
      {
        if (logger)
          logger->log ("already have this dedupe_key");

        (*slot)->add_duplicate ();

        if (dc->length () < (*slot)->length ())
          {
            /* We've got a shorter path for the key; replace
               the current candidate.  */
            if (logger)
              logger->log ("length %i is better than existing length %i;"
                           " taking over this dedupe_key",
                           dc->length (), (*slot)->length ());
            dc->m_num_dupes = (*slot)->get_num_dupes ();
            delete *slot;
            *slot = dc;
          }
        else
          /* We haven't beaten the current best candidate;
             drop the new candidate.  */
          {
            if (logger)
              logger->log ("length %i isn't better than existing length %i;"
                           " dropping this candidate",
                           dc->length (), (*slot)->length ());
            delete dc;
          }
        delete key;
      }
    else
      {
        /* This is the first candidate for this key.  */
        m_map.put (key, dc);
        if (logger)
          logger->log ("first candidate for this dedupe_key");
      }
  }

  /* Emit the simplest diagnostic within each set.  */

  void emit_best (diagnostic_manager *dm,
                  const exploded_graph &eg)
  {
    LOG_SCOPE (dm->get_logger ());

    /* Get keys into a vec for sorting.  */
    auto_vec<const dedupe_key *> keys (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
         iter != m_map.end ();
         ++iter)
      keys.quick_push ((*iter).first);

    dm->log ("# keys after de-duplication: %i", keys.length ());

    /* Sort into a good emission order.  */
    keys.qsort (dedupe_key::comparator);

    /* Emit the best candidate for each key.  */
    int i;
    const dedupe_key *key;
    FOR_EACH_VEC_ELT (keys, i, key)
      {
        dedupe_candidate **slot = m_map.get (key);
        gcc_assert (*slot);
        const dedupe_candidate &dc = **slot;

        dm->emit_saved_diagnostic (eg, key->m_sd,
                                   dc.get_path (), key->m_stmt,
                                   dc.get_num_dupes ());
      }
  }

private:

  /* This maps from each dedupe_key to a current best dedupe_candidate.  */

  typedef hash_map<const dedupe_key *, dedupe_candidate *,
                   dedupe_hash_map_traits> map_t;
  map_t m_map;
};

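/* Overall de-duplication flow: diagnostic_manager::emit_saved_diagnostics
   (below) feeds every saved_diagnostic into a dedupe_winners instance,
   which keeps the shortest feasible exploded_path per dedupe_key;
   emit_best then sorts the surviving keys by location and emits one
   diagnostic per key, passing along the count of suppressed duplicates.  */
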
/* Emit all saved diagnostics.  */

void
diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
{
  LOG_SCOPE (get_logger ());
  auto_timevar tv (TV_ANALYZER_DIAGNOSTICS);
  log ("# saved diagnostics: %i", m_saved_diagnostics.length ());

  if (m_saved_diagnostics.length () == 0)
    return;

  /* Compute the shortest_paths once, sharing it between all diagnostics.  */
  shortest_exploded_paths sp (eg, eg.get_origin ());

  /* Iterate through all saved diagnostics, adding them to a dedupe_winners
     instance.  This partitions the saved diagnostics by dedupe_key,
     generating exploded_paths for them, and retaining the best one in each
     partition.  */
  dedupe_winners best_candidates;

  int i;
  saved_diagnostic *sd;
  FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
    best_candidates.add (get_logger (), sp, *sd);

  /* For each dedupe-key, call emit_saved_diagnostic on the "best"
     saved_diagnostic.  */
  best_candidates.emit_best (this, eg);
}

/* Given a saved_diagnostic SD at STMT with feasible path EPATH through EG,
   create a checker_path of suitable events and use it to call
   SD's underlying pending_diagnostic "emit" vfunc to emit a diagnostic.  */

void
diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
                                           const saved_diagnostic &sd,
                                           const exploded_path &epath,
                                           const gimple *stmt,
                                           int num_dupes)
{
  LOG_SCOPE (get_logger ());
  log ("sd: %qs at SN: %i", sd.m_d->get_kind (), sd.m_snode->m_index);
  log ("num dupes: %i", num_dupes);

  pretty_printer *pp = global_dc->printer->clone ();

  /* Precompute all enodes from which the diagnostic is reachable.  */
  path_builder pb (eg, epath);

  /* This is the diagnostic_path subclass that will be built for
     the diagnostic.  */
  checker_path emission_path;

  /* Populate emission_path with a full description of EPATH.  */
  build_emission_path (pb, epath, &emission_path);

  /* Now prune it to just cover the most pertinent events.  */
  prune_path (&emission_path, sd.m_sm, sd.m_var, sd.m_state);

  /* Add a final event to the path, covering the diagnostic itself.
     We use the final enode from the epath, which might be different from
     the sd.m_enode, as the dedupe code doesn't care about enodes, just
     snodes.  */
  emission_path.add_final_event (sd.m_sm, epath.get_final_enode (), stmt,
                                 sd.m_var, sd.m_state);

  /* The "final" event might not be final; if the saved_diagnostic has a
     trailing eedge stashed, add any events for it.  This is for use
     in handling longjmp, to show where a longjmp is rewinding to.  */
  if (sd.m_trailing_eedge)
    add_events_for_eedge (pb, *sd.m_trailing_eedge, &emission_path);

  emission_path.prepare_for_emission (sd.m_d);

  gcc_rich_location rich_loc (stmt->location);
  rich_loc.set_path (&emission_path);

  auto_diagnostic_group d;
  auto_cfun sentinel (sd.m_snode->m_fun);
  if (sd.m_d->emit (&rich_loc))
    {
      if (flag_analyzer_show_duplicate_count && num_dupes > 0)
        inform_n (stmt->location, num_dupes,
                  "%i duplicate", "%i duplicates",
                  num_dupes);
    }
  delete pp;
}

/* Given a state change to DST_REP, determine a tree that gives the origin
   of that state at STMT, using DST_STATE's region model, so that state
   changes based on assignments can be tracked back to their origins.

   For example, if we have

     (S1) _1 = malloc (64);
     (S2) EXPR = _1;

   then at stmt S2 we can get the origin of EXPR's state as being _1,
   and thus track the allocation back to S1.  */

static tree
get_any_origin (const gimple *stmt,
                tree dst_rep,
                const program_state &dst_state)
{
  if (!stmt)
    return NULL_TREE;

  gcc_assert (dst_rep);

  if (const gassign *assign = dyn_cast <const gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign);
      /* Use region IDs to compare lhs with DST_REP, bulletproofing against
         cases where they can't have lvalues by using
         tentative_region_model_context.  */
      tentative_region_model_context ctxt;
      region_id lhs_rid = dst_state.m_region_model->get_lvalue (lhs, &ctxt);
      region_id dst_rep_rid
        = dst_state.m_region_model->get_lvalue (dst_rep, &ctxt);
      if (lhs_rid == dst_rep_rid && !ctxt.had_errors_p ())
        {
          tree rhs1 = gimple_assign_rhs1 (assign);
          enum tree_code op = gimple_assign_rhs_code (assign);
          switch (op)
            {
            default:
              //gcc_unreachable (); // TODO
              break;
            case COMPONENT_REF:
            case SSA_NAME:
              return rhs1;
            }
        }
    }
  return NULL_TREE;
}

/* Emit a "path" of events to EMISSION_PATH describing the exploded path
   EPATH within EG.  */

void
diagnostic_manager::build_emission_path (const path_builder &pb,
                                         const exploded_path &epath,
                                         checker_path *emission_path) const
{
  LOG_SCOPE (get_logger ());
  for (unsigned i = 0; i < epath.m_edges.length (); i++)
    {
      const exploded_edge *eedge = epath.m_edges[i];
      add_events_for_eedge (pb, *eedge, emission_path);
    }
}

/* Subclass of state_change_visitor that creates state_change_event
   instances.  */

class state_change_event_creator : public state_change_visitor
{
public:
  state_change_event_creator (const exploded_edge &eedge,
                              checker_path *emission_path)
    : m_eedge (eedge),
      m_emission_path (emission_path)
  {}

  bool on_global_state_change (const state_machine &sm,
                               state_machine::state_t src_sm_val,
                               state_machine::state_t dst_sm_val)
    FINAL OVERRIDE
  {
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    m_emission_path->add_event (new state_change_event (supernode,
                                                        stmt,
                                                        stack_depth,
                                                        sm,
                                                        NULL_TREE,
                                                        src_sm_val,
                                                        dst_sm_val,
                                                        NULL_TREE,
                                                        dst_state));
    return false;
  }

  bool on_state_change (const state_machine &sm,
                        state_machine::state_t src_sm_val,
                        state_machine::state_t dst_sm_val,
                        tree dst_rep,
                        svalue_id dst_origin_sid) FINAL OVERRIDE
  {
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    if (m_eedge.m_sedge
        && m_eedge.m_sedge->m_kind == SUPEREDGE_CFG_EDGE)
      {
        supernode = src_point.get_supernode ();
        stmt = supernode->get_last_stmt ();
        stack_depth = src_stack_depth;
      }

    /* Bulletproofing for state changes at calls/returns;
       TODO: is there a better way? */
    if (!stmt)
      return false;

    tree origin_rep
      = dst_state.get_representative_tree (dst_origin_sid);

    if (origin_rep == NULL_TREE)
      origin_rep = get_any_origin (stmt, dst_rep, dst_state);
    m_emission_path->add_event (new state_change_event (supernode,
                                                        stmt,
                                                        stack_depth,
                                                        sm,
                                                        dst_rep,
                                                        src_sm_val,
                                                        dst_sm_val,
                                                        origin_rep,
                                                        dst_state));
    return false;
  }

  const exploded_edge &m_eedge;
  checker_path *m_emission_path;
};

/* Compare SRC_STATE and DST_STATE (which use EXT_STATE), and call
   VISITOR's on_state_change for every sm-state change that occurs
   to a tree, and on_global_state_change for every global state change
   that occurs.

   This determines the state changes that ought to be reported to
   the user: a combination of the effects of changes to sm_state_map
   (which maps svalues to sm-states), and of region_model changes
   (which map trees to svalues).

   Bail out early and return true if any call to on_global_state_change
   or on_state_change returns true, otherwise return false.

   This is split out to make it easier to experiment with changes to
   exploded_node granularity (so that we can observe what state changes
   lead to state_change_events being emitted).  */

bool
for_each_state_change (const program_state &src_state,
                       const program_state &dst_state,
                       const extrinsic_state &ext_state,
                       state_change_visitor *visitor)
{
  gcc_assert (src_state.m_checker_states.length ()
              == ext_state.get_num_checkers ());
  gcc_assert (dst_state.m_checker_states.length ()
              == ext_state.get_num_checkers ());
  for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
    {
      const state_machine &sm = ext_state.get_sm (i);
      const sm_state_map &src_smap = *src_state.m_checker_states[i];
      const sm_state_map &dst_smap = *dst_state.m_checker_states[i];

      /* Add events for any global state changes.  */
      if (src_smap.get_global_state () != dst_smap.get_global_state ())
        if (visitor->on_global_state_change (sm,
                                             src_smap.get_global_state (),
                                             dst_smap.get_global_state ()))
          return true;

      /* Add events for per-svalue state changes.  */
      for (sm_state_map::iterator_t iter = dst_smap.begin ();
           iter != dst_smap.end ();
           ++iter)
        {
          /* Ideally we'd directly compare the SM state between src state
             and dst state, but there's no guarantee that the IDs can
             be meaningfully compared.  */
          svalue_id dst_sid = (*iter).first;
          state_machine::state_t dst_sm_val = (*iter).second.m_state;

          auto_vec<path_var> dst_pvs;
          dst_state.m_region_model->get_path_vars_for_svalue (dst_sid,
                                                              &dst_pvs);

          unsigned j;
          path_var *dst_pv;
          FOR_EACH_VEC_ELT (dst_pvs, j, dst_pv)
            {
              tree dst_rep = dst_pv->m_tree;
              gcc_assert (dst_rep);
              if (dst_pv->m_stack_depth
                  >= src_state.m_region_model->get_stack_depth ())
                continue;
              svalue_id src_sid
                = src_state.m_region_model->get_rvalue (*dst_pv, NULL);
              if (src_sid.null_p ())
                continue;
              state_machine::state_t src_sm_val = src_smap.get_state (src_sid);
              if (dst_sm_val != src_sm_val)
                {
                  svalue_id dst_origin_sid = (*iter).second.m_origin;
                  if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
                                                dst_rep, dst_origin_sid))
                    return true;
                }
            }
        }
    }
  return false;
}

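/* Worked example (illustrative; the exact state names depend on the state
   machine): across an exploded_edge for "p = malloc (64);" the returned
   svalue typically moves from the start state to an "allocated but
   unchecked" state.  If that svalue is representable as the tree 'p' in
   the destination state, on_state_change is invoked with dst_rep == 'p',
   which state_change_event_creator (above) turns into a state_change_event
   on the emission path.  */
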
/* Subroutine of diagnostic_manager::build_emission_path.
   Add any events for EEDGE to EMISSION_PATH.  */

void
diagnostic_manager::add_events_for_eedge (const path_builder &pb,
                                          const exploded_edge &eedge,
                                          checker_path *emission_path) const
{
  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  if (get_logger ())
    {
      get_logger ()->start_log_line ();
      pretty_printer *pp = get_logger ()->get_printer ();
      pp_printf (pp, "EN %i -> EN %i: ",
                 eedge.m_src->m_index,
                 eedge.m_dest->m_index);
      src_point.print (pp, format (false));
      pp_string (pp, "-> ");
      dst_point.print (pp, format (false));
      get_logger ()->end_log_line ();
    }
  const program_state &src_state = src_node->get_state ();
  const program_state &dst_state = dst_node->get_state ();

  /* Add state change events for the states that have changed.
     We add these before events for superedges, so that if we have a
     state_change_event due to following an edge, we'll get this sequence
     of events:

      | if (!ptr)
      |    ~
      |    |
      |    (1) assuming 'ptr' is non-NULL  (state_change_event)
      |    (2) following 'false' branch... (start_cfg_edge_event)
     ...
      | do_something (ptr);
      | ~~~~~~~~~~~~~^~~~~
      |              |
      |              (3) ...to here        (end_cfg_edge_event).  */
  state_change_event_creator visitor (eedge, emission_path);
  for_each_state_change (src_state, dst_state, pb.get_ext_state (),
                         &visitor);

  /* Allow non-standard edges to add events, e.g. when rewinding from
     longjmp to a setjmp.  */
  if (eedge.m_custom_info)
    eedge.m_custom_info->add_events_to_path (emission_path, eedge);

  /* Add events for superedges, function entries, and for statements.  */
  switch (dst_point.get_kind ())
    {
    default:
      break;
    case PK_BEFORE_SUPERNODE:
      if (src_point.get_kind () == PK_AFTER_SUPERNODE)
        {
          if (eedge.m_sedge)
            add_events_for_superedge (pb, eedge, emission_path);
        }
      /* Add function entry events.  */
      if (dst_point.get_supernode ()->entry_p ())
        {
          emission_path->add_event
            (new function_entry_event
             (dst_point.get_supernode ()->get_start_location (),
              dst_point.get_fndecl (),
              dst_stack_depth));
        }
      break;
    case PK_BEFORE_STMT:
      {
        const gimple *stmt = dst_point.get_stmt ();
        const gcall *call = dyn_cast <const gcall *> (stmt);
        if (call && is_setjmp_call_p (call))
          emission_path->add_event
            (new setjmp_event (stmt->location,
                               dst_node,
                               dst_point.get_fndecl (),
                               dst_stack_depth,
                               call));
        else
          emission_path->add_event
            (new statement_event (stmt,
                                  dst_point.get_fndecl (),
                                  dst_stack_depth, dst_state));
      }
      break;
    }
}

/* Return true if EEDGE is a significant edge in the path to the diagnostic
   for PB.

   Consider all of the sibling out-eedges from the same source enode
   as EEDGE.
   If there's no path from the destinations of those eedges to the
   diagnostic enode, then we have to take this eedge and thus it's
   significant.

   Conversely if there is a path from the destination of any other sibling
   eedge to the diagnostic enode, then this edge is insignificant.

   Example 1: redundant if-else:

     (A) if (...)            A
     (B)   ...              / \
         else              B   C
     (C)   ...              \ /
     (D) [DIAGNOSTIC]        D

   D is reachable by either B or C, so neither of these edges
   is significant.

   Example 2: pertinent if-else:

     (A) if (...)                    A
     (B)   ...                      / \
         else                      B   C
     (C) [NECESSARY CONDITION]      |   |
     (D) [POSSIBLE DIAGNOSTIC]      D1  D2

   D becomes D1 and D2 in the exploded graph, where the diagnostic occurs
   at D2.  D2 is only reachable via C, so the A -> C edge is significant.

   Example 3: redundant loop:

     (A) while (...)        +-->A
     (B)   ...              |  / \
     (C) ...                +-B   C
     (D) [DIAGNOSTIC]             |
                                  D

   D is reachable from both B and C, so the A->C edge is not significant.  */

bool
diagnostic_manager::significant_edge_p (const path_builder &pb,
                                        const exploded_edge &eedge) const
{
  int i;
  exploded_edge *sibling;
  FOR_EACH_VEC_ELT (eedge.m_src->m_succs, i, sibling)
    {
      if (sibling == &eedge)
        continue;
      if (pb.reachable_from_p (sibling->m_dest))
        {
          if (get_logger ())
            get_logger ()->log (" edge EN: %i -> EN: %i is insignificant as"
                                " EN: %i is also reachable via"
                                " EN: %i -> EN: %i",
                                eedge.m_src->m_index, eedge.m_dest->m_index,
                                pb.get_diag_node ()->m_index,
                                sibling->m_src->m_index,
                                sibling->m_dest->m_index);
          return false;
        }
    }

  return true;
}

/* Subroutine of diagnostic_manager::add_events_for_eedge
   where EEDGE has an underlying superedge i.e. a CFG edge,
   or an interprocedural call/return.
   Add any events for the superedge to EMISSION_PATH.  */

void
diagnostic_manager::add_events_for_superedge (const path_builder &pb,
                                              const exploded_edge &eedge,
                                              checker_path *emission_path)
  const
{
  gcc_assert (eedge.m_sedge);

  /* Don't add events for insignificant edges at verbosity levels below 3.  */
  if (m_verbosity < 3)
    if (!significant_edge_p (pb, eedge))
      return;

  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();

  switch (eedge.m_sedge->m_kind)
    {
    case SUPEREDGE_CFG_EDGE:
      {
        emission_path->add_event
          (new start_cfg_edge_event (eedge,
                                     (last_stmt
                                      ? last_stmt->location
                                      : UNKNOWN_LOCATION),
                                     src_point.get_fndecl (),
                                     src_stack_depth));
        emission_path->add_event
          (new end_cfg_edge_event (eedge,
                                   dst_point.get_supernode ()->get_start_location (),
                                   dst_point.get_fndecl (),
                                   dst_stack_depth));
      }
      break;

    case SUPEREDGE_CALL:
      {
        emission_path->add_event
          (new call_event (eedge,
                           (last_stmt
                            ? last_stmt->location
                            : UNKNOWN_LOCATION),
                           src_point.get_fndecl (),
                           src_stack_depth));
      }
      break;

    case SUPEREDGE_INTRAPROCEDURAL_CALL:
      {
        /* TODO: add a subclass for this, or generate events for the
           summary.  */
        emission_path->add_event
          (new debug_event ((last_stmt
                             ? last_stmt->location
                             : UNKNOWN_LOCATION),
                            src_point.get_fndecl (),
                            src_stack_depth,
                            "call summary"));
      }
      break;

    case SUPEREDGE_RETURN:
      {
        const return_superedge *return_edge
          = as_a <const return_superedge *> (eedge.m_sedge);

        const gcall *call_stmt = return_edge->get_call_stmt ();
        emission_path->add_event
          (new return_event (eedge,
                             (call_stmt
                              ? call_stmt->location
                              : UNKNOWN_LOCATION),
                             dst_point.get_fndecl (),
                             dst_stack_depth));
      }
      break;
    }
}

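/* To summarize the events added above, per superedge kind: a CFG edge adds
   a start_cfg_edge_event/end_cfg_edge_event pair, an interprocedural call
   adds a call_event, a summarized intraprocedural call currently adds only
   a "call summary" debug_event, and a return adds a return_event at the
   location of the call statement (with UNKNOWN_LOCATION as the fallback
   when no statement is available).  */
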
/* Prune PATH, based on the verbosity level, to the most pertinent
   events for a diagnostic that involves VAR ending in state STATE
   (for state machine SM).

   PATH is updated in place, and the redundant checker_events are deleted.

   As well as deleting events, call record_critical_state on events in
   which state critical to the pending_diagnostic is being handled; see
   the comment for diagnostic_manager::prune_for_sm_diagnostic.  */

void
diagnostic_manager::prune_path (checker_path *path,
                                const state_machine *sm,
                                tree var,
                                state_machine::state_t state) const
{
  LOG_FUNC (get_logger ());
  path->maybe_log (get_logger (), "path");
  prune_for_sm_diagnostic (path, sm, var, state);
  prune_interproc_events (path);
  finish_pruning (path);
  path->maybe_log (get_logger (), "pruned");
}

/* A cheap test to determine if EXPR can be the expression of interest in
   an sm-diagnostic, so that we can reject cases where we have a non-lvalue.
   We don't always have a model when calling this, so we can't use
   tentative_region_model_context, so there can be false positives.  */

static bool
can_be_expr_of_interest_p (tree expr)
{
  if (!expr)
    return false;

  /* Reject constants.  */
  if (CONSTANT_CLASS_P (expr))
    return false;

  /* Otherwise assume that it can be an lvalue.  */
  return true;
}

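/* This cheap rejection matters for the pruning pass below: when following
   a phi argument or a state-change origin turns the expression of interest
   into a constant (i.e. a non-lvalue), update_for_unsuitable_sm_exprs
   resets it to NULL_TREE so that the later region-model lookups never see
   a non-lvalue (see PR 93993).  */
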
/* First pass of diagnostic_manager::prune_path: apply verbosity level,
   pruning unrelated state change events.

   Iterate backwards through PATH, skipping state change events that aren't
   about VAR, but updating the pertinent VAR when state-copying occurs.

   As well as deleting events, call record_critical_state on events in
   which state critical to the pending_diagnostic is being handled, so
   that the event's get_desc vfunc can potentially supply a more precise
   description of the event to the user.
   e.g. improving
     "calling 'foo' from 'bar'"
   to
     "passing possibly-NULL pointer 'ptr' to 'foo' from 'bar' as param 1"
   when the diagnostic relates to later dereferencing 'ptr'.  */

void
diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
                                             const state_machine *sm,
                                             tree var,
                                             state_machine::state_t state) const
{
  update_for_unsuitable_sm_exprs (&var);

  int idx = path->num_events () - 1;
  while (idx >= 0 && idx < (signed)path->num_events ())
    {
      checker_event *base_event = path->get_checker_event (idx);
      if (get_logger ())
        {
          if (sm)
            {
              if (var)
                log ("considering event %i, with var: %qE, state: %qs",
                     idx, var, sm->get_state_name (state));
              else
                log ("considering event %i, with global state: %qs",
                     idx, sm->get_state_name (state));
            }
          else
            log ("considering event %i", idx);
        }
      gcc_assert (var == NULL || can_be_expr_of_interest_p (var));
      switch (base_event->m_kind)
        {
        default:
          gcc_unreachable ();

        case EK_DEBUG:
          if (m_verbosity < 4)
            {
              log ("filtering event %i: debug event", idx);
              path->delete_event (idx);
            }
          break;

        case EK_CUSTOM:
          /* Don't filter custom events.  */
          break;

        case EK_STMT:
          {
            /* If this stmt is the origin of "var", update var.  */
            if (var)
              {
                statement_event *stmt_event = (statement_event *)base_event;
                tree new_var = get_any_origin (stmt_event->m_stmt, var,
                                               stmt_event->m_dst_state);
                if (new_var)
                  {
                    log ("event %i: switching var of interest from %qE to %qE",
                         idx, var, new_var);
                    var = new_var;
                  }
              }
            if (m_verbosity < 4)
              {
                log ("filtering event %i: statement event", idx);
                path->delete_event (idx);
              }
          }
          break;

        case EK_FUNCTION_ENTRY:
          if (m_verbosity < 1)
            {
              log ("filtering event %i: function entry", idx);
              path->delete_event (idx);
            }
          break;

        case EK_STATE_CHANGE:
          {
            state_change_event *state_change = (state_change_event *)base_event;
            /* Use region IDs to compare var with the state_change's m_var,
               bulletproofing against cases where they can't have lvalues by
               using tentative_region_model_context.  */
            tentative_region_model_context ctxt;
            region_id state_var_rid
              = state_change->get_lvalue (state_change->m_var, &ctxt);
            region_id var_rid = state_change->get_lvalue (var, &ctxt);
            if (state_var_rid == var_rid && !ctxt.had_errors_p ())
              {
                if (state_change->m_origin)
                  {
                    log ("event %i: switching var of interest from %qE to %qE",
                         idx, var, state_change->m_origin);
                    var = state_change->m_origin;
                    update_for_unsuitable_sm_exprs (&var);
                  }
                log ("event %i: switching state of interest from %qs to %qs",
                     idx, sm->get_state_name (state_change->m_to),
                     sm->get_state_name (state_change->m_from));
                state = state_change->m_from;
              }
            else if (m_verbosity < 4)
              {
                if (var)
                  log ("filtering event %i:"
                       " state change to %qE unrelated to %qE",
                       idx, state_change->m_var, var);
                else
                  log ("filtering event %i: state change to %qE",
                       idx, state_change->m_var);
                if (ctxt.had_errors_p ())
                  log ("context had errors");
                path->delete_event (idx);
              }
          }
          break;

        case EK_START_CFG_EDGE:
          {
            cfg_edge_event *event = (cfg_edge_event *)base_event;
            const cfg_superedge& cfg_superedge
              = event->get_cfg_superedge ();
            const supernode *dest = event->m_sedge->m_dest;
            /* Do we have an SSA_NAME defined via a phi node in
               the dest CFG node?  */
            if (var && TREE_CODE (var) == SSA_NAME)
              if (SSA_NAME_DEF_STMT (var)->bb == dest->m_bb)
                {
                  if (gphi *phi
                      = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (var)))
                    {
                      /* Update var based on its phi node.  */
                      tree old_var = var;
                      var = cfg_superedge.get_phi_arg (phi);
                      log ("updating from %qE to %qE based on phi node",
                           old_var, var);
                      if (get_logger ())
                        {
                          pretty_printer pp;
                          pp_gimple_stmt_1 (&pp, phi, 0, (dump_flags_t)0);
                          log (" phi: %s", pp_formatted_text (&pp));
                        }
                      /* If we've chosen a bad exploded_path, then the
                         phi arg might be a constant.  Fail gracefully for
                         this case.  */
                      update_for_unsuitable_sm_exprs (&var);
                    }
                }

            /* TODO: is this edge significant to var?
               See if var can be in other states in the dest, but not
               in other states in the src?
               Must have multiple sibling edges.  */

            if (event->should_filter_p (m_verbosity))
              {
                log ("filtering event %i: CFG edge", idx);
                path->delete_event (idx);
                /* Also delete the corresponding EK_END_CFG_EDGE.  */
                gcc_assert (path->get_checker_event (idx)->m_kind
                            == EK_END_CFG_EDGE);
                path->delete_event (idx);
              }
          }
          break;

        case EK_END_CFG_EDGE:
          /* These come in pairs with EK_START_CFG_EDGE events and are
             filtered when their start event is filtered.  */
          break;

        case EK_CALL_EDGE:
          {
            call_event *event = (call_event *)base_event;
            const callgraph_superedge& cg_superedge
              = event->get_callgraph_superedge ();
            callsite_expr expr;
            tree caller_var
              = cg_superedge.map_expr_from_callee_to_caller (var, &expr);
            if (caller_var)
              {
                log ("event %i:"
                     " switching var of interest"
                     " from %qE in callee to %qE in caller",
                     idx, var, caller_var);
                var = caller_var;
                if (expr.param_p ())
                  event->record_critical_state (var, state);
                update_for_unsuitable_sm_exprs (&var);
              }
          }
          break;

        case EK_RETURN_EDGE:
          // TODO: potentially update var/state based on return value,
          // args etc
          {
            if (var)
              {
                return_event *event = (return_event *)base_event;
                const callgraph_superedge& cg_superedge
                  = event->get_callgraph_superedge ();
                callsite_expr expr;
                tree callee_var
                  = cg_superedge.map_expr_from_caller_to_callee (var, &expr);
                if (callee_var)
                  {
                    log ("event %i:"
                         " switching var of interest"
                         " from %qE in caller to %qE in callee",
                         idx, var, callee_var);
                    var = callee_var;
                    if (expr.return_value_p ())
                      event->record_critical_state (var, state);
                    update_for_unsuitable_sm_exprs (&var);
                  }
              }
          }
          break;

        case EK_SETJMP:
          /* TODO: only show setjmp_events that matter i.e. those for which
             there is a later rewind event using them.  */
        case EK_REWIND_FROM_LONGJMP:
        case EK_REWIND_TO_SETJMP:
          break;

        case EK_WARNING:
          /* Always show the final "warning" event in the path.  */
          break;
        }
      idx--;
    }
}

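/* To recap how the "var of interest" evolves during the backwards walk
   above: statement events can switch it to the origin of an assignment
   (via get_any_origin), state-change events can switch it to the recorded
   origin, CFG-edge events can follow phi arguments, and call/return events
   map it between caller and callee, with update_for_unsuitable_sm_exprs
   applied after each switch.  */
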
/* Subroutine of diagnostic_manager::prune_for_sm_diagnostic.
   If *EXPR is not suitable to be the expression of interest in
   an sm-diagnostic, set *EXPR to NULL and log.  */

void
diagnostic_manager::update_for_unsuitable_sm_exprs (tree *expr) const
{
  gcc_assert (expr);
  if (*expr && !can_be_expr_of_interest_p (*expr))
    {
      log ("new var %qE is unsuitable; setting var to NULL", *expr);
      *expr = NULL_TREE;
    }
}

/* Second pass of diagnostic_manager::prune_path: remove redundant
   interprocedural information.

   For example, given:
     (1)- calling "f2" from "f1"
     (2)--- entry to "f2"
     (3)--- calling "f3" from "f2"
     (4)----- entry to "f3"
     (5)--- returning to "f2" from "f3"
     (6)- returning to "f1" from "f2"
   with no other intervening events, then none of these events are
   likely to be interesting to the user.

   Prune [..., call, function-entry, return, ...] triples repeatedly
   until nothing has changed.  For the example above, this would
   remove events (3, 4, 5), and then remove events (1, 2, 6).  */

void
diagnostic_manager::prune_interproc_events (checker_path *path) const
{
  bool changed = false;
  do
    {
      changed = false;
      int idx = path->num_events () - 1;
      while (idx >= 0)
        {
          /* Prune [..., call, function-entry, return, ...] triples.  */
          if (idx + 2 < (signed)path->num_events ()
              && path->get_checker_event (idx)->is_call_p ()
              && path->get_checker_event (idx + 1)->is_function_entry_p ()
              && path->get_checker_event (idx + 2)->is_return_p ())
            {
              if (get_logger ())
                {
                  label_text desc
                    (path->get_checker_event (idx)->get_desc (false));
                  log ("filtering events %i-%i:"
                       " irrelevant call/entry/return: %s",
                       idx, idx + 2, desc.m_buffer);
                  desc.maybe_free ();
                }
              path->delete_event (idx + 2);
              path->delete_event (idx + 1);
              path->delete_event (idx);
              changed = true;
              idx--;
              continue;
            }

          /* Prune [..., call, return, ...] pairs
             (for -fanalyzer-verbosity=0).  */
          if (idx + 1 < (signed)path->num_events ()
              && path->get_checker_event (idx)->is_call_p ()
              && path->get_checker_event (idx + 1)->is_return_p ())
            {
              if (get_logger ())
                {
                  label_text desc
                    (path->get_checker_event (idx)->get_desc (false));
                  log ("filtering events %i-%i:"
                       " irrelevant call/return: %s",
                       idx, idx + 1, desc.m_buffer);
                  desc.maybe_free ();
                }
              path->delete_event (idx + 1);
              path->delete_event (idx);
              changed = true;
              idx--;
              continue;
            }

          idx--;
        }

    }
  while (changed);
}

/* Final pass of diagnostic_manager::prune_path.

   If all we're left with is in one function, then filter function entry
   events.  */

void
diagnostic_manager::finish_pruning (checker_path *path) const
{
  if (!path->interprocedural_p ())
    {
      int idx = path->num_events () - 1;
      while (idx >= 0 && idx < (signed)path->num_events ())
        {
          checker_event *base_event = path->get_checker_event (idx);
          if (base_event->m_kind == EK_FUNCTION_ENTRY)
            {
              log ("filtering event %i:"
                   " function entry for purely intraprocedural path", idx);
              path->delete_event (idx);
            }
          idx--;
        }
    }
}

} // namespace ana

#endif /* #if ENABLE_ANALYZER */