/* Classes for modeling the state of memory.
   Copyright (C) 2020-2023 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
#include "graphviz.h"
#include "options.h"
#include "cgraph.h"
#include "tree-dfa.h"
#include "stringpool.h"
#include "convert.h"
#include "target.h"
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
#include "bitmap.h"
#include "selftest.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "ordered-hash-map.h"
#include "options.h"
#include "cfg.h"
#include "analyzer/supergraph.h"
#include "sbitmap.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/call-summary.h"
#include "analyzer/analyzer-selftests.h"
#include "stor-layout.h"

#if ENABLE_ANALYZER

namespace ana {

/* Dump SVALS to PP, sorting them to ensure determinism.  */

static void
dump_svalue_set (const hash_set <const svalue *> &svals,
                 pretty_printer *pp, bool simple)
{
  auto_vec <const svalue *> v;
  for (hash_set<const svalue *>::iterator iter = svals.begin ();
       iter != svals.end (); ++iter)
    {
      v.safe_push (*iter);
    }
  v.qsort (svalue::cmp_ptr_ptr);

  pp_character (pp, '{');
  const svalue *sval;
  unsigned i;
  FOR_EACH_VEC_ELT (v, i, sval)
    {
      if (i > 0)
        pp_string (pp, ", ");
      sval->dump_to_pp (pp, simple);
    }
  pp_character (pp, '}');
}

/* class uncertainty_t.  */

/* Dump this object to PP.  */

void
uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
{
  pp_string (pp, "{m_maybe_bound_svals: ");
  dump_svalue_set (m_maybe_bound_svals, pp, simple);

  pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
  dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
  pp_string (pp, "}");
}

/* Dump this object to stderr.  */

DEBUG_FUNCTION void
uncertainty_t::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* class binding_key.  */

const binding_key *
binding_key::make (store_manager *mgr, const region *r)
{
  region_offset offset = r->get_offset (mgr->get_svalue_manager ());
  if (offset.symbolic_p ())
    return mgr->get_symbolic_binding (r);
  else
    {
      bit_size_t bit_size;
      if (r->get_bit_size (&bit_size))
        {
          /* Must be non-empty.  */
          gcc_assert (bit_size > 0);
          return mgr->get_concrete_binding (offset.get_bit_offset (),
                                            bit_size);
        }
      else
        return mgr->get_symbolic_binding (r);
    }
}

/* Dump this binding_key to stderr.  */

DEBUG_FUNCTION void
binding_key::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Get a description of this binding_key.  */

label_text
binding_key::get_desc (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  dump_to_pp (&pp, simple);
  return label_text::take (xstrdup (pp_formatted_text (&pp)));
}

/* qsort callback.  */

int
binding_key::cmp_ptrs (const void *p1, const void *p2)
{
  const binding_key * const *pk1 = (const binding_key * const *)p1;
  const binding_key * const *pk2 = (const binding_key * const *)p2;
  return cmp (*pk1, *pk2);
}

/* Comparator for binding_keys.  */

int
binding_key::cmp (const binding_key *k1, const binding_key *k2)
{
  int concrete1 = k1->concrete_p ();
  int concrete2 = k2->concrete_p ();
  if (int concrete_cmp = concrete1 - concrete2)
    return concrete_cmp;
  if (concrete1)
    {
      const concrete_binding *b1 = (const concrete_binding *)k1;
      const concrete_binding *b2 = (const concrete_binding *)k2;
      if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
                                   b2->get_start_bit_offset (),
                                   SIGNED))
        return start_cmp;
      return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
                      SIGNED);
    }
  else
    {
      const symbolic_binding *s1 = (const symbolic_binding *)k1;
      const symbolic_binding *s2 = (const symbolic_binding *)k2;
      if (s1 > s2)
        return 1;
      if (s1 < s2)
        return -1;
      return 0;
    }
}

/* struct bit_range.  */

void
bit_range::dump_to_pp (pretty_printer *pp) const
{
  byte_range bytes (0, 0);
  if (as_byte_range (&bytes))
    bytes.dump_to_pp (pp);
  else
    {
      pp_string (pp, "start: ");
      pp_wide_int (pp, m_start_bit_offset, SIGNED);
      pp_string (pp, ", size: ");
      pp_wide_int (pp, m_size_in_bits, SIGNED);
      pp_string (pp, ", next: ");
      pp_wide_int (pp, get_next_bit_offset (), SIGNED);
    }
}

/* Dump this object to stderr.  */

DEBUG_FUNCTION void
bit_range::dump () const
{
  pretty_printer pp;
  pp.buffer->stream = stderr;
  dump_to_pp (&pp);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Generate a JSON value for this bit_range.
   This is intended for debugging the analyzer rather
   than serialization.  */

json::object *
bit_range::to_json () const
{
  json::object *obj = new json::object ();
  obj->set ("start_bit_offset",
            bit_offset_to_json (m_start_bit_offset));
  obj->set ("size_in_bits",
            bit_offset_to_json (m_size_in_bits));
  return obj;
}

/* If OTHER is a subset of this, return true and, if OUT is
   non-null, write to *OUT the relative range of OTHER within this.
   Otherwise return false.  */

bool
bit_range::contains_p (const bit_range &other, bit_range *out) const
{
  if (contains_p (other.get_start_bit_offset ())
      && contains_p (other.get_last_bit_offset ()))
    {
      if (out)
        {
          out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
          out->m_size_in_bits = other.m_size_in_bits;
        }
      return true;
    }
  else
    return false;
}

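/* As a worked example of the semantics above (illustration only, not part
   of the original sources): for THIS covering bits [0, 32) and OTHER with
   start 8 and size 8, contains_p succeeds and *OUT becomes the relative
   range {start: 8, size: 8}; for OTHER covering bits [24, 40) it returns
   false, since OTHER's last bit lies outside THIS.  */
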
/* If OTHER intersects this, return true and write
   the relative range of OTHER within THIS to *OUT_THIS,
   and the relative range of THIS within OTHER to *OUT_OTHER.
   Otherwise return false.  */

bool
bit_range::intersects_p (const bit_range &other,
                         bit_range *out_this,
                         bit_range *out_other) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && other.get_start_bit_offset () < get_next_bit_offset ())
    {
      bit_offset_t overlap_start
        = MAX (get_start_bit_offset (),
               other.get_start_bit_offset ());
      bit_offset_t overlap_next
        = MIN (get_next_bit_offset (),
               other.get_next_bit_offset ());
      gcc_assert (overlap_next > overlap_start);
      bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
      *out_this = abs_overlap_bits - get_start_bit_offset ();
      *out_other = abs_overlap_bits - other.get_start_bit_offset ();
      return true;
    }
  else
    return false;
}

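/* As a worked example (illustration only, not part of the original
   sources): for THIS covering bits [0, 32) and OTHER covering bits
   [24, 40), the shared bits are [24, 32), so *OUT_THIS becomes
   {start: 24, size: 8} and *OUT_OTHER becomes {start: 0, size: 8}.  */
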
/* Return true if THIS and OTHER intersect and write the number
   of bits both buffers overlap to *OUT_NUM_OVERLAP_BITS.

   Otherwise return false.  */

bool
bit_range::intersects_p (const bit_range &other,
                         bit_size_t *out_num_overlap_bits) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && other.get_start_bit_offset () < get_next_bit_offset ())
    {
      bit_offset_t overlap_start = MAX (get_start_bit_offset (),
                                        other.get_start_bit_offset ());
      bit_offset_t overlap_next = MIN (get_next_bit_offset (),
                                       other.get_next_bit_offset ());
      gcc_assert (overlap_next > overlap_start);
      *out_num_overlap_bits = overlap_next - overlap_start;
      return true;
    }
  else
    return false;
}

/* Return true if THIS exceeds OTHER and write the overhanging
   bit range to OUT_OVERHANGING_BIT_RANGE.  */

bool
bit_range::exceeds_p (const bit_range &other,
                      bit_range *out_overhanging_bit_range) const
{
  gcc_assert (!empty_p ());

  if (other.get_next_bit_offset () < get_next_bit_offset ())
    {
      /* THIS definitely exceeds OTHER.  */
      bit_offset_t start = MAX (get_start_bit_offset (),
                                other.get_next_bit_offset ());
      bit_offset_t size = get_next_bit_offset () - start;
      gcc_assert (size > 0);
      out_overhanging_bit_range->m_start_bit_offset = start;
      out_overhanging_bit_range->m_size_in_bits = size;
      return true;
    }
  else
    return false;
}

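/* As a worked example of exceeds_p (illustration only, not part of the
   original sources): for THIS covering bits [0, 96) and OTHER covering
   bits [0, 64), THIS extends past OTHER, so the overhanging range written
   to *OUT_OVERHANGING_BIT_RANGE is {start: 64, size: 32}.  */
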
/* Return true if THIS falls short of OFFSET and write the
   bit range fallen short to OUT_FALL_SHORT_BITS.  */

bool
bit_range::falls_short_of_p (bit_offset_t offset,
                             bit_range *out_fall_short_bits) const
{
  gcc_assert (!empty_p ());

  if (get_start_bit_offset () < offset)
    {
      /* THIS falls short of OFFSET.  */
      bit_offset_t start = get_start_bit_offset ();
      bit_offset_t size = MIN (offset, get_next_bit_offset ()) - start;
      gcc_assert (size > 0);
      out_fall_short_bits->m_start_bit_offset = start;
      out_fall_short_bits->m_size_in_bits = size;
      return true;
    }
  else
    return false;
}

int
bit_range::cmp (const bit_range &br1, const bit_range &br2)
{
  if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
                                br2.m_start_bit_offset))
    return start_cmp;

  return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
}

/* Offset this range by OFFSET.  */

bit_range
bit_range::operator- (bit_offset_t offset) const
{
  return bit_range (m_start_bit_offset - offset, m_size_in_bits);
}

/* If MASK is a contiguous range of set bits, write them
   to *OUT and return true.
   Otherwise return false.  */

bool
bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
{
  unsigned iter_bit_idx = 0;
  unsigned HOST_WIDE_INT iter_bit_mask = 1;

  /* Find the first contiguous run of set bits in MASK.  */

  /* Find first set bit in MASK.  */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (mask & iter_bit_mask)
        break;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }
  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
    /* MASK is zero.  */
    return false;

  unsigned first_set_iter_bit_idx = iter_bit_idx;
  unsigned num_set_bits = 1;
  iter_bit_idx++;
  iter_bit_mask <<= 1;

  /* Find next unset bit in MASK.  */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (!(mask & iter_bit_mask))
        break;
      num_set_bits++;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }
  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
    {
      *out = bit_range (first_set_iter_bit_idx, num_set_bits);
      return true;
    }

  /* We now have the first contiguous run of set bits in MASK.
     Fail if any other bits are set.  */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (mask & iter_bit_mask)
        return false;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }

  *out = bit_range (first_set_iter_bit_idx, num_set_bits);
  return true;
}

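/* As a worked example of from_mask (illustration only, not part of the
   original sources): from_mask (0x3c, &out) succeeds with OUT set to
   {start: 2, size: 4}, since bits 2-5 form one contiguous run, whereas
   from_mask (0x5, &out) fails because the set bits are not contiguous.  */
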
/* Attempt to convert this bit_range to a byte_range.
   Return true if it is possible, writing the result to *OUT.
   Otherwise return false.  */

bool
bit_range::as_byte_range (byte_range *out) const
{
  if (m_start_bit_offset % BITS_PER_UNIT == 0
      && m_size_in_bits % BITS_PER_UNIT == 0)
    {
      out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
      out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
      return true;
    }
  return false;
}

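/* As a worked example (illustration only, not part of the original
   sources): with BITS_PER_UNIT == 8, the bit_range {start: 16, size: 32}
   converts to the byte_range covering bytes 2-5, whereas {start: 3,
   size: 8} is not byte-aligned and the conversion fails.  */
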
/* Dump this object to PP.  */

void
byte_range::dump_to_pp (pretty_printer *pp) const
{
  if (m_size_in_bytes == 0)
    {
      pp_string (pp, "empty");
    }
  else if (m_size_in_bytes == 1)
    {
      pp_string (pp, "byte ");
      pp_wide_int (pp, m_start_byte_offset, SIGNED);
    }
  else
    {
      pp_string (pp, "bytes ");
      pp_wide_int (pp, m_start_byte_offset, SIGNED);
      pp_string (pp, "-");
      pp_wide_int (pp, get_last_byte_offset (), SIGNED);
    }
}

/* Dump this object to stderr.  */

DEBUG_FUNCTION void
byte_range::dump () const
{
  pretty_printer pp;
  pp.buffer->stream = stderr;
  dump_to_pp (&pp);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Generate a JSON value for this byte_range.
   This is intended for debugging the analyzer rather
   than serialization.  */

json::object *
byte_range::to_json () const
{
  json::object *obj = new json::object ();
  obj->set ("start_byte_offset",
            byte_offset_to_json (m_start_byte_offset));
  obj->set ("size_in_bytes",
            byte_offset_to_json (m_size_in_bytes));
  return obj;
}

/* If OTHER is a subset of this, return true and write
   to *OUT the relative range of OTHER within this.
   Otherwise return false.  */

bool
byte_range::contains_p (const byte_range &other, byte_range *out) const
{
  if (contains_p (other.get_start_byte_offset ())
      && contains_p (other.get_last_byte_offset ()))
    {
      out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
      out->m_size_in_bytes = other.m_size_in_bytes;
      return true;
    }
  else
    return false;
}

/* qsort comparator for byte ranges.  */

int
byte_range::cmp (const byte_range &br1, const byte_range &br2)
{
  /* Order first by offset.  */
  if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
                                br2.m_start_byte_offset))
    return start_cmp;

  /* ...then by size.  */
  return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
}

/* class concrete_binding : public binding_key.  */

/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding.  */

void
concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
{
  m_bit_range.dump_to_pp (pp);
}

/* Return true if this binding overlaps with OTHER.  */

bool
concrete_binding::overlaps_p (const concrete_binding &other) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && get_next_bit_offset () > other.get_start_bit_offset ())
    return true;
  return false;
}

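/* As a worked example (illustration only, not part of the original
   sources): a binding of bits [0, 32) overlaps one of bits [16, 48),
   since they share bits 16-31, but it does not overlap a binding of bits
   [32, 64): the ranges are half-open, so merely abutting bindings do not
   count as overlapping.  */
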
/* If this is expressible as a concrete byte range, return true
   and write it to *OUT.  Otherwise return false.  */

bool
concrete_binding::get_byte_range (byte_range *out) const
{
  return m_bit_range.as_byte_range (out);
}

/* Comparator for use by vec<const concrete_binding *>::qsort.  */

int
concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
{
  const concrete_binding *b1 = *(const concrete_binding * const *)p1;
  const concrete_binding *b2 = *(const concrete_binding * const *)p2;

  return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
}

/* class symbolic_binding : public binding_key.  */

void
symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
{
  //binding_key::dump_to_pp (pp, simple);
  pp_string (pp, "region: ");
  m_region->dump_to_pp (pp, simple);
}

/* Comparator for use by vec<const symbolic_binding *>::qsort.  */

int
symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
{
  const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
  const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;

  return region::cmp_ids (b1->get_region (), b2->get_region ());
}

/* The store is oblivious to the types of the svalues bound within
   it: any type can get bound at any location.
   Simplify any casts before binding.

   For example, if we have:
     struct big { int ia[1024]; };
     struct big src, dst;
     memcpy (&dst, &src, sizeof (struct big));
   this reaches us in gimple form as:
     MEM <unsigned char[4096]> [(char * {ref-all})&dst]
       = MEM <unsigned char[4096]> [(char * {ref-all})&src];
   Using cast_region when handling the MEM_REF would give us:
     INIT_VAL(CAST_REG(unsigned char[4096], src))
   as rhs_sval, but we can fold that into a cast svalue:
     CAST(unsigned char[4096], INIT_VAL(src))
   We can discard that cast from the svalue when binding it in
   the store for "dst", and simply store:
     cluster for: dst
       key:   {kind: direct, start: 0, size: 32768, next: 32768}
       value: ‘struct big’ {INIT_VAL(src)}.  */

static const svalue *
simplify_for_binding (const svalue *sval)
{
  if (const svalue *cast_sval = sval->maybe_undo_cast ())
    sval = cast_sval;
  return sval;
}

/* class binding_map.  */

/* binding_map's copy ctor.  */

binding_map::binding_map (const binding_map &other)
: m_map (other.m_map)
{
}

/* binding_map's assignment operator.  */

binding_map&
binding_map::operator=(const binding_map &other)
{
  /* For now, assume we only ever copy to an empty cluster.  */
  gcc_assert (m_map.elements () == 0);
  for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
       ++iter)
    {
      const binding_key *key = (*iter).first;
      const svalue *sval = (*iter).second;
      m_map.put (key, sval);
    }
  return *this;
}

/* binding_map's equality operator.  */

bool
binding_map::operator== (const binding_map &other) const
{
  if (m_map.elements () != other.m_map.elements ())
    return false;

  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      const svalue *sval = (*iter).second;
      const svalue **other_slot
        = const_cast <map_t &> (other.m_map).get (key);
      if (other_slot == NULL)
        return false;
      if (sval != *other_slot)
        return false;
    }
  gcc_checking_assert (hash () == other.hash ());
  return true;
}

/* Generate a hash value for this binding_map.  */

hashval_t
binding_map::hash () const
{
  hashval_t result = 0;
  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
    {
      /* Use a new hasher for each key to avoid depending on the ordering
         of keys when accumulating the result.  */
      inchash::hash hstate;
      hstate.add_ptr ((*iter).first);
      hstate.add_ptr ((*iter).second);
      result ^= hstate.end ();
    }
  return result;
}

/* Dump a representation of this binding_map to PP.
   SIMPLE controls how values and regions are to be printed.
   If MULTILINE, then split the dump over multiple lines and
   use whitespace for readability, otherwise put all on one line.  */

void
binding_map::dump_to_pp (pretty_printer *pp, bool simple,
                         bool multiline) const
{
  auto_vec <const binding_key *> binding_keys;
  for (map_t::iterator iter = m_map.begin ();
       iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      binding_keys.safe_push (key);
    }
  binding_keys.qsort (binding_key::cmp_ptrs);

  const binding_key *key;
  unsigned i;
  FOR_EACH_VEC_ELT (binding_keys, i, key)
    {
      const svalue *value = *const_cast <map_t &> (m_map).get (key);
      if (multiline)
        {
          pp_string (pp, "    key:   {");
          key->dump_to_pp (pp, simple);
          pp_string (pp, "}");
          pp_newline (pp);
          pp_string (pp, "    value: ");
          if (tree t = value->get_type ())
            dump_quoted_tree (pp, t);
          pp_string (pp, " {");
          value->dump_to_pp (pp, simple);
          pp_string (pp, "}");
          pp_newline (pp);
        }
      else
        {
          if (i > 0)
            pp_string (pp, ", ");
          pp_string (pp, "binding key: {");
          key->dump_to_pp (pp, simple);
          pp_string (pp, "}, value: {");
          value->dump_to_pp (pp, simple);
          pp_string (pp, "}");
        }
    }
}

/* Dump a multiline representation of this binding_map to stderr.  */

DEBUG_FUNCTION void
binding_map::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Return a new json::object of the form
   {KEY_DESC : SVALUE_DESC,
    ...for the various key/value pairs in this binding_map}.  */

json::object *
binding_map::to_json () const
{
  json::object *map_obj = new json::object ();

  auto_vec <const binding_key *> binding_keys;
  for (map_t::iterator iter = m_map.begin ();
       iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      binding_keys.safe_push (key);
    }
  binding_keys.qsort (binding_key::cmp_ptrs);

  const binding_key *key;
  unsigned i;
  FOR_EACH_VEC_ELT (binding_keys, i, key)
    {
      const svalue *value = *const_cast <map_t &> (m_map).get (key);
      label_text key_desc = key->get_desc ();
      map_obj->set (key_desc.get (), value->to_json ());
    }

  return map_obj;
}

/* Comparator for imposing an order on binding_maps.  */

int
binding_map::cmp (const binding_map &map1, const binding_map &map2)
{
  if (int count_cmp = map1.elements () - map2.elements ())
    return count_cmp;

  auto_vec <const binding_key *> keys1 (map1.elements ());
  for (map_t::iterator iter = map1.begin ();
       iter != map1.end (); ++iter)
    keys1.quick_push ((*iter).first);
  keys1.qsort (binding_key::cmp_ptrs);

  auto_vec <const binding_key *> keys2 (map2.elements ());
  for (map_t::iterator iter = map2.begin ();
       iter != map2.end (); ++iter)
    keys2.quick_push ((*iter).first);
  keys2.qsort (binding_key::cmp_ptrs);

  for (size_t i = 0; i < keys1.length (); i++)
    {
      const binding_key *k1 = keys1[i];
      const binding_key *k2 = keys2[i];
      if (int key_cmp = binding_key::cmp (k1, k2))
        return key_cmp;
      gcc_assert (k1 == k2);
      if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
        return sval_cmp;
    }

  return 0;
}

/* Get the child region of PARENT_REG based upon INDEX within a
   CONSTRUCTOR.  */

static const region *
get_subregion_within_ctor (const region *parent_reg, tree index,
                           region_model_manager *mgr)
{
  switch (TREE_CODE (index))
    {
    default:
      gcc_unreachable ();
    case INTEGER_CST:
      {
        const svalue *index_sval
          = mgr->get_or_create_constant_svalue (index);
        return mgr->get_element_region (parent_reg,
                                        TREE_TYPE (parent_reg->get_type ()),
                                        index_sval);
      }
      break;
    case FIELD_DECL:
      return mgr->get_field_region (parent_reg, index);
    }
}

/* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR.  */

static const svalue *
get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
{
  /* Reuse the get_rvalue logic from region_model.  */
  region_model m (mgr);
  return m.get_rvalue (path_var (val, 0), NULL);
}

/* Bind values from CONSTRUCTOR to this map, relative to
   PARENT_REG's relationship to its base region.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit).  */

bool
binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
                                   region_model_manager *mgr)
{
  gcc_assert (parent_reg);
  gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);

  unsigned ix;
  tree index;
  tree val;
  tree parent_type = parent_reg->get_type ();
  tree field;
  if (TREE_CODE (parent_type) == RECORD_TYPE)
    field = TYPE_FIELDS (parent_type);
  else
    field = NULL_TREE;
  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
    {
      if (!index)
        {
          /* If index is NULL, then iterate through the fields for
             a RECORD_TYPE, or use an INTEGER_CST otherwise.
             Compare with similar logic in output_constructor.  */
          if (field)
            {
              index = field;
              field = DECL_CHAIN (field);
            }
          else
            index = build_int_cst (integer_type_node, ix);
        }
      else if (TREE_CODE (index) == RANGE_EXPR)
        {
          tree min_index = TREE_OPERAND (index, 0);
          tree max_index = TREE_OPERAND (index, 1);
          if (min_index == max_index)
            {
              if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
                                                    min_index, val))
                return false;
            }
          else
            {
              if (!apply_ctor_val_to_range (parent_reg, mgr,
                                            min_index, max_index, val))
                return false;
            }
          continue;
        }
      if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
        return false;
    }
  return true;
}

/* Bind the value VAL into the range of elements within PARENT_REG
   from MIN_INDEX to MAX_INDEX (including endpoints).
   For use in handling RANGE_EXPR within a CONSTRUCTOR.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit).  */

bool
binding_map::apply_ctor_val_to_range (const region *parent_reg,
                                      region_model_manager *mgr,
                                      tree min_index, tree max_index,
                                      tree val)
{
  gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
  gcc_assert (TREE_CODE (max_index) == INTEGER_CST);

  /* Generate a binding key for the range.  */
  const region *min_element
    = get_subregion_within_ctor (parent_reg, min_index, mgr);
  const region *max_element
    = get_subregion_within_ctor (parent_reg, max_index, mgr);
  region_offset min_offset = min_element->get_offset (mgr);
  if (min_offset.symbolic_p ())
    return false;
  bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
  store_manager *smgr = mgr->get_store_manager ();
  if (max_element->empty_p ())
    return false;
  const binding_key *max_element_key = binding_key::make (smgr, max_element);
  if (max_element_key->symbolic_p ())
    return false;
  const concrete_binding *max_element_ckey
    = max_element_key->dyn_cast_concrete_binding ();
  bit_size_t range_size_in_bits
    = max_element_ckey->get_next_bit_offset () - start_bit_offset;
  const concrete_binding *range_key
    = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
  if (range_key->symbolic_p ())
    return false;

  /* Get the value.  */
  if (TREE_CODE (val) == CONSTRUCTOR)
    return false;
  const svalue *sval = get_svalue_for_ctor_val (val, mgr);

  /* Bind the value to the range.  */
  put (range_key, sval);
  return true;
}

/* Bind the value VAL into INDEX within PARENT_REG.
   For use in handling a pair of entries within a CONSTRUCTOR.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit).  */

bool
binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
                                              region_model_manager *mgr,
                                              tree index, tree val)
{
  const region *child_reg
    = get_subregion_within_ctor (parent_reg, index, mgr);
  if (TREE_CODE (val) == CONSTRUCTOR)
    return apply_ctor_to_region (child_reg, val, mgr);
  else
    {
      const svalue *sval = get_svalue_for_ctor_val (val, mgr);
      if (child_reg->empty_p ())
        return false;
      const binding_key *k
        = binding_key::make (mgr->get_store_manager (), child_reg);
      /* Handle the case where we have an unknown size for child_reg
         (e.g. due to it being a trailing field with incomplete array
         type).  */
      if (!k->concrete_p ())
        {
          /* Assume that sval has a well-defined size for this case.  */
          tree sval_type = sval->get_type ();
          gcc_assert (sval_type);
          HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
          gcc_assert (sval_byte_size != -1);
          bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
          /* Get offset of child relative to base region.  */
          region_offset child_base_offset = child_reg->get_offset (mgr);
          if (child_base_offset.symbolic_p ())
            return false;
          /* Convert to an offset relative to the parent region.  */
          region_offset parent_base_offset = parent_reg->get_offset (mgr);
          gcc_assert (!parent_base_offset.symbolic_p ());
          bit_offset_t child_parent_offset
            = (child_base_offset.get_bit_offset ()
               - parent_base_offset.get_bit_offset ());
          /* Create a concrete key for the child within the parent.  */
          k = mgr->get_store_manager ()->get_concrete_binding
            (child_parent_offset, sval_bit_size);
        }
      gcc_assert (k->concrete_p ());
      put (k, sval);
      return true;
    }
}

/* Populate OUT with all bindings within this map that overlap KEY.  */

void
binding_map::get_overlapping_bindings (const binding_key *key,
                                       auto_vec<const binding_key *> *out)
{
  for (auto iter : *this)
    {
      const binding_key *iter_key = iter.first;
      if (const concrete_binding *ckey
            = key->dyn_cast_concrete_binding ())
        {
          if (const concrete_binding *iter_ckey
                = iter_key->dyn_cast_concrete_binding ())
            {
              if (ckey->overlaps_p (*iter_ckey))
                out->safe_push (iter_key);
            }
          else
            {
              /* Assume overlap.  */
              out->safe_push (iter_key);
            }
        }
      else
        {
          /* Assume overlap.  */
          out->safe_push (iter_key);
        }
    }
}

/* Remove, truncate, and/or split any bindings within this map that
   overlap DROP_KEY.

   For example, if we have:

     +------------------------------------+
     |             old binding            |
     +------------------------------------+

   which is to be overwritten with:

     .......+----------------------+.......
     .......|      new binding     |.......
     .......+----------------------+.......

   this function "cuts a hole" out of the old binding:

     +------+......................+------+
     |prefix| hole for new binding |suffix|
     +------+......................+------+

   into which the new binding can be added without
   overlapping the prefix or suffix.

   The prefix and suffix (if added) will be bound to the pertinent
   parts of the value of the old binding.

   For example, given:
     struct s5
     {
       char arr[8];
     };
     void test_5 (struct s5 *p)
     {
       struct s5 f = *p;
       f.arr[3] = 42;
     }
   then after the "f = *p;" we have:
     cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
   and at the "f.arr[3] = 42;" we remove the bindings overlapping
   "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
   giving:
     cluster for: f
       key:   {bytes 0-2}
       value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
       key:   {bytes 4-7}
       value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
   punching a hole into which the new value can be written at byte 3:
     cluster for: f
       key:   {bytes 0-2}
       value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
       key:   {byte 3}
       value: 'char' {(char)42}
       key:   {bytes 4-7}
       value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}

   If UNCERTAINTY is non-NULL, use it to record any svalues that
   were removed, as being maybe-bound.

   If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
   were removed as being maybe-live.

   If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
   in the map, due to one or both of the underlying clusters being
   symbolic (but not the same symbolic region).  Hence even if DROP_KEY is a
   concrete binding it could actually be referring to the same memory as
   distinct concrete bindings in the map.  Remove all bindings, but
   register any svalues with *UNCERTAINTY.  */

void
binding_map::remove_overlapping_bindings (store_manager *mgr,
                                          const binding_key *drop_key,
                                          uncertainty_t *uncertainty,
                                          svalue_set *maybe_live_values,
                                          bool always_overlap)
{
  /* Get the bindings of interest within this map.  */
  auto_vec<const binding_key *> bindings;
  if (always_overlap)
    for (auto iter : *this)
      bindings.safe_push (iter.first); /* Add all bindings.  */
  else
    /* Just add overlapping bindings.  */
    get_overlapping_bindings (drop_key, &bindings);

  unsigned i;
  const binding_key *iter_binding;
  FOR_EACH_VEC_ELT (bindings, i, iter_binding)
    {
      /* Record any svalues that were removed to *UNCERTAINTY as being
         maybe-bound, provided at least some part of the binding is symbolic.

         Specifically, if at least one of the bindings is symbolic, or we
         have ALWAYS_OVERLAP for the case where we have possibly aliasing
         regions, then we don't know that the svalue has been overwritten,
         and should record that to *UNCERTAINTY.

         However, if we have concrete keys accessing within the same symbolic
         region, then we *know* that the symbolic region has been overwritten,
         so we don't record it to *UNCERTAINTY, as this could be a genuine
         leak.  */
      const svalue *old_sval = get (iter_binding);
      if (uncertainty
          && (drop_key->symbolic_p ()
              || iter_binding->symbolic_p ()
              || always_overlap))
        uncertainty->on_maybe_bound_sval (old_sval);

      /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
         maybe-live.  */
      if (maybe_live_values)
        maybe_live_values->add (old_sval);

      /* Begin by removing the old binding.  */
      m_map.remove (iter_binding);

      /* Don't attempt to handle prefixes/suffixes for the
         "always_overlap" case; everything's being removed.  */
      if (always_overlap)
        continue;

      /* Now potentially add the prefix and suffix.  */
      if (const concrete_binding *drop_ckey
            = drop_key->dyn_cast_concrete_binding ())
        if (const concrete_binding *iter_ckey
              = iter_binding->dyn_cast_concrete_binding ())
          {
            gcc_assert (drop_ckey->overlaps_p (*iter_ckey));

            const bit_range &drop_bits = drop_ckey->get_bit_range ();
            const bit_range &iter_bits = iter_ckey->get_bit_range ();

            if (iter_bits.get_start_bit_offset ()
                < drop_bits.get_start_bit_offset ())
              {
                /* We have a truncated prefix.  */
                bit_range prefix_bits (iter_bits.get_start_bit_offset (),
                                       (drop_bits.get_start_bit_offset ()
                                        - iter_bits.get_start_bit_offset ()));
                const concrete_binding *prefix_key
                  = mgr->get_concrete_binding (prefix_bits);
                bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
                const svalue *prefix_sval
                  = old_sval->extract_bit_range (NULL_TREE,
                                                 rel_prefix,
                                                 mgr->get_svalue_manager ());
                m_map.put (prefix_key, prefix_sval);
              }

            if (iter_bits.get_next_bit_offset ()
                > drop_bits.get_next_bit_offset ())
              {
                /* We have a truncated suffix.  */
                bit_range suffix_bits (drop_bits.get_next_bit_offset (),
                                       (iter_bits.get_next_bit_offset ()
                                        - drop_bits.get_next_bit_offset ()));
                const concrete_binding *suffix_key
                  = mgr->get_concrete_binding (suffix_bits);
                bit_range rel_suffix (drop_bits.get_next_bit_offset ()
                                      - iter_bits.get_start_bit_offset (),
                                      suffix_bits.m_size_in_bits);
                const svalue *suffix_sval
                  = old_sval->extract_bit_range (NULL_TREE,
                                                 rel_suffix,
                                                 mgr->get_svalue_manager ());
                m_map.put (suffix_key, suffix_sval);
              }
          }
    }
}

/* class binding_cluster.  */

binding_cluster::binding_cluster (const region *base_region)
: m_base_region (base_region), m_map (),
  m_escaped (false), m_touched (false)
{
}

/* binding_cluster's copy ctor.  */

binding_cluster::binding_cluster (const binding_cluster &other)
: m_base_region (other.m_base_region), m_map (other.m_map),
  m_escaped (other.m_escaped), m_touched (other.m_touched)
{
}

/* binding_cluster's assignment operator.  */

binding_cluster&
binding_cluster::operator= (const binding_cluster &other)
{
  gcc_assert (m_base_region == other.m_base_region);
  m_map = other.m_map;
  m_escaped = other.m_escaped;
  m_touched = other.m_touched;
  return *this;
}

/* binding_cluster's equality operator.  */

bool
binding_cluster::operator== (const binding_cluster &other) const
{
  if (m_map != other.m_map)
    return false;

  if (m_base_region != other.m_base_region)
    return false;

  if (m_escaped != other.m_escaped)
    return false;

  if (m_touched != other.m_touched)
    return false;

  gcc_checking_assert (hash () == other.hash ());

  return true;
}

/* Generate a hash value for this binding_cluster.  */

hashval_t
binding_cluster::hash () const
{
  return m_map.hash ();
}

/* Return true if this binding_cluster is symbolic
   i.e. its base region is symbolic.  */

bool
binding_cluster::symbolic_p () const
{
  return m_base_region->get_kind () == RK_SYMBOLIC;
}

/* Dump a representation of this binding_cluster to PP.
   SIMPLE controls how values and regions are to be printed.
   If MULTILINE, then split the dump over multiple lines and
   use whitespace for readability, otherwise put all on one line.  */

void
binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
                             bool multiline) const
{
  if (m_escaped)
    {
      if (multiline)
        {
          pp_string (pp, "    ESCAPED");
          pp_newline (pp);
        }
      else
        pp_string (pp, "(ESCAPED)");
    }
  if (m_touched)
    {
      if (multiline)
        {
          pp_string (pp, "    TOUCHED");
          pp_newline (pp);
        }
      else
        pp_string (pp, "(TOUCHED)");
    }

  m_map.dump_to_pp (pp, simple, multiline);
}

/* Dump a multiline representation of this binding_cluster to stderr.  */

DEBUG_FUNCTION void
binding_cluster::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  pp_string (&pp, "  cluster for: ");
  m_base_region->dump_to_pp (&pp, simple);
  pp_string (&pp, ": ");
  pp_newline (&pp);
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Assert that this object is valid.  */

void
binding_cluster::validate () const
{
  int num_symbolic = 0;
  int num_concrete = 0;
  for (auto iter : m_map)
    {
      if (iter.first->symbolic_p ())
        num_symbolic++;
      else
        num_concrete++;
    }
  /* We shouldn't have more than one symbolic key per cluster
     (or one would have clobbered the other).  */
  gcc_assert (num_symbolic < 2);
  /* We can't have both concrete and symbolic keys.  */
  gcc_assert (num_concrete == 0 || num_symbolic == 0);
}

/* Return a new json::object of the form
   {"escaped": true/false,
    "touched": true/false,
    "map" : object for the binding_map}.  */

json::object *
binding_cluster::to_json () const
{
  json::object *cluster_obj = new json::object ();

  cluster_obj->set ("escaped", new json::literal (m_escaped));
  cluster_obj->set ("touched", new json::literal (m_touched));
  cluster_obj->set ("map", m_map.to_json ());

  return cluster_obj;
}

/* Add a binding of SVAL to REG, unpacking SVAL if it is a
   compound_sval.  */

void
binding_cluster::bind (store_manager *mgr,
                       const region *reg, const svalue *sval)
{
  if (const compound_svalue *compound_sval
        = sval->dyn_cast_compound_svalue ())
    {
      bind_compound_sval (mgr, reg, compound_sval);
      return;
    }

  if (reg->empty_p ())
    return;
  const binding_key *binding = binding_key::make (mgr, reg);
  bind_key (binding, sval);
}

/* Bind SVAL to KEY.
   Unpacking of compound_svalues should already have been done by the
   time this is called.  */

void
binding_cluster::bind_key (const binding_key *key, const svalue *sval)
{
  gcc_assert (sval->get_kind () != SK_COMPOUND);

  m_map.put (key, sval);
  if (key->symbolic_p ())
    m_touched = true;
}

/* Subroutine of binding_cluster::bind.
   Unpack compound_svals when binding them, so that we bind them
   element-wise.  */

void
binding_cluster::bind_compound_sval (store_manager *mgr,
                                     const region *reg,
                                     const compound_svalue *compound_sval)
{
  region_offset reg_offset
    = reg->get_offset (mgr->get_svalue_manager ());
  if (reg_offset.symbolic_p ())
    {
      m_touched = true;
      clobber_region (mgr, reg);
      return;
    }

  for (map_t::iterator iter = compound_sval->begin ();
       iter != compound_sval->end (); ++iter)
    {
      const binding_key *iter_key = (*iter).first;
      const svalue *iter_sval = (*iter).second;

      if (const concrete_binding *concrete_key
            = iter_key->dyn_cast_concrete_binding ())
        {
          bit_offset_t effective_start
            = (concrete_key->get_start_bit_offset ()
               + reg_offset.get_bit_offset ());
          const concrete_binding *effective_concrete_key
            = mgr->get_concrete_binding (effective_start,
                                         concrete_key->get_size_in_bits ());
          bind_key (effective_concrete_key, iter_sval);
        }
      else
        gcc_unreachable ();
    }
}

/* Remove all bindings overlapping REG within this cluster.  */

void
binding_cluster::clobber_region (store_manager *mgr, const region *reg)
{
  remove_overlapping_bindings (mgr, reg, NULL, NULL);
}

/* Remove any bindings for REG within this cluster.  */

void
binding_cluster::purge_region (store_manager *mgr, const region *reg)
{
  gcc_assert (reg->get_kind () == RK_DECL);
  if (reg->empty_p ())
    return;
  const binding_key *binding
    = binding_key::make (mgr, const_cast<region *> (reg));
  m_map.remove (binding);
}

/* Clobber REG and fill it with repeated copies of SVAL.  */

void
binding_cluster::fill_region (store_manager *mgr,
                              const region *reg,
                              const svalue *sval)
{
  clobber_region (mgr, reg);

  region_model_manager *sval_mgr = mgr->get_svalue_manager ();
  const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
  const svalue *fill_sval
    = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
                                               byte_size_sval, sval);
  bind (mgr, reg, fill_sval);
}

/* Clobber REG within this cluster and fill it with zeroes.  */

void
binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
{
  region_model_manager *sval_mgr = mgr->get_svalue_manager ();
  const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
  fill_region (mgr, reg, zero_sval);
}

/* Mark REG_TO_BIND within this cluster as being unknown.

   Remove any bindings overlapping REG_FOR_OVERLAP.
   If UNCERTAINTY is non-NULL, use it to record any svalues that
   had bindings to them removed, as being maybe-bound.
   If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
   had bindings to them removed, as being maybe-live.

   REG_TO_BIND and REG_FOR_OVERLAP are the same for
   store::mark_region_as_unknown, but are different in
   store::set_value's alias handling, for handling the case where
   we have a write to a symbolic REG_FOR_OVERLAP.  */

void
binding_cluster::mark_region_as_unknown (store_manager *mgr,
                                         const region *reg_to_bind,
                                         const region *reg_for_overlap,
                                         uncertainty_t *uncertainty,
                                         svalue_set *maybe_live_values)
{
  if (reg_to_bind->empty_p ())
    return;

  remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty,
                               maybe_live_values);

  /* Add a default binding to "unknown".  */
  region_model_manager *sval_mgr = mgr->get_svalue_manager ();
  const svalue *sval
    = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
  bind (mgr, reg_to_bind, sval);
}

/* Purge state involving SVAL.  */

void
binding_cluster::purge_state_involving (const svalue *sval,
                                        region_model_manager *sval_mgr)
{
  auto_vec<const binding_key *> to_remove;
  auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
  for (auto iter : m_map)
    {
      const binding_key *iter_key = iter.first;
      if (const symbolic_binding *symbolic_key
            = iter_key->dyn_cast_symbolic_binding ())
        {
          const region *reg = symbolic_key->get_region ();
          if (reg->involves_p (sval))
            to_remove.safe_push (iter_key);
        }
      const svalue *iter_sval = iter.second;
      if (iter_sval->involves_p (sval))
        to_make_unknown.safe_push (std::make_pair(iter_key,
                                                  iter_sval->get_type ()));
    }
  for (auto iter : to_remove)
    {
      m_map.remove (iter);
      m_touched = true;
    }
  for (auto iter : to_make_unknown)
    {
      const svalue *new_sval
        = sval_mgr->get_or_create_unknown_svalue (iter.second);
      m_map.put (iter.first, new_sval);
    }
}

/* Get any SVAL bound to REG within this cluster,
   without checking parent regions of REG.  */

const svalue *
binding_cluster::get_binding (store_manager *mgr,
                              const region *reg) const
{
  if (reg->empty_p ())
    return NULL;
  const binding_key *reg_binding = binding_key::make (mgr, reg);
  const svalue *sval = m_map.get (reg_binding);
  if (sval)
    {
      /* If we have a struct with a single field, then the binding of
         the field will equal that of the struct, and looking up e.g.
         PARENT_REG.field within:
            cluster for PARENT_REG: INIT_VAL(OTHER_REG)
         will erroneously return INIT_VAL(OTHER_REG), rather than
         SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
         Fix this issue by iterating upwards whilst the bindings are equal,
         expressing the lookups as subvalues.
         We have to gather a list of subregion accesses, then walk it
         in reverse to get the subvalues.  */
      auto_vec<const region *> regions;
      while (const region *parent_reg = reg->get_parent_region ())
        {
          const binding_key *parent_reg_binding
            = binding_key::make (mgr, parent_reg);
          if (parent_reg_binding == reg_binding
              && sval->get_type ()
              && reg->get_type ()
              && sval->get_type () != reg->get_type ())
            {
              regions.safe_push (reg);
              reg = parent_reg;
            }
          else
            break;
        }
      if (sval->get_type ()
          && reg->get_type ()
          && sval->get_type () == reg->get_type ())
        {
          unsigned i;
          const region *iter_reg;
          FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
            {
              region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
              sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
                                                        sval, iter_reg);
            }
        }
    }
  return sval;
}

/* Get any SVAL bound to REG within this cluster,
   either directly for REG, or recursively checking for bindings within
   parent regions and extracting subvalues if need be.  */

const svalue *
binding_cluster::get_binding_recursive (store_manager *mgr,
                                        const region *reg) const
{
  if (const svalue *sval = get_binding (mgr, reg))
    return sval;
  if (reg != m_base_region)
    if (const region *parent_reg = reg->get_parent_region ())
      if (const svalue *parent_sval
            = get_binding_recursive (mgr, parent_reg))
        {
          /* Extract child svalue from parent svalue.  */
          region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
          return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
                                                    parent_sval, reg);
        }
  return NULL;
}

/* Get any value bound for REG within this cluster.  */

const svalue *
binding_cluster::get_any_binding (store_manager *mgr,
                                  const region *reg) const
{
  /* Look for a direct binding.  */
  if (const svalue *direct_sval
        = get_binding_recursive (mgr, reg))
    return direct_sval;

  /* If we had a write to a cluster of unknown size, we might
     have a self-binding of the whole base region with an svalue,
     where the base region is symbolic.
     Handle such cases by returning sub_svalue instances.  */
  if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
    {
      /* Extract child svalue from parent svalue.  */
      region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
      return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
                                                cluster_sval, reg);
    }

  /* If this cluster has been touched by a symbolic write, then the content
     of any subregion not currently specifically bound is "UNKNOWN".  */
  if (m_touched)
    {
      region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
      return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
    }

  /* Alternatively, if this is a symbolic read and the cluster has any bindings,
     then we don't know if we're reading those values or not, so the result
     is also "UNKNOWN".  */
  if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
      && m_map.elements () > 0)
    {
      region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
      return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
    }

  if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
    return compound_sval;

  /* Otherwise, the initial value, or uninitialized.  */
  return NULL;
}

1708/* Attempt to get a compound_svalue for the bindings within the cluster
1709 affecting REG (which could be the base region itself).
1710
1711 Create a compound_svalue with the subset of bindings the affect REG,
1712 offsetting them so that the offsets are relative to the start of REG
1713 within the cluster.
1714
1715 For example, REG could be one element within an array of structs.
1716
1717 Return the resulting compound_svalue, or NULL if there's a problem. */
1718
1719const svalue *
1720binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1721 const region *reg) const
1722{
7a6564c9
TL
1723 region_offset cluster_offset
1724 = m_base_region->get_offset (mgr->get_svalue_manager ());
808f4dfe
DM
1725 if (cluster_offset.symbolic_p ())
1726 return NULL;
7a6564c9 1727 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
808f4dfe
DM
1728 if (reg_offset.symbolic_p ())
1729 return NULL;
1730
41faa1d7
DM
1731 if (reg->empty_p ())
1732 return NULL;
1733
e61ffa20
DM
1734 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1735
1736 /* We will a build the result map in two parts:
1737 (a) result_map, holding the concrete keys from this cluster,
1738
1739 (b) default_map, holding the initial values for the region
1740 (e.g. uninitialized, initializer values, or zero), unless this
1741 cluster has been touched.
1742
1743 We will populate (a), and as we do, clobber (b), trimming and
1744 splitting its bindings as necessary.
1745 Finally, we will merge (b) into (a), giving a concrete map
1746 that merges both the initial values and the bound values from
1747 the binding_cluster.
1748 Doing it this way reduces N for the O(N^2) intersection-finding,
1749 perhaps we should have a spatial-organized data structure for
1750 concrete keys, though. */
1751
1752 binding_map result_map;
1753 binding_map default_map;
1754
1755 /* Set up default values in default_map. */
1756 const svalue *default_sval;
1757 if (m_touched)
1758 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1759 else
1760 default_sval = sval_mgr->get_or_create_initial_value (reg);
1761 const binding_key *default_key = binding_key::make (mgr, reg);
1762 default_map.put (default_key, default_sval);
1763
808f4dfe
DM
1764 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1765 {
1766 const binding_key *key = (*iter).first;
1767 const svalue *sval = (*iter).second;
1768
1769 if (const concrete_binding *concrete_key
1770 = key->dyn_cast_concrete_binding ())
1771 {
e61ffa20 1772 const bit_range &bound_range = concrete_key->get_bit_range ();
808f4dfe 1773
e61ffa20
DM
1774 bit_size_t reg_bit_size;
1775 if (!reg->get_bit_size (&reg_bit_size))
1776 return NULL;
808f4dfe 1777
e61ffa20
DM
1778 bit_range reg_range (reg_offset.get_bit_offset (),
1779 reg_bit_size);
808f4dfe 1780
e61ffa20
DM
1781 /* Skip bindings that are outside the bit range of REG. */
1782 if (!bound_range.intersects_p (reg_range))
1783 continue;
808f4dfe 1784
e61ffa20
DM
1785 /* We shouldn't have an exact match; that should have been
1786 handled already. */
1787 gcc_assert (!(reg_range == bound_range));
808f4dfe 1788
e61ffa20
DM
1789 bit_range subrange (0, 0);
1790 if (reg_range.contains_p (bound_range, &subrange))
808f4dfe 1791 {
e61ffa20
DM
1792 /* We have a bound range fully within REG.
1793 Add it to map, offsetting accordingly. */
1794
1795 /* Get offset of KEY relative to REG, rather than to
1796 the cluster. */
1797 const concrete_binding *offset_concrete_key
1798 = mgr->get_concrete_binding (subrange);
1799 result_map.put (offset_concrete_key, sval);
1800
1801	      /* Clobber default_map, removing/trimming/splitting where
1802 it overlaps with offset_concrete_key. */
1803 default_map.remove_overlapping_bindings (mgr,
1804 offset_concrete_key,
14f5e56a 1805 NULL, NULL, false);
1806 }
1807 else if (bound_range.contains_p (reg_range, &subrange))
1808 {
1809 /* REG is fully within the bound range, but
1810 is not equal to it; we're extracting a subvalue. */
1811 return sval->extract_bit_range (reg->get_type (),
1812 subrange,
1813 mgr->get_svalue_manager ());
1814 }
1815 else
1816 {
1817 /* REG and the bound range partially overlap. */
1818 bit_range reg_subrange (0, 0);
1819 bit_range bound_subrange (0, 0);
1820 reg_range.intersects_p (bound_range,
1821 &reg_subrange, &bound_subrange);
1822
1823 /* Get the bits from the bound value for the bits at the
1824 intersection (relative to the bound value). */
1825 const svalue *overlap_sval
1826 = sval->extract_bit_range (NULL_TREE,
1827 bound_subrange,
1828 mgr->get_svalue_manager ());
1829
1830 /* Get key for overlap, relative to the REG. */
1831 const concrete_binding *overlap_concrete_key
1832 = mgr->get_concrete_binding (reg_subrange);
1833 result_map.put (overlap_concrete_key, overlap_sval);
1834
1835	      /* Clobber default_map, removing/trimming/splitting where
1836 it overlaps with overlap_concrete_key. */
1837 default_map.remove_overlapping_bindings (mgr,
1838 overlap_concrete_key,
14f5e56a 1839 NULL, NULL, false);
1840 }
1841 }
1842 else
1843 /* Can't handle symbolic bindings. */
1844 return NULL;
1845 }
1846
1847 if (result_map.elements () == 0)
1848 return NULL;
1849
1850 /* Merge any bindings from default_map into result_map. */
1851 for (auto iter : default_map)
1852 {
1853 const binding_key *key = iter.first;
1854 const svalue *sval = iter.second;
1855 result_map.put (key, sval);
808f4dfe 1856 }
1857
1858 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1859}
1860
e61ffa20 1861/* Remove, truncate, and/or split any bindings within this map that
1862 could overlap REG.
1863
1864 If REG's base region or this cluster is symbolic and they're different
1865 base regions, then remove everything in this cluster's map, on the
1866 grounds that REG could be referring to the same memory as anything
1867 in the map.
1868
3a66c289 1869 If UNCERTAINTY is non-NULL, use it to record any svalues that
1870 were removed, as being maybe-bound.
1871
1872 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1873 were removed, as being maybe-live. */
1874
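/* Editorial example (hypothetical state): if this cluster is for a decl
   "buf" and REG is "*p" for a pointer with an unknown value, the two
   base regions differ and one of them is symbolic, so every binding in
   the cluster's map is treated as potentially overlapping REG and is
   removed (the ALWAYS_OVERLAP case computed below).  */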
1875void
1876binding_cluster::remove_overlapping_bindings (store_manager *mgr,
3a66c289 1877 const region *reg,
1878 uncertainty_t *uncertainty,
1879 svalue_set *maybe_live_values)
808f4dfe 1880{
1881 if (reg->empty_p ())
1882 return;
e61ffa20 1883 const binding_key *reg_binding = binding_key::make (mgr, reg);
808f4dfe 1884
1885 const region *cluster_base_reg = get_base_region ();
1886 const region *other_base_reg = reg->get_base_region ();
1887 /* If at least one of the base regions involved is symbolic, and they're
1888 not the same base region, then consider everything in the map as
1889 potentially overlapping with reg_binding (even if it's a concrete
1890 binding and things in the map are concrete - they could be referring
1891 to the same memory when the symbolic base regions are taken into
1892 account). */
1893 bool always_overlap = (cluster_base_reg != other_base_reg
1894 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1895 || other_base_reg->get_kind () == RK_SYMBOLIC));
1896 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
14f5e56a 1897 maybe_live_values,
88b939b1 1898 always_overlap);
1899}
1900
1901/* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1902 MGR and MERGER.
1903 Return true if they can be merged, false otherwise. */
1904
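/* Editorial summary of the per-key merging rules implemented below:
   a key bound to the same svalue in both clusters keeps that value;
   a key bound to different svalues in the two clusters keeps the
   svalue produced by svalue::can_merge_p, or causes the whole cluster
   merge to be rejected if that fails; a key bound in only one cluster
   becomes "unknown", again provided the bound svalue can merge with an
   unknown svalue.  */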
1905bool
1906binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1907 const binding_cluster *cluster_b,
1908 binding_cluster *out_cluster,
be6c485b 1909 store *out_store,
1910 store_manager *mgr,
1911 model_merger *merger)
1912{
1913 gcc_assert (out_cluster);
1914
1915 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1916 true if either of the inputs is true. */
1917 if ((cluster_a && cluster_a->m_escaped)
1918 || (cluster_b && cluster_b->m_escaped))
1919 out_cluster->m_escaped = true;
1920 if ((cluster_a && cluster_a->m_touched)
1921 || (cluster_b && cluster_b->m_touched))
1922 out_cluster->m_touched = true;
1923
1924   /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1925 could be NULL. Handle these cases. */
1926 if (cluster_a == NULL)
1927 {
1928 gcc_assert (cluster_b != NULL);
1929 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
be6c485b 1930 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1931 return true;
1932 }
1933 if (cluster_b == NULL)
1934 {
1935 gcc_assert (cluster_a != NULL);
1936 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
be6c485b 1937 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1938 return true;
1939 }
1940
1941 /* The "both inputs are non-NULL" case. */
1942 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1943 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1944 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1945
1946 hash_set<const binding_key *> keys;
1947 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1948 iter_a != cluster_a->m_map.end (); ++iter_a)
1949 {
1950 const binding_key *key_a = (*iter_a).first;
1951 keys.add (key_a);
1952 }
1953 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1954 iter_b != cluster_b->m_map.end (); ++iter_b)
1955 {
1956 const binding_key *key_b = (*iter_b).first;
1957 keys.add (key_b);
1958 }
1959 int num_symbolic_keys = 0;
1960 int num_concrete_keys = 0;
1961 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1962 iter != keys.end (); ++iter)
1963 {
13290217 1964 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1965 const binding_key *key = *iter;
1966 const svalue *sval_a = cluster_a->get_any_value (key);
1967 const svalue *sval_b = cluster_b->get_any_value (key);
1968
1969 if (key->symbolic_p ())
1970 num_symbolic_keys++;
1971 else
1972 num_concrete_keys++;
1973
1974 if (sval_a == sval_b)
1975 {
1976 gcc_assert (sval_a);
1977 out_cluster->m_map.put (key, sval_a);
1978 continue;
1979 }
1980 else if (sval_a && sval_b)
1981 {
1982 if (const svalue *merged_sval
1983 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1984 {
1985 out_cluster->m_map.put (key, merged_sval);
1986 continue;
1987 }
1988 /* Merger of the svalues failed. Reject merger of the cluster. */
1989 return false;
1990 }
1991
1992 /* If we get here, then one cluster binds this key and the other
1993 doesn't; merge them as "UNKNOWN". */
1994 gcc_assert (sval_a || sval_b);
1995
1996 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1997 tree type = bound_sval->get_type ();
1998 const svalue *unknown_sval
1999 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
2000
2001 /* ...but reject the merger if this sval shouldn't be mergeable
2002 (e.g. reject merging svalues that have non-purgable sm-state,
2003 to avoid falsely reporting memory leaks by merging them
2004 with something else). */
2005 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
2006 return false;
2007
2008 out_cluster->m_map.put (key, unknown_sval);
2009 }
2010
2011   /* We can have at most one symbolic key per cluster, and if we have
2012      one, we can't also have any concrete keys.
2013      If the merged map violates that, mark the cluster as touched, with no keys.  */
2014 if (num_symbolic_keys >= 2
2015 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
2016 {
2017 out_cluster->m_touched = true;
2018 out_cluster->m_map.empty ();
808f4dfe 2019 }
2020
2021 /* We don't handle other kinds of overlaps yet. */
2022
2023 return true;
2024}
2025
2026/* Update this cluster to reflect an attempt to merge OTHER where there
2027 is no other cluster to merge with, and so we're notionally merging the
2028 bound values in OTHER with the initial value of the relevant regions.
2029
2030 Any bound keys in OTHER should be bound to unknown in this. */
2031
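/* Editorial example (hypothetical state): if OTHER binds a pointer to
   &HEAP_ALLOCATED_REGION(12) and this side of the merge has no cluster
   at all, the merged cluster binds that key to an unknown value, and
   the cluster for HEAP_ALLOCATED_REGION(12) in OUT_STORE is marked as
   escaped, so that losing the concrete pointer is not misreported as a
   leak.  */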
2032void
2033binding_cluster::make_unknown_relative_to (const binding_cluster *other,
be6c485b 2034 store *out_store,
2035 store_manager *mgr)
2036{
2037 for (map_t::iterator iter = other->m_map.begin ();
2038 iter != other->m_map.end (); ++iter)
2039 {
2040 const binding_key *iter_key = (*iter).first;
2041 const svalue *iter_sval = (*iter).second;
2042 const svalue *unknown_sval
2043 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
2044 (iter_sval->get_type ());
2045 m_map.put (iter_key, unknown_sval);
2046
2047 /* For any pointers in OTHER, the merger means that the
2048 concrete pointer becomes an unknown value, which could
2049 show up as a false report of a leak when considering what
2050 pointers are live before vs after.
2051 Avoid this by marking the base regions they point to as having
2052 escaped. */
2053 if (const region_svalue *region_sval
2054 = iter_sval->dyn_cast_region_svalue ())
2055 {
2056 const region *base_reg
2057 = region_sval->get_pointee ()->get_base_region ();
2058 if (base_reg->tracked_p ()
2059 && !base_reg->symbolic_for_unknown_ptr_p ())
2060 {
2061 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2062 c->mark_as_escaped ();
2063 }
be6c485b 2064 }
2065 }
2066}
2067
2068/* Mark this cluster as having escaped. */
2069
2070void
2071binding_cluster::mark_as_escaped ()
2072{
2073 m_escaped = true;
2074}
2075
2076/* If this cluster has escaped (by this call, or by an earlier one, or
2077 by being an external param), then unbind all values and mark it
2078 as "touched", so that it has a conjured value, rather than an
2079 initial_svalue.
2080 Use P to purge state involving conjured_svalues. */
2081
2082void
2083binding_cluster::on_unknown_fncall (const gcall *call,
2084 store_manager *mgr,
2085 const conjured_purge &p)
2086{
2087 if (m_escaped)
2088 {
2089 m_map.empty ();
2090
2091 if (!m_base_region->empty_p ())
2092 {
2093 /* Bind it to a new "conjured" value using CALL. */
2094 const svalue *sval
2095 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
3734527d 2096 (m_base_region->get_type (), call, m_base_region, p);
2097 bind (mgr, m_base_region, sval);
2098 }
2099
2100 m_touched = true;
2101 }
2102}
2103
2104/* Mark this cluster as having been clobbered by STMT.
2105 Use P to purge state involving conjured_svalues. */
2106
2107void
2108binding_cluster::on_asm (const gasm *stmt,
2109 store_manager *mgr,
2110 const conjured_purge &p)
2111{
2112 m_map.empty ();
2113
2114   /* Bind it to a new "conjured" value using STMT.  */
2115 const svalue *sval
2116 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
3734527d 2117 (m_base_region->get_type (), stmt, m_base_region, p);
2118 bind (mgr, m_base_region, sval);
2119
2120 m_touched = true;
2121}
2122
2123/* Return true if this cluster has escaped. */
2124
2125bool
2126binding_cluster::escaped_p () const
2127{
2128 /* Consider the "errno" region to always have escaped. */
2129 if (m_base_region->get_kind () == RK_ERRNO)
2130 return true;
2131 return m_escaped;
2132}
2133
2134/* Return true if this binding_cluster has no information,
2135 i.e. if there are no bindings, and it hasn't been marked as having
2136 escaped, or touched symbolically. */
2137
2138bool
2139binding_cluster::redundant_p () const
2140{
2141 return (m_map.elements () == 0
2142 && !m_escaped
2143 && !m_touched);
2144}
2145
027e3041 2146/* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2147
2148static void
2149append_pathvar_with_type (path_var pv,
2150 tree type,
2151 auto_vec<path_var> *out_pvs)
2152{
2153 gcc_assert (pv.m_tree);
2154
2155 if (TREE_TYPE (pv.m_tree) != type)
2156 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2157
2158 out_pvs->safe_push (pv);
2159}
2160
2161/* Find representative path_vars for SVAL within this binding of BASE_REG,
2162 appending the results to OUT_PVS. */
2163
2164void
2165binding_cluster::get_representative_path_vars (const region_model *model,
2166 svalue_set *visited,
2167 const region *base_reg,
2168 const svalue *sval,
2169 auto_vec<path_var> *out_pvs)
2170 const
2171{
2172 sval = simplify_for_binding (sval);
2173
2174 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2175 {
2176 const binding_key *key = (*iter).first;
2177 const svalue *bound_sval = (*iter).second;
2178 if (bound_sval == sval)
2179 {
2180 if (const concrete_binding *ckey
2181 = key->dyn_cast_concrete_binding ())
2182 {
2183 auto_vec <const region *> subregions;
2184 base_reg->get_subregions_for_binding
2185 (model->get_manager (),
2186 ckey->get_start_bit_offset (),
2187 ckey->get_size_in_bits (),
2188 sval->get_type (),
2189 &subregions);
2190 unsigned i;
2191 const region *subregion;
2192 FOR_EACH_VEC_ELT (subregions, i, subregion)
2193 {
2194 if (path_var pv
2195 = model->get_representative_path_var (subregion,
2196 visited))
467a4820 2197 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2198 }
2199 }
2200 else
2201 {
2202 const symbolic_binding *skey = (const symbolic_binding *)key;
2203 if (path_var pv
2204 = model->get_representative_path_var (skey->get_region (),
2205 visited))
467a4820 2206 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2207 }
2208 }
2209 }
2210}
2211
2212/* Get any svalue bound to KEY, or NULL. */
2213
2214const svalue *
2215binding_cluster::get_any_value (const binding_key *key) const
2216{
2217 return m_map.get (key);
2218}
2219
2220/* If this cluster has a single direct binding for the whole of the region,
2221 return it.
2222 For use in simplifying dumps. */
2223
2224const svalue *
2225binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2226{
2227 /* Fail gracefully if MGR is NULL to make it easier to dump store
2228 instances in the debugger. */
2229 if (mgr == NULL)
2230 return NULL;
2231
2232 if (m_map.elements () != 1)
2233 return NULL;
2234
2235 if (m_base_region->empty_p ())
2236 return NULL;
2237
e61ffa20 2238 const binding_key *key = binding_key::make (mgr, m_base_region);
2239 return get_any_value (key);
2240}
2241
2242/* class store_manager. */
2243
2244logger *
2245store_manager::get_logger () const
2246{
2247 return m_mgr->get_logger ();
2248}
2249
2250/* binding consolidation. */
2251
2252const concrete_binding *
2253store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
e61ffa20 2254 bit_offset_t size_in_bits)
808f4dfe 2255{
e61ffa20 2256 concrete_binding b (start_bit_offset, size_in_bits);
2257 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2258 return existing;
2259
2260 concrete_binding *to_save = new concrete_binding (b);
2261 m_concrete_binding_key_mgr.put (b, to_save);
2262 return to_save;
2263}
2264
2265const symbolic_binding *
e61ffa20 2266store_manager::get_symbolic_binding (const region *reg)
808f4dfe 2267{
e61ffa20 2268 symbolic_binding b (reg);
2269 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2270 return existing;
2271
2272 symbolic_binding *to_save = new symbolic_binding (b);
2273 m_symbolic_binding_key_mgr.put (b, to_save);
2274 return to_save;
2275}
2276
2277/* class store. */
2278
2279/* store's default ctor. */
2280
2281store::store ()
2282: m_called_unknown_fn (false)
2283{
2284}
2285
2286/* store's copy ctor. */
2287
2288store::store (const store &other)
2289: m_cluster_map (other.m_cluster_map.elements ()),
2290 m_called_unknown_fn (other.m_called_unknown_fn)
2291{
2292 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2293 iter != other.m_cluster_map.end ();
2294 ++iter)
2295 {
2296 const region *reg = (*iter).first;
2297 gcc_assert (reg);
2298 binding_cluster *c = (*iter).second;
2299 gcc_assert (c);
2300 m_cluster_map.put (reg, new binding_cluster (*c));
2301 }
2302}
2303
2304/* store's dtor. */
2305
2306store::~store ()
2307{
2308 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2309 iter != m_cluster_map.end ();
2310 ++iter)
2311 delete (*iter).second;
2312}
2313
2314/* store's assignment operator. */
2315
2316store &
2317store::operator= (const store &other)
2318{
2319 /* Delete existing cluster map. */
2320 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2321 iter != m_cluster_map.end ();
2322 ++iter)
2323 delete (*iter).second;
2324 m_cluster_map.empty ();
2325
2326 m_called_unknown_fn = other.m_called_unknown_fn;
2327
2328 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2329 iter != other.m_cluster_map.end ();
2330 ++iter)
2331 {
2332 const region *reg = (*iter).first;
2333 gcc_assert (reg);
2334 binding_cluster *c = (*iter).second;
2335 gcc_assert (c);
2336 m_cluster_map.put (reg, new binding_cluster (*c));
2337 }
2338 return *this;
2339}
2340
2341/* store's equality operator. */
2342
2343bool
2344store::operator== (const store &other) const
2345{
2346 if (m_called_unknown_fn != other.m_called_unknown_fn)
2347 return false;
2348
2349 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2350 return false;
2351
2352 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2353 iter != m_cluster_map.end ();
2354 ++iter)
2355 {
2356 const region *reg = (*iter).first;
2357 binding_cluster *c = (*iter).second;
2358 binding_cluster **other_slot
2359 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2360 if (other_slot == NULL)
2361 return false;
2362 if (*c != **other_slot)
2363 return false;
2364 }
2365
2366 gcc_checking_assert (hash () == other.hash ());
2367
2368 return true;
2369}
2370
2371/* Get a hash value for this store. */
2372
2373hashval_t
2374store::hash () const
2375{
2376 hashval_t result = 0;
2377 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2378 iter != m_cluster_map.end ();
2379 ++iter)
2380 result ^= (*iter).second->hash ();
2381 return result;
2382}
2383
2384/* Populate OUT with a sorted list of parent regions for the regions in IN,
2385 removing duplicate parents. */
2386
2387static void
2388get_sorted_parent_regions (auto_vec<const region *> *out,
2389 auto_vec<const region *> &in)
2390{
2391 /* Get the set of parent regions. */
2392 hash_set<const region *> parent_regions;
2393 const region *iter_reg;
2394 unsigned i;
2395 FOR_EACH_VEC_ELT (in, i, iter_reg)
2396 {
2397 const region *parent_reg = iter_reg->get_parent_region ();
2398 gcc_assert (parent_reg);
2399 parent_regions.add (parent_reg);
2400 }
2401
2402 /* Write to OUT. */
2403 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2404 iter != parent_regions.end(); ++iter)
2405 out->safe_push (*iter);
2406
2407 /* Sort OUT. */
b0702ac5 2408 out->qsort (region::cmp_ptr_ptr);
2409}
2410
2411/* Dump a representation of this store to PP, using SIMPLE to control how
2412 svalues and regions are printed.
2413 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2414 (to make it easier to use from the debugger). */
2415
2416void
2417store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2418 store_manager *mgr) const
2419{
2420 /* Sort into some deterministic order. */
2421 auto_vec<const region *> base_regions;
2422 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2423 iter != m_cluster_map.end (); ++iter)
2424 {
2425 const region *base_reg = (*iter).first;
2426 base_regions.safe_push (base_reg);
2427 }
b0702ac5 2428 base_regions.qsort (region::cmp_ptr_ptr);
2429
2430 /* Gather clusters, organize by parent region, so that we can group
2431 together locals, globals, etc. */
2432 auto_vec<const region *> parent_regions;
2433 get_sorted_parent_regions (&parent_regions, base_regions);
2434
2435 const region *parent_reg;
2436 unsigned i;
2437 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2438 {
2439 gcc_assert (parent_reg);
2440 pp_string (pp, "clusters within ");
2441 parent_reg->dump_to_pp (pp, simple);
2442 if (multiline)
2443 pp_newline (pp);
2444 else
2445 pp_string (pp, " {");
2446
2447 const region *base_reg;
2448 unsigned j;
2449 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2450 {
2451 /* This is O(N * M), but N ought to be small. */
2452 if (base_reg->get_parent_region () != parent_reg)
2453 continue;
2454 binding_cluster *cluster
2455 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2456 if (!multiline)
2457 {
2458 if (j > 0)
2459 pp_string (pp, ", ");
2460 }
2461 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2462 {
2463 /* Special-case to simplify dumps for the common case where
2464 we just have one value directly bound to the whole of a
2465 region. */
2466 if (multiline)
2467 {
2468 pp_string (pp, " cluster for: ");
2469 base_reg->dump_to_pp (pp, simple);
2470 pp_string (pp, ": ");
2471 sval->dump_to_pp (pp, simple);
2472 if (cluster->escaped_p ())
2473 pp_string (pp, " (ESCAPED)");
2474 if (cluster->touched_p ())
2475 pp_string (pp, " (TOUCHED)");
2476 pp_newline (pp);
2477 }
2478 else
2479 {
2480 pp_string (pp, "region: {");
2481 base_reg->dump_to_pp (pp, simple);
2482 pp_string (pp, ", value: ");
2483 sval->dump_to_pp (pp, simple);
2484 if (cluster->escaped_p ())
2485 pp_string (pp, " (ESCAPED)");
2486 if (cluster->touched_p ())
2487 pp_string (pp, " (TOUCHED)");
2488 pp_string (pp, "}");
2489 }
2490 }
2491 else if (multiline)
2492 {
2493 pp_string (pp, " cluster for: ");
2494 base_reg->dump_to_pp (pp, simple);
2495 pp_newline (pp);
2496 cluster->dump_to_pp (pp, simple, multiline);
2497 }
2498 else
2499 {
2500 pp_string (pp, "base region: {");
2501 base_reg->dump_to_pp (pp, simple);
2502 pp_string (pp, "} has cluster: {");
2503 cluster->dump_to_pp (pp, simple, multiline);
2504 pp_string (pp, "}");
2505 }
2506 }
2507 if (!multiline)
2508 pp_string (pp, "}");
2509 }
2510 pp_printf (pp, "m_called_unknown_fn: %s",
2511 m_called_unknown_fn ? "TRUE" : "FALSE");
2512 if (multiline)
2513 pp_newline (pp);
2514}
2515
2516/* Dump a multiline representation of this store to stderr. */
2517
2518DEBUG_FUNCTION void
2519store::dump (bool simple) const
2520{
2521 pretty_printer pp;
2522 pp_format_decoder (&pp) = default_tree_printer;
2523 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2524 pp.buffer->stream = stderr;
2525 dump_to_pp (&pp, simple, true, NULL);
2526 pp_newline (&pp);
2527 pp_flush (&pp);
2528}
2529
2530/* Assert that this object is valid. */
2531
2532void
2533store::validate () const
2534{
2535 for (auto iter : m_cluster_map)
2536 iter.second->validate ();
2537}
2538
2539/* Return a new json::object of the form
2540 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2541 ... for each cluster within parent region},
2542 ...for each parent region,
dea4a32b 2543 "called_unknown_fn": true/false}. */
2544
2545json::object *
2546store::to_json () const
2547{
2548 json::object *store_obj = new json::object ();
2549
2550 /* Sort into some deterministic order. */
2551 auto_vec<const region *> base_regions;
2552 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2553 iter != m_cluster_map.end (); ++iter)
2554 {
2555 const region *base_reg = (*iter).first;
2556 base_regions.safe_push (base_reg);
2557 }
b0702ac5 2558 base_regions.qsort (region::cmp_ptr_ptr);
2559
2560 /* Gather clusters, organize by parent region, so that we can group
2561 together locals, globals, etc. */
2562 auto_vec<const region *> parent_regions;
2563 get_sorted_parent_regions (&parent_regions, base_regions);
2564
2565 const region *parent_reg;
2566 unsigned i;
2567 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2568 {
2569 gcc_assert (parent_reg);
2570
2571 json::object *clusters_in_parent_reg_obj = new json::object ();
2572
2573 const region *base_reg;
2574 unsigned j;
2575 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2576 {
2577 /* This is O(N * M), but N ought to be small. */
2578 if (base_reg->get_parent_region () != parent_reg)
2579 continue;
2580 binding_cluster *cluster
2581 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2582 label_text base_reg_desc = base_reg->get_desc ();
f858fe7a 2583 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
809192e7 2584 cluster->to_json ());
2585 }
2586 label_text parent_reg_desc = parent_reg->get_desc ();
f858fe7a 2587 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2588 }
2589
2590 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2591
2592 return store_obj;
2593}
2594
2595/* Get any svalue bound to REG, or NULL. */
2596
2597const svalue *
2598store::get_any_binding (store_manager *mgr, const region *reg) const
2599{
2600 const region *base_reg = reg->get_base_region ();
2601 binding_cluster **cluster_slot
2602 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2603 if (!cluster_slot)
2604 return NULL;
2605 return (*cluster_slot)->get_any_binding (mgr, reg);
2606}
2607
2608/* Set the value of LHS_REG to RHS_SVAL. */
2609
2610void
2611store::set_value (store_manager *mgr, const region *lhs_reg,
e61ffa20 2612 const svalue *rhs_sval,
3a66c289 2613 uncertainty_t *uncertainty)
808f4dfe 2614{
2615 logger *logger = mgr->get_logger ();
2616 LOG_SCOPE (logger);
2617
88b939b1 2618 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
808f4dfe 2619
2620 if (lhs_reg->get_type ())
2621 rhs_sval = simplify_for_binding (rhs_sval);
2622 /* ...but if we have no type for the region, retain any cast. */
2623
2624 const region *lhs_base_reg = lhs_reg->get_base_region ();
2625 binding_cluster *lhs_cluster;
2626 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2627 {
2628 /* Reject attempting to bind values into a symbolic region
2629 for an unknown ptr; merely invalidate values below. */
2630 lhs_cluster = NULL;
2631
2632 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2633 then treat the region being pointed to as having escaped. */
2634 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2635 {
2636 const region *ptr_dst = ptr_sval->get_pointee ();
2637 const region *ptr_base_reg = ptr_dst->get_base_region ();
2638 mark_as_escaped (ptr_base_reg);
2639 }
2640 if (uncertainty)
2641 uncertainty->on_maybe_bound_sval (rhs_sval);
1d9f3b7a 2642 }
8c8993c7 2643 else if (lhs_base_reg->tracked_p ())
2644 {
2645 lhs_cluster = get_or_create_cluster (lhs_base_reg);
e61ffa20 2646 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
808f4dfe 2647 }
2648 else
2649 {
2650 /* Reject attempting to bind values into an untracked region;
2651 merely invalidate values below. */
2652 lhs_cluster = NULL;
2653 }
2654
2655 /* Bindings to a cluster can affect other clusters if a symbolic
2656 base region is involved.
2657 Writes to concrete clusters can't affect other concrete clusters,
2658 but can affect symbolic clusters.
2659 Writes to symbolic clusters can affect both concrete and symbolic
2660 clusters.
2661 Invalidate our knowledge of other clusters that might have been
2662 affected by the write.
2663 Gather the set of all svalues that might still be live even if
2664 the store doesn't refer to them. */
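  /* (Editorial example: a write through "*p", where "p" has an unknown
     value, might alias the cluster for an address-taken global "g"; in
     the TS_UNKNOWN case below, "g"'s cluster is conservatively marked
     as unknown.)  */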
2665 svalue_set maybe_live_values;
2666 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2667 iter != m_cluster_map.end (); ++iter)
2668 {
2669 const region *iter_base_reg = (*iter).first;
2670 binding_cluster *iter_cluster = (*iter).second;
2671 if (iter_base_reg != lhs_base_reg
2672 && (lhs_cluster == NULL
2673 || lhs_cluster->symbolic_p ()
2674 || iter_cluster->symbolic_p ()))
2675 {
2676 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2677 switch (t_alias.get_value ())
2678 {
2679 default:
2680 gcc_unreachable ();
2681
2682 case tristate::TS_UNKNOWN:
2683 if (logger)
2684 {
2685 pretty_printer *pp = logger->get_printer ();
2686 logger->start_log_line ();
2687 logger->log_partial ("possible aliasing of ");
2688 iter_base_reg->dump_to_pp (pp, true);
2689 logger->log_partial (" when writing SVAL: ");
2690 rhs_sval->dump_to_pp (pp, true);
2691 logger->log_partial (" to LHS_REG: ");
2692 lhs_reg->dump_to_pp (pp, true);
2693 logger->end_log_line ();
2694 }
2695 /* Mark all of iter_cluster's iter_base_reg as unknown,
2696 using LHS_REG when considering overlaps, to handle
2697 symbolic vs concrete issues. */
2698 iter_cluster->mark_region_as_unknown
2699 (mgr,
2700 iter_base_reg, /* reg_to_bind */
2701 lhs_reg, /* reg_for_overlap */
2702 uncertainty,
2703 &maybe_live_values);
2704 break;
2705
2706 case tristate::TS_TRUE:
2707 gcc_unreachable ();
2708 break;
2709
2710 case tristate::TS_FALSE:
2711 /* If they can't be aliases, then don't invalidate this
2712 cluster. */
2713 break;
2714 }
2715 }
2716 }
2717 /* Given the set of svalues that might still be live, process them
2718 (e.g. marking regions as escaped).
2719 We do this after the iteration to avoid potentially changing
2720 m_cluster_map whilst iterating over it. */
2721 on_maybe_live_values (maybe_live_values);
2722}
2723
2724/* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2725
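/* Editorial note on the cases handled below: an SSA name never aliases
   anything else; regions in different memory spaces can't alias; a
   symbolic region can't alias a decl for which may_be_aliased is
   false; and when neither direction of eval_alias_1 can rule aliasing
   out, the answer is TS_UNKNOWN (never TS_TRUE).  */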
2726tristate
2727store::eval_alias (const region *base_reg_a,
c199723d 2728 const region *base_reg_b) const
2729{
2730 /* SSA names can't alias. */
2731 tree decl_a = base_reg_a->maybe_get_decl ();
2732 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2733 return tristate::TS_FALSE;
2734 tree decl_b = base_reg_b->maybe_get_decl ();
2735 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2736 return tristate::TS_FALSE;
2737
2738 /* Try both ways, for symmetry. */
2739 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2740 if (ts_ab.is_false ())
2741 return tristate::TS_FALSE;
2742 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2743 if (ts_ba.is_false ())
2744 return tristate::TS_FALSE;
2745 return tristate::TS_UNKNOWN;
2746}
2747
2748/* Half of store::eval_alias; called twice for symmetry. */
2749
2750tristate
2751store::eval_alias_1 (const region *base_reg_a,
2752 const region *base_reg_b) const
2753{
2754 /* If they're in different memory spaces, they can't alias. */
2755 {
2756 enum memory_space memspace_a = base_reg_a->get_memory_space ();
2757 if (memspace_a != MEMSPACE_UNKNOWN)
2758 {
2759 enum memory_space memspace_b = base_reg_b->get_memory_space ();
2760 if (memspace_b != MEMSPACE_UNKNOWN
2761 && memspace_a != memspace_b)
2762 return tristate::TS_FALSE;
2763 }
2764 }
2765
2766 if (const symbolic_region *sym_reg_a
2767 = base_reg_a->dyn_cast_symbolic_region ())
2768 {
2769 const svalue *sval_a = sym_reg_a->get_pointer ();
2770 if (tree decl_b = base_reg_b->maybe_get_decl ())
2771 {
2772 if (!may_be_aliased (decl_b))
2773 return tristate::TS_FALSE;
2774 if (sval_a->get_kind () == SK_INITIAL)
2775 if (!is_global_var (decl_b))
2776 {
2777 /* The initial value of a pointer can't point to a local. */
2778 return tristate::TS_FALSE;
2779 }
2780 }
2781 if (sval_a->get_kind () == SK_INITIAL
2782 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2783 {
2784 /* The initial value of a pointer can't point to a
2785 region that was allocated on the heap after the beginning of the
2786 path. */
2787 return tristate::TS_FALSE;
2788 }
2789 if (const widening_svalue *widening_sval_a
2790 = sval_a->dyn_cast_widening_svalue ())
2791 {
2792 const svalue *base = widening_sval_a->get_base_svalue ();
2793 if (const region_svalue *region_sval
2794 = base->dyn_cast_region_svalue ())
2795 {
2796 const region *pointee = region_sval->get_pointee ();
2797 /* If we have sval_a is WIDENING(&REGION, OP), and
2798 B can't alias REGION, then B can't alias A either.
2799 For example, A might arise from
2800 for (ptr = &REGION; ...; ptr++)
2801 where sval_a is ptr in the 2nd iteration of the loop.
2802 We want to ensure that "*ptr" can only clobber things
2803 within REGION's base region. */
2804 tristate ts = eval_alias (pointee->get_base_region (),
2805 base_reg_b);
2806 if (ts.is_false ())
2807 return tristate::TS_FALSE;
2808 }
2809 }
808f4dfe 2810 }
2811 return tristate::TS_UNKNOWN;
2812}
2813
2814/* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2815
2816void
2817store::on_maybe_live_values (const svalue_set &maybe_live_values)
2818{
2819 for (auto sval : maybe_live_values)
2820 {
2821 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
2822 {
2823 const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
2824 mark_as_escaped (base_reg);
2825 }
2826 }
2827}
2828
2829/* Remove all bindings overlapping REG within this store. */
2830
2831void
2832store::clobber_region (store_manager *mgr, const region *reg)
2833{
2834 const region *base_reg = reg->get_base_region ();
2835 binding_cluster **slot = m_cluster_map.get (base_reg);
2836 if (!slot)
2837 return;
2838 binding_cluster *cluster = *slot;
2839 cluster->clobber_region (mgr, reg);
2840 if (cluster->redundant_p ())
2841 {
2842 delete cluster;
2843 m_cluster_map.remove (base_reg);
2844 }
2845}
2846
2847/* Remove any bindings for REG within this store. */
2848
2849void
2850store::purge_region (store_manager *mgr, const region *reg)
2851{
2852 const region *base_reg = reg->get_base_region ();
2853 binding_cluster **slot = m_cluster_map.get (base_reg);
2854 if (!slot)
2855 return;
2856 binding_cluster *cluster = *slot;
2857 cluster->purge_region (mgr, reg);
2858 if (cluster->redundant_p ())
2859 {
2860 delete cluster;
2861 m_cluster_map.remove (base_reg);
2862 }
2863}
2864
e61ffa20 2865/* Fill REG with SVAL. */
808f4dfe
DM
2866
2867void
e61ffa20 2868store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
808f4dfe 2869{
2870 /* Filling an empty region is a no-op. */
2871 if (reg->empty_p ())
2872 return;
2873
808f4dfe 2874 const region *base_reg = reg->get_base_region ();
2875 if (base_reg->symbolic_for_unknown_ptr_p ()
2876 || !base_reg->tracked_p ())
2877 return;
2878 binding_cluster *cluster = get_or_create_cluster (base_reg);
2879 cluster->fill_region (mgr, reg, sval);
2880}
2881
2882/* Zero-fill REG. */
2883
2884void
2885store::zero_fill_region (store_manager *mgr, const region *reg)
2886{
2887 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2888 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2889 fill_region (mgr, reg, zero_sval);
2890}
2891
2892/* Mark REG as having unknown content. */
2893
2894void
3a66c289 2895store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2896 uncertainty_t *uncertainty,
2897 svalue_set *maybe_live_values)
2898{
2899 const region *base_reg = reg->get_base_region ();
2900 if (base_reg->symbolic_for_unknown_ptr_p ()
2901 || !base_reg->tracked_p ())
2902 return;
2903 binding_cluster *cluster = get_or_create_cluster (base_reg);
2904 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
2905 maybe_live_values);
2906}
2907
2908/* Purge state involving SVAL. */
2909
2910void
2911store::purge_state_involving (const svalue *sval,
2912 region_model_manager *sval_mgr)
2913{
2914 auto_vec <const region *> base_regs_to_purge;
2915 for (auto iter : m_cluster_map)
2916 {
2917 const region *base_reg = iter.first;
2918 if (base_reg->involves_p (sval))
2919 base_regs_to_purge.safe_push (base_reg);
2920 else
2921 {
2922 binding_cluster *cluster = iter.second;
2923 cluster->purge_state_involving (sval, sval_mgr);
2924 }
2925 }
2926
2927 for (auto iter : base_regs_to_purge)
2928 purge_cluster (iter);
2929}
2930
2931/* Get the cluster for BASE_REG, or NULL (const version). */
2932
2933const binding_cluster *
2934store::get_cluster (const region *base_reg) const
2935{
2936 gcc_assert (base_reg);
2937 gcc_assert (base_reg->get_base_region () == base_reg);
2938 if (binding_cluster **slot
2939 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2940 return *slot;
2941 else
2942 return NULL;
2943}
2944
2945/* Get the cluster for BASE_REG, or NULL (non-const version). */
2946
2947binding_cluster *
2948store::get_cluster (const region *base_reg)
2949{
2950 gcc_assert (base_reg);
2951 gcc_assert (base_reg->get_base_region () == base_reg);
2952 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2953 return *slot;
2954 else
2955 return NULL;
2956}
2957
2958/* Get the cluster for BASE_REG, creating it if doesn't already exist. */
2959
2960binding_cluster *
2961store::get_or_create_cluster (const region *base_reg)
2962{
2963 gcc_assert (base_reg);
2964 gcc_assert (base_reg->get_base_region () == base_reg);
2965
2966 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2967 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2968
2969 /* We shouldn't create clusters for base regions that aren't trackable. */
2970 gcc_assert (base_reg->tracked_p ());
2971
2972 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2973 return *slot;
2974
2975 binding_cluster *cluster = new binding_cluster (base_reg);
2976 m_cluster_map.put (base_reg, cluster);
2977
2978 return cluster;
2979}
2980
2981/* Remove any cluster for BASE_REG, for use by
2982 region_model::unbind_region_and_descendents
2983 when popping stack frames and handling deleted heap regions. */
2984
2985void
2986store::purge_cluster (const region *base_reg)
2987{
2988 gcc_assert (base_reg->get_base_region () == base_reg);
2989 binding_cluster **slot = m_cluster_map.get (base_reg);
2990 if (!slot)
2991 return;
2992 binding_cluster *cluster = *slot;
2993 delete cluster;
2994 m_cluster_map.remove (base_reg);
2995}
2996
2997/* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2998 Return true if successful, or false if the stores can't be merged. */
2999
3000bool
3001store::can_merge_p (const store *store_a, const store *store_b,
3002 store *out_store, store_manager *mgr,
3003 model_merger *merger)
3004{
3005 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
3006 out_store->m_called_unknown_fn = true;
3007
3008 /* Get the union of all base regions for STORE_A and STORE_B. */
3009 hash_set<const region *> base_regions;
3010 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
3011 iter_a != store_a->m_cluster_map.end (); ++iter_a)
3012 {
3013 const region *base_reg_a = (*iter_a).first;
3014 base_regions.add (base_reg_a);
3015 }
3016 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
3017 iter_b != store_b->m_cluster_map.end (); ++iter_b)
3018 {
3019 const region *base_reg_b = (*iter_b).first;
3020 base_regions.add (base_reg_b);
3021 }
3022
3023 /* Sort the base regions before considering them. This ought not to
3024 affect the results, but can affect which types UNKNOWN_REGIONs are
3025 created for in a run; sorting them thus avoids minor differences
3026 in logfiles. */
3027 auto_vec<const region *> vec_base_regions (base_regions.elements ());
3028 for (hash_set<const region *>::iterator iter = base_regions.begin ();
3029 iter != base_regions.end (); ++iter)
3030 vec_base_regions.quick_push (*iter);
3031 vec_base_regions.qsort (region::cmp_ptr_ptr);
3032 unsigned i;
3033 const region *base_reg;
3034 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
808f4dfe 3035 {
3036 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
3037 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
3038 /* At least one of cluster_a and cluster_b must be non-NULL. */
3039 binding_cluster *out_cluster
3040 = out_store->get_or_create_cluster (base_reg);
3041 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
be6c485b 3042 out_cluster, out_store, mgr, merger))
808f4dfe
DM
3043 return false;
3044 }
3045 return true;
3046}
3047
3048/* Mark the cluster for BASE_REG as having escaped.
3049 For use when handling an unrecognized function call, and
3050 for params to "top-level" calls.
3051 Further unknown function calls could touch it, even if the cluster
3052 isn't reachable from args of those calls. */
3053
3054void
3055store::mark_as_escaped (const region *base_reg)
3056{
3057 gcc_assert (base_reg);
3058 gcc_assert (base_reg->get_base_region () == base_reg);
3059
3060 if (base_reg->symbolic_for_unknown_ptr_p ()
3061 || !base_reg->tracked_p ())
3062 return;
3063
3064 binding_cluster *cluster = get_or_create_cluster (base_reg);
3065 cluster->mark_as_escaped ();
3066}
3067
3068/* Handle an unknown fncall by updating any clusters that have escaped
3069 (either in this fncall, or in a prior one). */
3070
3071void
3072store::on_unknown_fncall (const gcall *call, store_manager *mgr,
3073 const conjured_purge &p)
3074{
3075 m_called_unknown_fn = true;
3076
3077 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3078 iter != m_cluster_map.end (); ++iter)
3734527d 3079 (*iter).second->on_unknown_fncall (call, mgr, p);
3080}
3081
3082/* Return true if a non-const pointer to BASE_REG (or something within it)
3083 has escaped to code outside of the TU being analyzed. */
3084
3085bool
3086store::escaped_p (const region *base_reg) const
3087{
3088 gcc_assert (base_reg);
3089 gcc_assert (base_reg->get_base_region () == base_reg);
3090
3091 /* "errno" can always be modified by external code. */
3092 if (base_reg->get_kind () == RK_ERRNO)
3093 return true;
3094
3095 if (binding_cluster **cluster_slot
3096 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3097 return (*cluster_slot)->escaped_p ();
3098 return false;
3099}
3100
3101/* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3102 this store, using VISITED to ensure the traversal terminates. */
3103
3104void
3105store::get_representative_path_vars (const region_model *model,
3106 svalue_set *visited,
3107 const svalue *sval,
3108 auto_vec<path_var> *out_pvs) const
3109{
3110 gcc_assert (sval);
3111
3112 /* Find all bindings that reference SVAL. */
3113 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3114 iter != m_cluster_map.end (); ++iter)
3115 {
3116 const region *base_reg = (*iter).first;
3117 binding_cluster *cluster = (*iter).second;
3118 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3119 out_pvs);
3120 }
3121
3122 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3123 {
3124 const region *reg = init_sval->get_region ();
3125 if (path_var pv = model->get_representative_path_var (reg,
3126 visited))
3127 out_pvs->safe_push (pv);
3128 }
3129}
3130
3131/* Remove all bindings overlapping REG within this store, removing
3132 any clusters that become redundant.
3133
3134 If UNCERTAINTY is non-NULL, use it to record any svalues that
3135 were removed, as being maybe-bound. */
3136
3137void
3138store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3139 uncertainty_t *uncertainty)
3140{
3141 const region *base_reg = reg->get_base_region ();
3142 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3143 {
3144 binding_cluster *cluster = *cluster_slot;
3145 if (reg == base_reg && !escaped_p (base_reg))
3146 {
3147 /* Remove whole cluster. */
3148 m_cluster_map.remove (base_reg);
3149 delete cluster;
3150 return;
3151 }
3152 /* Pass NULL for the maybe_live_values here, as we don't want to
3153 record the old svalues as being maybe-bound. */
3154 cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
3155 }
3156}
3157
3158/* Subclass of visitor that accumulates a hash_set of the regions that
3159 were visited. */
3160
3161struct region_finder : public visitor
3162{
ff171cb1 3163 void visit_region (const region *reg) final override
3164 {
3165 m_regs.add (reg);
3166 }
3167
3168 hash_set<const region *> m_regs;
3169};
3170
3171/* Canonicalize this store, to maximize the chance of equality between
3172 instances. */
3173
3174void
3175store::canonicalize (store_manager *mgr)
3176{
3177 /* If we have e.g.:
3178 cluster for: HEAP_ALLOCATED_REGION(543)
3179 ESCAPED
3180 TOUCHED
3181 where the heap region is empty and unreferenced, then purge that
3182 cluster, to avoid unbounded state chains involving these. */
3183
3184 /* Find regions that are referenced by bound values in the store. */
3185 region_finder s;
3186 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3187 iter != m_cluster_map.end (); ++iter)
3188 {
3189 binding_cluster *cluster = (*iter).second;
3190 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3191 bind_iter != cluster->m_map.end (); ++bind_iter)
3192 (*bind_iter).second->accept (&s);
3193 }
3194
3195 /* Locate heap-allocated regions that have empty bindings that weren't
3196 found above. */
3197 hash_set<const region *> purgeable_regions;
3198 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3199 iter != m_cluster_map.end (); ++iter)
3200 {
3201 const region *base_reg = (*iter).first;
3202 binding_cluster *cluster = (*iter).second;
3203 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3204 {
3205 /* Don't purge a heap-allocated region that's been marked as
3206 escaping, since this could be recording that a ptr to it
3207 was written to an unknown symbolic region along this
3208	     path, and so we don't know whether it's referenced or
3209	     not, and hence whether it should be reported as leaking
3210 (PR analyzer/106473). */
3211 if (cluster->escaped_p ())
3212 continue;
3213
3214 if (cluster->empty_p ())
3215 if (!s.m_regs.contains (base_reg))
3216 purgeable_regions.add (base_reg);
3217
3218 /* Also cover the UNKNOWN case. */
3219 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3220 if (sval->get_kind () == SK_UNKNOWN)
3221 if (!s.m_regs.contains (base_reg))
3222 purgeable_regions.add (base_reg);
3223 }
3224 }
3225
3226 /* Purge them. */
3227 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3228 iter != purgeable_regions.end (); ++iter)
3229 {
3230 const region *base_reg = *iter;
3231 purge_cluster (base_reg);
3232 }
3233}
3234
3235/* Subroutine for use by exploded_path::feasible_p.
3236
3237 We need to deal with state differences between:
3238 (a) when the exploded_graph is being initially constructed and
3239   (b) when replaying the state changes along a specific path
3240   in exploded_path::feasible_p.
3241
3242 In (a), state merging happens, so when exploring a loop
3243 for (i = 0; i < 1024; i++)
3244 on successive iterations we have i == 0, then i == WIDENING.
3245
3246 In (b), no state merging happens, so naively replaying the path
3247 that goes twice through the loop then exits it
3248 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3249 that exits the loop, which would be found to be infeasible as i == 1,
3250 and the path would be rejected.
3251
3252 We need to fix up state during replay. This subroutine is
3253 called whenever we enter a supernode that we've already
3254 visited along this exploded_path, passing in OTHER_STORE
3255 from the destination enode's state.
3256
3257 Find bindings to widening values in OTHER_STORE.
3258 For all that are found, update the binding in this store to UNKNOWN. */
3259
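/* Editorial note, continuing the example above: if OTHER_STORE binds
   "i" to a WIDENING svalue, the replayed store rebinds "i" to UNKNOWN
   here, so the concrete i == 1 obtained by naive replay cannot make
   the loop-exit edge appear infeasible.  */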
3260void
3261store::loop_replay_fixup (const store *other_store,
3262 region_model_manager *mgr)
3263{
3264 gcc_assert (other_store);
3265 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3266 iter != other_store->m_cluster_map.end (); ++iter)
3267 {
3268 const region *base_reg = (*iter).first;
3269 binding_cluster *cluster = (*iter).second;
3270 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3271 bind_iter != cluster->m_map.end (); ++bind_iter)
3272 {
3273 const binding_key *key = (*bind_iter).first;
3274 const svalue *sval = (*bind_iter).second;
3275 if (sval->get_kind () == SK_WIDENING)
3276 {
3277 binding_cluster *this_cluster
3278 = get_or_create_cluster (base_reg);
3279 const svalue *unknown
3280 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3281 this_cluster->bind_key (key, unknown);
3282 }
3283 }
3284 }
3285}
3286
3287/* Use R to replay the bindings from SUMMARY into this object. */
3288
3289void
3290store::replay_call_summary (call_summary_replay &r,
3291 const store &summary)
3292{
3293 if (summary.m_called_unknown_fn)
3294 {
3295 /* A call to an external function occurred in the summary.
3296	 Hence we need to invalidate our knowledge of globals,
3297 escaped regions, etc. */
3298 on_unknown_fncall (r.get_call_stmt (),
3299 r.get_store_manager (),
3300 conjured_purge (r.get_caller_model (),
3301 r.get_ctxt ()));
3302 }
3303
3304 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3305 for (auto kv : summary.m_cluster_map)
3306 keys.quick_push (kv.first);
3307 keys.qsort (region::cmp_ptr_ptr);
3308 for (auto base_reg : keys)
3309 replay_call_summary_cluster (r, summary, base_reg);
3310}
3311
3312/* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3313 into this object. */
3314
3315void
3316store::replay_call_summary_cluster (call_summary_replay &r,
3317 const store &summary,
3318 const region *summary_base_reg)
3319{
3320 const call_details &cd = r.get_call_details ();
3321 region_model_manager *reg_mgr = r.get_manager ();
3322 store_manager *mgr = reg_mgr->get_store_manager ();
3323 const binding_cluster *summary_cluster
3324 = summary.get_cluster (summary_base_reg);
3325
3326 /* Handle "ESCAPED" and "TOUCHED" flags. */
3327 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3328 if (const region *caller_reg
3329 = r.convert_region_from_summary (summary_base_reg))
3330 {
3331 const region *caller_base_reg = caller_reg->get_base_region ();
3332 if (caller_base_reg->tracked_p ()
3333 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3334 {
3335 binding_cluster *caller_cluster
3336 = get_or_create_cluster (caller_base_reg);
3337 if (summary_cluster->escaped_p ())
3338 caller_cluster->mark_as_escaped ();
3339 if (summary_cluster->touched_p ())
3340 caller_cluster->m_touched = true;
3341 }
3342 }
3343
3344 switch (summary_base_reg->get_kind ())
3345 {
3346 /* Top-level regions. */
3347 case RK_FRAME:
3348 case RK_GLOBALS:
3349 case RK_CODE:
3350 case RK_STACK:
3351 case RK_HEAP:
3d2d04cd 3352 case RK_THREAD_LOCAL:
bfca9505
DM
3353 case RK_ROOT:
3354 /* Child regions. */
3355 case RK_FIELD:
3356 case RK_ELEMENT:
3357 case RK_OFFSET:
3358 case RK_SIZED:
3359 case RK_CAST:
3360 case RK_BIT_RANGE:
3361 /* Other regions. */
3362 case RK_VAR_ARG:
3363 case RK_UNKNOWN:
3364 /* These should never be the base region of a binding cluster. */
3365 gcc_unreachable ();
3366 break;
3367
3368 case RK_FUNCTION:
3369 case RK_LABEL:
3370 case RK_STRING:
3371 /* These can be marked as escaping. */
3372 break;
3373
3374 case RK_SYMBOLIC:
3375 {
3376 const symbolic_region *summary_symbolic_reg
3377 = as_a <const symbolic_region *> (summary_base_reg);
3378 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3379 const svalue *caller_ptr_sval
3380 = r.convert_svalue_from_summary (summary_ptr_sval);
3381 if (!caller_ptr_sval)
3382 return;
3383 const region *caller_dest_reg
3384 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3385 NULL_TREE,
3386 cd.get_ctxt ());
3387 const svalue *summary_sval
3388 = summary.get_any_binding (mgr, summary_base_reg);
3389 if (!summary_sval)
3390 return;
3391 const svalue *caller_sval
3392 = r.convert_svalue_from_summary (summary_sval);
3393 if (!caller_sval)
3394 caller_sval =
3395 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3396 set_value (mgr, caller_dest_reg,
3397 caller_sval, NULL /* uncertainty_t * */);
3398 }
3399 break;
3400
3401 case RK_HEAP_ALLOCATED:
bfca9505 3402 case RK_DECL:
3d2d04cd 3403 case RK_ERRNO:
f65f63c4 3404 case RK_PRIVATE:
3405 {
3406 const region *caller_dest_reg
3407 = r.convert_region_from_summary (summary_base_reg);
3408 if (!caller_dest_reg)
3409 return;
3410 const svalue *summary_sval
3411 = summary.get_any_binding (mgr, summary_base_reg);
3412 if (!summary_sval)
3413 summary_sval = reg_mgr->get_or_create_compound_svalue
3414 (summary_base_reg->get_type (),
3415 summary_cluster->get_map ());
3416 const svalue *caller_sval
3417 = r.convert_svalue_from_summary (summary_sval);
3418 if (!caller_sval)
3419 caller_sval =
3420 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3421 set_value (mgr, caller_dest_reg,
3422 caller_sval, NULL /* uncertainty_t * */);
3423 }
3424 break;
3425
3426 case RK_ALLOCA:
3427 /* Ignore bindings of alloca regions in the summary. */
3428 break;
3429 }
3430}
3431
3432#if CHECKING_P
3433
3434namespace selftest {
3435
3436/* Verify that bit_range::intersects_p works as expected. */
3437
3438static void
3439test_bit_range_intersects_p ()
3440{
3441 bit_range b0 (0, 1);
3442 bit_range b1 (1, 1);
3443 bit_range b2 (2, 1);
3444 bit_range b3 (3, 1);
3445 bit_range b4 (4, 1);
3446 bit_range b5 (5, 1);
3447 bit_range b6 (6, 1);
3448 bit_range b7 (7, 1);
3449 bit_range b1_to_6 (1, 6);
3450 bit_range b0_to_7 (0, 8);
3451 bit_range b3_to_5 (3, 3);
3452 bit_range b6_to_7 (6, 2);
3453
3454 /* self-intersection is true. */
3455 ASSERT_TRUE (b0.intersects_p (b0));
3456 ASSERT_TRUE (b7.intersects_p (b7));
3457 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3458 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3459
3460 ASSERT_FALSE (b0.intersects_p (b1));
3461 ASSERT_FALSE (b1.intersects_p (b0));
3462 ASSERT_FALSE (b0.intersects_p (b7));
3463 ASSERT_FALSE (b7.intersects_p (b0));
3464
3465 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3466 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3467 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3468 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3469
3470 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3471 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3472 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3473 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3474 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3475 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3476
3477 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3478 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3479
3480 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3481 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3482
3483 bit_range r1 (0,0);
3484 bit_range r2 (0,0);
3485 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3486 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3487 ASSERT_EQ (r1.m_size_in_bits, 6);
3488 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3489 ASSERT_EQ (r2.m_size_in_bits, 6);
3490
3491 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3492 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3493 ASSERT_EQ (r1.m_size_in_bits, 6);
3494 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3495 ASSERT_EQ (r2.m_size_in_bits, 6);
}

/* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ.  */

static void
assert_bit_range_from_mask_eq (const location &loc,
                               unsigned HOST_WIDE_INT mask,
                               const bit_range &expected)
{
  bit_range actual (0, 0);
  bool ok = bit_range::from_mask (mask, &actual);
  ASSERT_TRUE_AT (loc, ok);
  ASSERT_EQ_AT (loc, actual, expected);
}

/* Assert that bit_range::from_mask (MASK) returns true, and writes
   out EXPECTED_BIT_RANGE.  */

#define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
  SELFTEST_BEGIN_STMT \
  assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
                                 EXPECTED_BIT_RANGE); \
  SELFTEST_END_STMT

/* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK.  */

static void
assert_no_bit_range_from_mask_eq (const location &loc,
                                  unsigned HOST_WIDE_INT mask)
{
  bit_range actual (0, 0);
  bool ok = bit_range::from_mask (mask, &actual);
  ASSERT_FALSE_AT (loc, ok);
}

/* Assert that bit_range::from_mask (MASK) returns false.  */

#define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
  SELFTEST_BEGIN_STMT \
  assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
  SELFTEST_END_STMT

/* Verify that bit_range::from_mask works as expected.  */

static void
test_bit_range_from_mask ()
{
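  /* The checks below verify that from_mask only succeeds for masks whose
     set bits form a single non-empty contiguous run, recording that run's
     start bit and width.  */
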
  /* Should fail on zero.  */
  ASSERT_NO_BIT_RANGE_FROM_MASK (0);

  /* Verify 1-bit masks.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));

  /* Verify N-bit masks starting at bit 0.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));

  /* Various other tests.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));

  /* Multiple ranges of set bits should fail.  */
  ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
  ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
}

/* Implementation detail of ASSERT_OVERLAP.  */

static void
assert_overlap (const location &loc,
                const concrete_binding *b1,
                const concrete_binding *b2)
{
  ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
  ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
}

/* Implementation detail of ASSERT_DISJOINT.  */

static void
assert_disjoint (const location &loc,
                 const concrete_binding *b1,
                 const concrete_binding *b2)
{
  ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
  ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
}

/* Assert that B1 and B2 overlap, checking both ways.  */

#define ASSERT_OVERLAP(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_overlap (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT

/* Assert that B1 and B2 do not overlap, checking both ways.  */

#define ASSERT_DISJOINT(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_disjoint (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT

/* Verify that concrete_binding::overlaps_p works as expected.  */

static void
test_binding_key_overlap ()
{
  store_manager mgr (NULL);

  /* Various 8-bit bindings.  */
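  /* Each cb_A_B below covers bits A..B inclusive; get_concrete_binding
     takes a start bit offset and a size in bits.  */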
  const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
  const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
  const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
  const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);

  /* 16-bit bindings.  */
  const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
  const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
  const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);

  /* 32-bit binding.  */
  const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);

  /* Everything should self-overlap.  */
  ASSERT_OVERLAP (cb_0_7, cb_0_7);
  ASSERT_OVERLAP (cb_8_15, cb_8_15);
  ASSERT_OVERLAP (cb_16_23, cb_16_23);
  ASSERT_OVERLAP (cb_24_31, cb_24_31);
  ASSERT_OVERLAP (cb_0_15, cb_0_15);
  ASSERT_OVERLAP (cb_8_23, cb_8_23);
  ASSERT_OVERLAP (cb_16_31, cb_16_31);
  ASSERT_OVERLAP (cb_0_31, cb_0_31);

  /* Verify the 8-bit bindings that don't overlap each other.  */
  ASSERT_DISJOINT (cb_0_7, cb_8_15);
  ASSERT_DISJOINT (cb_8_15, cb_16_23);

  /* Check for overlap of differently-sized bindings.  */
  ASSERT_OVERLAP (cb_0_7, cb_0_31);
  /* ...and with differing start points.  */
  ASSERT_OVERLAP (cb_8_15, cb_0_31);
  ASSERT_DISJOINT (cb_8_15, cb_16_31);
  ASSERT_OVERLAP (cb_16_23, cb_0_31);
  ASSERT_OVERLAP (cb_16_31, cb_0_31);

  ASSERT_DISJOINT (cb_0_7, cb_8_23);
  ASSERT_OVERLAP (cb_8_23, cb_16_23);
  ASSERT_OVERLAP (cb_8_23, cb_16_31);
  ASSERT_DISJOINT (cb_8_23, cb_24_31);
}

/* Run all of the selftests within this file.  */

void
analyzer_store_cc_tests ()
{
  test_bit_range_intersects_p ();
  test_bit_range_from_mask ();
  test_binding_key_overlap ();
}

} // namespace selftest

#endif /* CHECKING_P */

} // namespace ana

#endif /* #if ENABLE_ANALYZER */