]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/analyzer/store.cc
Daily bump.
[thirdparty/gcc.git] / gcc / analyzer / store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2024 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "function.h"
27 #include "basic-block.h"
28 #include "gimple.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
31 #include "graphviz.h"
32 #include "options.h"
33 #include "cgraph.h"
34 #include "tree-dfa.h"
35 #include "stringpool.h"
36 #include "convert.h"
37 #include "target.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "bitmap.h"
42 #include "selftest.h"
43 #include "analyzer/analyzer.h"
44 #include "analyzer/analyzer-logging.h"
45 #include "ordered-hash-map.h"
46 #include "options.h"
47 #include "cfg.h"
48 #include "analyzer/supergraph.h"
49 #include "sbitmap.h"
50 #include "analyzer/call-string.h"
51 #include "analyzer/program-point.h"
52 #include "analyzer/store.h"
53 #include "analyzer/region-model.h"
54 #include "analyzer/call-summary.h"
55 #include "analyzer/analyzer-selftests.h"
56 #include "stor-layout.h"
57
58 #if ENABLE_ANALYZER
59
60 namespace ana {
61
62 /* Dump SVALS to PP, sorting them to ensure determinism. */
63
64 static void
65 dump_svalue_set (const hash_set <const svalue *> &svals,
66 pretty_printer *pp, bool simple)
67 {
68 auto_vec <const svalue *> v;
69 for (hash_set<const svalue *>::iterator iter = svals.begin ();
70 iter != svals.end (); ++iter)
71 {
72 v.safe_push (*iter);
73 }
74 v.qsort (svalue::cmp_ptr_ptr);
75
76 pp_character (pp, '{');
77 const svalue *sval;
78 unsigned i;
79 FOR_EACH_VEC_ELT (v, i, sval)
80 {
81 if (i > 0)
82 pp_string (pp, ", ");
83 sval->dump_to_pp (pp, simple);
84 }
85 pp_character (pp, '}');
86 }
87
88 /* class uncertainty_t. */
89
90 /* Dump this object to PP. */
91
92 void
93 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
94 {
95 pp_string (pp, "{m_maybe_bound_svals: ");
96 dump_svalue_set (m_maybe_bound_svals, pp, simple);
97
98 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
99 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
100 pp_string (pp, "}");
101 }
102
103 /* Dump this object to stderr. */
104
105 DEBUG_FUNCTION void
106 uncertainty_t::dump (bool simple) const
107 {
108 pretty_printer pp;
109 pp_format_decoder (&pp) = default_tree_printer;
110 pp_show_color (&pp) = pp_show_color (global_dc->printer);
111 pp.buffer->stream = stderr;
112 dump_to_pp (&pp, simple);
113 pp_newline (&pp);
114 pp_flush (&pp);
115 }
116
117 /* class binding_key. */
118
119 const binding_key *
120 binding_key::make (store_manager *mgr, const region *r)
121 {
122 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
123 if (offset.symbolic_p ())
124 return mgr->get_symbolic_binding (r);
125 else
126 {
127 bit_size_t bit_size;
128 if (r->get_bit_size (&bit_size))
129 {
130 /* Must be non-empty. */
131 gcc_assert (bit_size > 0);
132 return mgr->get_concrete_binding (offset.get_bit_offset (),
133 bit_size);
134 }
135 else
136 return mgr->get_symbolic_binding (r);
137 }
138 }
139
140 /* Dump this binding_key to stderr. */
141
142 DEBUG_FUNCTION void
143 binding_key::dump (bool simple) const
144 {
145 pretty_printer pp;
146 pp_format_decoder (&pp) = default_tree_printer;
147 pp_show_color (&pp) = pp_show_color (global_dc->printer);
148 pp.buffer->stream = stderr;
149 dump_to_pp (&pp, simple);
150 pp_newline (&pp);
151 pp_flush (&pp);
152 }
153
154 /* Get a description of this binding_key. */
155
156 label_text
157 binding_key::get_desc (bool simple) const
158 {
159 pretty_printer pp;
160 pp_format_decoder (&pp) = default_tree_printer;
161 dump_to_pp (&pp, simple);
162 return label_text::take (xstrdup (pp_formatted_text (&pp)));
163 }
164
165 /* qsort callback. */
166
167 int
168 binding_key::cmp_ptrs (const void *p1, const void *p2)
169 {
170 const binding_key * const *pk1 = (const binding_key * const *)p1;
171 const binding_key * const *pk2 = (const binding_key * const *)p2;
172 return cmp (*pk1, *pk2);
173 }
174
175 /* Comparator for binding_keys. */
176
177 int
178 binding_key::cmp (const binding_key *k1, const binding_key *k2)
179 {
180 int concrete1 = k1->concrete_p ();
181 int concrete2 = k2->concrete_p ();
182 if (int concrete_cmp = concrete1 - concrete2)
183 return concrete_cmp;
184 if (concrete1)
185 {
186 const concrete_binding *b1 = (const concrete_binding *)k1;
187 const concrete_binding *b2 = (const concrete_binding *)k2;
188 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
189 b2->get_start_bit_offset (),
190 SIGNED))
191 return start_cmp;
192 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
193 SIGNED);
194 }
195 else
196 {
197 const symbolic_binding *s1 = (const symbolic_binding *)k1;
198 const symbolic_binding *s2 = (const symbolic_binding *)k2;
199 if (s1 > s2)
200 return 1;
201 if (s1 < s2)
202 return -1;
203 return 0;
204 }
205 }
206
207 /* struct bit_range. */
208
209 void
210 bit_range::dump_to_pp (pretty_printer *pp) const
211 {
212 byte_range bytes (0, 0);
213 if (as_byte_range (&bytes))
214 bytes.dump_to_pp (pp);
215 else
216 {
217 pp_string (pp, "start: ");
218 pp_wide_int (pp, m_start_bit_offset, SIGNED);
219 pp_string (pp, ", size: ");
220 pp_wide_int (pp, m_size_in_bits, SIGNED);
221 pp_string (pp, ", next: ");
222 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
223 }
224 }
225
226 /* Dump this object to stderr. */
227
228 DEBUG_FUNCTION void
229 bit_range::dump () const
230 {
231 pretty_printer pp;
232 pp.buffer->stream = stderr;
233 dump_to_pp (&pp);
234 pp_newline (&pp);
235 pp_flush (&pp);
236 }
237
238 /* Generate a JSON value for this bit_range.
239 This is intended for debugging the analyzer rather
240 than serialization. */
241
242 json::object *
243 bit_range::to_json () const
244 {
245 json::object *obj = new json::object ();
246 obj->set ("start_bit_offset",
247 bit_offset_to_json (m_start_bit_offset));
248 obj->set ("size_in_bits",
249 bit_offset_to_json (m_size_in_bits));
250 return obj;
251 }
252
253 /* If OTHER is a subset of this, return true and, if OUT is
254 non-null, write to *OUT the relative range of OTHER within this.
255 Otherwise return false. */
256
257 bool
258 bit_range::contains_p (const bit_range &other, bit_range *out) const
259 {
260 if (contains_p (other.get_start_bit_offset ())
261 && contains_p (other.get_last_bit_offset ()))
262 {
263 if (out)
264 {
265 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
266 out->m_size_in_bits = other.m_size_in_bits;
267 }
268 return true;
269 }
270 else
271 return false;
272 }
273
274 /* If OTHER intersects this, return true and write
275 the relative range of OTHER within THIS to *OUT_THIS,
276 and the relative range of THIS within OTHER to *OUT_OTHER.
277 Otherwise return false. */
278
279 bool
280 bit_range::intersects_p (const bit_range &other,
281 bit_range *out_this,
282 bit_range *out_other) const
283 {
284 if (get_start_bit_offset () < other.get_next_bit_offset ()
285 && other.get_start_bit_offset () < get_next_bit_offset ())
286 {
287 bit_offset_t overlap_start
288 = MAX (get_start_bit_offset (),
289 other.get_start_bit_offset ());
290 bit_offset_t overlap_next
291 = MIN (get_next_bit_offset (),
292 other.get_next_bit_offset ());
293 gcc_assert (overlap_next > overlap_start);
294 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
295 *out_this = abs_overlap_bits - get_start_bit_offset ();
296 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
297 return true;
298 }
299 else
300 return false;
301 }
302
303 /* Return true if THIS and OTHER intersect and write the number
304 of bits both buffers overlap to *OUT_NUM_OVERLAP_BITS.
305
306 Otherwise return false. */
307
308 bool
309 bit_range::intersects_p (const bit_range &other,
310 bit_size_t *out_num_overlap_bits) const
311 {
312 if (get_start_bit_offset () < other.get_next_bit_offset ()
313 && other.get_start_bit_offset () < get_next_bit_offset ())
314 {
315 bit_offset_t overlap_start = MAX (get_start_bit_offset (),
316 other.get_start_bit_offset ());
317 bit_offset_t overlap_next = MIN (get_next_bit_offset (),
318 other.get_next_bit_offset ());
319 gcc_assert (overlap_next > overlap_start);
320 *out_num_overlap_bits = overlap_next - overlap_start;
321 return true;
322 }
323 else
324 return false;
325 }
326
327 /* Return true if THIS exceeds OTHER and write the overhanging
328 bit range to OUT_OVERHANGING_BIT_RANGE. */
329
330 bool
331 bit_range::exceeds_p (const bit_range &other,
332 bit_range *out_overhanging_bit_range) const
333 {
334 gcc_assert (!empty_p ());
335
336 if (other.get_next_bit_offset () < get_next_bit_offset ())
337 {
338 /* THIS definitely exceeds OTHER. */
339 bit_offset_t start = MAX (get_start_bit_offset (),
340 other.get_next_bit_offset ());
341 bit_offset_t size = get_next_bit_offset () - start;
342 gcc_assert (size > 0);
343 out_overhanging_bit_range->m_start_bit_offset = start;
344 out_overhanging_bit_range->m_size_in_bits = size;
345 return true;
346 }
347 else
348 return false;
349 }
350
351 /* Return true if THIS falls short of OFFSET and write the
352 bit range fallen short to OUT_FALL_SHORT_BITS. */
353
354 bool
355 bit_range::falls_short_of_p (bit_offset_t offset,
356 bit_range *out_fall_short_bits) const
357 {
358 gcc_assert (!empty_p ());
359
360 if (get_start_bit_offset () < offset)
361 {
362 /* THIS falls short of OFFSET. */
363 bit_offset_t start = get_start_bit_offset ();
364 bit_offset_t size = MIN (offset, get_next_bit_offset ()) - start;
365 gcc_assert (size > 0);
366 out_fall_short_bits->m_start_bit_offset = start;
367 out_fall_short_bits->m_size_in_bits = size;
368 return true;
369 }
370 else
371 return false;
372 }
373
374 int
375 bit_range::cmp (const bit_range &br1, const bit_range &br2)
376 {
377 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
378 br2.m_start_bit_offset))
379 return start_cmp;
380
381 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
382 }
383
384 /* Offset this range by OFFSET. */
385
386 bit_range
387 bit_range::operator- (bit_offset_t offset) const
388 {
389 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
390 }
391
392 /* If MASK is a contiguous range of set bits, write them
393 to *OUT and return true.
394 Otherwise return false. */
395
396 bool
397 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
398 {
399 unsigned iter_bit_idx = 0;
400 unsigned HOST_WIDE_INT iter_bit_mask = 1;
401
402 /* Find the first contiguous run of set bits in MASK. */
403
404 /* Find first set bit in MASK. */
405 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
406 {
407 if (mask & iter_bit_mask)
408 break;
409 iter_bit_idx++;
410 iter_bit_mask <<= 1;
411 }
412 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
413 /* MASK is zero. */
414 return false;
415
416 unsigned first_set_iter_bit_idx = iter_bit_idx;
417 unsigned num_set_bits = 1;
418 iter_bit_idx++;
419 iter_bit_mask <<= 1;
420
421 /* Find next unset bit in MASK. */
422 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
423 {
424 if (!(mask & iter_bit_mask))
425 break;
426 num_set_bits++;
427 iter_bit_idx++;
428 iter_bit_mask <<= 1;
429 }
430 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
431 {
432 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
433 return true;
434 }
435
436 /* We now have the first contiguous run of set bits in MASK.
437 Fail if any other bits are set. */
438 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
439 {
440 if (mask & iter_bit_mask)
441 return false;
442 iter_bit_idx++;
443 iter_bit_mask <<= 1;
444 }
445
446 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
447 return true;
448 }
449
450 /* Attempt to convert this bit_range to a byte_range.
451 Return true if it is possible, writing the result to *OUT.
452 Otherwise return false. */
453
454 bool
455 bit_range::as_byte_range (byte_range *out) const
456 {
457 if (m_start_bit_offset % BITS_PER_UNIT == 0
458 && m_size_in_bits % BITS_PER_UNIT == 0)
459 {
460 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
461 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
462 return true;
463 }
464 return false;
465 }
466
467 /* Dump this object to PP. */
468
469 void
470 byte_range::dump_to_pp (pretty_printer *pp) const
471 {
472 if (m_size_in_bytes == 0)
473 {
474 pp_string (pp, "empty");
475 }
476 else if (m_size_in_bytes == 1)
477 {
478 pp_string (pp, "byte ");
479 pp_wide_int (pp, m_start_byte_offset, SIGNED);
480 }
481 else
482 {
483 pp_string (pp, "bytes ");
484 pp_wide_int (pp, m_start_byte_offset, SIGNED);
485 pp_string (pp, "-");
486 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
487 }
488 }
489
490 /* Dump this object to stderr. */
491
492 DEBUG_FUNCTION void
493 byte_range::dump () const
494 {
495 pretty_printer pp;
496 pp.buffer->stream = stderr;
497 dump_to_pp (&pp);
498 pp_newline (&pp);
499 pp_flush (&pp);
500 }
501
502 /* Generate a JSON value for this byte_range.
503 This is intended for debugging the analyzer rather
504 than serialization. */
505
506 json::object *
507 byte_range::to_json () const
508 {
509 json::object *obj = new json::object ();
510 obj->set ("start_byte_offset",
511 byte_offset_to_json (m_start_byte_offset));
512 obj->set ("size_in_bytes",
513 byte_offset_to_json (m_size_in_bytes));
514 return obj;
515 }
516
517 /* If OTHER is a subset of this, return true and write
518 to *OUT the relative range of OTHER within this.
519 Otherwise return false. */
520
521 bool
522 byte_range::contains_p (const byte_range &other, byte_range *out) const
523 {
524 if (contains_p (other.get_start_byte_offset ())
525 && contains_p (other.get_last_byte_offset ()))
526 {
527 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
528 out->m_size_in_bytes = other.m_size_in_bytes;
529 return true;
530 }
531 else
532 return false;
533 }
534
535 /* qsort comparator for byte ranges. */
536
537 int
538 byte_range::cmp (const byte_range &br1, const byte_range &br2)
539 {
540 /* Order first by offset. */
541 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
542 br2.m_start_byte_offset))
543 return start_cmp;
544
545 /* ...then by size. */
546 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
547 }
548
549 /* class concrete_binding : public binding_key. */
550
551 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
552
553 void
554 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
555 {
556 m_bit_range.dump_to_pp (pp);
557 }
558
559 /* Return true if this binding overlaps with OTHER. */
560
561 bool
562 concrete_binding::overlaps_p (const concrete_binding &other) const
563 {
564 if (get_start_bit_offset () < other.get_next_bit_offset ()
565 && get_next_bit_offset () > other.get_start_bit_offset ())
566 return true;
567 return false;
568 }
569
570 /* If this is expressible as a concrete byte range, return true
571 and write it to *OUT. Otherwise return false. */
572
573 bool
574 concrete_binding::get_byte_range (byte_range *out) const
575 {
576 return m_bit_range.as_byte_range (out);
577 }
578
579 /* Comparator for use by vec<const concrete_binding *>::qsort. */
580
581 int
582 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
583 {
584 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
585 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
586
587 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
588 }
589
590 /* class symbolic_binding : public binding_key. */
591
592 void
593 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
594 {
595 //binding_key::dump_to_pp (pp, simple);
596 pp_string (pp, "region: ");
597 m_region->dump_to_pp (pp, simple);
598 }
599
600 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
601
602 int
603 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
604 {
605 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
606 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
607
608 return region::cmp_ids (b1->get_region (), b2->get_region ());
609 }
610
611 /* The store is oblivious to the types of the svalues bound within
612 it: any type can get bound at any location.
613 Simplify any casts before binding.
614
615 For example, if we have:
616 struct big { int ia[1024]; };
617 struct big src, dst;
618 memcpy (&dst, &src, sizeof (struct big));
619 this reaches us in gimple form as:
620 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
621 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
622 Using cast_region when handling the MEM_REF would give us:
623 INIT_VAL(CAST_REG(unsigned char[4096], src))
624 as rhs_sval, but we can fold that into a cast svalue:
625 CAST(unsigned char[4096], INIT_VAL(src))
626 We can discard that cast from the svalue when binding it in
627 the store for "dst", and simply store:
628 cluster for: dst
629 key: {kind: direct, start: 0, size: 32768, next: 32768}
630 value: ‘struct big’ {INIT_VAL(src)}. */
631
632 static const svalue *
633 simplify_for_binding (const svalue *sval)
634 {
635 if (const svalue *cast_sval = sval->maybe_undo_cast ())
636 sval = cast_sval;
637 return sval;
638 }
639
640 /* class binding_map. */
641
642 /* binding_map's copy ctor. */
643
644 binding_map::binding_map (const binding_map &other)
645 : m_map (other.m_map)
646 {
647 }
648
649 /* binding_map's assignment operator. */
650
651 binding_map&
652 binding_map::operator=(const binding_map &other)
653 {
654 /* For now, assume we only ever copy to an empty cluster. */
655 gcc_assert (m_map.elements () == 0);
656 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
657 ++iter)
658 {
659 const binding_key *key = (*iter).first;
660 const svalue *sval = (*iter).second;
661 m_map.put (key, sval);
662 }
663 return *this;
664 }
665
666 /* binding_map's equality operator. */
667
668 bool
669 binding_map::operator== (const binding_map &other) const
670 {
671 if (m_map.elements () != other.m_map.elements ())
672 return false;
673
674 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
675 {
676 const binding_key *key = (*iter).first;
677 const svalue *sval = (*iter).second;
678 const svalue **other_slot
679 = const_cast <map_t &> (other.m_map).get (key);
680 if (other_slot == NULL)
681 return false;
682 if (sval != *other_slot)
683 return false;
684 }
685 gcc_checking_assert (hash () == other.hash ());
686 return true;
687 }
688
689 /* Generate a hash value for this binding_map. */
690
691 hashval_t
692 binding_map::hash () const
693 {
694 hashval_t result = 0;
695 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
696 {
697 /* Use a new hasher for each key to avoid depending on the ordering
698 of keys when accumulating the result. */
699 inchash::hash hstate;
700 hstate.add_ptr ((*iter).first);
701 hstate.add_ptr ((*iter).second);
702 result ^= hstate.end ();
703 }
704 return result;
705 }
706
707 /* Dump a representation of this binding_map to PP.
708 SIMPLE controls how values and regions are to be printed.
709 If MULTILINE, then split the dump over multiple lines and
710 use whitespace for readability, otherwise put all on one line. */
711
712 void
713 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
714 bool multiline) const
715 {
716 auto_vec <const binding_key *> binding_keys;
717 for (map_t::iterator iter = m_map.begin ();
718 iter != m_map.end (); ++iter)
719 {
720 const binding_key *key = (*iter).first;
721 binding_keys.safe_push (key);
722 }
723 binding_keys.qsort (binding_key::cmp_ptrs);
724
725 const binding_key *key;
726 unsigned i;
727 FOR_EACH_VEC_ELT (binding_keys, i, key)
728 {
729 const svalue *value = *const_cast <map_t &> (m_map).get (key);
730 if (multiline)
731 {
732 pp_string (pp, " key: {");
733 key->dump_to_pp (pp, simple);
734 pp_string (pp, "}");
735 pp_newline (pp);
736 pp_string (pp, " value: ");
737 if (tree t = value->get_type ())
738 dump_quoted_tree (pp, t);
739 pp_string (pp, " {");
740 value->dump_to_pp (pp, simple);
741 pp_string (pp, "}");
742 pp_newline (pp);
743 }
744 else
745 {
746 if (i > 0)
747 pp_string (pp, ", ");
748 pp_string (pp, "binding key: {");
749 key->dump_to_pp (pp, simple);
750 pp_string (pp, "}, value: {");
751 value->dump_to_pp (pp, simple);
752 pp_string (pp, "}");
753 }
754 }
755 }
756
757 /* Dump a multiline representation of this binding_map to stderr. */
758
759 DEBUG_FUNCTION void
760 binding_map::dump (bool simple) const
761 {
762 pretty_printer pp;
763 pp_format_decoder (&pp) = default_tree_printer;
764 pp_show_color (&pp) = pp_show_color (global_dc->printer);
765 pp.buffer->stream = stderr;
766 dump_to_pp (&pp, simple, true);
767 pp_newline (&pp);
768 pp_flush (&pp);
769 }
770
771 /* Return a new json::object of the form
772 {KEY_DESC : SVALUE_DESC,
773 ...for the various key/value pairs in this binding_map}. */
774
775 json::object *
776 binding_map::to_json () const
777 {
778 json::object *map_obj = new json::object ();
779
780 auto_vec <const binding_key *> binding_keys;
781 for (map_t::iterator iter = m_map.begin ();
782 iter != m_map.end (); ++iter)
783 {
784 const binding_key *key = (*iter).first;
785 binding_keys.safe_push (key);
786 }
787 binding_keys.qsort (binding_key::cmp_ptrs);
788
789 const binding_key *key;
790 unsigned i;
791 FOR_EACH_VEC_ELT (binding_keys, i, key)
792 {
793 const svalue *value = *const_cast <map_t &> (m_map).get (key);
794 label_text key_desc = key->get_desc ();
795 map_obj->set (key_desc.get (), value->to_json ());
796 }
797
798 return map_obj;
799 }
800
801 /* Comparator for imposing an order on binding_maps. */
802
803 int
804 binding_map::cmp (const binding_map &map1, const binding_map &map2)
805 {
806 if (int count_cmp = map1.elements () - map2.elements ())
807 return count_cmp;
808
809 auto_vec <const binding_key *> keys1 (map1.elements ());
810 for (map_t::iterator iter = map1.begin ();
811 iter != map1.end (); ++iter)
812 keys1.quick_push ((*iter).first);
813 keys1.qsort (binding_key::cmp_ptrs);
814
815 auto_vec <const binding_key *> keys2 (map2.elements ());
816 for (map_t::iterator iter = map2.begin ();
817 iter != map2.end (); ++iter)
818 keys2.quick_push ((*iter).first);
819 keys2.qsort (binding_key::cmp_ptrs);
820
821 for (size_t i = 0; i < keys1.length (); i++)
822 {
823 const binding_key *k1 = keys1[i];
824 const binding_key *k2 = keys2[i];
825 if (int key_cmp = binding_key::cmp (k1, k2))
826 return key_cmp;
827 gcc_assert (k1 == k2);
828 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
829 return sval_cmp;
830 }
831
832 return 0;
833 }
834
835 /* Get the child region of PARENT_REG based upon INDEX within a
836 CONSTRUCTOR. */
837
838 static const region *
839 get_subregion_within_ctor (const region *parent_reg, tree index,
840 region_model_manager *mgr)
841 {
842 switch (TREE_CODE (index))
843 {
844 default:
845 gcc_unreachable ();
846 case INTEGER_CST:
847 {
848 const svalue *index_sval
849 = mgr->get_or_create_constant_svalue (index);
850 return mgr->get_element_region (parent_reg,
851 TREE_TYPE (parent_reg->get_type ()),
852 index_sval);
853 }
854 break;
855 case FIELD_DECL:
856 return mgr->get_field_region (parent_reg, index);
857 }
858 }
859
860 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
861
862 static const svalue *
863 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
864 {
865 /* Reuse the get_rvalue logic from region_model. */
866 region_model m (mgr);
867 return m.get_rvalue (path_var (val, 0), NULL);
868 }
869
870 /* Bind values from CONSTRUCTOR to this map, relative to
871 PARENT_REG's relationship to its base region.
872 Return true if successful, false if there was a problem (e.g. due
873 to hitting a complexity limit). */
874
875 bool
876 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
877 region_model_manager *mgr)
878 {
879 gcc_assert (parent_reg);
880 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
881
882 unsigned ix;
883 tree index;
884 tree val;
885 tree parent_type = parent_reg->get_type ();
886 tree field;
887 if (TREE_CODE (parent_type) == RECORD_TYPE)
888 field = TYPE_FIELDS (parent_type);
889 else
890 field = NULL_TREE;
891 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
892 {
893 if (!index)
894 {
895 /* If index is NULL, then iterate through the fields for
896 a RECORD_TYPE, or use an INTEGER_CST otherwise.
897 Compare with similar logic in output_constructor. */
898 if (field)
899 {
900 index = field;
901 field = DECL_CHAIN (field);
902 }
903 else
904 index = build_int_cst (integer_type_node, ix);
905 }
906 else if (TREE_CODE (index) == RANGE_EXPR)
907 {
908 tree min_index = TREE_OPERAND (index, 0);
909 tree max_index = TREE_OPERAND (index, 1);
910 if (min_index == max_index)
911 {
912 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
913 min_index, val))
914 return false;
915 }
916 else
917 {
918 if (!apply_ctor_val_to_range (parent_reg, mgr,
919 min_index, max_index, val))
920 return false;
921 }
922 continue;
923 }
924 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
925 return false;
926 }
927 return true;
928 }
929
930 /* Bind the value VAL into the range of elements within PARENT_REF
931 from MIN_INDEX to MAX_INDEX (including endpoints).
932 For use in handling RANGE_EXPR within a CONSTRUCTOR.
933 Return true if successful, false if there was a problem (e.g. due
934 to hitting a complexity limit). */
935
936 bool
937 binding_map::apply_ctor_val_to_range (const region *parent_reg,
938 region_model_manager *mgr,
939 tree min_index, tree max_index,
940 tree val)
941 {
942 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
943 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
944
945 /* Generate a binding key for the range. */
946 const region *min_element
947 = get_subregion_within_ctor (parent_reg, min_index, mgr);
948 const region *max_element
949 = get_subregion_within_ctor (parent_reg, max_index, mgr);
950 region_offset min_offset = min_element->get_offset (mgr);
951 if (min_offset.symbolic_p ())
952 return false;
953 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
954 store_manager *smgr = mgr->get_store_manager ();
955 if (max_element->empty_p ())
956 return false;
957 const binding_key *max_element_key = binding_key::make (smgr, max_element);
958 if (max_element_key->symbolic_p ())
959 return false;
960 const concrete_binding *max_element_ckey
961 = max_element_key->dyn_cast_concrete_binding ();
962 bit_size_t range_size_in_bits
963 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
964 const concrete_binding *range_key
965 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
966 if (range_key->symbolic_p ())
967 return false;
968
969 /* Get the value. */
970 if (TREE_CODE (val) == CONSTRUCTOR)
971 return false;
972 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
973
974 /* Bind the value to the range. */
975 put (range_key, sval);
976 return true;
977 }
978
979 /* Bind the value VAL into INDEX within PARENT_REF.
980 For use in handling a pair of entries within a CONSTRUCTOR.
981 Return true if successful, false if there was a problem (e.g. due
982 to hitting a complexity limit). */
983
984 bool
985 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
986 region_model_manager *mgr,
987 tree index, tree val)
988 {
989 const region *child_reg
990 = get_subregion_within_ctor (parent_reg, index, mgr);
991 if (TREE_CODE (val) == CONSTRUCTOR)
992 return apply_ctor_to_region (child_reg, val, mgr);
993 else
994 {
995 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
996 if (child_reg->empty_p ())
997 return false;
998 const binding_key *k
999 = binding_key::make (mgr->get_store_manager (), child_reg);
1000 /* Handle the case where we have an unknown size for child_reg
1001 (e.g. due to it being a trailing field with incomplete array
1002 type. */
1003 if (!k->concrete_p ())
1004 {
1005 /* Assume that sval has a well-defined size for this case. */
1006 tree sval_type = sval->get_type ();
1007 gcc_assert (sval_type);
1008 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
1009 gcc_assert (sval_byte_size != -1);
1010 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
1011 /* Get offset of child relative to base region. */
1012 region_offset child_base_offset = child_reg->get_offset (mgr);
1013 if (child_base_offset.symbolic_p ())
1014 return false;
1015 /* Convert to an offset relative to the parent region. */
1016 region_offset parent_base_offset = parent_reg->get_offset (mgr);
1017 gcc_assert (!parent_base_offset.symbolic_p ());
1018 bit_offset_t child_parent_offset
1019 = (child_base_offset.get_bit_offset ()
1020 - parent_base_offset.get_bit_offset ());
1021 /* Create a concrete key for the child within the parent. */
1022 k = mgr->get_store_manager ()->get_concrete_binding
1023 (child_parent_offset, sval_bit_size);
1024 }
1025 gcc_assert (k->concrete_p ());
1026 put (k, sval);
1027 return true;
1028 }
1029 }
1030
1031 /* Populate OUT with all bindings within this map that overlap KEY. */
1032
1033 void
1034 binding_map::get_overlapping_bindings (const binding_key *key,
1035 auto_vec<const binding_key *> *out)
1036 {
1037 for (auto iter : *this)
1038 {
1039 const binding_key *iter_key = iter.first;
1040 if (const concrete_binding *ckey
1041 = key->dyn_cast_concrete_binding ())
1042 {
1043 if (const concrete_binding *iter_ckey
1044 = iter_key->dyn_cast_concrete_binding ())
1045 {
1046 if (ckey->overlaps_p (*iter_ckey))
1047 out->safe_push (iter_key);
1048 }
1049 else
1050 {
1051 /* Assume overlap. */
1052 out->safe_push (iter_key);
1053 }
1054 }
1055 else
1056 {
1057 /* Assume overlap. */
1058 out->safe_push (iter_key);
1059 }
1060 }
1061 }
1062
1063 /* Remove, truncate, and/or split any bindings within this map that
1064 overlap DROP_KEY.
1065
1066 For example, if we have:
1067
1068 +------------------------------------+
1069 | old binding |
1070 +------------------------------------+
1071
1072 which is to be overwritten with:
1073
1074 .......+----------------------+.......
1075 .......| new binding |.......
1076 .......+----------------------+.......
1077
1078 this function "cuts a hole" out of the old binding:
1079
1080 +------+......................+------+
1081 |prefix| hole for new binding |suffix|
1082 +------+......................+------+
1083
1084 into which the new binding can be added without
1085 overlapping the prefix or suffix.
1086
1087 The prefix and suffix (if added) will be bound to the pertinent
1088 parts of the value of the old binding.
1089
1090 For example, given:
1091 struct s5
1092 {
1093 char arr[8];
1094 };
1095 void test_5 (struct s5 *p)
1096 {
1097 struct s5 f = *p;
1098 f.arr[3] = 42;
1099 }
1100 then after the "f = *p;" we have:
1101 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1102 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1103 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1104 giving:
1105 cluster for: f
1106 key: {bytes 0-2}
1107 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1108 key: {bytes 4-7}
1109 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1110 punching a hole into which the new value can be written at byte 3:
1111 cluster for: f
1112 key: {bytes 0-2}
1113 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1114 key: {byte 3}
1115 value: 'char' {(char)42}
1116 key: {bytes 4-7}
1117 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1118
1119 If UNCERTAINTY is non-NULL, use it to record any svalues that
1120 were removed, as being maybe-bound.
1121
1122 If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
1123 were removed as being maybe-live.
1124
1125 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1126 in the map, due to one or both of the underlying clusters being
1127 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1128 concrete binding it could actually be referring to the same memory as
1129 distinct concrete bindings in the map. Remove all bindings, but
1130 register any svalues with *UNCERTAINTY. */
1131
1132 void
1133 binding_map::remove_overlapping_bindings (store_manager *mgr,
1134 const binding_key *drop_key,
1135 uncertainty_t *uncertainty,
1136 svalue_set *maybe_live_values,
1137 bool always_overlap)
1138 {
1139 /* Get the bindings of interest within this map. */
1140 auto_vec<const binding_key *> bindings;
1141 if (always_overlap)
1142 for (auto iter : *this)
1143 bindings.safe_push (iter.first); /* Add all bindings. */
1144 else
1145 /* Just add overlapping bindings. */
1146 get_overlapping_bindings (drop_key, &bindings);
1147
1148 unsigned i;
1149 const binding_key *iter_binding;
1150 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1151 {
1152 /* Record any svalues that were removed to *UNCERTAINTY as being
1153 maybe-bound, provided at least some part of the binding is symbolic.
1154
1155 Specifically, if at least one of the bindings is symbolic, or we
1156 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1157 regions, then we don't know that the svalue has been overwritten,
1158 and should record that to *UNCERTAINTY.
1159
1160 However, if we have concrete keys accessing within the same symbolic
1161 region, then we *know* that the symbolic region has been overwritten,
1162 so we don't record it to *UNCERTAINTY, as this could be a genuine
1163 leak. */
1164 const svalue *old_sval = get (iter_binding);
1165 if (uncertainty
1166 && (drop_key->symbolic_p ()
1167 || iter_binding->symbolic_p ()
1168 || always_overlap))
1169 uncertainty->on_maybe_bound_sval (old_sval);
1170
1171 /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
1172 maybe-live. */
1173 if (maybe_live_values)
1174 maybe_live_values->add (old_sval);
1175
1176 /* Begin by removing the old binding. */
1177 m_map.remove (iter_binding);
1178
1179 /* Don't attempt to handle prefixes/suffixes for the
1180 "always_overlap" case; everything's being removed. */
1181 if (always_overlap)
1182 continue;
1183
1184 /* Now potentially add the prefix and suffix. */
1185 if (const concrete_binding *drop_ckey
1186 = drop_key->dyn_cast_concrete_binding ())
1187 if (const concrete_binding *iter_ckey
1188 = iter_binding->dyn_cast_concrete_binding ())
1189 {
1190 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1191
1192 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1193 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1194
1195 if (iter_bits.get_start_bit_offset ()
1196 < drop_bits.get_start_bit_offset ())
1197 {
1198 /* We have a truncated prefix. */
1199 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1200 (drop_bits.get_start_bit_offset ()
1201 - iter_bits.get_start_bit_offset ()));
1202 const concrete_binding *prefix_key
1203 = mgr->get_concrete_binding (prefix_bits);
1204 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1205 const svalue *prefix_sval
1206 = old_sval->extract_bit_range (NULL_TREE,
1207 rel_prefix,
1208 mgr->get_svalue_manager ());
1209 m_map.put (prefix_key, prefix_sval);
1210 }
1211
1212 if (iter_bits.get_next_bit_offset ()
1213 > drop_bits.get_next_bit_offset ())
1214 {
1215 /* We have a truncated suffix. */
1216 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1217 (iter_bits.get_next_bit_offset ()
1218 - drop_bits.get_next_bit_offset ()));
1219 const concrete_binding *suffix_key
1220 = mgr->get_concrete_binding (suffix_bits);
1221 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1222 - iter_bits.get_start_bit_offset (),
1223 suffix_bits.m_size_in_bits);
1224 const svalue *suffix_sval
1225 = old_sval->extract_bit_range (NULL_TREE,
1226 rel_suffix,
1227 mgr->get_svalue_manager ());
1228 m_map.put (suffix_key, suffix_sval);
1229 }
1230 }
1231 }
1232 }
1233
1234 /* class binding_cluster. */
1235
1236 binding_cluster::binding_cluster (const region *base_region)
1237 : m_base_region (base_region), m_map (),
1238 m_escaped (false), m_touched (false)
1239 {
1240 }
1241
1242 /* binding_cluster's copy ctor. */
1243
1244 binding_cluster::binding_cluster (const binding_cluster &other)
1245 : m_base_region (other.m_base_region), m_map (other.m_map),
1246 m_escaped (other.m_escaped), m_touched (other.m_touched)
1247 {
1248 }
1249
1250 /* binding_cluster's assignment operator. */
1251
1252 binding_cluster&
1253 binding_cluster::operator= (const binding_cluster &other)
1254 {
1255 gcc_assert (m_base_region == other.m_base_region);
1256 m_map = other.m_map;
1257 m_escaped = other.m_escaped;
1258 m_touched = other.m_touched;
1259 return *this;
1260 }
1261
1262 /* binding_cluster's equality operator. */
1263
1264 bool
1265 binding_cluster::operator== (const binding_cluster &other) const
1266 {
1267 if (m_map != other.m_map)
1268 return false;
1269
1270 if (m_base_region != other.m_base_region)
1271 return false;
1272
1273 if (m_escaped != other.m_escaped)
1274 return false;
1275
1276 if (m_touched != other.m_touched)
1277 return false;
1278
1279 gcc_checking_assert (hash () == other.hash ());
1280
1281 return true;
1282 }
1283
1284 /* Generate a hash value for this binding_cluster. */
1285
1286 hashval_t
1287 binding_cluster::hash () const
1288 {
1289 return m_map.hash ();
1290 }
1291
1292 /* Return true if this binding_cluster is symbolic
1293 i.e. its base region is symbolic. */
1294
1295 bool
1296 binding_cluster::symbolic_p () const
1297 {
1298 return m_base_region->get_kind () == RK_SYMBOLIC;
1299 }
1300
1301 /* Dump a representation of this binding_cluster to PP.
1302 SIMPLE controls how values and regions are to be printed.
1303 If MULTILINE, then split the dump over multiple lines and
1304 use whitespace for readability, otherwise put all on one line. */
1305
1306 void
1307 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1308 bool multiline) const
1309 {
1310 if (m_escaped)
1311 {
1312 if (multiline)
1313 {
1314 pp_string (pp, " ESCAPED");
1315 pp_newline (pp);
1316 }
1317 else
1318 pp_string (pp, "(ESCAPED)");
1319 }
1320 if (m_touched)
1321 {
1322 if (multiline)
1323 {
1324 pp_string (pp, " TOUCHED");
1325 pp_newline (pp);
1326 }
1327 else
1328 pp_string (pp, "(TOUCHED)");
1329 }
1330
1331 m_map.dump_to_pp (pp, simple, multiline);
1332 }
1333
1334 /* Dump a multiline representation of this binding_cluster to stderr. */
1335
1336 DEBUG_FUNCTION void
1337 binding_cluster::dump (bool simple) const
1338 {
1339 pretty_printer pp;
1340 pp_format_decoder (&pp) = default_tree_printer;
1341 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1342 pp.buffer->stream = stderr;
1343 pp_string (&pp, " cluster for: ");
1344 m_base_region->dump_to_pp (&pp, simple);
1345 pp_string (&pp, ": ");
1346 pp_newline (&pp);
1347 dump_to_pp (&pp, simple, true);
1348 pp_newline (&pp);
1349 pp_flush (&pp);
1350 }
1351
1352 /* Assert that this object is valid. */
1353
1354 void
1355 binding_cluster::validate () const
1356 {
1357 int num_symbolic = 0;
1358 int num_concrete = 0;
1359 for (auto iter : m_map)
1360 {
1361 if (iter.first->symbolic_p ())
1362 num_symbolic++;
1363 else
1364 num_concrete++;
1365 }
1366 /* We shouldn't have more than one symbolic key per cluster
1367 (or one would have clobbered the other). */
1368 gcc_assert (num_symbolic < 2);
1369 /* We can't have both concrete and symbolic keys. */
1370 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1371 }
1372
1373 /* Return a new json::object of the form
1374 {"escaped": true/false,
1375 "touched": true/false,
1376 "map" : object for the binding_map. */
1377
1378 json::object *
1379 binding_cluster::to_json () const
1380 {
1381 json::object *cluster_obj = new json::object ();
1382
1383 cluster_obj->set ("escaped", new json::literal (m_escaped));
1384 cluster_obj->set ("touched", new json::literal (m_touched));
1385 cluster_obj->set ("map", m_map.to_json ());
1386
1387 return cluster_obj;
1388 }
1389
1390 /* Add a binding of SVAL of kind KIND to REG, unpacking SVAL if it is a
1391 compound_sval. */
1392
1393 void
1394 binding_cluster::bind (store_manager *mgr,
1395 const region *reg, const svalue *sval)
1396 {
1397 if (const compound_svalue *compound_sval
1398 = sval->dyn_cast_compound_svalue ())
1399 {
1400 bind_compound_sval (mgr, reg, compound_sval);
1401 return;
1402 }
1403
1404 if (reg->empty_p ())
1405 return;
1406 const binding_key *binding = binding_key::make (mgr, reg);
1407 bind_key (binding, sval);
1408 }
1409
1410 /* Bind SVAL to KEY.
1411 Unpacking of compound_svalues should already have been done by the
1412 time this is called. */
1413
1414 void
1415 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1416 {
1417 gcc_assert (sval->get_kind () != SK_COMPOUND);
1418
1419 m_map.put (key, sval);
1420 if (key->symbolic_p ())
1421 m_touched = true;
1422 }
1423
1424 /* Subroutine of binding_cluster::bind.
1425 Unpack compound_svals when binding them, so that we bind them
1426 element-wise. */
1427
1428 void
1429 binding_cluster::bind_compound_sval (store_manager *mgr,
1430 const region *reg,
1431 const compound_svalue *compound_sval)
1432 {
1433 region_offset reg_offset
1434 = reg->get_offset (mgr->get_svalue_manager ());
1435 if (reg_offset.symbolic_p ())
1436 {
1437 m_touched = true;
1438 clobber_region (mgr, reg);
1439 return;
1440 }
1441
1442 for (map_t::iterator iter = compound_sval->begin ();
1443 iter != compound_sval->end (); ++iter)
1444 {
1445 const binding_key *iter_key = (*iter).first;
1446 const svalue *iter_sval = (*iter).second;
1447
1448 if (const concrete_binding *concrete_key
1449 = iter_key->dyn_cast_concrete_binding ())
1450 {
1451 bit_offset_t effective_start
1452 = (concrete_key->get_start_bit_offset ()
1453 + reg_offset.get_bit_offset ());
1454 const concrete_binding *effective_concrete_key
1455 = mgr->get_concrete_binding (effective_start,
1456 concrete_key->get_size_in_bits ());
1457 bind_key (effective_concrete_key, iter_sval);
1458 }
1459 else
1460 gcc_unreachable ();
1461 }
1462 }
1463
1464 /* Remove all bindings overlapping REG within this cluster. */
1465
1466 void
1467 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1468 {
1469 remove_overlapping_bindings (mgr, reg, NULL, NULL);
1470 }
1471
1472 /* Remove any bindings for REG within this cluster. */
1473
1474 void
1475 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1476 {
1477 gcc_assert (reg->get_kind () == RK_DECL);
1478 if (reg->empty_p ())
1479 return;
1480 const binding_key *binding
1481 = binding_key::make (mgr, const_cast<region *> (reg));
1482 m_map.remove (binding);
1483 }
1484
1485 /* Clobber REG and fill it with repeated copies of SVAL. */
1486
1487 void
1488 binding_cluster::fill_region (store_manager *mgr,
1489 const region *reg,
1490 const svalue *sval)
1491 {
1492 clobber_region (mgr, reg);
1493
1494 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1495 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1496 const svalue *fill_sval
1497 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1498 byte_size_sval, sval);
1499 bind (mgr, reg, fill_sval);
1500 }
1501
1502 /* Clobber REG within this cluster and fill it with zeroes. */
1503
1504 void
1505 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1506 {
1507 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1508 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1509 fill_region (mgr, reg, zero_sval);
1510 }
1511
1512 /* Mark REG_TO_BIND within this cluster as being unknown.
1513
1514 Remove any bindings overlapping REG_FOR_OVERLAP.
1515 If UNCERTAINTY is non-NULL, use it to record any svalues that
1516 had bindings to them removed, as being maybe-bound.
1517 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1518 had bindings to them removed, as being maybe-live.
1519
1520 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1521 store::mark_region_as_unknown, but are different in
1522 store::set_value's alias handling, for handling the case where
1523 we have a write to a symbolic REG_FOR_OVERLAP. */
1524
1525 void
1526 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1527 const region *reg_to_bind,
1528 const region *reg_for_overlap,
1529 uncertainty_t *uncertainty,
1530 svalue_set *maybe_live_values)
1531 {
1532 if (reg_to_bind->empty_p ())
1533 return;
1534
1535 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty,
1536 maybe_live_values);
1537
1538 /* Add a default binding to "unknown". */
1539 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1540 const svalue *sval
1541 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1542 bind (mgr, reg_to_bind, sval);
1543 }
1544
1545 /* Purge state involving SVAL. */
1546
1547 void
1548 binding_cluster::purge_state_involving (const svalue *sval,
1549 region_model_manager *sval_mgr)
1550 {
1551 auto_vec<const binding_key *> to_remove;
1552 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1553 for (auto iter : m_map)
1554 {
1555 const binding_key *iter_key = iter.first;
1556 if (const symbolic_binding *symbolic_key
1557 = iter_key->dyn_cast_symbolic_binding ())
1558 {
1559 const region *reg = symbolic_key->get_region ();
1560 if (reg->involves_p (sval))
1561 to_remove.safe_push (iter_key);
1562 }
1563 const svalue *iter_sval = iter.second;
1564 if (iter_sval->involves_p (sval))
1565 to_make_unknown.safe_push (std::make_pair(iter_key,
1566 iter_sval->get_type ()));
1567 }
1568 for (auto iter : to_remove)
1569 {
1570 m_map.remove (iter);
1571 m_touched = true;
1572 }
1573 for (auto iter : to_make_unknown)
1574 {
1575 const svalue *new_sval
1576 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1577 m_map.put (iter.first, new_sval);
1578 }
1579 }
1580
1581 /* Get any SVAL bound to REG within this cluster via kind KIND,
1582 without checking parent regions of REG. */
1583
1584 const svalue *
1585 binding_cluster::get_binding (store_manager *mgr,
1586 const region *reg) const
1587 {
1588 if (reg->empty_p ())
1589 return NULL;
1590 const binding_key *reg_binding = binding_key::make (mgr, reg);
1591 const svalue *sval = m_map.get (reg_binding);
1592 if (sval)
1593 {
1594 /* If we have a struct with a single field, then the binding of
1595 the field will equal that of the struct, and looking up e.g.
1596 PARENT_REG.field within:
1597 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1598 will erroneously return INIT_VAL(OTHER_REG), rather than
1599 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1600 Fix this issue by iterating upwards whilst the bindings are equal,
1601 expressing the lookups as subvalues.
1602 We have to gather a list of subregion accesses, then walk it
1603 in reverse to get the subvalues. */
1604 auto_vec<const region *> regions;
1605 while (const region *parent_reg = reg->get_parent_region ())
1606 {
1607 const binding_key *parent_reg_binding
1608 = binding_key::make (mgr, parent_reg);
1609 if (parent_reg_binding == reg_binding
1610 && sval->get_type ()
1611 && reg->get_type ()
1612 && sval->get_type () != reg->get_type ())
1613 {
1614 regions.safe_push (reg);
1615 reg = parent_reg;
1616 }
1617 else
1618 break;
1619 }
1620 if (sval->get_type ()
1621 && reg->get_type ()
1622 && sval->get_type () == reg->get_type ())
1623 {
1624 unsigned i;
1625 const region *iter_reg;
1626 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1627 {
1628 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1629 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1630 sval, iter_reg);
1631 }
1632 }
1633 }
1634 return sval;
1635 }
1636
1637 /* Get any SVAL bound to REG within this cluster,
1638 either directly for REG, or recursively checking for bindings within
1639 parent regions and extracting subvalues if need be. */
1640
1641 const svalue *
1642 binding_cluster::get_binding_recursive (store_manager *mgr,
1643 const region *reg) const
1644 {
1645 if (const svalue *sval = get_binding (mgr, reg))
1646 return sval;
1647 if (reg != m_base_region)
1648 if (const region *parent_reg = reg->get_parent_region ())
1649 if (const svalue *parent_sval
1650 = get_binding_recursive (mgr, parent_reg))
1651 {
1652 /* Extract child svalue from parent svalue. */
1653 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1654 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1655 parent_sval, reg);
1656 }
1657 return NULL;
1658 }
1659
1660 /* Get any value bound for REG within this cluster. */
1661
1662 const svalue *
1663 binding_cluster::get_any_binding (store_manager *mgr,
1664 const region *reg) const
1665 {
1666 /* Look for a direct binding. */
1667 if (const svalue *direct_sval
1668 = get_binding_recursive (mgr, reg))
1669 return direct_sval;
1670
1671 /* If we had a write to a cluster of unknown size, we might
1672 have a self-binding of the whole base region with an svalue,
1673 where the base region is symbolic.
1674 Handle such cases by returning sub_svalue instances. */
1675 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1676 {
1677 /* Extract child svalue from parent svalue. */
1678 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1679 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1680 cluster_sval, reg);
1681 }
1682
1683 /* If this cluster has been touched by a symbolic write, then the content
1684 of any subregion not currently specifically bound is "UNKNOWN". */
1685 if (m_touched)
1686 {
1687 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1688 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1689 }
1690
1691 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1692 then we don't know if we're reading those values or not, so the result
1693 is also "UNKNOWN". */
1694 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1695 && m_map.elements () > 0)
1696 {
1697 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1698 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1699 }
1700
1701 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1702 return compound_sval;
1703
1704 /* Otherwise, the initial value, or uninitialized. */
1705 return NULL;
1706 }
1707
1708 /* Attempt to get a compound_svalue for the bindings within the cluster
1709 affecting REG (which could be the base region itself).
1710
1711 Create a compound_svalue with the subset of bindings the affect REG,
1712 offsetting them so that the offsets are relative to the start of REG
1713 within the cluster.
1714
1715 For example, REG could be one element within an array of structs.
1716
1717 Return the resulting compound_svalue, or NULL if there's a problem. */
1718
1719 const svalue *
1720 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1721 const region *reg) const
1722 {
1723 region_offset cluster_offset
1724 = m_base_region->get_offset (mgr->get_svalue_manager ());
1725 if (cluster_offset.symbolic_p ())
1726 return NULL;
1727 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1728 if (reg_offset.symbolic_p ())
1729 return NULL;
1730
1731 if (reg->empty_p ())
1732 return NULL;
1733
1734 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1735
1736 /* We will a build the result map in two parts:
1737 (a) result_map, holding the concrete keys from this cluster,
1738
1739 (b) default_map, holding the initial values for the region
1740 (e.g. uninitialized, initializer values, or zero), unless this
1741 cluster has been touched.
1742
1743 We will populate (a), and as we do, clobber (b), trimming and
1744 splitting its bindings as necessary.
1745 Finally, we will merge (b) into (a), giving a concrete map
1746 that merges both the initial values and the bound values from
1747 the binding_cluster.
1748 Doing it this way reduces N for the O(N^2) intersection-finding,
1749 perhaps we should have a spatial-organized data structure for
1750 concrete keys, though. */
1751
1752 binding_map result_map;
1753 binding_map default_map;
1754
1755 /* Set up default values in default_map. */
1756 const svalue *default_sval;
1757 if (m_touched)
1758 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1759 else
1760 default_sval = sval_mgr->get_or_create_initial_value (reg);
1761 const binding_key *default_key = binding_key::make (mgr, reg);
1762
1763 /* Express the bit-range of the default key for REG relative to REG,
1764 rather than to the base region. */
1765 const concrete_binding *concrete_default_key
1766 = default_key->dyn_cast_concrete_binding ();
1767 if (!concrete_default_key)
1768 return nullptr;
1769 const concrete_binding *default_key_relative_to_reg
1770 = mgr->get_concrete_binding (0, concrete_default_key->get_size_in_bits ());
1771 default_map.put (default_key_relative_to_reg, default_sval);
1772
1773 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1774 {
1775 const binding_key *key = (*iter).first;
1776 const svalue *sval = (*iter).second;
1777
1778 if (const concrete_binding *concrete_key
1779 = key->dyn_cast_concrete_binding ())
1780 {
1781 const bit_range &bound_range = concrete_key->get_bit_range ();
1782
1783 bit_size_t reg_bit_size;
1784 if (!reg->get_bit_size (&reg_bit_size))
1785 return NULL;
1786
1787 bit_range reg_range (reg_offset.get_bit_offset (),
1788 reg_bit_size);
1789
1790 /* Skip bindings that are outside the bit range of REG. */
1791 if (!bound_range.intersects_p (reg_range))
1792 continue;
1793
1794 /* We shouldn't have an exact match; that should have been
1795 handled already. */
1796 gcc_assert (!(reg_range == bound_range));
1797
1798 bit_range subrange (0, 0);
1799 if (reg_range.contains_p (bound_range, &subrange))
1800 {
1801 /* We have a bound range fully within REG.
1802 Add it to map, offsetting accordingly. */
1803
1804 /* Get offset of KEY relative to REG, rather than to
1805 the cluster. */
1806 const concrete_binding *offset_concrete_key
1807 = mgr->get_concrete_binding (subrange);
1808 result_map.put (offset_concrete_key, sval);
1809
1810 /* Clobber default_map, removing/trimming/spliting where
1811 it overlaps with offset_concrete_key. */
1812 default_map.remove_overlapping_bindings (mgr,
1813 offset_concrete_key,
1814 NULL, NULL, false);
1815 }
1816 else if (bound_range.contains_p (reg_range, &subrange))
1817 {
1818 /* REG is fully within the bound range, but
1819 is not equal to it; we're extracting a subvalue. */
1820 return sval->extract_bit_range (reg->get_type (),
1821 subrange,
1822 mgr->get_svalue_manager ());
1823 }
1824 else
1825 {
1826 /* REG and the bound range partially overlap. */
1827 bit_range reg_subrange (0, 0);
1828 bit_range bound_subrange (0, 0);
1829 reg_range.intersects_p (bound_range,
1830 &reg_subrange, &bound_subrange);
1831
1832 /* Get the bits from the bound value for the bits at the
1833 intersection (relative to the bound value). */
1834 const svalue *overlap_sval
1835 = sval->extract_bit_range (NULL_TREE,
1836 bound_subrange,
1837 mgr->get_svalue_manager ());
1838
1839 /* Get key for overlap, relative to the REG. */
1840 const concrete_binding *overlap_concrete_key
1841 = mgr->get_concrete_binding (reg_subrange);
1842 result_map.put (overlap_concrete_key, overlap_sval);
1843
1844 /* Clobber default_map, removing/trimming/spliting where
1845 it overlaps with overlap_concrete_key. */
1846 default_map.remove_overlapping_bindings (mgr,
1847 overlap_concrete_key,
1848 NULL, NULL, false);
1849 }
1850 }
1851 else
1852 /* Can't handle symbolic bindings. */
1853 return NULL;
1854 }
1855
1856 if (result_map.elements () == 0)
1857 return NULL;
1858
1859 /* Merge any bindings from default_map into result_map. */
1860 for (auto iter : default_map)
1861 {
1862 const binding_key *key = iter.first;
1863 const svalue *sval = iter.second;
1864 result_map.put (key, sval);
1865 }
1866
1867 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1868 }
1869
1870 /* Remove, truncate, and/or split any bindings within this map that
1871 could overlap REG.
1872
1873 If REG's base region or this cluster is symbolic and they're different
1874 base regions, then remove everything in this cluster's map, on the
1875 grounds that REG could be referring to the same memory as anything
1876 in the map.
1877
1878 If UNCERTAINTY is non-NULL, use it to record any svalues that
1879 were removed, as being maybe-bound.
1880
1881 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1882 were removed, as being maybe-live. */
1883
1884 void
1885 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1886 const region *reg,
1887 uncertainty_t *uncertainty,
1888 svalue_set *maybe_live_values)
1889 {
1890 if (reg->empty_p ())
1891 return;
1892 const binding_key *reg_binding = binding_key::make (mgr, reg);
1893
1894 const region *cluster_base_reg = get_base_region ();
1895 const region *other_base_reg = reg->get_base_region ();
1896 /* If at least one of the base regions involved is symbolic, and they're
1897 not the same base region, then consider everything in the map as
1898 potentially overlapping with reg_binding (even if it's a concrete
1899 binding and things in the map are concrete - they could be referring
1900 to the same memory when the symbolic base regions are taken into
1901 account). */
1902 bool always_overlap = (cluster_base_reg != other_base_reg
1903 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1904 || other_base_reg->get_kind () == RK_SYMBOLIC));
1905 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1906 maybe_live_values,
1907 always_overlap);
1908 }
1909
1910 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1911 MGR and MERGER.
1912 Return true if they can be merged, false otherwise. */
1913
1914 bool
1915 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1916 const binding_cluster *cluster_b,
1917 binding_cluster *out_cluster,
1918 store *out_store,
1919 store_manager *mgr,
1920 model_merger *merger)
1921 {
1922 gcc_assert (out_cluster);
1923
1924 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1925 true if either of the inputs is true. */
1926 if ((cluster_a && cluster_a->m_escaped)
1927 || (cluster_b && cluster_b->m_escaped))
1928 out_cluster->m_escaped = true;
1929 if ((cluster_a && cluster_a->m_touched)
1930 || (cluster_b && cluster_b->m_touched))
1931 out_cluster->m_touched = true;
1932
1933 /* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
1934 could be NULL. Handle these cases. */
1935 if (cluster_a == NULL)
1936 {
1937 gcc_assert (cluster_b != NULL);
1938 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1939 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1940 return true;
1941 }
1942 if (cluster_b == NULL)
1943 {
1944 gcc_assert (cluster_a != NULL);
1945 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1946 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1947 return true;
1948 }
1949
1950 /* The "both inputs are non-NULL" case. */
1951 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1952 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1953 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1954
1955 hash_set<const binding_key *> keys;
1956 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1957 iter_a != cluster_a->m_map.end (); ++iter_a)
1958 {
1959 const binding_key *key_a = (*iter_a).first;
1960 keys.add (key_a);
1961 }
1962 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1963 iter_b != cluster_b->m_map.end (); ++iter_b)
1964 {
1965 const binding_key *key_b = (*iter_b).first;
1966 keys.add (key_b);
1967 }
1968 int num_symbolic_keys = 0;
1969 int num_concrete_keys = 0;
1970 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1971 iter != keys.end (); ++iter)
1972 {
1973 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1974 const binding_key *key = *iter;
1975 const svalue *sval_a = cluster_a->get_any_value (key);
1976 const svalue *sval_b = cluster_b->get_any_value (key);
1977
1978 if (key->symbolic_p ())
1979 num_symbolic_keys++;
1980 else
1981 num_concrete_keys++;
1982
1983 if (sval_a == sval_b)
1984 {
1985 gcc_assert (sval_a);
1986 out_cluster->m_map.put (key, sval_a);
1987 continue;
1988 }
1989 else if (sval_a && sval_b)
1990 {
1991 if (const svalue *merged_sval
1992 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1993 {
1994 out_cluster->m_map.put (key, merged_sval);
1995 continue;
1996 }
1997 /* Merger of the svalues failed. Reject merger of the cluster. */
1998 return false;
1999 }
2000
2001 /* If we get here, then one cluster binds this key and the other
2002 doesn't; merge them as "UNKNOWN". */
2003 gcc_assert (sval_a || sval_b);
2004
2005 const svalue *bound_sval = sval_a ? sval_a : sval_b;
2006 tree type = bound_sval->get_type ();
2007 const svalue *unknown_sval
2008 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
2009
2010 /* ...but reject the merger if this sval shouldn't be mergeable
2011 (e.g. reject merging svalues that have non-purgable sm-state,
2012 to avoid falsely reporting memory leaks by merging them
2013 with something else). */
2014 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
2015 return false;
2016
2017 out_cluster->m_map.put (key, unknown_sval);
2018 }
2019
2020 /* We can only have at most one symbolic key per cluster,
2021 and if we do, we can't have any concrete keys.
2022 If this happens, mark the cluster as touched, with no keys. */
2023 if (num_symbolic_keys >= 2
2024 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
2025 {
2026 out_cluster->m_touched = true;
2027 out_cluster->m_map.empty ();
2028 }
2029
2030 /* We don't handle other kinds of overlaps yet. */
2031
2032 return true;
2033 }
2034
2035 /* Update this cluster to reflect an attempt to merge OTHER where there
2036 is no other cluster to merge with, and so we're notionally merging the
2037 bound values in OTHER with the initial value of the relevant regions.
2038
2039 Any bound keys in OTHER should be bound to unknown in this. */
2040
2041 void
2042 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
2043 store *out_store,
2044 store_manager *mgr)
2045 {
2046 for (map_t::iterator iter = other->m_map.begin ();
2047 iter != other->m_map.end (); ++iter)
2048 {
2049 const binding_key *iter_key = (*iter).first;
2050 const svalue *iter_sval = (*iter).second;
2051 const svalue *unknown_sval
2052 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
2053 (iter_sval->get_type ());
2054 m_map.put (iter_key, unknown_sval);
2055
2056 /* For any pointers in OTHER, the merger means that the
2057 concrete pointer becomes an unknown value, which could
2058 show up as a false report of a leak when considering what
2059 pointers are live before vs after.
2060 Avoid this by marking the base regions they point to as having
2061 escaped. */
2062 if (const region_svalue *region_sval
2063 = iter_sval->dyn_cast_region_svalue ())
2064 {
2065 const region *base_reg
2066 = region_sval->get_pointee ()->get_base_region ();
2067 if (base_reg->tracked_p ()
2068 && !base_reg->symbolic_for_unknown_ptr_p ())
2069 {
2070 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2071 c->mark_as_escaped ();
2072 }
2073 }
2074 }
2075 }
2076
2077 /* Mark this cluster as having escaped. */
2078
2079 void
2080 binding_cluster::mark_as_escaped ()
2081 {
2082 m_escaped = true;
2083 }
2084
2085 /* If this cluster has escaped (by this call, or by an earlier one, or
2086 by being an external param), then unbind all values and mark it
2087 as "touched", so that it has a conjured value, rather than an
2088 initial_svalue.
2089 Use P to purge state involving conjured_svalues. */
2090
2091 void
2092 binding_cluster::on_unknown_fncall (const gcall *call,
2093 store_manager *mgr,
2094 const conjured_purge &p)
2095 {
2096 if (m_escaped)
2097 {
2098 m_map.empty ();
2099
2100 if (!m_base_region->empty_p ())
2101 {
2102 /* Bind it to a new "conjured" value using CALL. */
2103 const svalue *sval
2104 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2105 (m_base_region->get_type (), call, m_base_region, p);
2106 bind (mgr, m_base_region, sval);
2107 }
2108
2109 m_touched = true;
2110 }
2111 }
2112
2113 /* Mark this cluster as having been clobbered by STMT.
2114 Use P to purge state involving conjured_svalues. */
2115
2116 void
2117 binding_cluster::on_asm (const gasm *stmt,
2118 store_manager *mgr,
2119 const conjured_purge &p)
2120 {
2121 m_map.empty ();
2122
2123 /* Bind it to a new "conjured" value using CALL. */
2124 const svalue *sval
2125 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2126 (m_base_region->get_type (), stmt, m_base_region, p);
2127 bind (mgr, m_base_region, sval);
2128
2129 m_touched = true;
2130 }
2131
2132 /* Return true if this cluster has escaped. */
2133
2134 bool
2135 binding_cluster::escaped_p () const
2136 {
2137 /* Consider the "errno" region to always have escaped. */
2138 if (m_base_region->get_kind () == RK_ERRNO)
2139 return true;
2140 return m_escaped;
2141 }
2142
2143 /* Return true if this binding_cluster has no information
2144 i.e. if there are no bindings, and it hasn't been marked as having
2145 escaped, or touched symbolically. */
2146
2147 bool
2148 binding_cluster::redundant_p () const
2149 {
2150 return (m_map.elements () == 0
2151 && !m_escaped
2152 && !m_touched);
2153 }
2154
2155 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2156
2157 static void
2158 append_pathvar_with_type (path_var pv,
2159 tree type,
2160 auto_vec<path_var> *out_pvs)
2161 {
2162 gcc_assert (pv.m_tree);
2163
2164 if (TREE_TYPE (pv.m_tree) != type)
2165 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2166
2167 out_pvs->safe_push (pv);
2168 }
2169
2170 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2171 appending the results to OUT_PVS. */
2172
2173 void
2174 binding_cluster::get_representative_path_vars (const region_model *model,
2175 svalue_set *visited,
2176 const region *base_reg,
2177 const svalue *sval,
2178 auto_vec<path_var> *out_pvs)
2179 const
2180 {
2181 sval = simplify_for_binding (sval);
2182
2183 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2184 {
2185 const binding_key *key = (*iter).first;
2186 const svalue *bound_sval = (*iter).second;
2187 if (bound_sval == sval)
2188 {
2189 if (const concrete_binding *ckey
2190 = key->dyn_cast_concrete_binding ())
2191 {
2192 auto_vec <const region *> subregions;
2193 base_reg->get_subregions_for_binding
2194 (model->get_manager (),
2195 ckey->get_start_bit_offset (),
2196 ckey->get_size_in_bits (),
2197 sval->get_type (),
2198 &subregions);
2199 unsigned i;
2200 const region *subregion;
2201 FOR_EACH_VEC_ELT (subregions, i, subregion)
2202 {
2203 if (path_var pv
2204 = model->get_representative_path_var (subregion,
2205 visited))
2206 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2207 }
2208 }
2209 else
2210 {
2211 const symbolic_binding *skey = (const symbolic_binding *)key;
2212 if (path_var pv
2213 = model->get_representative_path_var (skey->get_region (),
2214 visited))
2215 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2216 }
2217 }
2218 }
2219 }
2220
2221 /* Get any svalue bound to KEY, or NULL. */
2222
2223 const svalue *
2224 binding_cluster::get_any_value (const binding_key *key) const
2225 {
2226 return m_map.get (key);
2227 }
2228
2229 /* If this cluster has a single direct binding for the whole of the region,
2230 return it.
2231 For use in simplifying dumps. */
2232
2233 const svalue *
2234 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2235 {
2236 /* Fail gracefully if MGR is NULL to make it easier to dump store
2237 instances in the debugger. */
2238 if (mgr == NULL)
2239 return NULL;
2240
2241 if (m_map.elements () != 1)
2242 return NULL;
2243
2244 if (m_base_region->empty_p ())
2245 return NULL;
2246
2247 const binding_key *key = binding_key::make (mgr, m_base_region);
2248 return get_any_value (key);
2249 }
2250
2251 /* class store_manager. */
2252
2253 logger *
2254 store_manager::get_logger () const
2255 {
2256 return m_mgr->get_logger ();
2257 }
2258
2259 /* binding consolidation. */
2260
2261 const concrete_binding *
2262 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2263 bit_offset_t size_in_bits)
2264 {
2265 concrete_binding b (start_bit_offset, size_in_bits);
2266 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2267 return existing;
2268
2269 concrete_binding *to_save = new concrete_binding (b);
2270 m_concrete_binding_key_mgr.put (b, to_save);
2271 return to_save;
2272 }
2273
2274 const symbolic_binding *
2275 store_manager::get_symbolic_binding (const region *reg)
2276 {
2277 symbolic_binding b (reg);
2278 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2279 return existing;
2280
2281 symbolic_binding *to_save = new symbolic_binding (b);
2282 m_symbolic_binding_key_mgr.put (b, to_save);
2283 return to_save;
2284 }
2285
2286 /* class store. */
2287
2288 /* store's default ctor. */
2289
2290 store::store ()
2291 : m_called_unknown_fn (false)
2292 {
2293 }
2294
2295 /* store's copy ctor. */
2296
2297 store::store (const store &other)
2298 : m_cluster_map (other.m_cluster_map.elements ()),
2299 m_called_unknown_fn (other.m_called_unknown_fn)
2300 {
2301 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2302 iter != other.m_cluster_map.end ();
2303 ++iter)
2304 {
2305 const region *reg = (*iter).first;
2306 gcc_assert (reg);
2307 binding_cluster *c = (*iter).second;
2308 gcc_assert (c);
2309 m_cluster_map.put (reg, new binding_cluster (*c));
2310 }
2311 }
2312
2313 /* store's dtor. */
2314
2315 store::~store ()
2316 {
2317 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2318 iter != m_cluster_map.end ();
2319 ++iter)
2320 delete (*iter).second;
2321 }
2322
2323 /* store's assignment operator. */
2324
2325 store &
2326 store::operator= (const store &other)
2327 {
2328 /* Delete existing cluster map. */
2329 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2330 iter != m_cluster_map.end ();
2331 ++iter)
2332 delete (*iter).second;
2333 m_cluster_map.empty ();
2334
2335 m_called_unknown_fn = other.m_called_unknown_fn;
2336
2337 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2338 iter != other.m_cluster_map.end ();
2339 ++iter)
2340 {
2341 const region *reg = (*iter).first;
2342 gcc_assert (reg);
2343 binding_cluster *c = (*iter).second;
2344 gcc_assert (c);
2345 m_cluster_map.put (reg, new binding_cluster (*c));
2346 }
2347 return *this;
2348 }
2349
2350 /* store's equality operator. */
2351
2352 bool
2353 store::operator== (const store &other) const
2354 {
2355 if (m_called_unknown_fn != other.m_called_unknown_fn)
2356 return false;
2357
2358 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2359 return false;
2360
2361 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2362 iter != m_cluster_map.end ();
2363 ++iter)
2364 {
2365 const region *reg = (*iter).first;
2366 binding_cluster *c = (*iter).second;
2367 binding_cluster **other_slot
2368 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2369 if (other_slot == NULL)
2370 return false;
2371 if (*c != **other_slot)
2372 return false;
2373 }
2374
2375 gcc_checking_assert (hash () == other.hash ());
2376
2377 return true;
2378 }
2379
2380 /* Get a hash value for this store. */
2381
2382 hashval_t
2383 store::hash () const
2384 {
2385 hashval_t result = 0;
2386 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2387 iter != m_cluster_map.end ();
2388 ++iter)
2389 result ^= (*iter).second->hash ();
2390 return result;
2391 }
2392
2393 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2394 removing duplicate parents. */
2395
2396 static void
2397 get_sorted_parent_regions (auto_vec<const region *> *out,
2398 auto_vec<const region *> &in)
2399 {
2400 /* Get the set of parent regions. */
2401 hash_set<const region *> parent_regions;
2402 const region *iter_reg;
2403 unsigned i;
2404 FOR_EACH_VEC_ELT (in, i, iter_reg)
2405 {
2406 const region *parent_reg = iter_reg->get_parent_region ();
2407 gcc_assert (parent_reg);
2408 parent_regions.add (parent_reg);
2409 }
2410
2411 /* Write to OUT. */
2412 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2413 iter != parent_regions.end(); ++iter)
2414 out->safe_push (*iter);
2415
2416 /* Sort OUT. */
2417 out->qsort (region::cmp_ptr_ptr);
2418 }
2419
2420 /* Dump a representation of this store to PP, using SIMPLE to control how
2421 svalues and regions are printed.
2422 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2423 (to make it easier to use from the debugger). */
2424
2425 void
2426 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2427 store_manager *mgr) const
2428 {
2429 /* Sort into some deterministic order. */
2430 auto_vec<const region *> base_regions;
2431 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2432 iter != m_cluster_map.end (); ++iter)
2433 {
2434 const region *base_reg = (*iter).first;
2435 base_regions.safe_push (base_reg);
2436 }
2437 base_regions.qsort (region::cmp_ptr_ptr);
2438
2439 /* Gather clusters, organize by parent region, so that we can group
2440 together locals, globals, etc. */
2441 auto_vec<const region *> parent_regions;
2442 get_sorted_parent_regions (&parent_regions, base_regions);
2443
2444 const region *parent_reg;
2445 unsigned i;
2446 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2447 {
2448 gcc_assert (parent_reg);
2449 pp_string (pp, "clusters within ");
2450 parent_reg->dump_to_pp (pp, simple);
2451 if (multiline)
2452 pp_newline (pp);
2453 else
2454 pp_string (pp, " {");
2455
2456 const region *base_reg;
2457 unsigned j;
2458 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2459 {
2460 /* This is O(N * M), but N ought to be small. */
2461 if (base_reg->get_parent_region () != parent_reg)
2462 continue;
2463 binding_cluster *cluster
2464 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2465 if (!multiline)
2466 {
2467 if (j > 0)
2468 pp_string (pp, ", ");
2469 }
2470 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2471 {
2472 /* Special-case to simplify dumps for the common case where
2473 we just have one value directly bound to the whole of a
2474 region. */
2475 if (multiline)
2476 {
2477 pp_string (pp, " cluster for: ");
2478 base_reg->dump_to_pp (pp, simple);
2479 pp_string (pp, ": ");
2480 sval->dump_to_pp (pp, simple);
2481 if (cluster->escaped_p ())
2482 pp_string (pp, " (ESCAPED)");
2483 if (cluster->touched_p ())
2484 pp_string (pp, " (TOUCHED)");
2485 pp_newline (pp);
2486 }
2487 else
2488 {
2489 pp_string (pp, "region: {");
2490 base_reg->dump_to_pp (pp, simple);
2491 pp_string (pp, ", value: ");
2492 sval->dump_to_pp (pp, simple);
2493 if (cluster->escaped_p ())
2494 pp_string (pp, " (ESCAPED)");
2495 if (cluster->touched_p ())
2496 pp_string (pp, " (TOUCHED)");
2497 pp_string (pp, "}");
2498 }
2499 }
2500 else if (multiline)
2501 {
2502 pp_string (pp, " cluster for: ");
2503 base_reg->dump_to_pp (pp, simple);
2504 pp_newline (pp);
2505 cluster->dump_to_pp (pp, simple, multiline);
2506 }
2507 else
2508 {
2509 pp_string (pp, "base region: {");
2510 base_reg->dump_to_pp (pp, simple);
2511 pp_string (pp, "} has cluster: {");
2512 cluster->dump_to_pp (pp, simple, multiline);
2513 pp_string (pp, "}");
2514 }
2515 }
2516 if (!multiline)
2517 pp_string (pp, "}");
2518 }
2519 pp_printf (pp, "m_called_unknown_fn: %s",
2520 m_called_unknown_fn ? "TRUE" : "FALSE");
2521 if (multiline)
2522 pp_newline (pp);
2523 }
2524
2525 /* Dump a multiline representation of this store to stderr. */
2526
2527 DEBUG_FUNCTION void
2528 store::dump (bool simple) const
2529 {
2530 pretty_printer pp;
2531 pp_format_decoder (&pp) = default_tree_printer;
2532 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2533 pp.buffer->stream = stderr;
2534 dump_to_pp (&pp, simple, true, NULL);
2535 pp_newline (&pp);
2536 pp_flush (&pp);
2537 }
2538
2539 /* Assert that this object is valid. */
2540
2541 void
2542 store::validate () const
2543 {
2544 for (auto iter : m_cluster_map)
2545 iter.second->validate ();
2546 }
2547
2548 /* Return a new json::object of the form
2549 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2550 ... for each cluster within parent region},
2551 ...for each parent region,
2552 "called_unknown_fn": true/false}. */
2553
2554 json::object *
2555 store::to_json () const
2556 {
2557 json::object *store_obj = new json::object ();
2558
2559 /* Sort into some deterministic order. */
2560 auto_vec<const region *> base_regions;
2561 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2562 iter != m_cluster_map.end (); ++iter)
2563 {
2564 const region *base_reg = (*iter).first;
2565 base_regions.safe_push (base_reg);
2566 }
2567 base_regions.qsort (region::cmp_ptr_ptr);
2568
2569 /* Gather clusters, organize by parent region, so that we can group
2570 together locals, globals, etc. */
2571 auto_vec<const region *> parent_regions;
2572 get_sorted_parent_regions (&parent_regions, base_regions);
2573
2574 const region *parent_reg;
2575 unsigned i;
2576 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2577 {
2578 gcc_assert (parent_reg);
2579
2580 json::object *clusters_in_parent_reg_obj = new json::object ();
2581
2582 const region *base_reg;
2583 unsigned j;
2584 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2585 {
2586 /* This is O(N * M), but N ought to be small. */
2587 if (base_reg->get_parent_region () != parent_reg)
2588 continue;
2589 binding_cluster *cluster
2590 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2591 label_text base_reg_desc = base_reg->get_desc ();
2592 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2593 cluster->to_json ());
2594 }
2595 label_text parent_reg_desc = parent_reg->get_desc ();
2596 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2597 }
2598
2599 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2600
2601 return store_obj;
2602 }
2603
2604 /* Get any svalue bound to REG, or NULL. */
2605
2606 const svalue *
2607 store::get_any_binding (store_manager *mgr, const region *reg) const
2608 {
2609 const region *base_reg = reg->get_base_region ();
2610 binding_cluster **cluster_slot
2611 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2612 if (!cluster_slot)
2613 return NULL;
2614 return (*cluster_slot)->get_any_binding (mgr, reg);
2615 }
2616
2617 /* Set the value of LHS_REG to RHS_SVAL. */
2618
2619 void
2620 store::set_value (store_manager *mgr, const region *lhs_reg,
2621 const svalue *rhs_sval,
2622 uncertainty_t *uncertainty)
2623 {
2624 logger *logger = mgr->get_logger ();
2625 LOG_SCOPE (logger);
2626
2627 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2628
2629 if (lhs_reg->get_type ())
2630 rhs_sval = simplify_for_binding (rhs_sval);
2631 /* ...but if we have no type for the region, retain any cast. */
2632
2633 const region *lhs_base_reg = lhs_reg->get_base_region ();
2634 binding_cluster *lhs_cluster;
2635 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2636 {
2637 /* Reject attempting to bind values into a symbolic region
2638 for an unknown ptr; merely invalidate values below. */
2639 lhs_cluster = NULL;
2640
2641 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2642 then treat the region being pointed to as having escaped. */
2643 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2644 {
2645 const region *ptr_dst = ptr_sval->get_pointee ();
2646 const region *ptr_base_reg = ptr_dst->get_base_region ();
2647 mark_as_escaped (ptr_base_reg);
2648 }
2649 if (uncertainty)
2650 uncertainty->on_maybe_bound_sval (rhs_sval);
2651 }
2652 else if (lhs_base_reg->tracked_p ())
2653 {
2654 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2655 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2656 }
2657 else
2658 {
2659 /* Reject attempting to bind values into an untracked region;
2660 merely invalidate values below. */
2661 lhs_cluster = NULL;
2662 }
2663
2664 /* Bindings to a cluster can affect other clusters if a symbolic
2665 base region is involved.
2666 Writes to concrete clusters can't affect other concrete clusters,
2667 but can affect symbolic clusters.
2668 Writes to symbolic clusters can affect both concrete and symbolic
2669 clusters.
2670 Invalidate our knowledge of other clusters that might have been
2671 affected by the write.
2672 Gather the set of all svalues that might still be live even if
2673 the store doesn't refer to them. */
2674 svalue_set maybe_live_values;
2675 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2676 iter != m_cluster_map.end (); ++iter)
2677 {
2678 const region *iter_base_reg = (*iter).first;
2679 binding_cluster *iter_cluster = (*iter).second;
2680 if (iter_base_reg != lhs_base_reg
2681 && (lhs_cluster == NULL
2682 || lhs_cluster->symbolic_p ()
2683 || iter_cluster->symbolic_p ()))
2684 {
2685 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2686 switch (t_alias.get_value ())
2687 {
2688 default:
2689 gcc_unreachable ();
2690
2691 case tristate::TS_UNKNOWN:
2692 if (logger)
2693 {
2694 pretty_printer *pp = logger->get_printer ();
2695 logger->start_log_line ();
2696 logger->log_partial ("possible aliasing of ");
2697 iter_base_reg->dump_to_pp (pp, true);
2698 logger->log_partial (" when writing SVAL: ");
2699 rhs_sval->dump_to_pp (pp, true);
2700 logger->log_partial (" to LHS_REG: ");
2701 lhs_reg->dump_to_pp (pp, true);
2702 logger->end_log_line ();
2703 }
2704 /* Mark all of iter_cluster's iter_base_reg as unknown,
2705 using LHS_REG when considering overlaps, to handle
2706 symbolic vs concrete issues. */
2707 iter_cluster->mark_region_as_unknown
2708 (mgr,
2709 iter_base_reg, /* reg_to_bind */
2710 lhs_reg, /* reg_for_overlap */
2711 uncertainty,
2712 &maybe_live_values);
2713 break;
2714
2715 case tristate::TS_TRUE:
2716 gcc_unreachable ();
2717 break;
2718
2719 case tristate::TS_FALSE:
2720 /* If they can't be aliases, then don't invalidate this
2721 cluster. */
2722 break;
2723 }
2724 }
2725 }
2726 /* Given the set of svalues that might still be live, process them
2727 (e.g. marking regions as escaped).
2728 We do this after the iteration to avoid potentially changing
2729 m_cluster_map whilst iterating over it. */
2730 on_maybe_live_values (maybe_live_values);
2731 }
2732
2733 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2734
2735 tristate
2736 store::eval_alias (const region *base_reg_a,
2737 const region *base_reg_b) const
2738 {
2739 /* SSA names can't alias. */
2740 tree decl_a = base_reg_a->maybe_get_decl ();
2741 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2742 return tristate::TS_FALSE;
2743 tree decl_b = base_reg_b->maybe_get_decl ();
2744 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2745 return tristate::TS_FALSE;
2746
2747 /* Try both ways, for symmetry. */
2748 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2749 if (ts_ab.is_false ())
2750 return tristate::TS_FALSE;
2751 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2752 if (ts_ba.is_false ())
2753 return tristate::TS_FALSE;
2754 return tristate::TS_UNKNOWN;
2755 }
2756
2757 /* Half of store::eval_alias; called twice for symmetry. */
2758
2759 tristate
2760 store::eval_alias_1 (const region *base_reg_a,
2761 const region *base_reg_b) const
2762 {
2763 /* If they're in different memory spaces, they can't alias. */
2764 {
2765 enum memory_space memspace_a = base_reg_a->get_memory_space ();
2766 if (memspace_a != MEMSPACE_UNKNOWN)
2767 {
2768 enum memory_space memspace_b = base_reg_b->get_memory_space ();
2769 if (memspace_b != MEMSPACE_UNKNOWN
2770 && memspace_a != memspace_b)
2771 return tristate::TS_FALSE;
2772 }
2773 }
2774
2775 if (const symbolic_region *sym_reg_a
2776 = base_reg_a->dyn_cast_symbolic_region ())
2777 {
2778 const svalue *sval_a = sym_reg_a->get_pointer ();
2779 if (tree decl_b = base_reg_b->maybe_get_decl ())
2780 {
2781 if (!may_be_aliased (decl_b))
2782 return tristate::TS_FALSE;
2783 if (sval_a->get_kind () == SK_INITIAL)
2784 if (!is_global_var (decl_b))
2785 {
2786 /* The initial value of a pointer can't point to a local. */
2787 return tristate::TS_FALSE;
2788 }
2789 }
2790 if (sval_a->get_kind () == SK_INITIAL
2791 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2792 {
2793 /* The initial value of a pointer can't point to a
2794 region that was allocated on the heap after the beginning of the
2795 path. */
2796 return tristate::TS_FALSE;
2797 }
2798 if (const widening_svalue *widening_sval_a
2799 = sval_a->dyn_cast_widening_svalue ())
2800 {
2801 const svalue *base = widening_sval_a->get_base_svalue ();
2802 if (const region_svalue *region_sval
2803 = base->dyn_cast_region_svalue ())
2804 {
2805 const region *pointee = region_sval->get_pointee ();
2806 /* If we have sval_a is WIDENING(&REGION, OP), and
2807 B can't alias REGION, then B can't alias A either.
2808 For example, A might arise from
2809 for (ptr = &REGION; ...; ptr++)
2810 where sval_a is ptr in the 2nd iteration of the loop.
2811 We want to ensure that "*ptr" can only clobber things
2812 within REGION's base region. */
2813 tristate ts = eval_alias (pointee->get_base_region (),
2814 base_reg_b);
2815 if (ts.is_false ())
2816 return tristate::TS_FALSE;
2817 }
2818 }
2819 }
2820 return tristate::TS_UNKNOWN;
2821 }
2822
2823 /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2824
2825 void
2826 store::on_maybe_live_values (const svalue_set &maybe_live_values)
2827 {
2828 for (auto sval : maybe_live_values)
2829 {
2830 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
2831 {
2832 const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
2833 mark_as_escaped (base_reg);
2834 }
2835 }
2836 }
2837
2838 /* Remove all bindings overlapping REG within this store. */
2839
2840 void
2841 store::clobber_region (store_manager *mgr, const region *reg)
2842 {
2843 const region *base_reg = reg->get_base_region ();
2844 binding_cluster **slot = m_cluster_map.get (base_reg);
2845 if (!slot)
2846 return;
2847 binding_cluster *cluster = *slot;
2848 cluster->clobber_region (mgr, reg);
2849 if (cluster->redundant_p ())
2850 {
2851 delete cluster;
2852 m_cluster_map.remove (base_reg);
2853 }
2854 }
2855
2856 /* Remove any bindings for REG within this store. */
2857
2858 void
2859 store::purge_region (store_manager *mgr, const region *reg)
2860 {
2861 const region *base_reg = reg->get_base_region ();
2862 binding_cluster **slot = m_cluster_map.get (base_reg);
2863 if (!slot)
2864 return;
2865 binding_cluster *cluster = *slot;
2866 cluster->purge_region (mgr, reg);
2867 if (cluster->redundant_p ())
2868 {
2869 delete cluster;
2870 m_cluster_map.remove (base_reg);
2871 }
2872 }
2873
2874 /* Fill REG with SVAL. */
2875
2876 void
2877 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2878 {
2879 /* Filling an empty region is a no-op. */
2880 if (reg->empty_p ())
2881 return;
2882
2883 const region *base_reg = reg->get_base_region ();
2884 if (base_reg->symbolic_for_unknown_ptr_p ()
2885 || !base_reg->tracked_p ())
2886 return;
2887 binding_cluster *cluster = get_or_create_cluster (base_reg);
2888 cluster->fill_region (mgr, reg, sval);
2889 }
2890
2891 /* Zero-fill REG. */
2892
2893 void
2894 store::zero_fill_region (store_manager *mgr, const region *reg)
2895 {
2896 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2897 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2898 fill_region (mgr, reg, zero_sval);
2899 }
2900
2901 /* Mark REG as having unknown content. */
2902
2903 void
2904 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2905 uncertainty_t *uncertainty,
2906 svalue_set *maybe_live_values)
2907 {
2908 const region *base_reg = reg->get_base_region ();
2909 if (base_reg->symbolic_for_unknown_ptr_p ()
2910 || !base_reg->tracked_p ())
2911 return;
2912 binding_cluster *cluster = get_or_create_cluster (base_reg);
2913 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
2914 maybe_live_values);
2915 }
2916
2917 /* Purge state involving SVAL. */
2918
2919 void
2920 store::purge_state_involving (const svalue *sval,
2921 region_model_manager *sval_mgr)
2922 {
2923 auto_vec <const region *> base_regs_to_purge;
2924 for (auto iter : m_cluster_map)
2925 {
2926 const region *base_reg = iter.first;
2927 if (base_reg->involves_p (sval))
2928 base_regs_to_purge.safe_push (base_reg);
2929 else
2930 {
2931 binding_cluster *cluster = iter.second;
2932 cluster->purge_state_involving (sval, sval_mgr);
2933 }
2934 }
2935
2936 for (auto iter : base_regs_to_purge)
2937 purge_cluster (iter);
2938 }
2939
2940 /* Get the cluster for BASE_REG, or NULL (const version). */
2941
2942 const binding_cluster *
2943 store::get_cluster (const region *base_reg) const
2944 {
2945 gcc_assert (base_reg);
2946 gcc_assert (base_reg->get_base_region () == base_reg);
2947 if (binding_cluster **slot
2948 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2949 return *slot;
2950 else
2951 return NULL;
2952 }
2953
2954 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2955
2956 binding_cluster *
2957 store::get_cluster (const region *base_reg)
2958 {
2959 gcc_assert (base_reg);
2960 gcc_assert (base_reg->get_base_region () == base_reg);
2961 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2962 return *slot;
2963 else
2964 return NULL;
2965 }
2966
2967 /* Get the cluster for BASE_REG, creating it if doesn't already exist. */
2968
2969 binding_cluster *
2970 store::get_or_create_cluster (const region *base_reg)
2971 {
2972 gcc_assert (base_reg);
2973 gcc_assert (base_reg->get_base_region () == base_reg);
2974
2975 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2976 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2977
2978 /* We shouldn't create clusters for base regions that aren't trackable. */
2979 gcc_assert (base_reg->tracked_p ());
2980
2981 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2982 return *slot;
2983
2984 binding_cluster *cluster = new binding_cluster (base_reg);
2985 m_cluster_map.put (base_reg, cluster);
2986
2987 return cluster;
2988 }
2989
2990 /* Remove any cluster for BASE_REG, for use by
2991 region_model::unbind_region_and_descendents
2992 when popping stack frames and handling deleted heap regions. */
2993
2994 void
2995 store::purge_cluster (const region *base_reg)
2996 {
2997 gcc_assert (base_reg->get_base_region () == base_reg);
2998 binding_cluster **slot = m_cluster_map.get (base_reg);
2999 if (!slot)
3000 return;
3001 binding_cluster *cluster = *slot;
3002 delete cluster;
3003 m_cluster_map.remove (base_reg);
3004 }
3005
3006 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
3007 Return true if successful, or false if the stores can't be merged. */
3008
3009 bool
3010 store::can_merge_p (const store *store_a, const store *store_b,
3011 store *out_store, store_manager *mgr,
3012 model_merger *merger)
3013 {
3014 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
3015 out_store->m_called_unknown_fn = true;
3016
3017 /* Get the union of all base regions for STORE_A and STORE_B. */
3018 hash_set<const region *> base_regions;
3019 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
3020 iter_a != store_a->m_cluster_map.end (); ++iter_a)
3021 {
3022 const region *base_reg_a = (*iter_a).first;
3023 base_regions.add (base_reg_a);
3024 }
3025 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
3026 iter_b != store_b->m_cluster_map.end (); ++iter_b)
3027 {
3028 const region *base_reg_b = (*iter_b).first;
3029 base_regions.add (base_reg_b);
3030 }
3031
3032 /* Sort the base regions before considering them. This ought not to
3033 affect the results, but can affect which types UNKNOWN_REGIONs are
3034 created for in a run; sorting them thus avoids minor differences
3035 in logfiles. */
3036 auto_vec<const region *> vec_base_regions (base_regions.elements ());
3037 for (hash_set<const region *>::iterator iter = base_regions.begin ();
3038 iter != base_regions.end (); ++iter)
3039 vec_base_regions.quick_push (*iter);
3040 vec_base_regions.qsort (region::cmp_ptr_ptr);
3041 unsigned i;
3042 const region *base_reg;
3043 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
3044 {
3045 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
3046 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
3047 /* At least one of cluster_a and cluster_b must be non-NULL. */
3048 binding_cluster *out_cluster
3049 = out_store->get_or_create_cluster (base_reg);
3050 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
3051 out_cluster, out_store, mgr, merger))
3052 return false;
3053 }
3054 return true;
3055 }
3056
3057 /* Mark the cluster for BASE_REG as having escaped.
3058 For use when handling an unrecognized function call, and
3059 for params to "top-level" calls.
3060 Further unknown function calls could touch it, even if the cluster
3061 isn't reachable from args of those calls. */
3062
3063 void
3064 store::mark_as_escaped (const region *base_reg)
3065 {
3066 gcc_assert (base_reg);
3067 gcc_assert (base_reg->get_base_region () == base_reg);
3068
3069 if (base_reg->symbolic_for_unknown_ptr_p ()
3070 || !base_reg->tracked_p ())
3071 return;
3072
3073 binding_cluster *cluster = get_or_create_cluster (base_reg);
3074 cluster->mark_as_escaped ();
3075 }
3076
3077 /* Handle an unknown fncall by updating any clusters that have escaped
3078 (either in this fncall, or in a prior one). */
3079
3080 void
3081 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
3082 const conjured_purge &p)
3083 {
3084 m_called_unknown_fn = true;
3085
3086 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3087 iter != m_cluster_map.end (); ++iter)
3088 (*iter).second->on_unknown_fncall (call, mgr, p);
3089 }
3090
3091 /* Return true if a non-const pointer to BASE_REG (or something within it)
3092 has escaped to code outside of the TU being analyzed. */
3093
3094 bool
3095 store::escaped_p (const region *base_reg) const
3096 {
3097 gcc_assert (base_reg);
3098 gcc_assert (base_reg->get_base_region () == base_reg);
3099
3100 /* "errno" can always be modified by external code. */
3101 if (base_reg->get_kind () == RK_ERRNO)
3102 return true;
3103
3104 if (binding_cluster **cluster_slot
3105 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3106 return (*cluster_slot)->escaped_p ();
3107 return false;
3108 }
3109
3110 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3111 this store, using VISITED to ensure the traversal terminates. */
3112
3113 void
3114 store::get_representative_path_vars (const region_model *model,
3115 svalue_set *visited,
3116 const svalue *sval,
3117 auto_vec<path_var> *out_pvs) const
3118 {
3119 gcc_assert (sval);
3120
3121 /* Find all bindings that reference SVAL. */
3122 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3123 iter != m_cluster_map.end (); ++iter)
3124 {
3125 const region *base_reg = (*iter).first;
3126 binding_cluster *cluster = (*iter).second;
3127 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3128 out_pvs);
3129 }
3130
3131 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3132 {
3133 const region *reg = init_sval->get_region ();
3134 if (path_var pv = model->get_representative_path_var (reg,
3135 visited))
3136 out_pvs->safe_push (pv);
3137 }
3138 }
3139
3140 /* Remove all bindings overlapping REG within this store, removing
3141 any clusters that become redundant.
3142
3143 If UNCERTAINTY is non-NULL, use it to record any svalues that
3144 were removed, as being maybe-bound. */
3145
3146 void
3147 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3148 uncertainty_t *uncertainty)
3149 {
3150 const region *base_reg = reg->get_base_region ();
3151 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3152 {
3153 binding_cluster *cluster = *cluster_slot;
3154 if (reg == base_reg && !escaped_p (base_reg))
3155 {
3156 /* Remove whole cluster. */
3157 m_cluster_map.remove (base_reg);
3158 delete cluster;
3159 return;
3160 }
3161 /* Pass NULL for the maybe_live_values here, as we don't want to
3162 record the old svalues as being maybe-bound. */
3163 cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
3164 }
3165 }
3166
3167 /* Subclass of visitor that accumulates a hash_set of the regions that
3168 were visited. */
3169
3170 struct region_finder : public visitor
3171 {
3172 void visit_region (const region *reg) final override
3173 {
3174 m_regs.add (reg);
3175 }
3176
3177 hash_set<const region *> m_regs;
3178 };
3179
3180 /* Canonicalize this store, to maximize the chance of equality between
3181 instances. */
3182
3183 void
3184 store::canonicalize (store_manager *mgr)
3185 {
3186 /* If we have e.g.:
3187 cluster for: HEAP_ALLOCATED_REGION(543)
3188 ESCAPED
3189 TOUCHED
3190 where the heap region is empty and unreferenced, then purge that
3191 cluster, to avoid unbounded state chains involving these. */
3192
3193 /* Find regions that are referenced by bound values in the store. */
3194 region_finder s;
3195 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3196 iter != m_cluster_map.end (); ++iter)
3197 {
3198 binding_cluster *cluster = (*iter).second;
3199 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3200 bind_iter != cluster->m_map.end (); ++bind_iter)
3201 (*bind_iter).second->accept (&s);
3202 }
3203
3204 /* Locate heap-allocated regions that have empty bindings that weren't
3205 found above. */
3206 hash_set<const region *> purgeable_regions;
3207 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3208 iter != m_cluster_map.end (); ++iter)
3209 {
3210 const region *base_reg = (*iter).first;
3211 binding_cluster *cluster = (*iter).second;
3212 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3213 {
3214 /* Don't purge a heap-allocated region that's been marked as
3215 escaping, since this could be recording that a ptr to it
3216 was written to an unknown symbolic region along this
3217 path, and so we don't know whether it's referenced or
3218 not, and hence should report it as leaking
3219 (PR analyzer/106473). */
3220 if (cluster->escaped_p ())
3221 continue;
3222
3223 if (cluster->empty_p ())
3224 if (!s.m_regs.contains (base_reg))
3225 purgeable_regions.add (base_reg);
3226
3227 /* Also cover the UNKNOWN case. */
3228 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3229 if (sval->get_kind () == SK_UNKNOWN)
3230 if (!s.m_regs.contains (base_reg))
3231 purgeable_regions.add (base_reg);
3232 }
3233 }
3234
3235 /* Purge them. */
3236 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3237 iter != purgeable_regions.end (); ++iter)
3238 {
3239 const region *base_reg = *iter;
3240 purge_cluster (base_reg);
3241 }
3242 }
3243
3244 /* Subroutine for use by exploded_path::feasible_p.
3245
3246 We need to deal with state differences between:
3247 (a) when the exploded_graph is being initially constructed and
3248 (b) when replaying the state changes along a specific path in
3249 in exploded_path::feasible_p.
3250
3251 In (a), state merging happens, so when exploring a loop
3252 for (i = 0; i < 1024; i++)
3253 on successive iterations we have i == 0, then i == WIDENING.
3254
3255 In (b), no state merging happens, so naively replaying the path
3256 that goes twice through the loop then exits it
3257 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3258 that exits the loop, which would be found to be infeasible as i == 1,
3259 and the path would be rejected.
3260
3261 We need to fix up state during replay. This subroutine is
3262 called whenever we enter a supernode that we've already
3263 visited along this exploded_path, passing in OTHER_STORE
3264 from the destination enode's state.
3265
3266 Find bindings to widening values in OTHER_STORE.
3267 For all that are found, update the binding in this store to UNKNOWN. */
3268
3269 void
3270 store::loop_replay_fixup (const store *other_store,
3271 region_model_manager *mgr)
3272 {
3273 gcc_assert (other_store);
3274 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3275 iter != other_store->m_cluster_map.end (); ++iter)
3276 {
3277 const region *base_reg = (*iter).first;
3278 binding_cluster *cluster = (*iter).second;
3279 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3280 bind_iter != cluster->m_map.end (); ++bind_iter)
3281 {
3282 const binding_key *key = (*bind_iter).first;
3283 const svalue *sval = (*bind_iter).second;
3284 if (sval->get_kind () == SK_WIDENING)
3285 {
3286 binding_cluster *this_cluster
3287 = get_or_create_cluster (base_reg);
3288 const svalue *unknown
3289 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3290 this_cluster->bind_key (key, unknown);
3291 }
3292 }
3293 }
3294 }
3295
3296 /* Use R to replay the bindings from SUMMARY into this object. */
3297
3298 void
3299 store::replay_call_summary (call_summary_replay &r,
3300 const store &summary)
3301 {
3302 if (summary.m_called_unknown_fn)
3303 {
3304 /* A call to an external function occurred in the summary.
3305 Hence we need to invalidate our knownledge of globals,
3306 escaped regions, etc. */
3307 on_unknown_fncall (r.get_call_stmt (),
3308 r.get_store_manager (),
3309 conjured_purge (r.get_caller_model (),
3310 r.get_ctxt ()));
3311 }
3312
3313 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3314 for (auto kv : summary.m_cluster_map)
3315 keys.quick_push (kv.first);
3316 keys.qsort (region::cmp_ptr_ptr);
3317 for (auto base_reg : keys)
3318 replay_call_summary_cluster (r, summary, base_reg);
3319 }
3320
3321 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3322 into this object. */
3323
3324 void
3325 store::replay_call_summary_cluster (call_summary_replay &r,
3326 const store &summary,
3327 const region *summary_base_reg)
3328 {
3329 const call_details &cd = r.get_call_details ();
3330 region_model_manager *reg_mgr = r.get_manager ();
3331 store_manager *mgr = reg_mgr->get_store_manager ();
3332 const binding_cluster *summary_cluster
3333 = summary.get_cluster (summary_base_reg);
3334
3335 /* Handle "ESCAPED" and "TOUCHED" flags. */
3336 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3337 if (const region *caller_reg
3338 = r.convert_region_from_summary (summary_base_reg))
3339 {
3340 const region *caller_base_reg = caller_reg->get_base_region ();
3341 if (caller_base_reg->tracked_p ()
3342 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3343 {
3344 binding_cluster *caller_cluster
3345 = get_or_create_cluster (caller_base_reg);
3346 if (summary_cluster->escaped_p ())
3347 caller_cluster->mark_as_escaped ();
3348 if (summary_cluster->touched_p ())
3349 caller_cluster->m_touched = true;
3350 }
3351 }
3352
3353 switch (summary_base_reg->get_kind ())
3354 {
3355 /* Top-level regions. */
3356 case RK_FRAME:
3357 case RK_GLOBALS:
3358 case RK_CODE:
3359 case RK_STACK:
3360 case RK_HEAP:
3361 case RK_THREAD_LOCAL:
3362 case RK_ROOT:
3363 /* Child regions. */
3364 case RK_FIELD:
3365 case RK_ELEMENT:
3366 case RK_OFFSET:
3367 case RK_SIZED:
3368 case RK_CAST:
3369 case RK_BIT_RANGE:
3370 /* Other regions. */
3371 case RK_VAR_ARG:
3372 case RK_UNKNOWN:
3373 /* These should never be the base region of a binding cluster. */
3374 gcc_unreachable ();
3375 break;
3376
3377 case RK_FUNCTION:
3378 case RK_LABEL:
3379 case RK_STRING:
3380 /* These can be marked as escaping. */
3381 break;
3382
3383 case RK_SYMBOLIC:
3384 {
3385 const symbolic_region *summary_symbolic_reg
3386 = as_a <const symbolic_region *> (summary_base_reg);
3387 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3388 const svalue *caller_ptr_sval
3389 = r.convert_svalue_from_summary (summary_ptr_sval);
3390 if (!caller_ptr_sval)
3391 return;
3392 const region *caller_dest_reg
3393 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3394 NULL_TREE,
3395 cd.get_ctxt ());
3396 const svalue *summary_sval
3397 = summary.get_any_binding (mgr, summary_base_reg);
3398 if (!summary_sval)
3399 return;
3400 const svalue *caller_sval
3401 = r.convert_svalue_from_summary (summary_sval);
3402 if (!caller_sval)
3403 caller_sval =
3404 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3405 set_value (mgr, caller_dest_reg,
3406 caller_sval, NULL /* uncertainty_t * */);
3407 }
3408 break;
3409
3410 case RK_HEAP_ALLOCATED:
3411 case RK_DECL:
3412 case RK_ERRNO:
3413 case RK_PRIVATE:
3414 {
3415 const region *caller_dest_reg
3416 = r.convert_region_from_summary (summary_base_reg);
3417 if (!caller_dest_reg)
3418 return;
3419 const svalue *summary_sval
3420 = summary.get_any_binding (mgr, summary_base_reg);
3421 if (!summary_sval)
3422 summary_sval = reg_mgr->get_or_create_compound_svalue
3423 (summary_base_reg->get_type (),
3424 summary_cluster->get_map ());
3425 const svalue *caller_sval
3426 = r.convert_svalue_from_summary (summary_sval);
3427 if (!caller_sval)
3428 caller_sval =
3429 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3430 set_value (mgr, caller_dest_reg,
3431 caller_sval, NULL /* uncertainty_t * */);
3432 }
3433 break;
3434
3435 case RK_ALLOCA:
3436 /* Ignore bindings of alloca regions in the summary. */
3437 break;
3438 }
3439 }
3440
3441 #if CHECKING_P
3442
3443 namespace selftest {
3444
3445 /* Verify that bit_range::intersects_p works as expected. */
3446
3447 static void
3448 test_bit_range_intersects_p ()
3449 {
3450 bit_range b0 (0, 1);
3451 bit_range b1 (1, 1);
3452 bit_range b2 (2, 1);
3453 bit_range b3 (3, 1);
3454 bit_range b4 (4, 1);
3455 bit_range b5 (5, 1);
3456 bit_range b6 (6, 1);
3457 bit_range b7 (7, 1);
3458 bit_range b1_to_6 (1, 6);
3459 bit_range b0_to_7 (0, 8);
3460 bit_range b3_to_5 (3, 3);
3461 bit_range b6_to_7 (6, 2);
3462
3463 /* self-intersection is true. */
3464 ASSERT_TRUE (b0.intersects_p (b0));
3465 ASSERT_TRUE (b7.intersects_p (b7));
3466 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3467 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3468
3469 ASSERT_FALSE (b0.intersects_p (b1));
3470 ASSERT_FALSE (b1.intersects_p (b0));
3471 ASSERT_FALSE (b0.intersects_p (b7));
3472 ASSERT_FALSE (b7.intersects_p (b0));
3473
3474 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3475 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3476 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3477 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3478
3479 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3480 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3481 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3482 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3483 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3484 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3485
3486 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3487 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3488
3489 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3490 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3491
3492 bit_range r1 (0,0);
3493 bit_range r2 (0,0);
3494 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3495 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3496 ASSERT_EQ (r1.m_size_in_bits, 6);
3497 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3498 ASSERT_EQ (r2.m_size_in_bits, 6);
3499
3500 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3501 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3502 ASSERT_EQ (r1.m_size_in_bits, 6);
3503 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3504 ASSERT_EQ (r2.m_size_in_bits, 6);
3505 }
3506
3507 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3508
3509 static void
3510 assert_bit_range_from_mask_eq (const location &loc,
3511 unsigned HOST_WIDE_INT mask,
3512 const bit_range &expected)
3513 {
3514 bit_range actual (0, 0);
3515 bool ok = bit_range::from_mask (mask, &actual);
3516 ASSERT_TRUE_AT (loc, ok);
3517 ASSERT_EQ_AT (loc, actual, expected);
3518 }
3519
3520 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3521 out EXPECTED_BIT_RANGE. */
3522
3523 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3524 SELFTEST_BEGIN_STMT \
3525 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3526 EXPECTED_BIT_RANGE); \
3527 SELFTEST_END_STMT
3528
3529 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3530
3531 static void
3532 assert_no_bit_range_from_mask_eq (const location &loc,
3533 unsigned HOST_WIDE_INT mask)
3534 {
3535 bit_range actual (0, 0);
3536 bool ok = bit_range::from_mask (mask, &actual);
3537 ASSERT_FALSE_AT (loc, ok);
3538 }
3539
3540 /* Assert that bit_range::from_mask (MASK) returns false. */
3541
3542 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3543 SELFTEST_BEGIN_STMT \
3544 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3545 SELFTEST_END_STMT
3546
3547 /* Verify that bit_range::from_mask works as expected. */
3548
3549 static void
3550 test_bit_range_from_mask ()
3551 {
3552 /* Should fail on zero. */
3553 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3554
3555 /* Verify 1-bit masks. */
3556 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3557 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3558 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3559 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3560 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3561 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3562 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3563 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3564
3565 /* Verify N-bit masks starting at bit 0. */
3566 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3567 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3568 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3569 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3570 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3571 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3572 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3573 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3574
3575 /* Various other tests. */
3576 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3577 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3578 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3579
3580 /* Multiple ranges of set bits should fail. */
3581 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3582 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3583 }
3584
3585 /* Implementation detail of ASSERT_OVERLAP. */
3586
3587 static void
3588 assert_overlap (const location &loc,
3589 const concrete_binding *b1,
3590 const concrete_binding *b2)
3591 {
3592 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3593 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3594 }
3595
3596 /* Implementation detail of ASSERT_DISJOINT. */
3597
3598 static void
3599 assert_disjoint (const location &loc,
3600 const concrete_binding *b1,
3601 const concrete_binding *b2)
3602 {
3603 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3604 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3605 }
3606
3607 /* Assert that B1 and B2 overlap, checking both ways. */
3608
3609 #define ASSERT_OVERLAP(B1, B2) \
3610 SELFTEST_BEGIN_STMT \
3611 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3612 SELFTEST_END_STMT
3613
3614 /* Assert that B1 and B2 do not overlap, checking both ways. */
3615
3616 #define ASSERT_DISJOINT(B1, B2) \
3617 SELFTEST_BEGIN_STMT \
3618 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3619 SELFTEST_END_STMT
3620
3621 /* Verify that concrete_binding::overlaps_p works as expected. */
3622
3623 static void
3624 test_binding_key_overlap ()
3625 {
3626 store_manager mgr (NULL);
3627
3628 /* Various 8-bit bindings. */
3629 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3630 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3631 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3632 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3633
3634 /* 16-bit bindings. */
3635 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3636 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3637 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3638
3639 /* 32-bit binding. */
3640 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3641
3642 /* Everything should self-overlap. */
3643 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3644 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3645 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3646 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3647 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3648 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3649 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3650 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3651
3652 /* Verify the 8-bit bindings that don't overlap each other. */
3653 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3654 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3655
3656 /* Check for overlap of differently-sized bindings. */
3657 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3658 /* ...and with differing start points. */
3659 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3660 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3661 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3662 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3663
3664 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3665 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3666 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3667 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3668 }
3669
3670 /* Run all of the selftests within this file. */
3671
3672 void
3673 analyzer_store_cc_tests ()
3674 {
3675 test_bit_range_intersects_p ();
3676 test_bit_range_from_mask ();
3677 test_binding_key_overlap ();
3678 }
3679
3680 } // namespace selftest
3681
3682 #endif /* CHECKING_P */
3683
3684 } // namespace ana
3685
3686 #endif /* #if ENABLE_ANALYZER */