/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* modref_tree represents a decision tree that can be used by the alias
   analysis oracle to determine whether a given memory access can be affected
   by a function call.  For every function we collect two trees, one for
   loads and the other for stores.  The tree consists of the following
   levels:

   1) Base: this level represents the base alias set of the access and refers
      to sons (ref nodes).  The flag all_refs means that all possible
      references may alias.

      Because for LTO streaming we need to stream types rather than alias
      sets, modref_base_node is implemented as a template.
   2) Ref: this level represents the ref alias set and links to accesses
      unless the all_refs flag is set.
      Again, ref is a template to allow LTO streaming.
   3) Access: this level represents info about individual accesses.
      Presently we record whether the access is through a dereference of a
      function parameter and, if so, the access range.  */
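
/* For instance, for

     void f (struct s *p) { p->x = 1; }

   the store tree conceptually holds a single path: the base alias set of
   'struct s', under it the ref alias set of the field 'x', and under that
   one access node with parm_index 0, parm_offset equal to the byte offset
   of 'x' within 'struct s', and the bit range of the store.  (An
   illustrative sketch only; the actual alias set numbers depend on the
   front end and TBAA.)  */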

#ifndef GCC_MODREF_TREE_H
#define GCC_MODREF_TREE_H

struct ipa_modref_summary;

/* Memory access.  */
struct GTY(()) modref_access_node
{

  /* Access range information (in bits).  */
  poly_int64 offset;
  poly_int64 size;
  poly_int64 max_size;

  /* Offset from parameter pointer to the base of the access (in bytes).  */
  poly_int64 parm_offset;

  /* Index of parameter which specifies the base of access. -1 if base is not
     a function parameter.  */
  int parm_index;
  bool parm_offset_known;
  /* Number of times interval was extended during dataflow.
     This has to be limited in order to keep dataflow finite.  */
  unsigned char adjustments;

  /* Return true if access node holds no useful info.  */
  bool useful_p () const
  {
    return parm_index != -1;
  }
  /* Return true if range info is useful.  */
  bool range_info_useful_p () const
  {
    return parm_index != -1 && parm_offset_known
	   && (known_size_p (size)
	       || known_size_p (max_size)
	       || known_ge (offset, 0));
  }
  /* Return true if both accesses are the same.  */
  bool operator == (modref_access_node &a) const
  {
    if (parm_index != a.parm_index)
      return false;
    if (parm_index >= 0)
      {
	if (parm_offset_known != a.parm_offset_known)
	  return false;
	if (parm_offset_known
	    && !known_eq (parm_offset, a.parm_offset))
	  return false;
      }
    if (range_info_useful_p () != a.range_info_useful_p ())
      return false;
    if (range_info_useful_p ()
	&& (!known_eq (a.offset, offset)
	    || !known_eq (a.size, size)
	    || !known_eq (a.max_size, max_size)))
      return false;
    return true;
  }
  /* Return true if A is a sub-access of this access.  */
  bool contains (const modref_access_node &a) const
  {
    poly_int64 aoffset_adj = 0;
    if (parm_index >= 0)
      {
	if (parm_index != a.parm_index)
	  return false;
	if (parm_offset_known)
	  {
	    if (!a.parm_offset_known)
	      return false;
	    /* Accesses are never below parm_offset, so look
	       for the smaller offset.  */
	    if (!known_le (parm_offset, a.parm_offset))
	      return false;
	    aoffset_adj = (a.parm_offset - parm_offset)
			  << LOG2_BITS_PER_UNIT;
	  }
      }
    if (range_info_useful_p ())
      {
	if (!a.range_info_useful_p ())
	  return false;
	/* Sizes of stores are used to check that the object is big enough
	   to fit the store, so a smaller or unknown store is more general
	   than a larger one.  */
	if (known_size_p (size)
	    && (!known_size_p (a.size)
		|| !known_le (size, a.size)))
	  return false;
	if (known_size_p (max_size))
	  return known_subrange_p (a.offset + aoffset_adj,
				   a.max_size, offset, max_size);
	else
	  return known_le (offset, a.offset + aoffset_adj);
      }
    return true;
  }
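  /* Worked example (hypothetical values): THIS with parm_index 0,
     parm_offset 0, offset 0, size 32, max_size 128 contains A with
     parm_index 0, parm_offset 4, offset 0, size 32, max_size 32.
     A's range is rebased by (4 - 0) bytes to [32, 64) bits, which is a
     subrange of [0, 128), and THIS does not promise a bigger store than
     A does (32 <= 32 bits).  */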
  /* Update access range to the new parameters.
     If RECORD_ADJUSTMENTS is true, record the number of changes in the
     access and once the threshold is exceeded start dropping precision
     so that only constantly many updates are possible.  This makes the
     dataflow converge.  */
  void update (poly_int64 parm_offset1,
	       poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
	       bool record_adjustments)
  {
    if (known_eq (offset, offset1)
	&& known_eq (size, size1)
	&& known_eq (max_size, max_size1))
      return;
    if (!record_adjustments
	|| (++adjustments) < param_modref_max_adjustments)
      {
	parm_offset = parm_offset1;
	offset = offset1;
	size = size1;
	max_size = max_size1;
      }
    else
      {
	if (dump_file)
	  fprintf (dump_file,
		   "--param modref-max-adjustments limit reached:");
	if (!known_eq (parm_offset, parm_offset1))
	  {
	    if (dump_file)
	      fprintf (dump_file, " parm_offset cleared");
	    parm_offset_known = false;
	  }
	if (!known_eq (size, size1))
	  {
	    size = -1;
	    if (dump_file)
	      fprintf (dump_file, " size cleared");
	  }
	if (!known_eq (max_size, max_size1))
	  {
	    max_size = -1;
	    if (dump_file)
	      fprintf (dump_file, " max_size cleared");
	  }
	if (!known_eq (offset, offset1))
	  {
	    offset = 0;
	    if (dump_file)
	      fprintf (dump_file, " offset cleared");
	  }
	if (dump_file)
	  fprintf (dump_file, "\n");
      }
  }
  /* Merge in access A if it is possible to do so without losing
     precision.  Return true if successful.
     If RECORD_ADJUSTMENTS is true, remember how many times the interval
     was prolonged and punt when there are too many.  */
  bool merge (const modref_access_node &a, bool record_adjustments)
  {
    poly_int64 offset1 = 0;
    poly_int64 aoffset1 = 0;
    poly_int64 new_parm_offset = 0;

    /* We assume that containment was tested earlier.  */
    gcc_checking_assert (!contains (a) && !a.contains (*this));
    if (parm_index >= 0)
      {
	if (parm_index != a.parm_index)
	  return false;
	if (parm_offset_known)
	  {
	    if (!a.parm_offset_known)
	      return false;
	    if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
	      return false;
	  }
      }
    /* See if we can merge ranges.  */
    if (range_info_useful_p ())
      {
	/* In this case we have containment that should be
	   handled earlier.  */
	gcc_checking_assert (a.range_info_useful_p ());

	/* If a.size is less specific than size, merge only
	   if the intervals are otherwise equivalent.  */
	if (known_size_p (size)
	    && (!known_size_p (a.size) || known_lt (a.size, size)))
	  {
	    if (((known_size_p (max_size) || known_size_p (a.max_size))
		 && !known_eq (max_size, a.max_size))
		|| !known_eq (offset1, aoffset1))
	      return false;
	    update (new_parm_offset, offset1, a.size, max_size,
		    record_adjustments);
	    return true;
	  }
	/* If sizes are the same, we can extend the interval.  */
	if ((known_size_p (size) || known_size_p (a.size))
	    && !known_eq (size, a.size))
	  return false;
	if (known_le (offset1, aoffset1))
	  {
	    if (!known_size_p (max_size)
		|| known_ge (offset1 + max_size, aoffset1))
	      {
		update2 (new_parm_offset, offset1, size, max_size,
			 aoffset1, a.size, a.max_size,
			 record_adjustments);
		return true;
	      }
	  }
	else if (known_le (aoffset1, offset1))
	  {
	    if (!known_size_p (a.max_size)
		|| known_ge (aoffset1 + a.max_size, offset1))
	      {
		update2 (new_parm_offset, offset1, size, max_size,
			 aoffset1, a.size, a.max_size,
			 record_adjustments);
		return true;
	      }
	  }
	return false;
      }
    update (new_parm_offset, offset1,
	    size, max_size, record_adjustments);
    return true;
  }
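  /* Worked example (hypothetical values): merging two adjacent 4-byte
     stores THIS = {parm_index 0, parm_offset 0, offset 0, size 32,
     max_size 32} and A = {parm_index 0, parm_offset 0, offset 32,
     size 32, max_size 32} succeeds: the sizes match, the intervals
     touch, and update2 produces the single range offset 0, size 32,
     max_size 64.  */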
  /* Return true if merging A1 and B1 loses less information
     than merging A2 and B2.
     Assume that no containment or lossless merging is possible.  */
  static bool closer_pair_p (const modref_access_node &a1,
			     const modref_access_node &b1,
			     const modref_access_node &a2,
			     const modref_access_node &b2)
  {
    /* Merging different parm indexes comes to a complete loss
       of range info.  */
    if (a1.parm_index != b1.parm_index)
      return false;
    if (a2.parm_index != b2.parm_index)
      return true;
    /* If a parm offset was unknown while parm indexes were the same, we
       would already have containment; so all parm offsets are known.  */
    gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
    gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

    /* First normalize offsets for parm offsets.  */
    poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
    if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
	|| !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
      gcc_unreachable ();

    /* Now compute the distance between the intervals.  */
    poly_int64 dist1, dist2;
    if (known_le (offseta1, offsetb1))
      {
	if (!known_size_p (a1.max_size))
	  dist1 = 0;
	else
	  dist1 = offsetb1 - offseta1 - a1.max_size;
      }
    else
      {
	if (!known_size_p (b1.max_size))
	  dist1 = 0;
	else
	  dist1 = offseta1 - offsetb1 - b1.max_size;
      }
    if (known_le (offseta2, offsetb2))
      {
	if (!known_size_p (a2.max_size))
	  dist2 = 0;
	else
	  dist2 = offsetb2 - offseta2 - a2.max_size;
      }
    else
      {
	if (!known_size_p (b2.max_size))
	  dist2 = 0;
	else
	  dist2 = offseta2 - offsetb2 - b2.max_size;
      }
    /* It may happen that the intervals overlap when sizes differ.
       Prefer the overlap to non-overlap.  */
    if (known_lt (dist1, 0) && known_ge (dist2, 0))
      return true;
    if (known_lt (dist2, 0) && known_ge (dist1, 0))
      return false;
    if (known_lt (dist1, 0))
      /* If both overlap, minimize the overlap.  */
      return known_le (dist2, dist1);
    else
      /* If both are disjoint, look for the smaller distance.  */
      return known_le (dist1, dist2);
  }
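  /* Worked example (hypothetical values): with intervals a1 = [0, 32),
     b1 = [64, 96) the gap is dist1 = 64 - 0 - 32 = 32 bits, while
     a2 = [0, 32), b2 = [256, 288) gives dist2 = 224 bits; the first
     pair is closer, so closer_pair_p returns true and the forced merge
     prefers it.  */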

  /* Merge in access A while losing precision.  */
  void forced_merge (const modref_access_node &a, bool record_adjustments)
  {
    if (parm_index != a.parm_index)
      {
	gcc_checking_assert (parm_index != -1);
	parm_index = -1;
	return;
      }

    /* We assume that containment and lossless merging
       were tested earlier.  */
    gcc_checking_assert (!contains (a) && !a.contains (*this)
			 && !merge (a, record_adjustments));
    gcc_checking_assert (parm_offset_known && a.parm_offset_known);

    poly_int64 new_parm_offset, offset1, aoffset1;
    if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
      {
	parm_offset_known = false;
	return;
      }
    gcc_checking_assert (range_info_useful_p ()
			 && a.range_info_useful_p ());
    if (record_adjustments)
      adjustments += a.adjustments;
    update2 (new_parm_offset,
	     offset1, size, max_size,
	     aoffset1, a.size, a.max_size,
	     record_adjustments);
  }
private:
  /* Merge two ranges both starting at parm_offset1 and update THIS
     with the result.  */
  void update2 (poly_int64 parm_offset1,
		poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
		poly_int64 offset2, poly_int64 size2, poly_int64 max_size2,
		bool record_adjustments)
  {
    poly_int64 new_size = size1;

    if (!known_size_p (size2)
	|| known_le (size2, size1))
      new_size = size2;
    else
      gcc_checking_assert (known_le (size1, size2));

    if (known_le (offset1, offset2))
      ;
    else if (known_le (offset2, offset1))
      {
	std::swap (offset1, offset2);
	std::swap (max_size1, max_size2);
      }
    else
      gcc_unreachable ();

    poly_int64 new_max_size;

    if (!known_size_p (max_size1))
      new_max_size = max_size1;
    else if (!known_size_p (max_size2))
      new_max_size = max_size2;
    else
      {
	/* Rebase both range ends to OFFSET1 and pick whichever interval
	   reaches further.  Compare MAX_SIZE1, not the field max_size,
	   since the parameters may have been swapped above.  */
	max_size2 = max_size2 + offset2 - offset1;
	if (known_le (max_size1, max_size2))
	  new_max_size = max_size2;
	else if (known_le (max_size2, max_size1))
	  new_max_size = max_size1;
	else
	  gcc_unreachable ();
      }

    update (parm_offset1, offset1,
	    new_size, new_max_size, record_adjustments);
  }
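  /* Worked example (hypothetical values): for intervals [0, 32) and
     [16, 80) (offset1 0, max_size1 32; offset2 16, max_size2 64) the
     second bound becomes 64 + 16 - 0 = 80, so the union is [0, 80).
     If SIZE2 is unknown, NEW_SIZE also becomes unknown, since an
     unknown size is the more general value.  */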
  /* Given access nodes THIS and A, return true if both can be expressed
     with a common parm_offset.  In that case store the common parm offset
     in NEW_PARM_OFFSET, the start of the range of THIS in NEW_OFFSET and
     the start of the range of A in NEW_AOFFSET.  */
  bool combined_offsets (const modref_access_node &a,
			 poly_int64 *new_parm_offset,
			 poly_int64 *new_offset,
			 poly_int64 *new_aoffset) const
  {
    gcc_checking_assert (parm_offset_known && a.parm_offset_known);
    if (known_le (a.parm_offset, parm_offset))
      {
	*new_offset = offset
		      + ((parm_offset - a.parm_offset)
			 << LOG2_BITS_PER_UNIT);
	*new_aoffset = a.offset;
	*new_parm_offset = a.parm_offset;
	return true;
      }
    else if (known_le (parm_offset, a.parm_offset))
      {
	*new_aoffset = a.offset
		       + ((a.parm_offset - parm_offset)
			  << LOG2_BITS_PER_UNIT);
	*new_offset = offset;
	*new_parm_offset = parm_offset;
	return true;
      }
    else
      return false;
  }
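  /* Worked example (hypothetical values): THIS with parm_offset 8,
     offset 0 and A with parm_offset 4, offset 0 are rebased to
     NEW_PARM_OFFSET 4; NEW_OFFSET becomes (8 - 4) << 3 = 32 bits and
     NEW_AOFFSET stays 0.  The function fails only when neither poly
     offset is known to be smaller than the other.  */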
};

/* Access node specifying no useful info.  */
const modref_access_node unspecified_modref_access_node
  = {0, -1, -1, 0, -1, false, 0};

template <typename T>
struct GTY((user)) modref_ref_node
{
  T ref;
  bool every_access;
  vec <modref_access_node, va_gc> *accesses;

  modref_ref_node (T ref):
    ref (ref),
    every_access (false),
    accesses (NULL)
  {}

  /* Collapse the tree.  */
  void collapse ()
  {
    vec_free (accesses);
    accesses = NULL;
    every_access = true;
  }

  /* Verify that list does not contain redundant accesses.  */
  void verify ()
  {
    size_t i, i2;
    modref_access_node *a, *a2;

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
      {
	FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
	  if (i != i2)
	    gcc_assert (!a->contains (*a2));
      }
  }

  /* Insert access A.
     Collapse the list if it has more than MAX_ACCESSES entries.
     If RECORD_ADJUSTMENTS is true, avoid too many interval extensions.
     Return true if the record was changed.  */
  bool insert_access (modref_access_node a, size_t max_accesses,
		      bool record_adjustments)
  {
    /* If this base->ref pair has no access information, bail out.  */
    if (every_access)
      return false;

    /* Otherwise, try to insert the access, merging it with the existing
       ones where possible.  */
    size_t i, j;
    modref_access_node *a2;

    if (flag_checking)
      verify ();

    if (!a.useful_p ())
      {
	if (!every_access)
	  {
	    collapse ();
	    return true;
	  }
	return false;
      }

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
      {
	if (a2->contains (a))
	  return false;
	if (a.contains (*a2))
	  {
	    a.adjustments = 0;
	    a2->parm_index = a.parm_index;
	    a2->parm_offset_known = a.parm_offset_known;
	    a2->update (a.parm_offset, a.offset, a.size, a.max_size,
			record_adjustments);
	    try_merge_with (i);
	    return true;
	  }
	if (a2->merge (a, record_adjustments))
	  {
	    try_merge_with (i);
	    return true;
	  }
	gcc_checking_assert (!(a == *a2));
      }

    /* If this base->ref pair has too many accesses stored, find the least
       harmful merge and perform it; collapse only as a last resort.  */
    if (accesses && accesses->length () >= max_accesses)
      {
	if (max_accesses < 2)
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	/* Find the least harmful merge and perform it.  */
	int best1 = -1, best2 = -1;
	FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
	  {
	    for (j = i + 1; j < accesses->length (); j++)
	      if (best1 < 0
		  || modref_access_node::closer_pair_p
			 (*a2, (*accesses)[j],
			  (*accesses)[best1],
			  best2 < 0 ? a : (*accesses)[best2]))
		{
		  best1 = i;
		  best2 = j;
		}
	    if (modref_access_node::closer_pair_p
		  (*a2, a,
		   (*accesses)[best1],
		   best2 < 0 ? a : (*accesses)[best2]))
	      {
		best1 = i;
		best2 = -1;
	      }
	  }
	(*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
					 record_adjustments);
	if (!(*accesses)[best1].useful_p ())
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	if (dump_file && best2 >= 0)
	  fprintf (dump_file,
		   "--param modref-max-accesses limit reached;"
		   " merging %i and %i\n", best1, best2);
	else if (dump_file)
	  fprintf (dump_file,
		   "--param modref-max-accesses limit reached;"
		   " merging with %i\n", best1);
	try_merge_with (best1);
	if (best2 >= 0)
	  insert_access (a, max_accesses, record_adjustments);
	return true;
      }
    a.adjustments = 0;
    vec_safe_push (accesses, a);
    return true;
  }
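  /* Usage sketch (hypothetical caller): record a 4-byte store to
     *(parm0 + 4):

       modref_access_node a = {0, 32, 32, 4, 0, true, 0};
       ref_node->insert_access (a, param_modref_max_accesses, true);

     The initializer order is offset, size, max_size, parm_offset,
     parm_index, parm_offset_known, adjustments; the first three are in
     bits, parm_offset in bytes.  */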
private:
  /* Try to optimize the access list after entry INDEX was modified.  */
  void
  try_merge_with (size_t index)
  {
    size_t i;

    for (i = 0; i < accesses->length ();)
      if (i != index)
	{
	  bool found = false, restart = false;
	  modref_access_node *a = &(*accesses)[i];
	  modref_access_node *n = &(*accesses)[index];

	  if (n->contains (*a))
	    found = true;
	  if (!found && n->merge (*a, false))
	    found = restart = true;
	  if (found)
	    {
	      accesses->unordered_remove (i);
	      if (index == accesses->length ())
		{
		  index = i;
		  i++;
		}
	      if (restart)
		i = 0;
	    }
	  else
	    i++;
	}
      else
	i++;
  }
};

/* Base of an access.  */
template <typename T>
struct GTY((user)) modref_base_node
{
  T base;
  vec <modref_ref_node <T> *, va_gc> *refs;
  bool every_ref;

  modref_base_node (T base):
    base (base),
    refs (NULL),
    every_ref (false) {}

  /* Search REF; return NULL if not found.  */
  modref_ref_node <T> *search (T ref)
  {
    size_t i;
    modref_ref_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (refs, i, n)
      if (n->ref == ref)
	return n;
    return NULL;
  }

  /* Insert REF; if there are already MAX_REFS entries, fall back to the
     conservative ref 0.  Return the inserted ref; if CHANGED is non-NULL,
     set it to true when something changed.  */
  modref_ref_node <T> *insert_ref (T ref, size_t max_refs,
				   bool *changed = NULL)
  {
    modref_ref_node <T> *ref_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_ref)
      return NULL;

    /* Otherwise, insert a node for the ref of the access under the base.  */
    ref_node = search (ref);
    if (ref_node)
      return ref_node;

    /* We always allow inserting ref 0.  For non-0 refs there is an upper
       limit on the number of entries; if it is exceeded,
       drop the ref conservatively to 0.  */
    if (ref && refs && refs->length () >= max_refs)
      {
	if (dump_file)
	  fprintf (dump_file, "--param modref-max-refs limit reached;"
		   " using 0\n");
	ref = 0;
	ref_node = search (ref);
	if (ref_node)
	  return ref_node;
      }

    if (changed)
      *changed = true;

    ref_node = new (ggc_alloc <modref_ref_node <T> > ())
		 modref_ref_node <T> (ref);
    vec_safe_push (refs, ref_node);
    return ref_node;
  }

  void collapse ()
  {
    size_t i;
    modref_ref_node <T> *r;

    if (refs)
      {
	FOR_EACH_VEC_SAFE_ELT (refs, i, r)
	  {
	    r->collapse ();
	    ggc_free (r);
	  }
	vec_free (refs);
      }
    refs = NULL;
    every_ref = true;
  }
};

/* Map translating parameters across a function call.  */

struct modref_parm_map
{
  /* Index of the parameter we translate to.
     -1 indicates that the parameter is unknown;
     -2 indicates that the parameter points to local memory and the access
     can be discarded.  */
  int parm_index;
  bool parm_offset_known;
  poly_int64 parm_offset;
};
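
/* For example, when the callee's parameter 0 is passed the caller's
   parameter 2 adjusted by 4 bytes, as in a call g (q + 4), the map entry
   would be built as (an illustrative sketch):

     modref_parm_map map;
     map.parm_index = 2;
     map.parm_offset_known = true;
     map.parm_offset = 4;
*/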

/* Access tree for a single function.  */
template <typename T>
struct GTY((user)) modref_tree
{
  vec <modref_base_node <T> *, va_gc> *bases;
  size_t max_bases;
  size_t max_refs;
  size_t max_accesses;
  bool every_base;

  modref_tree (size_t max_bases, size_t max_refs, size_t max_accesses):
    bases (NULL),
    max_bases (max_bases),
    max_refs (max_refs),
    max_accesses (max_accesses),
    every_base (false) {}

  /* Insert BASE; if there are already MAX_BASES entries, fall back to REF,
     and if that does not fit either, to base 0.
     Return the inserted base; if CHANGED is non-NULL, set it to true when
     something changed.  */

  modref_base_node <T> *insert_base (T base, T ref, bool *changed = NULL)
  {
    modref_base_node <T> *base_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_base)
      return NULL;

    /* Otherwise, insert a node for the base of the access into the tree.  */
    base_node = search (base);
    if (base_node)
      return base_node;

    /* We always allow inserting base 0.  For non-0 bases there is an upper
       limit on the number of entries; if it is exceeded,
       drop the base conservatively to REF, and if that still does not fit,
       to 0.  */
    if (base && bases && bases->length () >= max_bases)
      {
	base_node = search (ref);
	if (base_node)
	  {
	    if (dump_file)
	      fprintf (dump_file, "--param modref-max-bases"
		       " limit reached; using ref\n");
	    return base_node;
	  }
	if (dump_file)
	  fprintf (dump_file, "--param modref-max-bases"
		   " limit reached; using 0\n");
	base = 0;
	base_node = search (base);
	if (base_node)
	  return base_node;
      }

    if (changed)
      *changed = true;

    base_node = new (ggc_alloc <modref_base_node <T> > ())
		  modref_base_node <T> (base);
    vec_safe_push (bases, base_node);
    return base_node;
  }

  /* Insert memory access to the tree.
     Return true if something changed.  */
  bool insert (T base, T ref, modref_access_node a,
	       bool record_adjustments)
  {
    if (every_base)
      return false;

    bool changed = false;

    /* No useful information tracked; collapse everything.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }

    modref_base_node <T> *base_node = insert_base (base, ref, &changed);
    base = base_node->base;
    /* If the table got full, we may end up with a useless base.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }
    if (base_node->every_ref)
      return changed;
    gcc_checking_assert (search (base) != NULL);

    /* No useful ref info tracked; collapse the base.  */
    if (!ref && !a.useful_p ())
      {
	base_node->collapse ();
	return true;
      }

    modref_ref_node <T> *ref_node = base_node->insert_ref (ref, max_refs,
							   &changed);
    ref = ref_node->ref;

    if (ref_node->every_access)
      return changed;
    changed |= ref_node->insert_access (a, max_accesses,
					record_adjustments);
    /* See if we failed to add a useful access.  */
    if (ref_node->every_access)
      {
	/* Collapse everything if there is neither a useful base nor
	   a useful ref.  */
	if (!base && !ref)
	  {
	    collapse ();
	    gcc_checking_assert (changed);
	  }
	/* Collapse the base if there is no useful ref.  */
	else if (!ref)
	  {
	    base_node->collapse ();
	    gcc_checking_assert (changed);
	  }
      }
    return changed;
  }

  /* Remove tree branches that are not useful (i.e. they will always pass).  */

  void cleanup ()
  {
    size_t i, j;
    modref_base_node <T> *base_node;
    modref_ref_node <T> *ref_node;

    if (!bases)
      return;

    for (i = 0; vec_safe_iterate (bases, i, &base_node);)
      {
	if (base_node->refs)
	  for (j = 0; vec_safe_iterate (base_node->refs, j, &ref_node);)
	    {
	      if (!ref_node->every_access
		  && (!ref_node->accesses
		      || !ref_node->accesses->length ()))
		{
		  base_node->refs->unordered_remove (j);
		  vec_free (ref_node->accesses);
		  ggc_delete (ref_node);
		}
	      else
		j++;
	    }
	if (!base_node->every_ref
	    && (!base_node->refs || !base_node->refs->length ()))
	  {
	    bases->unordered_remove (i);
	    vec_free (base_node->refs);
	    ggc_delete (base_node);
	  }
	else
	  i++;
      }
    if (bases && !bases->length ())
      {
	vec_free (bases);
	bases = NULL;
      }
  }

  /* Merge OTHER into the tree.
     PARM_MAP, if non-NULL, maps parm indexes of the callee to the caller;
     -2 is used to signal that the parameter is local and does not need to
     be tracked.
     Return true if something has changed.  */
  bool merge (modref_tree <T> *other, vec <modref_parm_map> *parm_map,
	      bool record_accesses)
  {
    if (!other || every_base)
      return false;
    if (other->every_base)
      {
	collapse ();
	return true;
      }

    bool changed = false;
    size_t i, j, k;
    modref_base_node <T> *base_node, *my_base_node;
    modref_ref_node <T> *ref_node;
    modref_access_node *access_node;
    bool release = false;

    /* For self-recursive functions we may end up merging a summary into
       itself; produce a copy first so we do not modify the summary while
       reading it.  */
    if (other == this)
      {
	release = true;
	other = modref_tree<T>::create_ggc (max_bases, max_refs, max_accesses);
	other->copy_from (this);
      }

    FOR_EACH_VEC_SAFE_ELT (other->bases, i, base_node)
      {
	if (base_node->every_ref)
	  {
	    my_base_node = insert_base (base_node->base, 0, &changed);
	    if (my_base_node && !my_base_node->every_ref)
	      {
		my_base_node->collapse ();
		cleanup ();
		changed = true;
	      }
	  }
	else
	  FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	    {
	      if (ref_node->every_access)
		{
		  changed |= insert (base_node->base,
				     ref_node->ref,
				     unspecified_modref_access_node,
				     record_accesses);
		}
	      else
		FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
		  {
		    modref_access_node a = *access_node;

		    if (a.parm_index != -1 && parm_map)
		      {
			if (a.parm_index >= (int)parm_map->length ())
			  a.parm_index = -1;
			else if ((*parm_map) [a.parm_index].parm_index == -2)
			  continue;
			else
			  {
			    a.parm_offset
			      += (*parm_map) [a.parm_index].parm_offset;
			    a.parm_offset_known
			      &= (*parm_map)
				 [a.parm_index].parm_offset_known;
			    a.parm_index
			      = (*parm_map) [a.parm_index].parm_index;
			  }
		      }
		    changed |= insert (base_node->base, ref_node->ref, a,
				       record_accesses);
		  }
	    }
      }
    if (release)
      ggc_delete (other);
    return changed;
  }
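
  /* Usage sketch (hypothetical names): to fold a callee summary into the
     caller across a call g (q + 4), where q is caller parameter 2:

       auto_vec <modref_parm_map> parm_map;
       modref_parm_map m;
       m.parm_index = 2;
       m.parm_offset_known = true;
       m.parm_offset = 4;
       parm_map.safe_push (m);
       caller_loads->merge (callee_loads, &parm_map, true);

     Accesses based on callee parameter 0 are rebased to caller
     parameter 2 with their parm_offset increased by 4 bytes.  */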

  /* Copy OTHER to THIS.  */
  void copy_from (modref_tree <T> *other)
  {
    merge (other, NULL, false);
  }

  /* Search BASE in the tree; return NULL if not found.  */
  modref_base_node <T> *search (T base)
  {
    size_t i;
    modref_base_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (bases, i, n)
      if (n->base == base)
	return n;
    return NULL;
  }

  /* Return a GGC-allocated instance.  We explicitly call destructors via
     ggc_delete and do not want finalizers to be registered and
     called at garbage collection time.  */
  static modref_tree<T> *create_ggc (size_t max_bases, size_t max_refs,
				     size_t max_accesses)
  {
    return new (ggc_alloc_no_dtor<modref_tree<T>> ())
	     modref_tree<T> (max_bases, max_refs, max_accesses);
  }

  /* Remove all records and mark the tree as aliasing everything.  */
  void collapse ()
  {
    size_t i;
    modref_base_node <T> *n;

    if (bases)
      {
	FOR_EACH_VEC_SAFE_ELT (bases, i, n)
	  {
	    n->collapse ();
	    ggc_free (n);
	  }
	vec_free (bases);
      }
    bases = NULL;
    every_base = true;
  }

  /* Release memory.  */
  ~modref_tree ()
  {
    collapse ();
  }

  /* Update parameter indexes according to MAP.  */
  void
  remap_params (vec <int> *map)
  {
    size_t i;
    modref_base_node <T> *base_node;
    FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
      {
	size_t j;
	modref_ref_node <T> *ref_node;
	FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	  {
	    size_t k;
	    modref_access_node *access_node;
	    FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
	      /* Index 0 is a valid parameter, so remap all non-negative
		 indexes.  */
	      if (access_node->parm_index >= 0)
		{
		  if (access_node->parm_index < (int)map->length ())
		    access_node->parm_index = (*map)[access_node->parm_index];
		  else
		    access_node->parm_index = -1;
		}
	  }
      }
  }
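
  /* For instance, if parameter 0 was removed from a function that
     originally had three parameters, MAP would be {-1, 0, 1}
     (an illustrative sketch): accesses through old parameter 0 lose
     their base, while parameters 1 and 2 are renumbered to 0 and 1.  */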
};

void modref_c_tests ();

void gt_ggc_mx (modref_tree <int>* const&);
void gt_ggc_mx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&);
void gt_pch_nx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_tree <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_base_node <int>*);
void gt_ggc_mx (modref_base_node <tree_node*>* &);
void gt_pch_nx (modref_base_node <int>* const&);
void gt_pch_nx (modref_base_node <tree_node*>* const&);
void gt_pch_nx (modref_base_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_base_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_ref_node <int>*);
void gt_ggc_mx (modref_ref_node <tree_node*>* &);
void gt_pch_nx (modref_ref_node <int>* const&);
void gt_pch_nx (modref_ref_node <tree_node*>* const&);
void gt_pch_nx (modref_ref_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_ref_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

#endif