/* gcc/lto-streamer.c — imported from the GCC LTO branch merge
   (git-blame viewer residue removed).  */
1/* Miscellaneous utilities for GIMPLE streaming. Things that are used
2 in both input and output are here.
3
4 Copyright 2009 Free Software Foundation, Inc.
5 Contributed by Doug Kwan <dougkwan@google.com>
6
7This file is part of GCC.
8
9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
11Software Foundation; either version 3, or (at your option) any later
12version.
13
14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17for more details.
18
19You should have received a copy of the GNU General Public License
20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "toplev.h"
28#include "flags.h"
29#include "tree.h"
30#include "gimple.h"
31#include "tree-flow.h"
32#include "diagnostic.h"
33#include "bitmap.h"
34#include "vec.h"
35#include "lto-streamer.h"
36
/* Statistics gathered during LTO, WPA and LTRANS.  */
struct lto_stats_d lto_stats;

/* LTO uses bitmaps with different life-times.  So use a separate
   obstack for all LTO bitmaps.  */
static bitmap_obstack lto_obstack;

/* True once LTO_OBSTACK has been initialized; set lazily by
   lto_bitmap_alloc.  */
static bool lto_obstack_initialized;
44
45
46/* Return a string representing LTO tag TAG. */
47
48const char *
49lto_tag_name (enum LTO_tags tag)
50{
51 if (lto_tag_is_tree_code_p (tag))
52 {
53 /* For tags representing tree nodes, return the name of the
54 associated tree code. */
55 return tree_code_name[lto_tag_to_tree_code (tag)];
56 }
57
58 if (lto_tag_is_gimple_code_p (tag))
59 {
60 /* For tags representing gimple statements, return the name of
61 the associated gimple code. */
62 return gimple_code_name[lto_tag_to_gimple_code (tag)];
63 }
64
65 switch (tag)
66 {
67 case LTO_null:
68 return "LTO_null";
69 case LTO_bb0:
70 return "LTO_bb0";
71 case LTO_bb1:
72 return "LTO_bb1";
73 case LTO_eh_region:
74 return "LTO_eh_region";
75 case LTO_function:
76 return "LTO_function";
77 case LTO_eh_table:
78 return "LTO_eh_table";
79 case LTO_ert_cleanup:
80 return "LTO_ert_cleanup";
81 case LTO_ert_try:
82 return "LTO_ert_try";
83 case LTO_ert_allowed_exceptions:
84 return "LTO_ert_allowed_exceptions";
85 case LTO_ert_must_not_throw:
86 return "LTO_ert_must_not_throw";
87 case LTO_tree_pickle_reference:
88 return "LTO_tree_pickle_reference";
89 case LTO_field_decl_ref:
90 return "LTO_field_decl_ref";
91 case LTO_function_decl_ref:
92 return "LTO_function_decl_ref";
93 case LTO_label_decl_ref:
94 return "LTO_label_decl_ref";
95 case LTO_namespace_decl_ref:
96 return "LTO_namespace_decl_ref";
97 case LTO_result_decl_ref:
98 return "LTO_result_decl_ref";
99 case LTO_ssa_name_ref:
100 return "LTO_ssa_name_ref";
101 case LTO_type_decl_ref:
102 return "LTO_type_decl_ref";
103 case LTO_type_ref:
104 return "LTO_type_ref";
105 case LTO_global_decl_ref:
106 return "LTO_global_decl_ref";
107 default:
108 return "LTO_UNKNOWN";
109 }
110}
111
112
113/* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */
114
115bitmap
116lto_bitmap_alloc (void)
117{
118 if (!lto_obstack_initialized)
119 {
120 bitmap_obstack_initialize (&lto_obstack);
121 lto_obstack_initialized = true;
122 }
123 return BITMAP_ALLOC (&lto_obstack);
124}
125
/* Free bitmap B, which was allocated by lto_bitmap_alloc.  */

void
lto_bitmap_free (bitmap b)
{
  BITMAP_FREE (b);
}
133
134
135/* Get a section name for a particular type or name. The NAME field
136 is only used if SECTION_TYPE is LTO_section_function_body or
137 LTO_static_initializer. For all others it is ignored. The callee
138 of this function is responcible to free the returned name. */
139
140char *
141lto_get_section_name (int section_type, const char *name)
142{
143 switch (section_type)
144 {
145 case LTO_section_function_body:
146 return concat (LTO_SECTION_NAME_PREFIX, name, NULL);
147
148 case LTO_section_static_initializer:
149 return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);
150
151 case LTO_section_symtab:
152 return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);
153
154 case LTO_section_decls:
155 return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);
156
157 case LTO_section_cgraph:
158 return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);
159
160 case LTO_section_ipa_pure_const:
161 return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);
162
163 case LTO_section_ipa_reference:
164 return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);
165
166 case LTO_section_wpa_fixup:
167 return concat (LTO_SECTION_NAME_PREFIX, ".wpa_fixup", NULL);
168
169 case LTO_section_opts:
170 return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);
171
172 default:
173 internal_error ("bytecode stream: unexpected LTO section %s", name);
174 }
175}
176
177
/* Show various memory usage statistics related to LTO.  The report is
   written to stderr; every line is prefixed with the current phase
   name (LTO, WPA or LTRANS) derived from the flag_lto/flag_wpa
   flags.  */

void
print_lto_report (void)
{
  /* Phase tag used as the [%s] prefix on every report line.  */
  const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
  unsigned i;

  fprintf (stderr, "%s statistics\n", s);
  fprintf (stderr, "[%s] # of input files: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);

  fprintf (stderr, "[%s] # of input cgraph nodes: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	   lto_stats.num_input_cgraph_nodes);

  fprintf (stderr, "[%s] # of function bodies: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	   lto_stats.num_function_bodies);

  fprintf (stderr, "[%s] ", s);
  print_gimple_types_stats ();

  /* Per-tree-code counts of nodes read from the bytecode stream.  */
  for (i = 0; i < NUM_TREE_CODES; i++)
    if (lto_stats.num_trees[i])
      fprintf (stderr, "[%s] # of '%s' objects read: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       tree_code_name[i], lto_stats.num_trees[i]);

  if (flag_lto)
    {
      /* Output-side compression statistics (writer).  */
      fprintf (stderr, "[%s] Compression: "
	       HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
	       HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
	       lto_stats.num_output_il_bytes,
	       lto_stats.num_compressed_il_bytes);
      if (lto_stats.num_output_il_bytes > 0)
	{
	  const float dividend = (float) lto_stats.num_compressed_il_bytes;
	  const float divisor = (float) lto_stats.num_output_il_bytes;
	  fprintf (stderr, " (ratio: %f)", dividend / divisor);
	}
      fprintf (stderr, "\n");
    }

  if (flag_wpa)
    {
      fprintf (stderr, "[%s] # of output files: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_output_files);

      fprintf (stderr, "[%s] # of output cgraph nodes: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_output_cgraph_nodes);

      fprintf (stderr, "[%s] # callgraph partitions: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_cgraph_partitions);

      /* Input-side decompression statistics (reader).  */
      fprintf (stderr, "[%s] Compression: "
	       HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
	       HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
	       lto_stats.num_input_il_bytes,
	       lto_stats.num_uncompressed_il_bytes);
      if (lto_stats.num_input_il_bytes > 0)
	{
	  const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
	  const float divisor = (float) lto_stats.num_input_il_bytes;
	  fprintf (stderr, " (ratio: %f)", dividend / divisor);
	}
      fprintf (stderr, "\n");
    }

  /* Per-section sizes of the LTO sections that were mapped in.  */
  for (i = 0; i < LTO_N_SECTION_TYPES; i++)
    fprintf (stderr, "[%s] Size of mmap'd section %s: "
	     HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
	     lto_section_name[i], lto_stats.section_size[i]);
}
256
257
/* Create a new, empty bitpack.  The caller owns the result and must
   release it with bitpack_delete.  */

struct bitpack_d *
bitpack_create (void)
{
  return XCNEW (struct bitpack_d);
}
265
266
/* Free the memory used by bitpack BP: first its values vector, then
   the bitpack structure itself.  */

void
bitpack_delete (struct bitpack_d *bp)
{
  VEC_free (bitpack_word_t, heap, bp->values);
  free (bp);
}
275
276
/* Return an index to the word in bitpack BP that contains the next
   NBITS.  As a side effect, when the next NBITS would straddle a word
   boundary, BP->NUM_BITS and BP->FIRST_UNUSED_BIT are advanced past
   the unused tail of the current word so that values never span two
   words.  */

static inline unsigned
bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
{
  unsigned last, ix;

  /* In principle, the next word to use is determined by the
     number of bits already processed in BP.  */
  ix = bp->num_bits / BITS_PER_BITPACK_WORD;

  /* All the encoded bit patterns in BP are contiguous, therefore if
     the next NBITS would straddle over two different words, move the
     index to the next word and update the number of encoded bits
     by adding up the hole of unused bits created by this move.  */
  bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
  last = bp->first_unused_bit + nbits - 1;
  if (last >= BITS_PER_BITPACK_WORD)
    {
      ix++;
      bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
      bp->first_unused_bit = 0;
    }

  return ix;
}
304
305
/* Pack NBITS of value VAL into bitpack BP.  NBITS must be between 1
   and BITS_PER_BITPACK_WORD; values never straddle a word boundary
   (see bp_get_next_word).

   NOTE(review): VAL is not masked to NBITS here, so bits of VAL above
   NBITS would corrupt neighboring values — callers appear responsible
   for passing a value that fits; confirm against the callers.  */

void
bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
{
  unsigned ix;
  bitpack_word_t word;

  /* We cannot encode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word will contain the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  if (ix >= VEC_length (bitpack_word_t, bp->values))
    {
      /* If there is no room left in the last word of the values
	 array, add a new word.  Additionally, we should only
	 need to add a single word, since every pack operation cannot
	 use more bits than fit in a single word.  */
      gcc_assert (ix < VEC_length (bitpack_word_t, bp->values) + 1);
      VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
    }

  /* Grab the last word to pack VAL into.  */
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* To fit VAL in WORD, we need to shift VAL to the left to
     skip the bottom BP->FIRST_UNUSED_BIT bits.  */
  gcc_assert (BITS_PER_BITPACK_WORD >= bp->first_unused_bit + nbits);
  val <<= bp->first_unused_bit;

  /* Update WORD with VAL.  */
  word |= val;

  /* Update BP.  */
  VEC_replace (bitpack_word_t, bp->values, ix, word);
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;
}
345
346
/* Unpack and return the next NBITS from bitpack BP.  NBITS must be
   between 1 and BITS_PER_BITPACK_WORD and must match the NBITS used
   by the corresponding bp_pack_value call on the writer side.  */

bitpack_word_t
bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
{
  bitpack_word_t val, word, mask;
  unsigned ix;

  /* We cannot decode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word contains the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* Compute the mask to get NBITS from WORD.  A full-width shift
     would be undefined, so the all-bits case is handled separately.  */
  mask = (nbits == BITS_PER_BITPACK_WORD)
	 ? (bitpack_word_t) -1
	 : ((bitpack_word_t) 1 << nbits) - 1;

  /* Shift WORD to the right to skip over the bits already decoded
     in word.  */
  word >>= bp->first_unused_bit;

  /* Apply the mask to obtain the requested value.  */
  val = word & mask;

  /* Update BP->NUM_BITS for the next unpack operation.  */
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;

  return val;
}
380
381
/* Check that all the TS_* structures handled by the lto_output_* and
   lto_input_* routines are exactly ALL the structures defined in
   treestruct.def.  Aborts (via gcc_assert) if a TS_* enumerator is
   not listed below.  */

static void
check_handled_ts_structures (void)
{
  bool handled_p[LAST_TS_ENUM];
  unsigned i;

  memset (&handled_p, 0, sizeof (handled_p));

  /* These are the TS_* structures that are either handled or
     explicitly ignored by the streamer routines.  */
  handled_p[TS_BASE] = true;
  handled_p[TS_COMMON] = true;
  handled_p[TS_INT_CST] = true;
  handled_p[TS_REAL_CST] = true;
  handled_p[TS_FIXED_CST] = true;
  handled_p[TS_VECTOR] = true;
  handled_p[TS_STRING] = true;
  handled_p[TS_COMPLEX] = true;
  handled_p[TS_IDENTIFIER] = true;
  handled_p[TS_DECL_MINIMAL] = true;
  handled_p[TS_DECL_COMMON] = true;
  handled_p[TS_DECL_WRTL] = true;
  handled_p[TS_DECL_NON_COMMON] = true;
  handled_p[TS_DECL_WITH_VIS] = true;
  handled_p[TS_FIELD_DECL] = true;
  handled_p[TS_VAR_DECL] = true;
  handled_p[TS_PARM_DECL] = true;
  handled_p[TS_LABEL_DECL] = true;
  handled_p[TS_RESULT_DECL] = true;
  handled_p[TS_CONST_DECL] = true;
  handled_p[TS_TYPE_DECL] = true;
  handled_p[TS_FUNCTION_DECL] = true;
  handled_p[TS_TYPE] = true;
  handled_p[TS_LIST] = true;
  handled_p[TS_VEC] = true;
  handled_p[TS_EXP] = true;
  handled_p[TS_SSA_NAME] = true;
  handled_p[TS_BLOCK] = true;
  handled_p[TS_BINFO] = true;
  handled_p[TS_STATEMENT_LIST] = true;
  handled_p[TS_CONSTRUCTOR] = true;
  handled_p[TS_OMP_CLAUSE] = true;
  handled_p[TS_OPTIMIZATION] = true;
  handled_p[TS_TARGET_OPTION] = true;

  /* Anything not marked above will trigger the following assertion.
     If this assertion triggers, it means that there is a new TS_*
     structure that should be handled by the streamer.  */
  for (i = 0; i < LAST_TS_ENUM; i++)
    gcc_assert (handled_p[i]);
}
437
438
/* Helper for lto_streamer_cache_insert_1.  Add T to CACHE->NODES at
   slot IX.  Add OFFSET to CACHE->OFFSETS at slot IX.  Grows both
   parallel arrays if IX is beyond their current length.  */

static void
lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
				      int ix, tree t, unsigned offset)
{
  gcc_assert (ix >= 0);

  /* Grow the array of nodes and offsets to accommodate T at IX.
     Over-allocate by roughly a quarter to amortize future growth.  */
  if (ix >= (int) VEC_length (tree, cache->nodes))
    {
      size_t sz = ix + (20 + ix) / 4;
      VEC_safe_grow_cleared (tree, gc, cache->nodes, sz);
      VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
    }

  VEC_replace (tree, cache->nodes, ix, t);
  VEC_replace (unsigned, cache->offsets, ix, offset);
}
459
460
/* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
   CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
   slot in the cache.  Otherwise, T is inserted at the position indicated
   in *IX_P (which must then be non-NULL).

   If T already existed in CACHE, return true.  Otherwise,
   return false.  */

static bool
lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
			     tree t, int *ix_p, unsigned *offset_p,
			     bool insert_at_next_slot_p)
{
  void **slot;
  struct tree_int_map d_entry, *entry;
  int ix;
  unsigned offset;
  bool existed_p;

  gcc_assert (t);

  /* Probe the tree -> slot-index map; INSERT reserves a slot if T is
     not yet present.  */
  d_entry.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
  if (*slot == NULL)
    {
      /* Determine the next slot to use in the cache.  */
      if (insert_at_next_slot_p)
	ix = cache->next_slot++;
      else
	ix = *ix_p;

      entry = XCNEW (struct tree_int_map);
      entry->base.from = t;
      entry->to = (unsigned) ix;
      *slot = entry;

      /* If no offset was given, store the invalid offset -1.  */
      offset = (offset_p) ? *offset_p : (unsigned) -1;

      lto_streamer_cache_add_to_node_array (cache, ix, t, offset);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      entry = (struct tree_int_map *) *slot;
      ix = (int) entry->to;
      offset = VEC_index (unsigned, cache->offsets, ix);

      if (!insert_at_next_slot_p && ix != *ix_p)
	{
	  /* If the caller wants to insert T at a specific slot
	     location, and ENTRY->TO does not match *IX_P, add T to
	     the requested location slot.  This situation arises when
	     streaming builtin functions.

	     For instance, on the writer side we could have two
	     FUNCTION_DECLS T1 and T2 that are represented by the same
	     builtin function.  The reader will only instantiate the
	     canonical builtin, but since T1 and T2 had been
	     originally stored in different cache slots (S1 and S2),
	     the reader must be able to find the canonical builtin
	     function at slots S1 and S2.  */
	  gcc_assert (lto_stream_as_builtin_p (t));
	  ix = *ix_p;

	  /* Since we are storing a builtin, the offset into the
	     stream is not necessary as we will not need to read
	     forward in the stream.  */
	  lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
	}

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  /* Report the slot index and stream offset back to the caller.  */
  if (ix_p)
    *ix_p = ix;

  if (offset_p)
    *offset_p = offset;

  return existed_p;
}
548
549
/* Insert tree node T in CACHE at the next available slot.  If T
   already existed in the cache return true.  Otherwise, return false.

   If IX_P is non-null, update it with the index into the cache where
   T has been stored.

   *OFFSET_P represents the offset in the stream where T is physically
   written out.  The first time T is added to the cache, *OFFSET_P is
   recorded in the cache together with T.  But if T already existed
   in the cache, *OFFSET_P is updated with the value that was recorded
   the first time T was added to the cache.

   If OFFSET_P is NULL, it is ignored.  */

bool
lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t,
			   int *ix_p, unsigned *offset_p)
{
  return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true);
}
570
571
/* Insert tree node T in CACHE at slot IX (used when the slot index is
   dictated by the stream, e.g. for builtins).  If T already existed
   in the cache return true.  Otherwise, return false.  */

bool
lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache,
			      tree t, int ix)
{
  return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false);
}
581
582
583/* Return true if tree node T exists in CACHE. If IX_P is
584 not NULL, write to *IX_P the index into the cache where T is stored
585 (-1 if T is not found). */
586
587bool
588lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t,
589 int *ix_p)
590{
591 void **slot;
592 struct tree_int_map d_slot;
593 bool retval;
594 int ix;
595
596 gcc_assert (t);
597
598 d_slot.base.from = t;
599 slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT);
600 if (slot == NULL)
601 {
602 retval = false;
603 ix = -1;
604 }
605 else
606 {
607 retval = true;
608 ix = (int) ((struct tree_int_map *) *slot)->to;
609 }
610
611 if (ix_p)
612 *ix_p = ix;
613
614 return retval;
615}
616
617
618/* Return the tree node at slot IX in CACHE. */
619
620tree
621lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix)
622{
623 gcc_assert (cache);
624
625 /* If the reader is requesting an index beyond the length of the
626 cache, it will need to read ahead. Return NULL_TREE to indicate
627 that. */
628 if ((unsigned) ix >= VEC_length (tree, cache->nodes))
629 return NULL_TREE;
630
631 return VEC_index (tree, cache->nodes, (unsigned) ix);
632}
633
634
/* Record *NODEP in COMMON_NODES if it is not NULL and is not already
   in SEEN_NODES.  Types are first canonicalized through
   gimple_register_type (which also updates *NODEP).  For shareable
   pointer, complex and array types, the element/pointed-to type is
   recorded recursively.  */

static void
lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
			struct pointer_set_t *seen_nodes)
{
  tree node = *nodep;

  if (node == NULL_TREE)
    return;

  /* Canonicalize types so that all users share one representative.  */
  if (TYPE_P (node))
    *nodep = node = gimple_register_type (node);

  /* Return if node is already seen.  */
  if (pointer_set_insert (seen_nodes, node))
    return;

  VEC_safe_push (tree, heap, *common_nodes, node);

  if (tree_node_can_be_shared (node))
    {
      if (POINTER_TYPE_P (node)
	  || TREE_CODE (node) == COMPLEX_TYPE
	  || TREE_CODE (node) == ARRAY_TYPE)
	lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
    }
}
664
665
/* Generate a vector of common nodes and make sure they are merged
   properly according to the gimple type table.  The caller owns the
   returned vector and must free it.  */

static VEC(tree,heap) *
lto_get_common_nodes (void)
{
  unsigned i;
  VEC(tree,heap) *common_nodes = NULL;
  struct pointer_set_t *seen_nodes;

  /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
     LTO back-end must agree.  Currently, the only languages that set this
     use the name "main".  */
  if (main_identifier_node)
    {
      const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
      gcc_assert (strcmp (main_name, "main") == 0);
    }
  else
    main_identifier_node = get_identifier ("main");

  gcc_assert (ptrdiff_type_node == integer_type_node);

  /* FIXME lto.  In the C++ front-end, fileptr_type_node is defined as a
     variant copy of ptr_type_node, rather than ptr_node itself.  The
     distinction should only be relevant to the front-end, so we always
     use the C definition here in lto1.

     These should be assured in pass_ipa_free_lang_data.  */
  gcc_assert (fileptr_type_node == ptr_type_node);
  gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);

  seen_nodes = pointer_set_create ();

  /* Skip itk_char.  char_type_node is shared with the appropriately
     signed variant.  */
  for (i = itk_signed_char; i < itk_none; i++)
    lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);

  for (i = 0; i < TYPE_KIND_LAST; i++)
    lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);

  for (i = 0; i < TI_MAX; i++)
    lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);

  pointer_set_destroy (seen_nodes);

  return common_nodes;
}
715
716
717/* Assign an index to tree node T and enter it in the streamer cache
718 CACHE. */
719
720static void
721preload_common_node (struct lto_streamer_cache_d *cache, tree t)
722{
723 gcc_assert (t);
724
725 lto_streamer_cache_insert (cache, t, NULL, NULL);
726
727 /* The FIELD_DECLs of structures should be shared, so that every
728 COMPONENT_REF uses the same tree node when referencing a field.
729 Pointer equality between FIELD_DECLs is used by the alias
730 machinery to compute overlapping memory references (See
731 nonoverlapping_component_refs_p). */
732 if (TREE_CODE (t) == RECORD_TYPE)
733 {
734 tree f;
735
736 for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f))
737 preload_common_node (cache, f);
738 }
739}
740
741
742/* Create a cache of pickled nodes. */
743
744struct lto_streamer_cache_d *
745lto_streamer_cache_create (void)
746{
747 struct lto_streamer_cache_d *cache;
748 VEC(tree, heap) *common_nodes;
749 unsigned i;
750 tree node;
751
752 cache = XCNEW (struct lto_streamer_cache_d);
753
754 cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL);
755
756 /* Load all the well-known tree nodes that are always created by
757 the compiler on startup. This prevents writing them out
758 unnecessarily. */
759 common_nodes = lto_get_common_nodes ();
760
761 for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++)
762 preload_common_node (cache, node);
763
764 VEC_free(tree, heap, common_nodes);
765
766 return cache;
767}
768
769
770/* Delete the streamer cache C. */
771
772void
773lto_streamer_cache_delete (struct lto_streamer_cache_d *c)
774{
775 if (c == NULL)
776 return;
777
778 htab_delete (c->node_map);
779 VEC_free (tree, gc, c->nodes);
780 VEC_free (unsigned, heap, c->offsets);
781 free (c);
782}
783
784
/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* handled by the reader and writer routines
     match exactly the structures defined in treestruct.def.  When a
     new TS_* structure is added, the streamer should be updated to
     handle it.  */
  check_handled_ts_structures ();
}
796
797
798/* Gate function for all LTO streaming passes. */
799
800bool
801gate_lto_out (void)
802{
803 return ((flag_generate_lto || in_lto_p)
804 /* Don't bother doing anything if the program has errors. */
805 && !(errorcount || sorrycount));
806}
807
808
#ifdef LTO_STREAMER_DEBUG
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  /* FIXME lto.  Using the annotation field is quite hacky as it relies
     on the GC not running while T is being rematerialized.  It would
     be cleaner to use a hash table here.  */
  t->base.ann = (union tree_ann_d *) orig_t;
}
828
829
/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed (see
   lto_orig_address_map).  */

intptr_t
lto_orig_address_get (tree t)
{
  return (intptr_t) t->base.ann;
}
838
839
/* Clear the mapping of T to its original address, ending the
   transient debugging association set by lto_orig_address_map.  */

void
lto_orig_address_remove (tree t)
{
  t->base.ann = NULL;
}
#endif
848
849
850/* Check that the version MAJOR.MINOR is the correct version number. */
851
852void
853lto_check_version (int major, int minor)
854{
855 if (major != LTO_major_version || minor != LTO_minor_version)
856 fatal_error ("bytecode stream generated with LTO version %d.%d instead "
857 "of the expected %d.%d",
858 major, minor,
859 LTO_major_version, LTO_minor_version);
860}