/* Miscellaneous utilities for GIMPLE streaming.  Things that are used
   in both input and output are here.

   Copyright 2009, 2010 Free Software Foundation, Inc.
   Contributed by Doug Kwan <dougkwan@google.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "flags.h"
#include "tree.h"
#include "gimple.h"
#include "tree-flow.h"
#include "diagnostic-core.h"
#include "bitmap.h"
#include "vec.h"
#include "lto-streamer.h"

/* Statistics gathered during LTO, WPA and LTRANS.  */
struct lto_stats_d lto_stats;

/* LTO uses bitmaps with different life-times, so use a separate
   obstack for all LTO bitmaps.  */
static bitmap_obstack lto_obstack;
static bool lto_obstack_initialized;


/* Return a string representing LTO tag TAG.  */

const char *
lto_tag_name (enum LTO_tags tag)
{
  if (lto_tag_is_tree_code_p (tag))
    {
      /* For tags representing tree nodes, return the name of the
         associated tree code.  */
      return tree_code_name[lto_tag_to_tree_code (tag)];
    }

  if (lto_tag_is_gimple_code_p (tag))
    {
      /* For tags representing gimple statements, return the name of
         the associated gimple code.  */
      return gimple_code_name[lto_tag_to_gimple_code (tag)];
    }

  switch (tag)
    {
    case LTO_null:
      return "LTO_null";
    case LTO_bb0:
      return "LTO_bb0";
    case LTO_bb1:
      return "LTO_bb1";
    case LTO_eh_region:
      return "LTO_eh_region";
    case LTO_function:
      return "LTO_function";
    case LTO_eh_table:
      return "LTO_eh_table";
    case LTO_ert_cleanup:
      return "LTO_ert_cleanup";
    case LTO_ert_try:
      return "LTO_ert_try";
    case LTO_ert_allowed_exceptions:
      return "LTO_ert_allowed_exceptions";
    case LTO_ert_must_not_throw:
      return "LTO_ert_must_not_throw";
    case LTO_tree_pickle_reference:
      return "LTO_tree_pickle_reference";
    case LTO_field_decl_ref:
      return "LTO_field_decl_ref";
    case LTO_function_decl_ref:
      return "LTO_function_decl_ref";
    case LTO_label_decl_ref:
      return "LTO_label_decl_ref";
    case LTO_namespace_decl_ref:
      return "LTO_namespace_decl_ref";
    case LTO_result_decl_ref:
      return "LTO_result_decl_ref";
    case LTO_ssa_name_ref:
      return "LTO_ssa_name_ref";
    case LTO_type_decl_ref:
      return "LTO_type_decl_ref";
    case LTO_type_ref:
      return "LTO_type_ref";
    case LTO_global_decl_ref:
      return "LTO_global_decl_ref";
    default:
      return "LTO_UNKNOWN";
    }
}

/* Allocate a bitmap from the LTO obstack.  Initializes the obstack if
   necessary.  */

bitmap
lto_bitmap_alloc (void)
{
  if (!lto_obstack_initialized)
    {
      bitmap_obstack_initialize (&lto_obstack);
      lto_obstack_initialized = true;
    }
  return BITMAP_ALLOC (&lto_obstack);
}

/* Free bitmap B.  */

void
lto_bitmap_free (bitmap b)
{
  BITMAP_FREE (b);
}

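/* Illustrative sketch (not part of this file's interface): callers pair
   the two helpers above, and the obstack is set up lazily on the first
   allocation.  For example:

     bitmap b = lto_bitmap_alloc ();
     bitmap_set_bit (b, 42);
     gcc_assert (bitmap_bit_p (b, 42));
     lto_bitmap_free (b);
*/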

/* Get a section name for a particular type or name.  The NAME field
   is only used if SECTION_TYPE is LTO_section_function_body or
   LTO_section_static_initializer.  For all others it is ignored.  The
   caller of this function is responsible for freeing the returned
   name.  */

char *
lto_get_section_name (int section_type, const char *name)
{
  switch (section_type)
    {
    case LTO_section_function_body:
      gcc_assert (name != NULL);
      if (name[0] == '*')
        name++;
      return concat (LTO_SECTION_NAME_PREFIX, name, NULL);

    case LTO_section_static_initializer:
      return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);

    case LTO_section_symtab:
      return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);

    case LTO_section_decls:
      return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);

    case LTO_section_cgraph:
      return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);

    case LTO_section_varpool:
      return concat (LTO_SECTION_NAME_PREFIX, ".vars", NULL);

    case LTO_section_refs:
      return concat (LTO_SECTION_NAME_PREFIX, ".refs", NULL);

    case LTO_section_jump_functions:
      return concat (LTO_SECTION_NAME_PREFIX, ".jmpfuncs", NULL);

    case LTO_section_ipa_pure_const:
      return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);

    case LTO_section_ipa_reference:
      return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);

    case LTO_section_opts:
      return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);

    case LTO_section_cgraph_opt_sum:
      return concat (LTO_SECTION_NAME_PREFIX, ".cgraphopt", NULL);

    default:
      internal_error ("bytecode stream: unexpected LTO section %s", name);
    }
}
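
/* Illustrative examples (assuming LTO_SECTION_NAME_PREFIX expands to
   ".gnu.lto_"):

     lto_get_section_name (LTO_section_function_body, "foo")
       => ".gnu.lto_foo"
     lto_get_section_name (LTO_section_symtab, NULL)
       => ".gnu.lto_.symtab"

   The string comes from concat, so the caller releases it with free.  */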


/* Show various memory usage statistics related to LTO.  */

void
print_lto_report (void)
{
  const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
  unsigned i;

  fprintf (stderr, "%s statistics\n", s);
  fprintf (stderr, "[%s] # of input files: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);

  fprintf (stderr, "[%s] # of input cgraph nodes: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
           lto_stats.num_input_cgraph_nodes);

  fprintf (stderr, "[%s] # of function bodies: "
           HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
           lto_stats.num_function_bodies);

  fprintf (stderr, "[%s] ", s);
  print_gimple_types_stats ();

  for (i = 0; i < NUM_TREE_CODES; i++)
    if (lto_stats.num_trees[i])
      fprintf (stderr, "[%s] # of '%s' objects read: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               tree_code_name[i], lto_stats.num_trees[i]);

  if (flag_lto)
    {
      fprintf (stderr, "[%s] Compression: "
               HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
               HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
               lto_stats.num_output_il_bytes,
               lto_stats.num_compressed_il_bytes);
      if (lto_stats.num_output_il_bytes > 0)
        {
          const float dividend = (float) lto_stats.num_compressed_il_bytes;
          const float divisor = (float) lto_stats.num_output_il_bytes;
          fprintf (stderr, " (ratio: %f)", dividend / divisor);
        }
      fprintf (stderr, "\n");
    }

  if (flag_wpa)
    {
      fprintf (stderr, "[%s] # of output files: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_output_files);

      fprintf (stderr, "[%s] # of output cgraph nodes: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_output_cgraph_nodes);

      fprintf (stderr, "[%s] # callgraph partitions: "
               HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
               lto_stats.num_cgraph_partitions);

      fprintf (stderr, "[%s] Compression: "
               HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
               HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
               lto_stats.num_input_il_bytes,
               lto_stats.num_uncompressed_il_bytes);
      if (lto_stats.num_input_il_bytes > 0)
        {
          const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
          const float divisor = (float) lto_stats.num_input_il_bytes;
          fprintf (stderr, " (ratio: %f)", dividend / divisor);
        }
      fprintf (stderr, "\n");
    }

  for (i = 0; i < LTO_N_SECTION_TYPES; i++)
    fprintf (stderr, "[%s] Size of mmap'd section %s: "
             HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
             lto_section_name[i], lto_stats.section_size[i]);
}

/* We cache a single bitpack assuming that usually at most one is
   live.  This saves repeated re-allocations.  */
static struct bitpack_d *cached_bp;

/* Create a new bitpack.  */

struct bitpack_d *
bitpack_create (void)
{
  if (cached_bp)
    {
      struct bitpack_d *bp = cached_bp;
      cached_bp = NULL;
      return bp;
    }
  return XCNEW (struct bitpack_d);
}


/* Free the memory used by bitpack BP.  */

void
bitpack_delete (struct bitpack_d *bp)
{
  if (!cached_bp)
    {
      bp->num_bits = 0;
      bp->first_unused_bit = 0;
      VEC_truncate (bitpack_word_t, bp->values, 0);
      cached_bp = bp;
      return;
    }
  VEC_free (bitpack_word_t, heap, bp->values);
  free (bp);
}
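
/* Sketch of the caching behavior above (illustrative only): deleting a
   bitpack parks it in CACHED_BP instead of freeing it, and the next
   create hands it back, reset:

     struct bitpack_d *a = bitpack_create ();    fresh allocation
     bitpack_delete (a);                         a becomes cached_bp
     struct bitpack_d *b = bitpack_create ();    b == a, reset and reused
*/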


/* Return an index to the word in bitpack BP that contains the
   next NBITS.  */

static inline unsigned
bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
{
  unsigned last, ix;

  /* In principle, the next word to use is determined by the
     number of bits already processed in BP.  */
  ix = bp->num_bits / BITS_PER_BITPACK_WORD;

  /* All the encoded bit patterns in BP are contiguous, so if the
     next NBITS would straddle two different words, move the index
     to the next word and account for the hole of unused bits this
     move creates by adding it to the number of encoded bits.  */
  bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
  last = bp->first_unused_bit + nbits - 1;
  if (last >= BITS_PER_BITPACK_WORD)
    {
      ix++;
      bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
      bp->first_unused_bit = 0;
    }

  return ix;
}
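
/* Worked example (illustrative): with 64-bit bitpack words, suppose
   NUM_BITS == 60 and FIRST_UNUSED_BIT == 60 and the caller asks for 8
   bits.  Then LAST == 67 >= 64, so the remaining 4 bits of word 0 are
   skipped: IX becomes 1, NUM_BITS grows by the 4-bit hole to 64, and
   FIRST_UNUSED_BIT resets to 0 so packing starts at bit 0 of word 1.  */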

/* Pack NBITS of value VAL into bitpack BP.  */

void
bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
{
  unsigned ix;
  bitpack_word_t word;

  /* We cannot encode more bits than BITS_PER_BITPACK_WORD.  */
#ifdef ENABLE_CHECKING
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);
#endif

  /* Compute which word will contain the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  if (ix >= VEC_length (bitpack_word_t, bp->values))
    {
      /* If there is no room left in the last word of the values
         array, add a new word.  We only ever need to add a single
         word, since no pack operation can use more bits than fit
         in a single word.  */
      VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
    }

  /* Grab the last word to pack VAL into.  */
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* To fit VAL in WORD, we need to shift VAL to the left to
     skip the bottom BP->FIRST_UNUSED_BIT bits.  */
  val <<= bp->first_unused_bit;

  /* Update WORD with VAL.  */
  word |= val;

  /* Update BP.  */
  VEC_replace (bitpack_word_t, bp->values, ix, word);
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;
}
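
/* Bit-level example (illustrative): packing VAL == 5 (binary 101) with
   NBITS == 3 when FIRST_UNUSED_BIT == 2 shifts VAL to binary 10100 and
   ORs it into the word, leaving bits 0-1 untouched; NUM_BITS and
   FIRST_UNUSED_BIT then both advance by 3.  */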


/* Unpack the next NBITS from bitpack BP.  */

bitpack_word_t
bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
{
  bitpack_word_t val, word, mask;
  unsigned ix;

  /* We cannot decode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_checking_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word contains the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* Compute the mask to get NBITS from WORD.  */
  mask = (nbits == BITS_PER_BITPACK_WORD)
         ? (bitpack_word_t) -1
         : ((bitpack_word_t) 1 << nbits) - 1;

  /* Shift WORD to the right to skip over the bits already decoded
     in word.  */
  word >>= bp->first_unused_bit;

  /* Apply the mask to obtain the requested value.  */
  val = word & mask;

  /* Update BP->NUM_BITS for the next unpack operation.  */
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;

  return val;
}
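
/* Round-trip sketch (illustrative): values must be unpacked in the same
   order and with the same widths used to pack them.  Writer side:

     struct bitpack_d *bp = bitpack_create ();
     bp_pack_value (bp, 1, 1);      a one-bit flag
     bp_pack_value (bp, 200, 8);    an eight-bit count
     ... stream out bp->values ...
     bitpack_delete (bp);

   Reader side, on a bitpack rebuilt from the stream:

     flag  = bp_unpack_value (bp, 1);    => 1
     count = bp_unpack_value (bp, 8);    => 200
*/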


/* Check that all the TS_* structures handled by the lto_output_* and
   lto_input_* routines are exactly ALL the structures defined in
   treestruct.def.  */

static void
check_handled_ts_structures (void)
{
  bool handled_p[LAST_TS_ENUM];
  unsigned i;

  memset (&handled_p, 0, sizeof (handled_p));

  /* These are the TS_* structures that are either handled or
     explicitly ignored by the streamer routines.  */
  handled_p[TS_BASE] = true;
  handled_p[TS_COMMON] = true;
  handled_p[TS_INT_CST] = true;
  handled_p[TS_REAL_CST] = true;
  handled_p[TS_FIXED_CST] = true;
  handled_p[TS_VECTOR] = true;
  handled_p[TS_STRING] = true;
  handled_p[TS_COMPLEX] = true;
  handled_p[TS_IDENTIFIER] = true;
  handled_p[TS_DECL_MINIMAL] = true;
  handled_p[TS_DECL_COMMON] = true;
  handled_p[TS_DECL_WRTL] = true;
  handled_p[TS_DECL_NON_COMMON] = true;
  handled_p[TS_DECL_WITH_VIS] = true;
  handled_p[TS_FIELD_DECL] = true;
  handled_p[TS_VAR_DECL] = true;
  handled_p[TS_PARM_DECL] = true;
  handled_p[TS_LABEL_DECL] = true;
  handled_p[TS_RESULT_DECL] = true;
  handled_p[TS_CONST_DECL] = true;
  handled_p[TS_TYPE_DECL] = true;
  handled_p[TS_FUNCTION_DECL] = true;
  handled_p[TS_TYPE] = true;
  handled_p[TS_LIST] = true;
  handled_p[TS_VEC] = true;
  handled_p[TS_EXP] = true;
  handled_p[TS_SSA_NAME] = true;
  handled_p[TS_BLOCK] = true;
  handled_p[TS_BINFO] = true;
  handled_p[TS_STATEMENT_LIST] = true;
  handled_p[TS_CONSTRUCTOR] = true;
  handled_p[TS_OMP_CLAUSE] = true;
  handled_p[TS_OPTIMIZATION] = true;
  handled_p[TS_TARGET_OPTION] = true;

  /* Anything not marked above will trigger the following assertion.
     If this assertion triggers, it means that there is a new TS_*
     structure that should be handled by the streamer.  */
  for (i = 0; i < LAST_TS_ENUM; i++)
    gcc_assert (handled_p[i]);
}


/* Helper for lto_streamer_cache_insert_1.  Add T to CACHE->NODES at
   slot IX.  Add OFFSET to CACHE->OFFSETS at slot IX.  */

static void
lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
                                      int ix, tree t, unsigned offset)
{
  gcc_assert (ix >= 0);

  /* Grow the array of nodes and offsets to accommodate T at IX.  */
  if (ix >= (int) VEC_length (tree, cache->nodes))
    {
      size_t sz = ix + (20 + ix) / 4;
      VEC_safe_grow_cleared (tree, heap, cache->nodes, sz);
      VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
    }

  VEC_replace (tree, cache->nodes, ix, t);
  VEC_replace (unsigned, cache->offsets, ix, offset);
}


/* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
   CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
   slot in the cache.  Otherwise, T is inserted at the position indicated
   in *IX_P.

   If T already existed in CACHE, return true.  Otherwise,
   return false.  */

static bool
lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
                             tree t, int *ix_p, unsigned *offset_p,
                             bool insert_at_next_slot_p)
{
  void **slot;
  struct tree_int_map d_entry, *entry;
  int ix;
  unsigned offset;
  bool existed_p;

  gcc_assert (t);

  d_entry.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
  if (*slot == NULL)
    {
      /* Determine the next slot to use in the cache.  */
      if (insert_at_next_slot_p)
        ix = cache->next_slot++;
      else
        ix = *ix_p;

      entry = (struct tree_int_map *)pool_alloc (cache->node_map_entries);
      entry->base.from = t;
      entry->to = (unsigned) ix;
      *slot = entry;

      /* If no offset was given, store the invalid offset -1.  */
      offset = (offset_p) ? *offset_p : (unsigned) -1;

      lto_streamer_cache_add_to_node_array (cache, ix, t, offset);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      entry = (struct tree_int_map *) *slot;
      ix = (int) entry->to;
      offset = VEC_index (unsigned, cache->offsets, ix);

      if (!insert_at_next_slot_p && ix != *ix_p)
        {
          /* If the caller wants to insert T at a specific slot
             location, and ENTRY->TO does not match *IX_P, add T to
             the requested location slot.  This situation arises when
             streaming builtin functions.

             For instance, on the writer side we could have two
             FUNCTION_DECLS T1 and T2 that are represented by the same
             builtin function.  The reader will only instantiate the
             canonical builtin, but since T1 and T2 had been
             originally stored in different cache slots (S1 and S2),
             the reader must be able to find the canonical builtin
             function at slots S1 and S2.  */
          gcc_assert (lto_stream_as_builtin_p (t));
          ix = *ix_p;

          /* Since we are storing a builtin, the offset into the
             stream is not necessary as we will not need to read
             forward in the stream.  */
          lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
        }

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  if (ix_p)
    *ix_p = ix;

  if (offset_p)
    *offset_p = offset;

  return existed_p;
}


/* Insert tree node T in CACHE.  If T already existed in the cache
   return true.  Otherwise, return false.

   If IX_P is non-null, update it with the index into the cache where
   T has been stored.

   *OFFSET_P represents the offset in the stream where T is physically
   written out.  The first time T is added to the cache, *OFFSET_P is
   recorded in the cache together with T.  But if T already existed
   in the cache, *OFFSET_P is updated with the value that was recorded
   the first time T was added to the cache.

   If OFFSET_P is NULL, it is ignored.  */

bool
lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t,
                           int *ix_p, unsigned *offset_p)
{
  return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true);
}


/* Insert tree node T in CACHE at slot IX.  If T already
   existed in the cache return true.  Otherwise, return false.  */

bool
lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache,
                              tree t, int ix)
{
  return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false);
}


/* Return true if tree node T exists in CACHE.  If IX_P is
   not NULL, write to *IX_P the index into the cache where T is stored
   (-1 if T is not found).  */

bool
lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t,
                           int *ix_p)
{
  void **slot;
  struct tree_int_map d_slot;
  bool retval;
  int ix;

  gcc_assert (t);

  d_slot.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT);
  if (slot == NULL)
    {
      retval = false;
      ix = -1;
    }
  else
    {
      retval = true;
      ix = (int) ((struct tree_int_map *) *slot)->to;
    }

  if (ix_p)
    *ix_p = ix;

  return retval;
}


/* Return the tree node at slot IX in CACHE.  */

tree
lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix)
{
  gcc_assert (cache);

  /* If the reader is requesting an index beyond the length of the
     cache, it will need to read ahead.  Return NULL_TREE to indicate
     that.  */
  if ((unsigned) ix >= VEC_length (tree, cache->nodes))
    return NULL_TREE;

  return VEC_index (tree, cache->nodes, (unsigned) ix);
}
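
/* Usage sketch for the cache API above (illustrative): the first insert
   of a node reports that it was absent and hands back its slot; a later
   lookup or get round-trips to the same slot and node.

     int ix, ix2;
     bool existed = lto_streamer_cache_insert (cache, t, &ix, NULL);
       existed == false on first insertion
     lto_streamer_cache_lookup (cache, t, &ix2);
       returns true, with ix2 == ix
     gcc_assert (lto_streamer_cache_get (cache, ix) == t);
*/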


/* Record NODE in COMMON_NODES if it is not NULL and is not already in
   SEEN_NODES.  */

static void
lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
                        struct pointer_set_t *seen_nodes)
{
  tree node = *nodep;

  if (node == NULL_TREE)
    return;

  if (TYPE_P (node))
    {
      /* Type merging will get confused by the canonical types as they
         are set by the middle-end.  */
      TYPE_CANONICAL (node) = NULL_TREE;
      *nodep = node = gimple_register_type (node);
    }

  /* Return if node is already seen.  */
  if (pointer_set_insert (seen_nodes, node))
    return;

  VEC_safe_push (tree, heap, *common_nodes, node);

  if (tree_node_can_be_shared (node))
    {
      if (POINTER_TYPE_P (node)
          || TREE_CODE (node) == COMPLEX_TYPE
          || TREE_CODE (node) == ARRAY_TYPE)
        lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
    }
}


/* Generate a vector of common nodes and make sure they are merged
   properly according to the gimple type table.  */

static VEC(tree,heap) *
lto_get_common_nodes (void)
{
  unsigned i;
  VEC(tree,heap) *common_nodes = NULL;
  struct pointer_set_t *seen_nodes;

  /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
     LTO back-end must agree.  Currently, the only languages that set this
     use the name "main".  */
  if (main_identifier_node)
    {
      const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
      gcc_assert (strcmp (main_name, "main") == 0);
    }
  else
    main_identifier_node = get_identifier ("main");

  gcc_assert (ptrdiff_type_node == integer_type_node);

  /* FIXME lto.  In the C++ front-end, fileptr_type_node is defined as a
     variant copy of ptr_type_node, rather than ptr_type_node itself.  The
     distinction should only be relevant to the front-end, so we always
     use the C definition here in lto1.

     These should be assured in pass_ipa_free_lang_data.  */
  gcc_assert (fileptr_type_node == ptr_type_node);
  gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);

  seen_nodes = pointer_set_create ();

  /* Skip itk_char.  char_type_node is shared with the appropriately
     signed variant.  */
  for (i = itk_signed_char; i < itk_none; i++)
    lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);

  for (i = 0; i < TYPE_KIND_LAST; i++)
    lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);

  for (i = 0; i < TI_MAX; i++)
    lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);

  pointer_set_destroy (seen_nodes);

  return common_nodes;
}


/* Assign an index to tree node T and enter it in the streamer cache
   CACHE.  */

static void
preload_common_node (struct lto_streamer_cache_d *cache, tree t)
{
  gcc_assert (t);

  lto_streamer_cache_insert (cache, t, NULL, NULL);

  /* The FIELD_DECLs of structures should be shared, so that every
     COMPONENT_REF uses the same tree node when referencing a field.
     Pointer equality between FIELD_DECLs is used by the alias
     machinery to compute overlapping memory references (See
     nonoverlapping_component_refs_p).  */
  if (TREE_CODE (t) == RECORD_TYPE)
    {
      tree f;

      for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f))
        preload_common_node (cache, f);
    }
}


/* Create a cache of pickled nodes.  */

struct lto_streamer_cache_d *
lto_streamer_cache_create (void)
{
  struct lto_streamer_cache_d *cache;
  VEC(tree, heap) *common_nodes;
  unsigned i;
  tree node;

  cache = XCNEW (struct lto_streamer_cache_d);

  cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL);

  cache->node_map_entries = create_alloc_pool ("node map",
                                               sizeof (struct tree_int_map),
                                               100);

  /* Load all the well-known tree nodes that are always created by
     the compiler on startup.  This prevents writing them out
     unnecessarily.  */
  common_nodes = lto_get_common_nodes ();

  for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++)
    preload_common_node (cache, node);

  VEC_free (tree, heap, common_nodes);

  return cache;
}


/* Delete the streamer cache C.  */

void
lto_streamer_cache_delete (struct lto_streamer_cache_d *c)
{
  if (c == NULL)
    return;

  htab_delete (c->node_map);
  free_alloc_pool (c->node_map_entries);
  VEC_free (tree, heap, c->nodes);
  VEC_free (unsigned, heap, c->offsets);
  free (c);
}
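
/* Lifecycle sketch (illustrative): a cache is created once per stream,
   comes preloaded with the common nodes, and is deleted when the stream
   has been fully processed:

     struct lto_streamer_cache_d *cache = lto_streamer_cache_create ();
     ... insert and look up trees in CACHE while streaming ...
     lto_streamer_cache_delete (cache);
*/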


#ifdef LTO_STREAMER_DEBUG
static htab_t tree_htab;

struct tree_hash_entry
{
  tree key;
  intptr_t value;
};

static hashval_t
hash_tree (const void *p)
{
  const struct tree_hash_entry *e = (const struct tree_hash_entry *) p;
  return htab_hash_pointer (e->key);
}

static int
eq_tree (const void *p1, const void *p2)
{
  const struct tree_hash_entry *e1 = (const struct tree_hash_entry *) p1;
  const struct tree_hash_entry *e2 = (const struct tree_hash_entry *) p2;
  return (e1->key == e2->key);
}
#endif

/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* structures handled by the reader and writer
     routines match exactly the structures defined in treestruct.def.
     When a new TS_* structure is added, the streamer should be updated
     to handle it.  */
  check_handled_ts_structures ();

#ifdef LTO_STREAMER_DEBUG
  tree_htab = htab_create (31, hash_tree, eq_tree, NULL);
#endif
}


/* Gate function for all LTO streaming passes.  */

bool
gate_lto_out (void)
{
  return ((flag_generate_lto || in_lto_p)
          /* Don't bother doing anything if the program has errors.  */
          && !seen_error ());
}


#ifdef LTO_STREAMER_DEBUG
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  ent.value = orig_t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, INSERT);
  gcc_assert (!*slot);
  *slot = XNEW (struct tree_hash_entry);
  **slot = ent;
}


/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed.  */

intptr_t
lto_orig_address_get (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  return (slot ? (*slot)->value : 0);
}


/* Clear the mapping of T to its original address.  */

void
lto_orig_address_remove (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot
    = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent, NO_INSERT);
  gcc_assert (slot);
  free (*slot);
  htab_clear_slot (tree_htab, (PTR *)slot);
}
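
/* Lifetime sketch of the mapping above (illustrative, with a made-up
   address value): the reader records the writer-side address when it
   starts materializing T, can query it from a debugger while T is being
   filled in, and drops it once T is complete.

     lto_orig_address_map (t, (intptr_t) 0xdeadbeef);   begin rebuild
     ... in gdb: p/x lto_orig_address_get (t)  => 0xdeadbeef ...
     lto_orig_address_remove (t);                       t fully built
*/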
#endif


/* Check that the version MAJOR.MINOR is the correct version number.  */

void
lto_check_version (int major, int minor)
{
  if (major != LTO_major_version || minor != LTO_minor_version)
    fatal_error ("bytecode stream generated with LTO version %d.%d instead "
                 "of the expected %d.%d",
                 major, minor,
                 LTO_major_version, LTO_minor_version);
}