1 /* Miscellaneous utilities for GIMPLE streaming. Things that are used
2 in both input and output are here.
4 Copyright 2009, 2010 Free Software Foundation, Inc.
5 Contributed by Doug Kwan <dougkwan@google.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
31 #include "tree-flow.h"
32 #include "diagnostic-core.h"
35 #include "lto-streamer.h"
37 /* Statistics gathered during LTO, WPA and LTRANS. */
38 struct lto_stats_d lto_stats
;
40 /* LTO uses bitmaps with different life-times. So use a separate
41 obstack for all LTO bitmaps. */
42 static bitmap_obstack lto_obstack
;
/* Guards one-time initialization of LTO_OBSTACK in lto_bitmap_alloc.  */
43 static bool lto_obstack_initialized
;
46 /* Return a string representing LTO tag TAG. */
/* Map an LTO_tags value to a human-readable string, for diagnostics.
   NOTE(review): this file's extraction dropped lines (return type,
   braces, some switch cases); tokens below are preserved verbatim.  */
49 lto_tag_name (enum LTO_tags tag
)
51 if (lto_tag_is_tree_code_p (tag
))
53 /* For tags representing tree nodes, return the name of the
54 associated tree code. */
55 return tree_code_name
[lto_tag_to_tree_code (tag
)];
58 if (lto_tag_is_gimple_code_p (tag
))
60 /* For tags representing gimple statements, return the name of
61 the associated gimple code. */
62 return gimple_code_name
[lto_tag_to_gimple_code (tag
)];
/* Remaining tags are spelled out one by one.  */
74 return "LTO_eh_region";
76 return "LTO_function";
78 return "LTO_eh_table";
80 return "LTO_ert_cleanup";
83 case LTO_ert_allowed_exceptions
:
84 return "LTO_ert_allowed_exceptions";
85 case LTO_ert_must_not_throw
:
86 return "LTO_ert_must_not_throw";
87 case LTO_tree_pickle_reference
:
88 return "LTO_tree_pickle_reference";
89 case LTO_field_decl_ref
:
90 return "LTO_field_decl_ref";
91 case LTO_function_decl_ref
:
92 return "LTO_function_decl_ref";
93 case LTO_label_decl_ref
:
94 return "LTO_label_decl_ref";
95 case LTO_namespace_decl_ref
:
96 return "LTO_namespace_decl_ref";
97 case LTO_result_decl_ref
:
98 return "LTO_result_decl_ref";
99 case LTO_ssa_name_ref
:
100 return "LTO_ssa_name_ref";
101 case LTO_type_decl_ref
:
102 return "LTO_type_decl_ref";
104 return "LTO_type_ref";
105 case LTO_global_decl_ref
:
106 return "LTO_global_decl_ref";
/* Fallback for any tag not recognized above.  */
108 return "LTO_UNKNOWN";
113 /* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */
116 lto_bitmap_alloc (void)
118 if (!lto_obstack_initialized
)
120 bitmap_obstack_initialize (<o_obstack
);
121 lto_obstack_initialized
= true;
123 return BITMAP_ALLOC (<o_obstack
);
/* Free bitmap B allocated by lto_bitmap_alloc.
   NOTE(review): the body of this function was lost in extraction;
   only the signature fragment remains below.  */
129 lto_bitmap_free (bitmap b
)
135 /* Get a section name for a particular type or name. The NAME field
136 is only used if SECTION_TYPE is LTO_section_function_body or
137 LTO_static_initializer. For all others it is ignored. The callee
138 of this function is responsible to free the returned name. */
141 lto_get_section_name (int section_type
, const char *name
)
143 switch (section_type
)
145 case LTO_section_function_body
:
146 gcc_assert (name
!= NULL
);
/* Function bodies are named after the function itself.  */
149 return concat (LTO_SECTION_NAME_PREFIX
, name
, NULL
);
151 case LTO_section_static_initializer
:
152 return concat (LTO_SECTION_NAME_PREFIX
, ".statics", NULL
);
154 case LTO_section_symtab
:
155 return concat (LTO_SECTION_NAME_PREFIX
, ".symtab", NULL
);
157 case LTO_section_decls
:
158 return concat (LTO_SECTION_NAME_PREFIX
, ".decls", NULL
);
160 case LTO_section_cgraph
:
161 return concat (LTO_SECTION_NAME_PREFIX
, ".cgraph", NULL
);
163 case LTO_section_varpool
:
164 return concat (LTO_SECTION_NAME_PREFIX
, ".vars", NULL
);
166 case LTO_section_refs
:
167 return concat (LTO_SECTION_NAME_PREFIX
, ".refs", NULL
);
169 case LTO_section_jump_functions
:
170 return concat (LTO_SECTION_NAME_PREFIX
, ".jmpfuncs", NULL
);
172 case LTO_section_ipa_pure_const
:
173 return concat (LTO_SECTION_NAME_PREFIX
, ".pureconst", NULL
);
175 case LTO_section_ipa_reference
:
176 return concat (LTO_SECTION_NAME_PREFIX
, ".reference", NULL
);
178 case LTO_section_opts
:
179 return concat (LTO_SECTION_NAME_PREFIX
, ".opts", NULL
);
181 case LTO_section_cgraph_opt_sum
:
182 return concat (LTO_SECTION_NAME_PREFIX
, ".cgraphopt", NULL
);
/* Any other section type is a bug in the caller.  */
185 internal_error ("bytecode stream: unexpected LTO section %s", name
);
190 /* Show various memory usage statistics related to LTO. */
/* Dump LTO/WPA/LTRANS statistics gathered in LTO_STATS to stderr.
   The prefix string S identifies which phase produced the numbers.  */
193 print_lto_report (void)
195 const char *s
= (flag_lto
) ? "LTO" : (flag_wpa
) ? "WPA" : "LTRANS";
198 fprintf (stderr
, "%s statistics\n", s
);
199 fprintf (stderr
, "[%s] # of input files: "
200 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
, lto_stats
.num_input_files
);
202 fprintf (stderr
, "[%s] # of input cgraph nodes: "
203 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
204 lto_stats
.num_input_cgraph_nodes
);
206 fprintf (stderr
, "[%s] # of function bodies: "
207 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
208 lto_stats
.num_function_bodies
);
210 fprintf (stderr
, "[%s] ", s
);
211 print_gimple_types_stats ();
/* Per-tree-code read counts; only non-zero entries are printed.  */
213 for (i
= 0; i
< NUM_TREE_CODES
; i
++)
214 if (lto_stats
.num_trees
[i
])
215 fprintf (stderr
, "[%s] # of '%s' objects read: "
216 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
217 tree_code_name
[i
], lto_stats
.num_trees
[i
]);
221 fprintf (stderr
, "[%s] Compression: "
222 HOST_WIDE_INT_PRINT_UNSIGNED
" output bytes, "
223 HOST_WIDE_INT_PRINT_UNSIGNED
" compressed bytes", s
,
224 lto_stats
.num_output_il_bytes
,
225 lto_stats
.num_compressed_il_bytes
);
/* Guard against division by zero when no IL bytes were output.  */
226 if (lto_stats
.num_output_il_bytes
> 0)
228 const float dividend
= (float) lto_stats
.num_compressed_il_bytes
;
229 const float divisor
= (float) lto_stats
.num_output_il_bytes
;
230 fprintf (stderr
, " (ratio: %f)", dividend
/ divisor
);
232 fprintf (stderr
, "\n");
237 fprintf (stderr
, "[%s] # of output files: "
238 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
239 lto_stats
.num_output_files
);
241 fprintf (stderr
, "[%s] # of output cgraph nodes: "
242 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
243 lto_stats
.num_output_cgraph_nodes
);
245 fprintf (stderr
, "[%s] # callgraph partitions: "
246 HOST_WIDE_INT_PRINT_UNSIGNED
"\n", s
,
247 lto_stats
.num_cgraph_partitions
);
249 fprintf (stderr
, "[%s] Compression: "
250 HOST_WIDE_INT_PRINT_UNSIGNED
" input bytes, "
251 HOST_WIDE_INT_PRINT_UNSIGNED
" uncompressed bytes", s
,
252 lto_stats
.num_input_il_bytes
,
253 lto_stats
.num_uncompressed_il_bytes
);
/* Same ratio computation for the input/decompression direction.  */
254 if (lto_stats
.num_input_il_bytes
> 0)
256 const float dividend
= (float) lto_stats
.num_uncompressed_il_bytes
;
257 const float divisor
= (float) lto_stats
.num_input_il_bytes
;
258 fprintf (stderr
, " (ratio: %f)", dividend
/ divisor
);
260 fprintf (stderr
, "\n");
263 for (i
= 0; i
< LTO_N_SECTION_TYPES
; i
++)
264 fprintf (stderr
, "[%s] Size of mmap'd section %s: "
265 HOST_WIDE_INT_PRINT_UNSIGNED
" bytes\n", s
,
266 lto_section_name
[i
], lto_stats
.section_size
[i
]);
269 /* We cache a single bitpack assuming that usually at most one is
270 live. This saves repeated re-allocations. */
271 static struct bitpack_d
*cached_bp
;
273 /* Create a new bitpack. */
276 bitpack_create (void)
/* Reuse the cached bitpack when available; otherwise allocate fresh.
   NOTE(review): the branch consuming CACHED_BP was dropped in
   extraction; only the declaration and the fallback remain.  */
280 struct bitpack_d
*bp
= cached_bp
;
284 return XCNEW (struct bitpack_d
);
288 /* Free the memory used by bitpack BP. */
291 bitpack_delete (struct bitpack_d
*bp
)
/* Reset BP so it can be recycled via CACHED_BP; otherwise release
   its values vector outright.  */
296 bp
->first_unused_bit
= 0;
297 VEC_truncate (bitpack_word_t
, bp
->values
, 0);
301 VEC_free (bitpack_word_t
, heap
, bp
->values
);
306 /* Return an index to the word in bitpack BP that contains the
/* next NBITS to be packed or unpacked.  Advances past the current
   word when NBITS would straddle a word boundary.  */
309 static inline unsigned
310 bp_get_next_word (struct bitpack_d
*bp
, unsigned nbits
)
314 /* In principle, the next word to use is determined by the
315 number of bits already processed in BP. */
316 ix
= bp
->num_bits
/ BITS_PER_BITPACK_WORD
;
318 /* All the encoded bit patterns in BP are contiguous, therefore if
319 the next NBITS would straddle over two different words, move the
320 index to the next word and update the number of encoded bits
321 by adding up the hole of unused bits created by this move. */
322 bp
->first_unused_bit
%= BITS_PER_BITPACK_WORD
;
323 last
= bp
->first_unused_bit
+ nbits
- 1;
324 if (last
>= BITS_PER_BITPACK_WORD
)
327 bp
->num_bits
+= (BITS_PER_BITPACK_WORD
- bp
->first_unused_bit
);
328 bp
->first_unused_bit
= 0;
335 /* Pack NBITS of value VAL into bitpack BP. */
338 bp_pack_value (struct bitpack_d
*bp
, bitpack_word_t val
, unsigned nbits
)
343 /* We cannot encode more bits than BITS_PER_BITPACK_WORD. */
344 #ifdef ENABLE_CHECKING
345 gcc_assert (nbits
> 0 && nbits
<= BITS_PER_BITPACK_WORD
);
348 /* Compute which word will contain the next NBITS. */
349 ix
= bp_get_next_word (bp
, nbits
);
350 if (ix
>= VEC_length (bitpack_word_t
, bp
->values
))
352 /* If there is no room left in the last word of the values
353 array, add a new word. Additionally, we should only
354 need to add a single word, since every pack operation cannot
355 use more bits than fit in a single word. */
356 VEC_safe_push (bitpack_word_t
, heap
, bp
->values
, 0);
359 /* Grab the last word to pack VAL into. */
360 word
= VEC_index (bitpack_word_t
, bp
->values
, ix
);
362 /* To fit VAL in WORD, we need to shift VAL to the left to
363 skip the bottom BP->FIRST_UNUSED_BIT bits. */
364 val
<<= bp
->first_unused_bit
;
366 /* Update WORD with VAL. */
/* NOTE(review): the OR of VAL into WORD was dropped in extraction;
   the replace below stores WORD back into the values vector.  */
370 VEC_replace (bitpack_word_t
, bp
->values
, ix
, word
);
371 bp
->num_bits
+= nbits
;
372 bp
->first_unused_bit
+= nbits
;
376 /* Unpack the next NBITS from bitpack BP. */
379 bp_unpack_value (struct bitpack_d
*bp
, unsigned nbits
)
381 bitpack_word_t val
, word
, mask
;
384 /* We cannot decode more bits than BITS_PER_BITPACK_WORD. */
385 gcc_checking_assert (nbits
> 0 && nbits
<= BITS_PER_BITPACK_WORD
);
387 /* Compute which word contains the next NBITS. */
388 ix
= bp_get_next_word (bp
, nbits
);
389 word
= VEC_index (bitpack_word_t
, bp
->values
, ix
);
391 /* Compute the mask to get NBITS from WORD. */
/* Full-width mask must be special-cased: shifting by the full word
   width would be undefined behavior.  */
392 mask
= (nbits
== BITS_PER_BITPACK_WORD
)
393 ? (bitpack_word_t
) -1
394 : ((bitpack_word_t
) 1 << nbits
) - 1;
396 /* Shift WORD to the right to skip over the bits already decoded
398 word
>>= bp
->first_unused_bit
;
400 /* Apply the mask to obtain the requested value. */
403 /* Update BP->NUM_BITS for the next unpack operation. */
404 bp
->num_bits
+= nbits
;
405 bp
->first_unused_bit
+= nbits
;
411 /* Check that all the TS_* structures handled by the lto_output_* and
412 lto_input_* routines are exactly ALL the structures defined in
/* treestruct.def — asserts at startup so a newly added TS_* enum
   cannot be silently skipped by the streamer.  */
416 check_handled_ts_structures (void)
418 bool handled_p
[LAST_TS_ENUM
];
421 memset (&handled_p
, 0, sizeof (handled_p
));
423 /* These are the TS_* structures that are either handled or
424 explicitly ignored by the streamer routines. */
425 handled_p
[TS_BASE
] = true;
426 handled_p
[TS_COMMON
] = true;
427 handled_p
[TS_INT_CST
] = true;
428 handled_p
[TS_REAL_CST
] = true;
429 handled_p
[TS_FIXED_CST
] = true;
430 handled_p
[TS_VECTOR
] = true;
431 handled_p
[TS_STRING
] = true;
432 handled_p
[TS_COMPLEX
] = true;
433 handled_p
[TS_IDENTIFIER
] = true;
434 handled_p
[TS_DECL_MINIMAL
] = true;
435 handled_p
[TS_DECL_COMMON
] = true;
436 handled_p
[TS_DECL_WRTL
] = true;
437 handled_p
[TS_DECL_NON_COMMON
] = true;
438 handled_p
[TS_DECL_WITH_VIS
] = true;
439 handled_p
[TS_FIELD_DECL
] = true;
440 handled_p
[TS_VAR_DECL
] = true;
441 handled_p
[TS_PARM_DECL
] = true;
442 handled_p
[TS_LABEL_DECL
] = true;
443 handled_p
[TS_RESULT_DECL
] = true;
444 handled_p
[TS_CONST_DECL
] = true;
445 handled_p
[TS_TYPE_DECL
] = true;
446 handled_p
[TS_FUNCTION_DECL
] = true;
447 handled_p
[TS_TYPE
] = true;
448 handled_p
[TS_LIST
] = true;
449 handled_p
[TS_VEC
] = true;
450 handled_p
[TS_EXP
] = true;
451 handled_p
[TS_SSA_NAME
] = true;
452 handled_p
[TS_BLOCK
] = true;
453 handled_p
[TS_BINFO
] = true;
454 handled_p
[TS_STATEMENT_LIST
] = true;
455 handled_p
[TS_CONSTRUCTOR
] = true;
456 handled_p
[TS_OMP_CLAUSE
] = true;
457 handled_p
[TS_OPTIMIZATION
] = true;
458 handled_p
[TS_TARGET_OPTION
] = true;
460 /* Anything not marked above will trigger the following assertion.
461 If this assertion triggers, it means that there is a new TS_*
462 structure that should be handled by the streamer. */
463 for (i
= 0; i
< LAST_TS_ENUM
; i
++)
464 gcc_assert (handled_p
[i
]);
468 /* Helper for lto_streamer_cache_insert_1. Add T to CACHE->NODES at
469 slot IX. Add OFFSET to CACHE->OFFSETS at slot IX. */
472 lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d
*cache
,
473 int ix
, tree t
, unsigned offset
)
475 gcc_assert (ix
>= 0);
477 /* Grow the array of nodes and offsets to accommodate T at IX. */
478 if (ix
>= (int) VEC_length (tree
, cache
->nodes
))
/* Over-allocate (~25% headroom plus a small constant) to amortize
   repeated growth.  */
480 size_t sz
= ix
+ (20 + ix
) / 4;
481 VEC_safe_grow_cleared (tree
, heap
, cache
->nodes
, sz
);
482 VEC_safe_grow_cleared (unsigned, heap
, cache
->offsets
, sz
);
485 VEC_replace (tree
, cache
->nodes
, ix
, t
);
486 VEC_replace (unsigned, cache
->offsets
, ix
, offset
);
490 /* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
491 CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.
493 If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
494 slot in the cache. Otherwise, T is inserted at the position indicated
497 If T already existed in CACHE, return true. Otherwise,
501 lto_streamer_cache_insert_1 (struct lto_streamer_cache_d
*cache
,
502 tree t
, int *ix_p
, unsigned *offset_p
,
503 bool insert_at_next_slot_p
)
506 struct tree_int_map d_entry
, *entry
;
/* Probe the map keyed on T; an empty slot means T is new.  */
513 d_entry
.base
.from
= t
;
514 slot
= htab_find_slot (cache
->node_map
, &d_entry
, INSERT
);
517 /* Determine the next slot to use in the cache. */
518 if (insert_at_next_slot_p
)
519 ix
= cache
->next_slot
++;
/* Record the T -> IX mapping in the hash table via a pool-allocated
   tree_int_map entry.  */
523 entry
= (struct tree_int_map
*)pool_alloc (cache
->node_map_entries
);
524 entry
->base
.from
= t
;
525 entry
->to
= (unsigned) ix
;
528 /* If no offset was given, store the invalid offset -1. */
529 offset
= (offset_p
) ? *offset_p
: (unsigned) -1;
531 lto_streamer_cache_add_to_node_array (cache
, ix
, t
, offset
);
533 /* Indicate that the item was not present in the cache. */
/* Otherwise T was already in the map; recover its slot and offset.  */
538 entry
= (struct tree_int_map
*) *slot
;
539 ix
= (int) entry
->to
;
540 offset
= VEC_index (unsigned, cache
->offsets
, ix
);
542 if (!insert_at_next_slot_p
&& ix
!= *ix_p
)
544 /* If the caller wants to insert T at a specific slot
545 location, and ENTRY->TO does not match *IX_P, add T to
546 the requested location slot. This situation arises when
547 streaming builtin functions.
549 For instance, on the writer side we could have two
550 FUNCTION_DECLS T1 and T2 that are represented by the same
551 builtin function. The reader will only instantiate the
552 canonical builtin, but since T1 and T2 had been
553 originally stored in different cache slots (S1 and S2),
554 the reader must be able to find the canonical builtin
555 function at slots S1 and S2. */
556 gcc_assert (lto_stream_as_builtin_p (t
));
559 /* Since we are storing a builtin, the offset into the
560 stream is not necessary as we will not need to read
561 forward in the stream. */
562 lto_streamer_cache_add_to_node_array (cache
, ix
, t
, -1);
565 /* Indicate that T was already in the cache. */
579 /* Insert tree node T in CACHE. If T already existed in the cache
580 return true. Otherwise, return false.
582 If IX_P is non-null, update it with the index into the cache where
585 *OFFSET_P represents the offset in the stream where T is physically
586 written out. The first time T is added to the cache, *OFFSET_P is
587 recorded in the cache together with T. But if T already existed
588 in the cache, *OFFSET_P is updated with the value that was recorded
589 the first time T was added to the cache.
591 If OFFSET_P is NULL, it is ignored. */
594 lto_streamer_cache_insert (struct lto_streamer_cache_d
*cache
, tree t
,
595 int *ix_p
, unsigned *offset_p
)
/* Thin wrapper: always inserts at the next available slot.  */
597 return lto_streamer_cache_insert_1 (cache
, t
, ix_p
, offset_p
, true);
601 /* Insert tree node T in CACHE at slot IX. If T already
602 existed in the cache return true. Otherwise, return false. */
605 lto_streamer_cache_insert_at (struct lto_streamer_cache_d
*cache
,
/* Thin wrapper: inserts at the caller-specified slot IX, no offset.  */
608 return lto_streamer_cache_insert_1 (cache
, t
, &ix
, NULL
, false);
612 /* Return true if tree node T exists in CACHE. If IX_P is
613 not NULL, write to *IX_P the index into the cache where T is stored
614 (-1 if T is not found). */
617 lto_streamer_cache_lookup (struct lto_streamer_cache_d
*cache
, tree t
,
621 struct tree_int_map d_slot
;
/* Non-mutating probe: NO_INSERT returns NULL when T is absent.  */
627 d_slot
.base
.from
= t
;
628 slot
= htab_find_slot (cache
->node_map
, &d_slot
, NO_INSERT
);
637 ix
= (int) ((struct tree_int_map
*) *slot
)->to
;
647 /* Return the tree node at slot IX in CACHE. */
650 lto_streamer_cache_get (struct lto_streamer_cache_d
*cache
, int ix
)
654 /* If the reader is requesting an index beyond the length of the
655 cache, it will need to read ahead. Return NULL_TREE to indicate
657 if ((unsigned) ix
>= VEC_length (tree
, cache
->nodes
))
660 return VEC_index (tree
, cache
->nodes
, (unsigned) ix
);
664 /* Record NODE in COMMON_NODES if it is not NULL and is not already in
/* SEEN_NODES.  Recurses into component types of shareable pointer,
   complex and array types so their element types are recorded too.  */
668 lto_record_common_node (tree
*nodep
, VEC(tree
, heap
) **common_nodes
,
669 struct pointer_set_t
*seen_nodes
)
673 if (node
== NULL_TREE
)
678 /* Type merging will get confused by the canonical types as they
679 are set by the middle-end. */
680 TYPE_CANONICAL (node
) = NULL_TREE
;
681 *nodep
= node
= gimple_register_type (node
);
684 /* Return if node is already seen. */
685 if (pointer_set_insert (seen_nodes
, node
))
688 VEC_safe_push (tree
, heap
, *common_nodes
, node
);
690 if (tree_node_can_be_shared (node
))
692 if (POINTER_TYPE_P (node
)
693 || TREE_CODE (node
) == COMPLEX_TYPE
694 || TREE_CODE (node
) == ARRAY_TYPE
)
695 lto_record_common_node (&TREE_TYPE (node
), common_nodes
, seen_nodes
);
700 /* Generate a vector of common nodes and make sure they are merged
701 properly according to the gimple type table. */
703 static VEC(tree
,heap
) *
704 lto_get_common_nodes (void)
707 VEC(tree
,heap
) *common_nodes
= NULL
;
708 struct pointer_set_t
*seen_nodes
;
710 /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
711 LTO back-end must agree. Currently, the only languages that set this
712 use the name "main". */
713 if (main_identifier_node
)
715 const char *main_name
= IDENTIFIER_POINTER (main_identifier_node
);
716 gcc_assert (strcmp (main_name
, "main") == 0);
719 main_identifier_node
= get_identifier ("main");
721 gcc_assert (ptrdiff_type_node
== integer_type_node
);
723 /* FIXME lto. In the C++ front-end, fileptr_type_node is defined as a
724 variant copy of ptr_type_node, rather than ptr_node itself. The
725 distinction should only be relevant to the front-end, so we always
726 use the C definition here in lto1.
728 These should be assured in pass_ipa_free_lang_data. */
729 gcc_assert (fileptr_type_node
== ptr_type_node
);
730 gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node
) == ptr_type_node
);
732 seen_nodes
= pointer_set_create ();
734 /* Skip itk_char. char_type_node is shared with the appropriately
/* signed or unsigned variant, which is recorded below.  */
736 for (i
= itk_signed_char
; i
< itk_none
; i
++)
737 lto_record_common_node (&integer_types
[i
], &common_nodes
, seen_nodes
);
739 for (i
= 0; i
< TYPE_KIND_LAST
; i
++)
740 lto_record_common_node (&sizetype_tab
[i
], &common_nodes
, seen_nodes
);
742 for (i
= 0; i
< TI_MAX
; i
++)
743 lto_record_common_node (&global_trees
[i
], &common_nodes
, seen_nodes
);
745 pointer_set_destroy (seen_nodes
);
751 /* Assign an index to tree node T and enter it in the streamer cache
755 preload_common_node (struct lto_streamer_cache_d
*cache
, tree t
)
759 lto_streamer_cache_insert (cache
, t
, NULL
, NULL
);
761 /* The FIELD_DECLs of structures should be shared, so that every
762 COMPONENT_REF uses the same tree node when referencing a field.
763 Pointer equality between FIELD_DECLs is used by the alias
764 machinery to compute overlapping memory references (See
765 nonoverlapping_component_refs_p). */
766 if (TREE_CODE (t
) == RECORD_TYPE
)
/* Recursively preload every field of a RECORD_TYPE.  */
770 for (f
= TYPE_FIELDS (t
); f
; f
= TREE_CHAIN (f
))
771 preload_common_node (cache
, f
);
776 /* Create a cache of pickled nodes. */
778 struct lto_streamer_cache_d
*
779 lto_streamer_cache_create (void)
781 struct lto_streamer_cache_d
*cache
;
782 VEC(tree
, heap
) *common_nodes
;
786 cache
= XCNEW (struct lto_streamer_cache_d
);
/* Hash table maps tree nodes to cache slot indices; entries come
   from a dedicated allocation pool.  */
788 cache
->node_map
= htab_create (101, tree_int_map_hash
, tree_int_map_eq
, NULL
);
790 cache
->node_map_entries
= create_alloc_pool ("node map",
791 sizeof (struct tree_int_map
),
794 /* Load all the well-known tree nodes that are always created by
795 the compiler on startup. This prevents writing them out
797 common_nodes
= lto_get_common_nodes ();
799 for (i
= 0; VEC_iterate (tree
, common_nodes
, i
, node
); i
++)
800 preload_common_node (cache
, node
);
802 VEC_free(tree
, heap
, common_nodes
);
808 /* Delete the streamer cache C. */
811 lto_streamer_cache_delete (struct lto_streamer_cache_d
*c
)
/* Release the hash table, its entry pool, and both parallel vectors.  */
816 htab_delete (c
->node_map
);
817 free_alloc_pool (c
->node_map_entries
);
818 VEC_free (tree
, heap
, c
->nodes
);
819 VEC_free (unsigned, heap
, c
->offsets
);
824 #ifdef LTO_STREAMER_DEBUG
/* Debug-only map from reconstructed trees to their original writer-side
   addresses (see lto_orig_address_map below).  */
825 static htab_t tree_htab
;
827 struct tree_hash_entry
/* Hash function for TREE_HTAB: hash the stored key pointer.  */
834 hash_tree (const void *p
)
836 const struct tree_hash_entry
*e
= (const struct tree_hash_entry
*) p
;
837 return htab_hash_pointer (e
->key
);
/* Equality function for TREE_HTAB: entries match when their key
   pointers are identical.  */
841 eq_tree (const void *p1
, const void *p2
)
843 const struct tree_hash_entry
*e1
= (const struct tree_hash_entry
*) p1
;
844 const struct tree_hash_entry
*e2
= (const struct tree_hash_entry
*) p2
;
845 return (e1
->key
== e2
->key
);
849 /* Initialization common to the LTO reader and writer. */
852 lto_streamer_init (void)
854 /* Check that all the TS_* handled by the reader and writer routines
855 match exactly the structures defined in treestruct.def. When a
856 new TS_* structure is added, the streamer should be updated to
858 check_handled_ts_structures ();
860 #ifdef LTO_STREAMER_DEBUG
861 tree_htab
= htab_create (31, hash_tree
, eq_tree
, NULL
);
866 /* Gate function for all LTO streaming passes. */
/* NOTE(review): the gate function's signature was lost in extraction;
   only its return expression remains.  Runs when generating LTO or
   already inside an LTO compilation.  */
871 return ((flag_generate_lto
|| in_lto_p
)
872 /* Don't bother doing anything if the program has errors. */
877 #ifdef LTO_STREAMER_DEBUG
878 /* Add a mapping between T and ORIG_T, which is the numeric value of
879 the original address of T as it was seen by the LTO writer. This
880 mapping is useful when debugging streaming problems. A debugging
881 session can be started on both reader and writer using ORIG_T
882 as a breakpoint value in both sessions.
884 Note that this mapping is transient and only valid while T is
885 being reconstructed. Once T is fully built, the mapping is
889 lto_orig_address_map (tree t
, intptr_t orig_t
)
891 struct tree_hash_entry ent
;
892 struct tree_hash_entry
**slot
;
/* Insert (or find) the slot for T and allocate a fresh entry.  */
897 = (struct tree_hash_entry
**) htab_find_slot (tree_htab
, &ent
, INSERT
);
899 *slot
= XNEW (struct tree_hash_entry
);
904 /* Get the original address of T as it was seen by the writer. This
905 is only valid while T is being reconstructed. */
908 lto_orig_address_get (tree t
)
910 struct tree_hash_entry ent
;
911 struct tree_hash_entry
**slot
;
/* Returns 0 when T has no recorded original address.  */
915 = (struct tree_hash_entry
**) htab_find_slot (tree_htab
, &ent
, NO_INSERT
);
916 return (slot
? (*slot
)->value
: 0);
920 /* Clear the mapping of T to its original address. */
923 lto_orig_address_remove (tree t
)
925 struct tree_hash_entry ent
;
926 struct tree_hash_entry
**slot
;
/* Look up T's slot and clear it from the table.  */
930 = (struct tree_hash_entry
**) htab_find_slot (tree_htab
, &ent
, NO_INSERT
);
933 htab_clear_slot (tree_htab
, (PTR
*)slot
);
938 /* Check that the version MAJOR.MINOR is the correct version number. */
941 lto_check_version (int major
, int minor
)
943 if (major
!= LTO_major_version
|| minor
!= LTO_minor_version
)
944 fatal_error ("bytecode stream generated with LTO version %d.%d instead "
945 "of the expected %d.%d",
947 LTO_major_version
, LTO_minor_version
);