]>
Commit | Line | Data |
---|---|---|
7bfefa9d | 1 | /* Miscellaneous utilities for GIMPLE streaming. Things that are used |
2 | in both input and output are here. | |
3 | ||
4 | Copyright 2009 Free Software Foundation, Inc. | |
5 | Contributed by Doug Kwan <dougkwan@google.com> | |
6 | ||
7 | This file is part of GCC. | |
8 | ||
9 | GCC is free software; you can redistribute it and/or modify it under | |
10 | the terms of the GNU General Public License as published by the Free | |
11 | Software Foundation; either version 3, or (at your option) any later | |
12 | version. | |
13 | ||
14 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
15 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
16 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
17 | for more details. | |
18 | ||
19 | You should have received a copy of the GNU General Public License | |
20 | along with GCC; see the file COPYING3. If not see | |
21 | <http://www.gnu.org/licenses/>. */ | |
22 | ||
23 | #include "config.h" | |
24 | #include "system.h" | |
25 | #include "coretypes.h" | |
26 | #include "tm.h" | |
27 | #include "toplev.h" | |
28 | #include "flags.h" | |
29 | #include "tree.h" | |
30 | #include "gimple.h" | |
31 | #include "tree-flow.h" | |
32 | #include "diagnostic.h" | |
33 | #include "bitmap.h" | |
34 | #include "vec.h" | |
35 | #include "lto-streamer.h" | |
36 | ||
/* Statistics gathered during LTO, WPA and LTRANS.  */
struct lto_stats_d lto_stats;

/* LTO uses bitmaps with different life-times.  So use a separate
   obstack for all LTO bitmaps.  */
static bitmap_obstack lto_obstack;
static bool lto_obstack_initialized;
44 | ||
45 | ||
46 | /* Return a string representing LTO tag TAG. */ | |
47 | ||
48 | const char * | |
49 | lto_tag_name (enum LTO_tags tag) | |
50 | { | |
51 | if (lto_tag_is_tree_code_p (tag)) | |
52 | { | |
53 | /* For tags representing tree nodes, return the name of the | |
54 | associated tree code. */ | |
55 | return tree_code_name[lto_tag_to_tree_code (tag)]; | |
56 | } | |
57 | ||
58 | if (lto_tag_is_gimple_code_p (tag)) | |
59 | { | |
60 | /* For tags representing gimple statements, return the name of | |
61 | the associated gimple code. */ | |
62 | return gimple_code_name[lto_tag_to_gimple_code (tag)]; | |
63 | } | |
64 | ||
65 | switch (tag) | |
66 | { | |
67 | case LTO_null: | |
68 | return "LTO_null"; | |
69 | case LTO_bb0: | |
70 | return "LTO_bb0"; | |
71 | case LTO_bb1: | |
72 | return "LTO_bb1"; | |
73 | case LTO_eh_region: | |
74 | return "LTO_eh_region"; | |
75 | case LTO_function: | |
76 | return "LTO_function"; | |
77 | case LTO_eh_table: | |
78 | return "LTO_eh_table"; | |
79 | case LTO_ert_cleanup: | |
80 | return "LTO_ert_cleanup"; | |
81 | case LTO_ert_try: | |
82 | return "LTO_ert_try"; | |
83 | case LTO_ert_allowed_exceptions: | |
84 | return "LTO_ert_allowed_exceptions"; | |
85 | case LTO_ert_must_not_throw: | |
86 | return "LTO_ert_must_not_throw"; | |
87 | case LTO_tree_pickle_reference: | |
88 | return "LTO_tree_pickle_reference"; | |
89 | case LTO_field_decl_ref: | |
90 | return "LTO_field_decl_ref"; | |
91 | case LTO_function_decl_ref: | |
92 | return "LTO_function_decl_ref"; | |
93 | case LTO_label_decl_ref: | |
94 | return "LTO_label_decl_ref"; | |
95 | case LTO_namespace_decl_ref: | |
96 | return "LTO_namespace_decl_ref"; | |
97 | case LTO_result_decl_ref: | |
98 | return "LTO_result_decl_ref"; | |
99 | case LTO_ssa_name_ref: | |
100 | return "LTO_ssa_name_ref"; | |
101 | case LTO_type_decl_ref: | |
102 | return "LTO_type_decl_ref"; | |
103 | case LTO_type_ref: | |
104 | return "LTO_type_ref"; | |
105 | case LTO_global_decl_ref: | |
106 | return "LTO_global_decl_ref"; | |
107 | default: | |
108 | return "LTO_UNKNOWN"; | |
109 | } | |
110 | } | |
111 | ||
112 | ||
113 | /* Allocate a bitmap from heap. Initializes the LTO obstack if necessary. */ | |
114 | ||
115 | bitmap | |
116 | lto_bitmap_alloc (void) | |
117 | { | |
118 | if (!lto_obstack_initialized) | |
119 | { | |
120 | bitmap_obstack_initialize (<o_obstack); | |
121 | lto_obstack_initialized = true; | |
122 | } | |
123 | return BITMAP_ALLOC (<o_obstack); | |
124 | } | |
125 | ||
/* Free bitmap B.  The bitmap's storage returns to the LTO obstack.  */

void
lto_bitmap_free (bitmap b)
{
  BITMAP_FREE (b);
}
133 | ||
134 | ||
/* Get a section name for a particular type or name.  The NAME field
   is only used if SECTION_TYPE is LTO_section_function_body; for all
   other section types it is ignored (fixed suffixes are used
   instead).  The caller of this function is responsible for freeing
   the returned name.  */

char *
lto_get_section_name (int section_type, const char *name)
{
  switch (section_type)
    {
    case LTO_section_function_body:
      /* Function bodies get one section each, keyed by NAME.  */
      return concat (LTO_SECTION_NAME_PREFIX, name, NULL);

    case LTO_section_static_initializer:
      return concat (LTO_SECTION_NAME_PREFIX, ".statics", NULL);

    case LTO_section_symtab:
      return concat (LTO_SECTION_NAME_PREFIX, ".symtab", NULL);

    case LTO_section_decls:
      return concat (LTO_SECTION_NAME_PREFIX, ".decls", NULL);

    case LTO_section_cgraph:
      return concat (LTO_SECTION_NAME_PREFIX, ".cgraph", NULL);

    case LTO_section_jump_functions:
      return concat (LTO_SECTION_NAME_PREFIX, ".jmpfuncs", NULL);

    case LTO_section_ipa_pure_const:
      return concat (LTO_SECTION_NAME_PREFIX, ".pureconst", NULL);

    case LTO_section_ipa_reference:
      return concat (LTO_SECTION_NAME_PREFIX, ".reference", NULL);

    case LTO_section_wpa_fixup:
      return concat (LTO_SECTION_NAME_PREFIX, ".wpa_fixup", NULL);

    case LTO_section_opts:
      return concat (LTO_SECTION_NAME_PREFIX, ".opts", NULL);

    default:
      internal_error ("bytecode stream: unexpected LTO section %s", name);
    }
}
179 | ||
180 | ||
/* Show various memory usage statistics related to LTO.  Output goes
   to stderr, one line per statistic, each tagged with the current
   phase (LTO, WPA or LTRANS).  */

void
print_lto_report (void)
{
  /* Tag for each output line, chosen from the active phase flag.  */
  const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
  unsigned i;

  fprintf (stderr, "%s statistics\n", s);
  fprintf (stderr, "[%s] # of input files: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);

  fprintf (stderr, "[%s] # of input cgraph nodes: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	   lto_stats.num_input_cgraph_nodes);

  fprintf (stderr, "[%s] # of function bodies: "
	   HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	   lto_stats.num_function_bodies);

  fprintf (stderr, "[%s] ", s);
  print_gimple_types_stats ();

  /* Per-tree-code read counts; only codes actually seen are shown.  */
  for (i = 0; i < NUM_TREE_CODES; i++)
    if (lto_stats.num_trees[i])
      fprintf (stderr, "[%s] # of '%s' objects read: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       tree_code_name[i], lto_stats.num_trees[i]);

  if (flag_lto)
    {
      /* Writer-side compression statistics (compressed/raw ratio,
	 guarded against division by zero).  */
      fprintf (stderr, "[%s] Compression: "
	       HOST_WIDE_INT_PRINT_UNSIGNED " output bytes, "
	       HOST_WIDE_INT_PRINT_UNSIGNED " compressed bytes", s,
	       lto_stats.num_output_il_bytes,
	       lto_stats.num_compressed_il_bytes);
      if (lto_stats.num_output_il_bytes > 0)
	{
	  const float dividend = (float) lto_stats.num_compressed_il_bytes;
	  const float divisor = (float) lto_stats.num_output_il_bytes;
	  fprintf (stderr, " (ratio: %f)", dividend / divisor);
	}
      fprintf (stderr, "\n");
    }

  if (flag_wpa)
    {
      fprintf (stderr, "[%s] # of output files: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_output_files);

      fprintf (stderr, "[%s] # of output cgraph nodes: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_output_cgraph_nodes);

      fprintf (stderr, "[%s] # callgraph partitions: "
	       HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
	       lto_stats.num_cgraph_partitions);

      /* Reader-side statistics (uncompressed/input ratio).  */
      fprintf (stderr, "[%s] Compression: "
	       HOST_WIDE_INT_PRINT_UNSIGNED " input bytes, "
	       HOST_WIDE_INT_PRINT_UNSIGNED " uncompressed bytes", s,
	       lto_stats.num_input_il_bytes,
	       lto_stats.num_uncompressed_il_bytes);
      if (lto_stats.num_input_il_bytes > 0)
	{
	  const float dividend = (float) lto_stats.num_uncompressed_il_bytes;
	  const float divisor = (float) lto_stats.num_input_il_bytes;
	  fprintf (stderr, " (ratio: %f)", dividend / divisor);
	}
      fprintf (stderr, "\n");
    }

  for (i = 0; i < LTO_N_SECTION_TYPES; i++)
    fprintf (stderr, "[%s] Size of mmap'd section %s: "
	     HOST_WIDE_INT_PRINT_UNSIGNED " bytes\n", s,
	     lto_section_name[i], lto_stats.section_size[i]);
}
259 | ||
260 | ||
261 | /* Create a new bitpack. */ | |
262 | ||
263 | struct bitpack_d * | |
264 | bitpack_create (void) | |
265 | { | |
266 | return XCNEW (struct bitpack_d); | |
267 | } | |
268 | ||
269 | ||
/* Free the memory used by bitpack BP, including its values vector.  */

void
bitpack_delete (struct bitpack_d *bp)
{
  VEC_free (bitpack_word_t, heap, bp->values);
  free (bp);
}
278 | ||
279 | ||
/* Return an index to the word in bitpack BP that contains the
   next NBITS.  As a side effect, BP->NUM_BITS and
   BP->FIRST_UNUSED_BIT may be advanced past a "hole" of unused bits
   when NBITS would not fit in the current word.  */

static inline unsigned
bp_get_next_word (struct bitpack_d *bp, unsigned nbits)
{
  unsigned last, ix;

  /* In principle, the next word to use is determined by the
     number of bits already processed in BP.  */
  ix = bp->num_bits / BITS_PER_BITPACK_WORD;

  /* All the encoded bit patterns in BP are contiguous, therefore if
     the next NBITS would straddle over two different words, move the
     index to the next word and update the number of encoded bits
     by adding up the hole of unused bits created by this move.  */
  bp->first_unused_bit %= BITS_PER_BITPACK_WORD;
  last = bp->first_unused_bit + nbits - 1;
  if (last >= BITS_PER_BITPACK_WORD)
    {
      ix++;
      bp->num_bits += (BITS_PER_BITPACK_WORD - bp->first_unused_bit);
      bp->first_unused_bit = 0;
    }

  return ix;
}
307 | ||
308 | ||
/* Pack NBITS of value VAL into bitpack BP.  Bits are appended
   contiguously after the bits already packed; a single call never
   straddles a word boundary (bp_get_next_word skips to a fresh word
   instead).  */

void
bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
{
  unsigned ix;
  bitpack_word_t word;

  /* We cannot encode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word will contain the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  if (ix >= VEC_length (bitpack_word_t, bp->values))
    {
      /* If there is no room left in the last word of the values
	 array, add a new word.  Additionally, we should only
	 need to add a single word, since every pack operation cannot
	 use more bits than fit in a single word.  */
      gcc_assert (ix < VEC_length (bitpack_word_t, bp->values) + 1);
      VEC_safe_push (bitpack_word_t, heap, bp->values, 0);
    }

  /* Grab the last word to pack VAL into.  */
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* To fit VAL in WORD, we need to shift VAL to the left to
     skip the bottom BP->FIRST_UNUSED_BIT bits.  */
  gcc_assert (BITS_PER_BITPACK_WORD >= bp->first_unused_bit + nbits);
  val <<= bp->first_unused_bit;

  /* Update WORD with VAL.  */
  word |= val;

  /* Update BP.  */
  VEC_replace (bitpack_word_t, bp->values, ix, word);
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;
}
348 | ||
349 | ||
/* Unpack the next NBITS from bitpack BP and return them as a value.
   This mirrors bp_pack_value: bits are consumed in the same order
   they were packed, using the same word/offset bookkeeping.  */

bitpack_word_t
bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
{
  bitpack_word_t val, word, mask;
  unsigned ix;

  /* We cannot decode more bits than BITS_PER_BITPACK_WORD.  */
  gcc_assert (nbits > 0 && nbits <= BITS_PER_BITPACK_WORD);

  /* Compute which word contains the next NBITS.  */
  ix = bp_get_next_word (bp, nbits);
  word = VEC_index (bitpack_word_t, bp->values, ix);

  /* Compute the mask to get NBITS from WORD.  The full-width case is
     handled separately because shifting by the word width is
     undefined behavior.  */
  mask = (nbits == BITS_PER_BITPACK_WORD)
	 ? (bitpack_word_t) -1
	 : ((bitpack_word_t) 1 << nbits) - 1;

  /* Shift WORD to the right to skip over the bits already decoded
     in word.  */
  word >>= bp->first_unused_bit;

  /* Apply the mask to obtain the requested value.  */
  val = word & mask;

  /* Update BP->NUM_BITS for the next unpack operation.  */
  bp->num_bits += nbits;
  bp->first_unused_bit += nbits;

  return val;
}
383 | ||
384 | ||
385 | /* Check that all the TS_* structures handled by the lto_output_* and | |
386 | lto_input_* routines are exactly ALL the structures defined in | |
387 | treestruct.def. */ | |
388 | ||
389 | static void | |
390 | check_handled_ts_structures (void) | |
391 | { | |
392 | bool handled_p[LAST_TS_ENUM]; | |
393 | unsigned i; | |
394 | ||
395 | memset (&handled_p, 0, sizeof (handled_p)); | |
396 | ||
397 | /* These are the TS_* structures that are either handled or | |
398 | explicitly ignored by the streamer routines. */ | |
399 | handled_p[TS_BASE] = true; | |
400 | handled_p[TS_COMMON] = true; | |
401 | handled_p[TS_INT_CST] = true; | |
402 | handled_p[TS_REAL_CST] = true; | |
403 | handled_p[TS_FIXED_CST] = true; | |
404 | handled_p[TS_VECTOR] = true; | |
405 | handled_p[TS_STRING] = true; | |
406 | handled_p[TS_COMPLEX] = true; | |
407 | handled_p[TS_IDENTIFIER] = true; | |
408 | handled_p[TS_DECL_MINIMAL] = true; | |
409 | handled_p[TS_DECL_COMMON] = true; | |
410 | handled_p[TS_DECL_WRTL] = true; | |
411 | handled_p[TS_DECL_NON_COMMON] = true; | |
412 | handled_p[TS_DECL_WITH_VIS] = true; | |
413 | handled_p[TS_FIELD_DECL] = true; | |
414 | handled_p[TS_VAR_DECL] = true; | |
415 | handled_p[TS_PARM_DECL] = true; | |
416 | handled_p[TS_LABEL_DECL] = true; | |
417 | handled_p[TS_RESULT_DECL] = true; | |
418 | handled_p[TS_CONST_DECL] = true; | |
419 | handled_p[TS_TYPE_DECL] = true; | |
420 | handled_p[TS_FUNCTION_DECL] = true; | |
421 | handled_p[TS_TYPE] = true; | |
422 | handled_p[TS_LIST] = true; | |
423 | handled_p[TS_VEC] = true; | |
424 | handled_p[TS_EXP] = true; | |
425 | handled_p[TS_SSA_NAME] = true; | |
426 | handled_p[TS_BLOCK] = true; | |
427 | handled_p[TS_BINFO] = true; | |
428 | handled_p[TS_STATEMENT_LIST] = true; | |
429 | handled_p[TS_CONSTRUCTOR] = true; | |
430 | handled_p[TS_OMP_CLAUSE] = true; | |
431 | handled_p[TS_OPTIMIZATION] = true; | |
432 | handled_p[TS_TARGET_OPTION] = true; | |
433 | ||
434 | /* Anything not marked above will trigger the following assertion. | |
435 | If this assertion triggers, it means that there is a new TS_* | |
436 | structure that should be handled by the streamer. */ | |
437 | for (i = 0; i < LAST_TS_ENUM; i++) | |
438 | gcc_assert (handled_p[i]); | |
439 | } | |
440 | ||
441 | ||
/* Helper for lto_streamer_cache_insert_1.  Add T to CACHE->NODES at
   slot IX.  Add OFFSET to CACHE->OFFSETS at slot IX.  */

static void
lto_streamer_cache_add_to_node_array (struct lto_streamer_cache_d *cache,
				      int ix, tree t, unsigned offset)
{
  gcc_assert (ix >= 0);

  /* Grow the array of nodes and offsets to accommodate T at IX.
     Growing roughly a quarter past IX (plus a small constant)
     amortizes the cost of repeated insertions.  */
  if (ix >= (int) VEC_length (tree, cache->nodes))
    {
      size_t sz = ix + (20 + ix) / 4;
      VEC_safe_grow_cleared (tree, gc, cache->nodes, sz);
      VEC_safe_grow_cleared (unsigned, heap, cache->offsets, sz);
    }

  VEC_replace (tree, cache->nodes, ix, t);
  VEC_replace (unsigned, cache->offsets, ix, offset);
}
462 | ||
463 | ||
/* Helper for lto_streamer_cache_insert and lto_streamer_cache_insert_at.
   CACHE, T, IX_P and OFFSET_P are as in lto_streamer_cache_insert.

   If INSERT_AT_NEXT_SLOT_P is true, T is inserted at the next available
   slot in the cache.  Otherwise, T is inserted at the position indicated
   in *IX_P.

   If T already existed in CACHE, return true.  Otherwise,
   return false.  On return, *IX_P (if non-NULL) holds T's slot and
   *OFFSET_P (if non-NULL) holds its recorded stream offset.  */

static bool
lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
			     tree t, int *ix_p, unsigned *offset_p,
			     bool insert_at_next_slot_p)
{
  void **slot;
  struct tree_int_map d_entry, *entry;
  int ix;
  unsigned offset;
  bool existed_p;

  gcc_assert (t);

  /* Probe the tree -> slot-index map, creating a slot if needed.  */
  d_entry.base.from = t;
  slot = htab_find_slot (cache->node_map, &d_entry, INSERT);
  if (*slot == NULL)
    {
      /* Determine the next slot to use in the cache.  */
      if (insert_at_next_slot_p)
	ix = cache->next_slot++;
      else
	ix = *ix_p;

      entry = XCNEW (struct tree_int_map);
      entry->base.from = t;
      entry->to = (unsigned) ix;
      *slot = entry;

      /* If no offset was given, store the invalid offset -1.  */
      offset = (offset_p) ? *offset_p : (unsigned) -1;

      lto_streamer_cache_add_to_node_array (cache, ix, t, offset);

      /* Indicate that the item was not present in the cache.  */
      existed_p = false;
    }
  else
    {
      entry = (struct tree_int_map *) *slot;
      ix = (int) entry->to;
      offset = VEC_index (unsigned, cache->offsets, ix);

      if (!insert_at_next_slot_p && ix != *ix_p)
	{
	  /* If the caller wants to insert T at a specific slot
	     location, and ENTRY->TO does not match *IX_P, add T to
	     the requested location slot.  This situation arises when
	     streaming builtin functions.

	     For instance, on the writer side we could have two
	     FUNCTION_DECLS T1 and T2 that are represented by the same
	     builtin function.  The reader will only instantiate the
	     canonical builtin, but since T1 and T2 had been
	     originally stored in different cache slots (S1 and S2),
	     the reader must be able to find the canonical builtin
	     function at slots S1 and S2.  */
	  gcc_assert (lto_stream_as_builtin_p (t));
	  ix = *ix_p;

	  /* Since we are storing a builtin, the offset into the
	     stream is not necessary as we will not need to read
	     forward in the stream.  */
	  lto_streamer_cache_add_to_node_array (cache, ix, t, -1);
	}

      /* Indicate that T was already in the cache.  */
      existed_p = true;
    }

  if (ix_p)
    *ix_p = ix;

  if (offset_p)
    *offset_p = offset;

  return existed_p;
}
551 | ||
552 | ||
553 | /* Insert tree node T in CACHE. If T already existed in the cache | |
554 | return true. Otherwise, return false. | |
555 | ||
556 | If IX_P is non-null, update it with the index into the cache where | |
557 | T has been stored. | |
558 | ||
559 | *OFFSET_P represents the offset in the stream where T is physically | |
560 | written out. The first time T is added to the cache, *OFFSET_P is | |
561 | recorded in the cache together with T. But if T already existed | |
562 | in the cache, *OFFSET_P is updated with the value that was recorded | |
563 | the first time T was added to the cache. | |
564 | ||
565 | If OFFSET_P is NULL, it is ignored. */ | |
566 | ||
567 | bool | |
568 | lto_streamer_cache_insert (struct lto_streamer_cache_d *cache, tree t, | |
569 | int *ix_p, unsigned *offset_p) | |
570 | { | |
571 | return lto_streamer_cache_insert_1 (cache, t, ix_p, offset_p, true); | |
572 | } | |
573 | ||
574 | ||
575 | /* Insert tree node T in CACHE at slot IX. If T already | |
576 | existed in the cache return true. Otherwise, return false. */ | |
577 | ||
578 | bool | |
579 | lto_streamer_cache_insert_at (struct lto_streamer_cache_d *cache, | |
580 | tree t, int ix) | |
581 | { | |
582 | return lto_streamer_cache_insert_1 (cache, t, &ix, NULL, false); | |
583 | } | |
584 | ||
585 | ||
586 | /* Return true if tree node T exists in CACHE. If IX_P is | |
587 | not NULL, write to *IX_P the index into the cache where T is stored | |
588 | (-1 if T is not found). */ | |
589 | ||
590 | bool | |
591 | lto_streamer_cache_lookup (struct lto_streamer_cache_d *cache, tree t, | |
592 | int *ix_p) | |
593 | { | |
594 | void **slot; | |
595 | struct tree_int_map d_slot; | |
596 | bool retval; | |
597 | int ix; | |
598 | ||
599 | gcc_assert (t); | |
600 | ||
601 | d_slot.base.from = t; | |
602 | slot = htab_find_slot (cache->node_map, &d_slot, NO_INSERT); | |
603 | if (slot == NULL) | |
604 | { | |
605 | retval = false; | |
606 | ix = -1; | |
607 | } | |
608 | else | |
609 | { | |
610 | retval = true; | |
611 | ix = (int) ((struct tree_int_map *) *slot)->to; | |
612 | } | |
613 | ||
614 | if (ix_p) | |
615 | *ix_p = ix; | |
616 | ||
617 | return retval; | |
618 | } | |
619 | ||
620 | ||
621 | /* Return the tree node at slot IX in CACHE. */ | |
622 | ||
623 | tree | |
624 | lto_streamer_cache_get (struct lto_streamer_cache_d *cache, int ix) | |
625 | { | |
626 | gcc_assert (cache); | |
627 | ||
628 | /* If the reader is requesting an index beyond the length of the | |
629 | cache, it will need to read ahead. Return NULL_TREE to indicate | |
630 | that. */ | |
631 | if ((unsigned) ix >= VEC_length (tree, cache->nodes)) | |
632 | return NULL_TREE; | |
633 | ||
634 | return VEC_index (tree, cache->nodes, (unsigned) ix); | |
635 | } | |
636 | ||
637 | ||
/* Record *NODEP in COMMON_NODES if it is not NULL and is not already
   in SEEN_NODES.  Types are passed through gimple_register_type
   first and *NODEP is updated with the result.  For shareable
   pointer, complex and array types, the element type is recorded
   recursively.  */

static void
lto_record_common_node (tree *nodep, VEC(tree, heap) **common_nodes,
			struct pointer_set_t *seen_nodes)
{
  tree node = *nodep;

  if (node == NULL_TREE)
    return;

  /* Canonicalize type nodes and reflect the result back into the
     caller's slot.  */
  if (TYPE_P (node))
    *nodep = node = gimple_register_type (node);

  /* Return if node is already seen.  */
  if (pointer_set_insert (seen_nodes, node))
    return;

  VEC_safe_push (tree, heap, *common_nodes, node);

  /* For shareable composite types, also record the type they are
     built from.  */
  if (tree_node_can_be_shared (node))
    {
      if (POINTER_TYPE_P (node)
	  || TREE_CODE (node) == COMPLEX_TYPE
	  || TREE_CODE (node) == ARRAY_TYPE)
	lto_record_common_node (&TREE_TYPE (node), common_nodes, seen_nodes);
    }
}
667 | ||
668 | ||
/* Generate a vector of common nodes and make sure they are merged
   properly according to the gimple type table.  The caller owns the
   returned vector and must free it.  */

static VEC(tree,heap) *
lto_get_common_nodes (void)
{
  unsigned i;
  VEC(tree,heap) *common_nodes = NULL;
  struct pointer_set_t *seen_nodes;

  /* The MAIN_IDENTIFIER_NODE is normally set up by the front-end, but the
     LTO back-end must agree.  Currently, the only languages that set this
     use the name "main".  */
  if (main_identifier_node)
    {
      const char *main_name = IDENTIFIER_POINTER (main_identifier_node);
      gcc_assert (strcmp (main_name, "main") == 0);
    }
  else
    main_identifier_node = get_identifier ("main");

  gcc_assert (ptrdiff_type_node == integer_type_node);

  /* FIXME lto.  In the C++ front-end, fileptr_type_node is defined as a
     variant copy of ptr_type_node, rather than ptr_type_node itself.  The
     distinction should only be relevant to the front-end, so we always
     use the C definition here in lto1.

     These should be assured in pass_ipa_free_lang_data.  */
  gcc_assert (fileptr_type_node == ptr_type_node);
  gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);

  seen_nodes = pointer_set_create ();

  /* Skip itk_char.  char_type_node is shared with the appropriately
     signed variant.  */
  for (i = itk_signed_char; i < itk_none; i++)
    lto_record_common_node (&integer_types[i], &common_nodes, seen_nodes);

  for (i = 0; i < TYPE_KIND_LAST; i++)
    lto_record_common_node (&sizetype_tab[i], &common_nodes, seen_nodes);

  for (i = 0; i < TI_MAX; i++)
    lto_record_common_node (&global_trees[i], &common_nodes, seen_nodes);

  pointer_set_destroy (seen_nodes);

  return common_nodes;
}
718 | ||
719 | ||
720 | /* Assign an index to tree node T and enter it in the streamer cache | |
721 | CACHE. */ | |
722 | ||
723 | static void | |
724 | preload_common_node (struct lto_streamer_cache_d *cache, tree t) | |
725 | { | |
726 | gcc_assert (t); | |
727 | ||
728 | lto_streamer_cache_insert (cache, t, NULL, NULL); | |
729 | ||
730 | /* The FIELD_DECLs of structures should be shared, so that every | |
731 | COMPONENT_REF uses the same tree node when referencing a field. | |
732 | Pointer equality between FIELD_DECLs is used by the alias | |
733 | machinery to compute overlapping memory references (See | |
734 | nonoverlapping_component_refs_p). */ | |
735 | if (TREE_CODE (t) == RECORD_TYPE) | |
736 | { | |
737 | tree f; | |
738 | ||
739 | for (f = TYPE_FIELDS (t); f; f = TREE_CHAIN (f)) | |
740 | preload_common_node (cache, f); | |
741 | } | |
742 | } | |
743 | ||
744 | ||
745 | /* Create a cache of pickled nodes. */ | |
746 | ||
747 | struct lto_streamer_cache_d * | |
748 | lto_streamer_cache_create (void) | |
749 | { | |
750 | struct lto_streamer_cache_d *cache; | |
751 | VEC(tree, heap) *common_nodes; | |
752 | unsigned i; | |
753 | tree node; | |
754 | ||
755 | cache = XCNEW (struct lto_streamer_cache_d); | |
756 | ||
757 | cache->node_map = htab_create (101, tree_int_map_hash, tree_int_map_eq, NULL); | |
758 | ||
759 | /* Load all the well-known tree nodes that are always created by | |
760 | the compiler on startup. This prevents writing them out | |
761 | unnecessarily. */ | |
762 | common_nodes = lto_get_common_nodes (); | |
763 | ||
764 | for (i = 0; VEC_iterate (tree, common_nodes, i, node); i++) | |
765 | preload_common_node (cache, node); | |
766 | ||
767 | VEC_free(tree, heap, common_nodes); | |
768 | ||
769 | return cache; | |
770 | } | |
771 | ||
772 | ||
773 | /* Delete the streamer cache C. */ | |
774 | ||
775 | void | |
776 | lto_streamer_cache_delete (struct lto_streamer_cache_d *c) | |
777 | { | |
778 | if (c == NULL) | |
779 | return; | |
780 | ||
781 | htab_delete (c->node_map); | |
782 | VEC_free (tree, gc, c->nodes); | |
783 | VEC_free (unsigned, heap, c->offsets); | |
784 | free (c); | |
785 | } | |
786 | ||
787 | ||
/* Initialization common to the LTO reader and writer.  */

void
lto_streamer_init (void)
{
  /* Check that all the TS_* handled by the reader and writer routines
     match exactly the structures defined in treestruct.def.  When a
     new TS_* structure is added, the streamer should be updated to
     handle it.  */
  check_handled_ts_structures ();
}
799 | ||
800 | ||
801 | /* Gate function for all LTO streaming passes. */ | |
802 | ||
803 | bool | |
804 | gate_lto_out (void) | |
805 | { | |
806 | return ((flag_generate_lto || in_lto_p) | |
807 | /* Don't bother doing anything if the program has errors. */ | |
808 | && !(errorcount || sorrycount)); | |
809 | } | |
810 | ||
811 | ||
812 | #ifdef LTO_STREAMER_DEBUG | |
/* Add a mapping between T and ORIG_T, which is the numeric value of
   the original address of T as it was seen by the LTO writer.  This
   mapping is useful when debugging streaming problems.  A debugging
   session can be started on both reader and writer using ORIG_T
   as a breakpoint value in both sessions.

   Note that this mapping is transient and only valid while T is
   being reconstructed.  Once T is fully built, the mapping is
   removed with lto_orig_address_remove.  */

void
lto_orig_address_map (tree t, intptr_t orig_t)
{
  /* FIXME lto.  Using the annotation field is quite hacky as it relies
     on the GC not running while T is being rematerialized.  It would
     be cleaner to use a hash table here.  */
  t->base.ann = (union tree_ann_d *) orig_t;
}
831 | ||
832 | ||
/* Get the original address of T as it was seen by the writer.  This
   is only valid while T is being reconstructed (see
   lto_orig_address_map for how the mapping is stored).  */

intptr_t
lto_orig_address_get (tree t)
{
  return (intptr_t) t->base.ann;
}
841 | ||
842 | ||
/* Clear the mapping of T to its original address, ending the window
   during which lto_orig_address_get is valid for T.  */

void
lto_orig_address_remove (tree t)
{
  t->base.ann = NULL;
}
850 | #endif | |
851 | ||
852 | ||
853 | /* Check that the version MAJOR.MINOR is the correct version number. */ | |
854 | ||
855 | void | |
856 | lto_check_version (int major, int minor) | |
857 | { | |
858 | if (major != LTO_major_version || minor != LTO_minor_version) | |
859 | fatal_error ("bytecode stream generated with LTO version %d.%d instead " | |
860 | "of the expected %d.%d", | |
861 | major, minor, | |
862 | LTO_major_version, LTO_minor_version); | |
863 | } |