/* Callgraph handling code.
   Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains basic routines manipulating call graph and variable pool

The callgraph:

    The call-graph is a data structure designed for inter-procedural
    optimization, but it is also used in non-unit-at-a-time compilation to
    allow easier code sharing.

    The call-graph consists of nodes and edges represented via linked lists.
    Each function (external or not) corresponds to a unique node (in
    contrast to tree DECL nodes, where we can have multiple nodes for each
    function).

    The mapping from declarations to call-graph nodes is done using a hash
    table based on DECL_UID, so it is essential for the declaration not to
    change once it has been inserted into the call-graph.  The call-graph
    nodes are created lazily by the cgraph_node function when it is called
    for an unknown declaration.

    When built, there is one edge for each direct call.  It is possible
    that the reference will be later optimized out.  The call-graph is
    built conservatively in order to make conservative data flow analysis
    possible.

    The callgraph at the moment does not represent indirect calls or calls
    from other compilation units.  The flag NEEDED is set for each node
    that may be accessed in such an invisible way, and such a node shall be
    considered an entry point to the callgraph.

Interprocedural information:

    The callgraph is the place to store data needed for interprocedural
    optimization.  All data structures are divided into three components:
    local_info, which is produced while analyzing the function; global_info,
    which is the result of a global walk of the callgraph at the end of
    compilation; and rtl_info, used by the RTL backend to propagate data
    from already compiled functions to their callers.

Inlining plans:

    The function inlining information is decided in advance and maintained
    in the callgraph as a so-called inline plan.
    For each inlined call, the callee's node is cloned to represent the
    new function copy produced by the inliner.
    Each inlined call gets a unique corresponding clone node of the callee
    and the data structure is updated while inlining is performed, so
    the clones are eliminated and their callee edges redirected to the
    caller.

    Each edge has an "inline_failed" field.  When the field is set to NULL,
    the call will be inlined.  When it is non-NULL it contains a reason
    why inlining wasn't performed.


The varpool data structure:

    Varpool is used to maintain variables in a similar manner as the
    call-graph is used for functions.  Most of the API is symmetric,
    replacing the cgraph function prefix by cgraph_varpool.  */
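
/* A minimal usage sketch of the API above (illustrative only; assumes
   hypothetical FUNCTION_DECLs caller_decl and callee_decl plus a call
   statement call_stmt already built by a front end):

       struct cgraph_node *caller = cgraph_node (caller_decl);
       struct cgraph_node *callee = cgraph_node (callee_decl);

       cgraph_create_edge (caller, callee, call_stmt, 0, 0);
       cgraph_mark_needed_node (caller);

   cgraph_create_edge records one direct call; the last two arguments are
   the profile count and the loop nesting depth of the call site.  Marking
   a node needed declares it an entry point that may be reached in ways
   the callgraph cannot see.  */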

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "hashtab.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "debug.h"
#include "target.h"
#include "basic-block.h"
#include "cgraph.h"
#include "varray.h"
#include "output.h"
#include "intl.h"
#include "tree-gimple.h"
#include "tree-dump.h"

static void cgraph_node_remove_callers (struct cgraph_node *node);
static inline void cgraph_edge_remove_caller (struct cgraph_edge *e);
static inline void cgraph_edge_remove_callee (struct cgraph_edge *e);

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct cgraph_node))) htab_t cgraph_hash;

/* The linked list of cgraph nodes.  */
struct cgraph_node *cgraph_nodes;

/* Queue of cgraph nodes scheduled to be lowered.  */
struct cgraph_node *cgraph_nodes_queue;

/* Queue of cgraph nodes scheduled to be expanded.  This is a
   secondary queue used during optimization to accommodate passes that
   may generate new functions that need to be optimized and expanded.  */
struct cgraph_node *cgraph_expand_queue;

/* Number of nodes in existence.  */
int cgraph_n_nodes;

/* Maximal uid used in cgraph nodes.  */
int cgraph_max_uid;

/* Set when whole unit has been analyzed so we can access global info.  */
bool cgraph_global_info_ready = false;

/* Set when the cgraph is fully built and the basic flags are computed.  */
bool cgraph_function_flags_ready = false;

/* Hash table used to convert declarations into varpool nodes.  */
static GTY((param_is (struct cgraph_varpool_node))) htab_t cgraph_varpool_hash;

/* Queue of varpool nodes scheduled to be lowered and output.  */
struct cgraph_varpool_node *cgraph_varpool_nodes_queue, *cgraph_varpool_first_unanalyzed_node;

/* The linked list of cgraph varpool nodes.  */
struct cgraph_varpool_node *cgraph_varpool_nodes;

/* End of the varpool queue.  Needs to be GTYed to work with PCH.  */
static GTY(()) struct cgraph_varpool_node *cgraph_varpool_last_needed_node;

/* Linked list of cgraph asm nodes.  */
struct cgraph_asm_node *cgraph_asm_nodes;

/* Last node in cgraph_asm_nodes.  */
static GTY(()) struct cgraph_asm_node *cgraph_asm_last_node;

/* The order index of the next cgraph node to be created.  This is
   used so that we can sort the cgraph nodes in order by when we saw
   them, to support -fno-toplevel-reorder.  */
int cgraph_order;

static hashval_t hash_node (const void *);
static int eq_node (const void *, const void *);

/* Returns a hash code for P.  */

static hashval_t
hash_node (const void *p)
{
  const struct cgraph_node *n = (const struct cgraph_node *) p;
  return (hashval_t) DECL_UID (n->decl);
}

/* Returns nonzero if P1 and P2 are equal.  */

static int
eq_node (const void *p1, const void *p2)
{
  const struct cgraph_node *n1 = (const struct cgraph_node *) p1;
  const struct cgraph_node *n2 = (const struct cgraph_node *) p2;
  return DECL_UID (n1->decl) == DECL_UID (n2->decl);
}

/* Allocate new callgraph node and insert it into basic data structures.  */
static struct cgraph_node *
cgraph_create_node (void)
{
  struct cgraph_node *node;

  node = GGC_CNEW (struct cgraph_node);
  node->next = cgraph_nodes;
  node->uid = cgraph_max_uid++;
  node->order = cgraph_order++;
  if (cgraph_nodes)
    cgraph_nodes->previous = node;
  node->previous = NULL;
  node->global.estimated_growth = INT_MIN;
  cgraph_nodes = node;
  cgraph_n_nodes++;
  return node;
}

/* Return cgraph node assigned to DECL.  Create new one when needed.  */
struct cgraph_node *
cgraph_node (tree decl)
{
  struct cgraph_node key, *node, **slot;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);

  if (!cgraph_hash)
    cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);

  key.decl = decl;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, &key, INSERT);

  if (*slot)
    {
      node = *slot;
      if (!node->master_clone)
        node->master_clone = node;
      return node;
    }

  node = cgraph_create_node ();
  node->decl = decl;
  *slot = node;
  if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
    {
      node->origin = cgraph_node (DECL_CONTEXT (decl));
      node->next_nested = node->origin->nested;
      node->origin->nested = node;
      node->master_clone = node;
    }
  return node;
}
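
/* For example, looking up the same declaration twice yields the same
   node (a sketch; fndecl stands for any FUNCTION_DECL):

       struct cgraph_node *a = cgraph_node (fndecl);
       struct cgraph_node *b = cgraph_node (fndecl);
       gcc_assert (a == b);

   The second lookup also makes the node its own master clone if no
   clone has claimed that role yet.  */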

/* Insert already constructed node into hashtable.  */

void
cgraph_insert_node_to_hashtable (struct cgraph_node *node)
{
  struct cgraph_node **slot;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, node, INSERT);

  gcc_assert (!*slot);
  *slot = node;
}

/* Compare ASMNAME with the DECL_ASSEMBLER_NAME of DECL.  */

static bool
decl_assembler_name_equal (tree decl, tree asmname)
{
  tree decl_asmname = DECL_ASSEMBLER_NAME (decl);

  if (decl_asmname == asmname)
    return true;

  /* If the target assembler name was set by the user, things are trickier.
     We have a leading '*' to begin with.  After that, it's arguable what
     is the correct thing to do with -fleading-underscore.  Arguably, we've
     historically been doing the wrong thing in assemble_alias by always
     printing the leading underscore.  Since we're not changing that, make
     sure user_label_prefix follows the '*' before matching.  */
  if (IDENTIFIER_POINTER (decl_asmname)[0] == '*')
    {
      const char *decl_str = IDENTIFIER_POINTER (decl_asmname) + 1;
      size_t ulp_len = strlen (user_label_prefix);

      if (ulp_len == 0)
        ;
      else if (strncmp (decl_str, user_label_prefix, ulp_len) == 0)
        decl_str += ulp_len;
      else
        return false;

      return strcmp (decl_str, IDENTIFIER_POINTER (asmname)) == 0;
    }

  return false;
}


/* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
   Return NULL if there's no such node.  */

struct cgraph_node *
cgraph_node_for_asm (tree asmname)
{
  struct cgraph_node *node;

  for (node = cgraph_nodes; node; node = node->next)
    if (decl_assembler_name_equal (node->decl, asmname))
      return node;

  return NULL;
}

/* Returns a hash value for the call statement of edge X.  */

static hashval_t
edge_hash (const void *x)
{
  return htab_hash_pointer (((struct cgraph_edge *) x)->call_stmt);
}

/* Return nonzero if the call statement of edge X is the statement Y.  */

static int
edge_eq (const void *x, const void *y)
{
  return ((struct cgraph_edge *) x)->call_stmt == y;
}

/* Return the callgraph edge representing the call statement CALL_STMT.  */
struct cgraph_edge *
cgraph_edge (struct cgraph_node *node, tree call_stmt)
{
  struct cgraph_edge *e, *e2;
  int n = 0;

  if (node->call_site_hash)
    return htab_find_with_hash (node->call_site_hash, call_stmt,
                                htab_hash_pointer (call_stmt));

  /* This loop may turn out to be a performance problem.  In such case,
     adding hash tables into call nodes with very many edges is probably
     the best solution.  It is not a good idea to add a pointer into the
     CALL_EXPR itself because we want to make it possible to have multiple
     cgraph nodes representing different clones of the same body before
     the body is actually cloned.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (e->call_stmt == call_stmt)
        break;
      n++;
    }
  if (n > 100)
    {
      node->call_site_hash = htab_create_ggc (120, edge_hash, edge_eq, NULL);
      for (e2 = node->callees; e2; e2 = e2->next_callee)
        {
          void **slot;
          slot = htab_find_slot_with_hash (node->call_site_hash,
                                           e2->call_stmt,
                                           htab_hash_pointer (e2->call_stmt),
                                           INSERT);
          gcc_assert (!*slot);
          *slot = e2;
        }
    }
  return e;
}
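
/* Illustrative lookup (a sketch; NODE and STMT assumed in hand): for a
   statement previously recorded via cgraph_create_edge,

       struct cgraph_edge *e = cgraph_edge (node, stmt);
       gcc_assert (e != NULL && e->call_stmt == stmt);

   The first query on a node with more than a hundred callees builds the
   call-site hash, so repeated queries stay cheap.  */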

/* Change the call statement of edge E to NEW_STMT.  */
void
cgraph_set_call_stmt (struct cgraph_edge *e, tree new_stmt)
{
  if (e->caller->call_site_hash)
    {
      htab_remove_elt_with_hash (e->caller->call_site_hash,
                                 e->call_stmt,
                                 htab_hash_pointer (e->call_stmt));
    }
  e->call_stmt = new_stmt;
  if (e->caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (e->caller->call_site_hash,
                                       e->call_stmt,
                                       htab_hash_pointer (e->call_stmt),
                                       INSERT);
      gcc_assert (!*slot);
      *slot = e;
    }
}

/* Create edge from CALLER to CALLEE in the cgraph.  */

struct cgraph_edge *
cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee,
                    tree call_stmt, gcov_type count, int nest)
{
  struct cgraph_edge *edge = GGC_NEW (struct cgraph_edge);
#ifdef ENABLE_CHECKING
  struct cgraph_edge *e;

  for (e = caller->callees; e; e = e->next_callee)
    gcc_assert (e->call_stmt != call_stmt);
#endif

  gcc_assert (get_call_expr_in (call_stmt));

  if (!DECL_SAVED_TREE (callee->decl))
    edge->inline_failed = N_("function body not available");
  else if (callee->local.redefined_extern_inline)
    edge->inline_failed = N_("redefined extern inline functions are not "
                             "considered for inlining");
  else if (callee->local.inlinable)
    edge->inline_failed = N_("function not considered for inlining");
  else
    edge->inline_failed = N_("function not inlinable");

  edge->aux = NULL;

  edge->caller = caller;
  edge->callee = callee;
  edge->call_stmt = call_stmt;
  edge->prev_caller = NULL;
  edge->next_caller = callee->callers;
  if (callee->callers)
    callee->callers->prev_caller = edge;
  edge->prev_callee = NULL;
  edge->next_callee = caller->callees;
  if (caller->callees)
    caller->callees->prev_callee = edge;
  caller->callees = edge;
  callee->callers = edge;
  edge->count = count;
  edge->loop_nest = nest;
  if (caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (caller->call_site_hash,
                                       edge->call_stmt,
                                       htab_hash_pointer (edge->call_stmt),
                                       INSERT);
      gcc_assert (!*slot);
      *slot = edge;
    }
  return edge;
}

/* Remove the edge E from the list of the callers of the callee.  */

static inline void
cgraph_edge_remove_callee (struct cgraph_edge *e)
{
  if (e->prev_caller)
    e->prev_caller->next_caller = e->next_caller;
  if (e->next_caller)
    e->next_caller->prev_caller = e->prev_caller;
  if (!e->prev_caller)
    e->callee->callers = e->next_caller;
}

/* Remove the edge E from the list of the callees of the caller.  */

static inline void
cgraph_edge_remove_caller (struct cgraph_edge *e)
{
  if (e->prev_callee)
    e->prev_callee->next_callee = e->next_callee;
  if (e->next_callee)
    e->next_callee->prev_callee = e->prev_callee;
  if (!e->prev_callee)
    e->caller->callees = e->next_callee;
  if (e->caller->call_site_hash)
    htab_remove_elt_with_hash (e->caller->call_site_hash,
                               e->call_stmt,
                               htab_hash_pointer (e->call_stmt));
}

/* Remove the edge E in the cgraph.  */

void
cgraph_remove_edge (struct cgraph_edge *e)
{
  /* Remove from callers list of the callee.  */
  cgraph_edge_remove_callee (e);

  /* Remove from callees list of the caller.  */
  cgraph_edge_remove_caller (e);
}

/* Redirect callee of E to N.  The function does not update underlying
   call expression.  */

void
cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n)
{
  /* Remove from callers list of the current callee.  */
  cgraph_edge_remove_callee (e);

  /* Insert to callers list of the new callee.  */
  e->prev_caller = NULL;
  if (n->callers)
    n->callers->prev_caller = e;
  e->next_caller = n->callers;
  n->callers = e;
  e->callee = n;
}

/* Remove all callees from the node.  */

void
cgraph_node_remove_callees (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* It is sufficient to remove the edges from the lists of callers of
     the callees.  The callee list of the node can be zapped with one
     assignment.  */
  for (e = node->callees; e; e = e->next_callee)
    cgraph_edge_remove_callee (e);
  node->callees = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
}

/* Remove all callers from the node.  */

static void
cgraph_node_remove_callers (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* It is sufficient to remove the edges from the lists of callees of
     the callers.  The caller list of the node can be zapped with one
     assignment.  */
  for (e = node->callers; e; e = e->next_caller)
    cgraph_edge_remove_caller (e);
  node->callers = NULL;
}

/* Remove the node from cgraph.  */

void
cgraph_remove_node (struct cgraph_node *node)
{
  void **slot;
  bool kill_body = false;

  cgraph_node_remove_callers (node);
  cgraph_node_remove_callees (node);
  /* Incremental inlining accesses removed nodes stored in the postorder
     list.  */
  node->needed = node->reachable = false;
  while (node->nested)
    cgraph_remove_node (node->nested);
  if (node->origin)
    {
      struct cgraph_node **node2 = &node->origin->nested;

      while (*node2 != node)
        node2 = &(*node2)->next_nested;
      *node2 = node->next_nested;
    }
  if (node->previous)
    node->previous->next = node->next;
  else
    cgraph_nodes = node->next;
  if (node->next)
    node->next->previous = node->previous;
  node->next = NULL;
  node->previous = NULL;
  slot = htab_find_slot (cgraph_hash, node, NO_INSERT);
  if (*slot == node)
    {
      if (node->next_clone)
        {
          struct cgraph_node *new_node = node->next_clone;
          struct cgraph_node *n;

          /* Make the next clone be the master clone.  */
          for (n = new_node; n; n = n->next_clone)
            n->master_clone = new_node;

          *slot = new_node;
          node->next_clone->prev_clone = NULL;
        }
      else
        {
          htab_clear_slot (cgraph_hash, slot);
          kill_body = true;
        }
    }
  else
    {
      node->prev_clone->next_clone = node->next_clone;
      if (node->next_clone)
        node->next_clone->prev_clone = node->prev_clone;
    }

  /* While all the clones are removed after being processed, the function
     itself is kept in the cgraph even after it is compiled.  Check whether
     we are done with this body and reclaim it proactively if this is the
     case.  */
  if (!kill_body && *slot)
    {
      struct cgraph_node *n = (struct cgraph_node *) *slot;
      if (!n->next_clone && !n->global.inlined_to
          && (cgraph_global_info_ready
              && (TREE_ASM_WRITTEN (n->decl) || DECL_EXTERNAL (n->decl))))
        kill_body = true;
    }

  if (kill_body && flag_unit_at_a_time)
    {
      DECL_SAVED_TREE (node->decl) = NULL;
      DECL_STRUCT_FUNCTION (node->decl) = NULL;
      DECL_INITIAL (node->decl) = error_mark_node;
    }
  node->decl = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
  cgraph_n_nodes--;
  /* Do not free the structure itself so the walk over chain can continue.  */
}

/* Notify finalize_compilation_unit that given node is reachable.  */

void
cgraph_mark_reachable_node (struct cgraph_node *node)
{
  if (!node->reachable && node->local.finalized)
    {
      notice_global_symbol (node->decl);
      node->reachable = 1;
      gcc_assert (!cgraph_global_info_ready);

      node->next_needed = cgraph_nodes_queue;
      cgraph_nodes_queue = node;
    }
}

/* Likewise indicate that a node is needed, i.e. reachable via some
   external means.  */

void
cgraph_mark_needed_node (struct cgraph_node *node)
{
  node->needed = 1;
  cgraph_mark_reachable_node (node);
}

/* Return local info for the compiled function.  */

struct cgraph_local_info *
cgraph_local_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  return &node->local;
}

/* Return global info for the compiled function.  */

struct cgraph_global_info *
cgraph_global_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
  node = cgraph_node (decl);
  return &node->global;
}

/* Return RTL info for the compiled function.  */

struct cgraph_rtl_info *
cgraph_rtl_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  if (decl != current_function_decl
      && !TREE_ASM_WRITTEN (node->decl))
    return NULL;
  return &node->rtl;
}

/* Return name of the node used in debug output.  */
const char *
cgraph_node_name (struct cgraph_node *node)
{
  return lang_hooks.decl_printable_name (node->decl, 2);
}

/* Return name of the varpool node used in debug output.  */
static const char *
cgraph_varpool_node_name (struct cgraph_varpool_node *node)
{
  return lang_hooks.decl_printable_name (node->decl, 2);
}

/* Names used to print out the availability enum.  */
static const char * const availability_names[] =
  {"unset", "not_available", "overwritable", "available", "local"};

/* Dump given cgraph node.  */
void
dump_cgraph_node (FILE *f, struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  fprintf (f, "%s/%i:", cgraph_node_name (node), node->uid);
  if (node->global.inlined_to)
    fprintf (f, " (inline copy in %s/%i)",
             cgraph_node_name (node->global.inlined_to),
             node->global.inlined_to->uid);
  if (cgraph_function_flags_ready)
    fprintf (f, " availability:%s",
             availability_names[cgraph_function_body_availability (node)]);
  if (node->master_clone && node->master_clone->uid != node->uid)
    fprintf (f, "(%i)", node->master_clone->uid);
  if (node->count)
    fprintf (f, " executed " HOST_WIDEST_INT_PRINT_DEC "x",
             (HOST_WIDEST_INT) node->count);
  if (node->local.self_insns)
    fprintf (f, " %i insns", node->local.self_insns);
  if (node->global.insns && node->global.insns != node->local.self_insns)
    fprintf (f, " (%i after inlining)", node->global.insns);
  if (node->origin)
    fprintf (f, " nested in: %s", cgraph_node_name (node->origin));
  if (node->needed)
    fprintf (f, " needed");
  else if (node->reachable)
    fprintf (f, " reachable");
  if (DECL_SAVED_TREE (node->decl))
    fprintf (f, " tree");
  if (node->output)
    fprintf (f, " output");
  if (node->local.local)
    fprintf (f, " local");
  if (node->local.externally_visible)
    fprintf (f, " externally_visible");
  if (node->local.finalized)
    fprintf (f, " finalized");
  if (node->local.disregard_inline_limits)
    fprintf (f, " always_inline");
  else if (node->local.inlinable)
    fprintf (f, " inlinable");
  if (node->local.redefined_extern_inline)
    fprintf (f, " redefined_extern_inline");
  if (TREE_ASM_WRITTEN (node->decl))
    fprintf (f, " asm_written");

  fprintf (f, "\n called by: ");
  for (edge = node->callers; edge; edge = edge->next_caller)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->caller),
               edge->caller->uid);
      if (edge->count)
        fprintf (f, "(" HOST_WIDEST_INT_PRINT_DEC "x) ",
                 (HOST_WIDEST_INT) edge->count);
      if (!edge->inline_failed)
        fprintf (f, "(inlined) ");
    }

  fprintf (f, "\n calls: ");
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->callee),
               edge->callee->uid);
      if (!edge->inline_failed)
        fprintf (f, "(inlined) ");
      if (edge->count)
        fprintf (f, "(" HOST_WIDEST_INT_PRINT_DEC "x) ",
                 (HOST_WIDEST_INT) edge->count);
      if (edge->loop_nest)
        fprintf (f, "(nested in %i loops) ", edge->loop_nest);
    }
  fprintf (f, "\n");
}
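
/* As an illustration, the dump produced above for an analyzed function
   "foo" with uid 5, called once from "main" and inlining a call to
   "bar", might look like this (hypothetical names and numbers):

       foo/5: availability:available 12 insns tree finalized inlinable
        called by: main/0
        calls: bar/7 (inlined)  */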

/* Dump the callgraph.  */

void
dump_cgraph (FILE *f)
{
  struct cgraph_node *node;

  fprintf (f, "callgraph:\n\n");
  for (node = cgraph_nodes; node; node = node->next)
    dump_cgraph_node (f, node);
}

/* Dump given cgraph varpool node.  */
void
dump_cgraph_varpool_node (FILE *f, struct cgraph_varpool_node *node)
{
  fprintf (f, "%s:", cgraph_varpool_node_name (node));
  fprintf (f, " availability:%s",
           cgraph_function_flags_ready
           ? availability_names[cgraph_variable_initializer_availability (node)]
           : "not-ready");
  if (DECL_INITIAL (node->decl))
    fprintf (f, " initialized");
  if (node->needed)
    fprintf (f, " needed");
  if (node->analyzed)
    fprintf (f, " analyzed");
  if (node->finalized)
    fprintf (f, " finalized");
  if (node->output)
    fprintf (f, " output");
  if (node->externally_visible)
    fprintf (f, " externally_visible");
  fprintf (f, "\n");
}

/* Dump the variable pool.  */

void
dump_varpool (FILE *f)
{
  struct cgraph_varpool_node *node;

  fprintf (f, "variable pool:\n\n");
  for (node = cgraph_varpool_nodes; node; node = node->next)
    dump_cgraph_varpool_node (f, node);
}

/* Returns a hash code for P.  */

static hashval_t
hash_varpool_node (const void *p)
{
  const struct cgraph_varpool_node *n = (const struct cgraph_varpool_node *) p;
  return (hashval_t) DECL_UID (n->decl);
}

/* Returns nonzero if P1 and P2 are equal.  */

static int
eq_varpool_node (const void *p1, const void *p2)
{
  const struct cgraph_varpool_node *n1 =
    (const struct cgraph_varpool_node *) p1;
  const struct cgraph_varpool_node *n2 =
    (const struct cgraph_varpool_node *) p2;
  return DECL_UID (n1->decl) == DECL_UID (n2->decl);
}

/* Return cgraph_varpool node assigned to DECL.  Create new one when
   needed.  */
struct cgraph_varpool_node *
cgraph_varpool_node (tree decl)
{
  struct cgraph_varpool_node key, *node, **slot;

  gcc_assert (DECL_P (decl) && TREE_CODE (decl) != FUNCTION_DECL);

  if (!cgraph_varpool_hash)
    cgraph_varpool_hash = htab_create_ggc (10, hash_varpool_node,
                                           eq_varpool_node, NULL);
  key.decl = decl;
  slot = (struct cgraph_varpool_node **)
    htab_find_slot (cgraph_varpool_hash, &key, INSERT);
  if (*slot)
    return *slot;
  node = GGC_CNEW (struct cgraph_varpool_node);
  node->decl = decl;
  node->order = cgraph_order++;
  node->next = cgraph_varpool_nodes;
  cgraph_varpool_nodes = node;
  *slot = node;
  return node;
}

/* Return the cgraph_varpool node that has ASMNAME for its
   DECL_ASSEMBLER_NAME.  Return NULL if there's no such node.  */
struct cgraph_varpool_node *
cgraph_varpool_node_for_asm (tree asmname)
{
  struct cgraph_varpool_node *node;

  for (node = cgraph_varpool_nodes; node; node = node->next)
    if (decl_assembler_name_equal (node->decl, asmname))
      return node;

  return NULL;
}

/* Set the DECL_ASSEMBLER_NAME and update cgraph hashtables.  */
void
change_decl_assembler_name (tree decl, tree name)
{
  if (!DECL_ASSEMBLER_NAME_SET_P (decl))
    {
      SET_DECL_ASSEMBLER_NAME (decl, name);
      return;
    }
  if (name == DECL_ASSEMBLER_NAME (decl))
    return;

  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
      && DECL_RTL_SET_P (decl))
    warning (0, "%D renamed after being referenced in assembly", decl);

  SET_DECL_ASSEMBLER_NAME (decl, name);
}

/* Helper function for finalization code - add node into lists so it will
   be analyzed and compiled.  */
void
cgraph_varpool_enqueue_needed_node (struct cgraph_varpool_node *node)
{
  if (cgraph_varpool_last_needed_node)
    cgraph_varpool_last_needed_node->next_needed = node;
  cgraph_varpool_last_needed_node = node;
  node->next_needed = NULL;
  if (!cgraph_varpool_nodes_queue)
    cgraph_varpool_nodes_queue = node;
  if (!cgraph_varpool_first_unanalyzed_node)
    cgraph_varpool_first_unanalyzed_node = node;
  notice_global_symbol (node->decl);
}

/* Reset the queue of needed nodes.  */
void
cgraph_varpool_reset_queue (void)
{
  cgraph_varpool_last_needed_node = NULL;
  cgraph_varpool_nodes_queue = NULL;
  cgraph_varpool_first_unanalyzed_node = NULL;
}

/* Notify finalize_compilation_unit that given node is reachable
   or needed.  */
void
cgraph_varpool_mark_needed_node (struct cgraph_varpool_node *node)
{
  if (!node->needed && node->finalized
      && !TREE_ASM_WRITTEN (node->decl))
    cgraph_varpool_enqueue_needed_node (node);
  node->needed = 1;
}

/* Determine if variable DECL is needed.  That is, visible to something
   either outside this translation unit, something magic in the system
   configury, or (if not doing unit-at-a-time) to something we haven't
   seen yet.  */

bool
decide_is_variable_needed (struct cgraph_varpool_node *node, tree decl)
{
  /* If the user told us it is used, then it must be so.  */
  if (node->externally_visible || node->force_output)
    return true;
  if (!flag_unit_at_a_time
      && lookup_attribute ("used", DECL_ATTRIBUTES (decl)))
    return true;

  /* ??? If the assembler name is set by hand, it is possible to assemble
     the name later after finalizing the function and the fact is noticed
     in assemble_name then.  This is arguably a bug.  */
  if (DECL_ASSEMBLER_NAME_SET_P (decl)
      && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
    return true;

  /* If we decided it was needed before, but at the time we didn't have
     the definition available, then it's still needed.  */
  if (node->needed)
    return true;

  /* Externally visible variables must be output.  The exception is
     COMDAT variables that must be output only when they are needed.  */
  if (TREE_PUBLIC (decl) && !flag_whole_program && !DECL_COMDAT (decl)
      && !DECL_EXTERNAL (decl))
    return true;

  /* When emulating tls, we actually see references to the control
     variable, rather than the user-level variable.  */
  if (!targetm.have_tls
      && TREE_CODE (decl) == VAR_DECL
      && DECL_THREAD_LOCAL_P (decl))
    {
      tree control = emutls_decl (decl);
      if (decide_is_variable_needed (cgraph_varpool_node (control), control))
        return true;
    }

  /* When not reordering top level variables, we have to assume that
     we are going to keep everything.  */
  if (flag_unit_at_a_time && flag_toplevel_reorder)
    return false;

  /* We want to emit COMDAT variables only when absolutely necessary.  */
  if (DECL_COMDAT (decl))
    return false;
  return true;
}

/* Mark the variable DECL as finalized.  The first finalization decides
   whether the variable is needed and enqueues it for output when it is.  */
void
cgraph_varpool_finalize_decl (tree decl)
{
  struct cgraph_varpool_node *node = cgraph_varpool_node (decl);

  /* The first declaration of a variable that comes through this function
     decides whether it is global (in C, has external linkage)
     or local (in C, has internal linkage).  So do nothing more
     if this function has already run.  */
  if (node->finalized)
    {
      if (cgraph_global_info_ready || (!flag_unit_at_a_time && !flag_openmp))
        cgraph_varpool_assemble_pending_decls ();
      return;
    }
  if (node->needed)
    cgraph_varpool_enqueue_needed_node (node);
  node->finalized = true;

  if (decide_is_variable_needed (node, decl))
    cgraph_varpool_mark_needed_node (node);
  /* Since we reclaim unreachable nodes at the end of every language
     level unit, we need to be conservative about possible entry points
     there.  */
  else if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl))
    cgraph_varpool_mark_needed_node (node);
  if (cgraph_global_info_ready || (!flag_unit_at_a_time && !flag_openmp))
    cgraph_varpool_assemble_pending_decls ();
}

/* Add a top-level asm statement to the list.  */

struct cgraph_asm_node *
cgraph_add_asm_node (tree asm_str)
{
  struct cgraph_asm_node *node;

  node = GGC_CNEW (struct cgraph_asm_node);
  node->asm_str = asm_str;
  node->order = cgraph_order++;
  node->next = NULL;
  if (cgraph_asm_nodes == NULL)
    cgraph_asm_nodes = node;
  else
    cgraph_asm_last_node->next = node;
  cgraph_asm_last_node = node;
  return node;
}

/* Return true when the DECL can possibly be inlined.  */
bool
cgraph_function_possibly_inlined_p (tree decl)
{
  if (!cgraph_global_info_ready)
    return (DECL_INLINE (decl) && !flag_really_no_inline);
  return DECL_POSSIBLY_INLINED (decl);
}

/* Create a clone of edge E in the callgraph, attached to node N and
   using CALL_STMT as the call statement of the clone.  */
struct cgraph_edge *
cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
                   tree call_stmt, gcov_type count_scale, int loop_nest,
                   bool update_original)
{
  struct cgraph_edge *new;

  new = cgraph_create_edge (n, e->callee, call_stmt,
                            e->count * count_scale / REG_BR_PROB_BASE,
                            e->loop_nest + loop_nest);

  new->inline_failed = e->inline_failed;
  if (update_original)
    {
      e->count -= new->count;
      if (e->count < 0)
        e->count = 0;
    }
  return new;
}

/* Create a node representing a clone of N executed COUNT times.

   When UPDATE_ORIGINAL is true, the counts are subtracted from the
   original function's profile to reflect the fact that part of the
   execution is handled by the clone.  */
struct cgraph_node *
cgraph_clone_node (struct cgraph_node *n, gcov_type count, int loop_nest,
                   bool update_original)
{
  struct cgraph_node *new = cgraph_create_node ();
  struct cgraph_edge *e;
  gcov_type count_scale;

  new->decl = n->decl;
  new->origin = n->origin;
  if (new->origin)
    {
      new->next_nested = new->origin->nested;
      new->origin->nested = new;
    }
  new->analyzed = n->analyzed;
  new->local = n->local;
  new->global = n->global;
  new->rtl = n->rtl;
  new->master_clone = n->master_clone;
  new->count = count;
  if (n->count)
    count_scale = new->count * REG_BR_PROB_BASE / n->count;
  else
    count_scale = 0;
  if (update_original)
    {
      n->count -= count;
      if (n->count < 0)
        n->count = 0;
    }

  for (e = n->callees; e; e = e->next_callee)
    cgraph_clone_edge (e, new, e->call_stmt, count_scale, loop_nest,
                       update_original);

  new->next_clone = n->next_clone;
  new->prev_clone = n;
  n->next_clone = new;
  if (new->next_clone)
    new->next_clone->prev_clone = new;

  return new;
}
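
/* A worked example of the scaling above: if the original node N was
   executed 1000 times and the clone is created with COUNT = 250, then
   count_scale is 250 * REG_BR_PROB_BASE / 1000, i.e. one quarter of
   REG_BR_PROB_BASE.  Each cloned edge therefore receives a quarter of
   the original edge count, and with UPDATE_ORIGINAL that amount is
   subtracted from the original node and edges, so the total profile
   weight is preserved.  */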

/* Return true if N is a master clone (see cgraph_master_clone).  */

bool
cgraph_is_master_clone (struct cgraph_node *n)
{
  return (n == cgraph_master_clone (n));
}

/* Return the master clone of N, establishing the binding lazily when
   the function body is available.  */
struct cgraph_node *
cgraph_master_clone (struct cgraph_node *n)
{
  enum availability avail = cgraph_function_body_availability (n);

  if (avail == AVAIL_NOT_AVAILABLE || avail == AVAIL_OVERWRITABLE)
    return NULL;

  if (!n->master_clone)
    n->master_clone = cgraph_node (n->decl);

  return n->master_clone;
}

/* NODE is no longer a nested function; update the cgraph accordingly.  */
void
cgraph_unnest_node (struct cgraph_node *node)
{
  struct cgraph_node **node2;

  gcc_assert (node->origin);
  node2 = &node->origin->nested;
  while (*node2 != node)
    node2 = &(*node2)->next_nested;
  *node2 = node->next_nested;
  node->origin = NULL;
}

/* Return function availability.  See cgraph.h for description of individual
   return values.  */
enum availability
cgraph_function_body_availability (struct cgraph_node *node)
{
  enum availability avail;
  gcc_assert (cgraph_function_flags_ready);
  if (!node->analyzed)
    avail = AVAIL_NOT_AVAILABLE;
  else if (node->local.local)
    avail = AVAIL_LOCAL;
  else if (node->local.externally_visible)
    avail = AVAIL_AVAILABLE;

  /* If the function can be overwritten, return OVERWRITABLE.  Take care
     of at least two notable extensions - the COMDAT functions used to
     share template instantiations in C++ (this is symmetric to the code
     in cp_cannot_inline_tree_fn and probably should be shared, with the
     inlinability hooks completely eliminated).

     ??? Does the C++ one definition rule allow us to always return
     AVAIL_AVAILABLE here?  That would be a good reason to preserve this
     hook.  Similarly, deal with extern inline functions - this is again
     necessary to get C++ shared functions having keyed templates right,
     and in the C extension documentation we probably should document the
     requirement that both versions of the function (extern inline and
     offline) have the same side-effect characteristics, as good
     optimization is what this optimization is about.  */

  else if (!(*targetm.binds_local_p) (node->decl)
           && !DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl))
    avail = AVAIL_OVERWRITABLE;
  else
    avail = AVAIL_AVAILABLE;

  return avail;
}

/* Return variable availability.  See cgraph.h for description of individual
   return values.  */
enum availability
cgraph_variable_initializer_availability (struct cgraph_varpool_node *node)
{
  gcc_assert (cgraph_function_flags_ready);
  if (!node->finalized)
    return AVAIL_NOT_AVAILABLE;
  if (!TREE_PUBLIC (node->decl))
    return AVAIL_AVAILABLE;
  /* If the variable can be overwritten, return OVERWRITABLE.  Takes
     care of at least two notable extensions - the COMDAT variables
     used to share template instantiations in C++.  */
  if (!(*targetm.binds_local_p) (node->decl) && !DECL_COMDAT (node->decl))
    return AVAIL_OVERWRITABLE;
  return AVAIL_AVAILABLE;
}


/* Add the function FNDECL to the call graph.  FNDECL is assumed to be
   in low GIMPLE form and ready to be processed by cgraph_finalize_function.

   When operating in unit-at-a-time, a new callgraph node is added to
   CGRAPH_EXPAND_QUEUE, which is processed after all the original
   functions in the call graph.

   When not in unit-at-a-time, the new callgraph node is added to
   CGRAPH_NODES_QUEUE for cgraph_assemble_pending_functions to
   process.  */

void
cgraph_add_new_function (tree fndecl)
{
  struct cgraph_node *n = cgraph_node (fndecl);
  n->next_needed = cgraph_expand_queue;
  cgraph_expand_queue = n;
}

#include "gt-cgraph.h"