Commit | Line | Data |
---|---|---|
d7c6d889 | 1 | /* Callgraph based interprocedural optimizations. |
ae01b312 | 2 | Copyright (C) 2003 Free Software Foundation, Inc. |
3 | Contributed by Jan Hubicka | |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it under | |
8 | the terms of the GNU General Public License as published by the Free | |
9 | Software Foundation; either version 2, or (at your option) any later | |
10 | version. | |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with GCC; see the file COPYING. If not, write to the Free | |
19 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | |
20 | 02111-1307, USA. */ | |
21 | ||
22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
25 | #include "tm.h" | |
26 | #include "tree.h" | |
27 | #include "tree-inline.h" | |
28 | #include "langhooks.h" | |
29 | #include "hashtab.h" | |
30 | #include "toplev.h" | |
31 | #include "flags.h" | |
32 | #include "ggc.h" | |
33 | #include "debug.h" | |
34 | #include "target.h" | |
35 | #include "cgraph.h" | |
80a85d8a | 36 | #include "diagnostic.h" |
f79b6507 | 37 | #include "timevar.h" |
d7c6d889 | 38 | #include "params.h" |
39 | #include "fibheap.h" | |
40 | #include "c-common.h" | |
41 | ||
42 | #define INSNS_PER_CALL 10 | |
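/* Rough per-call instruction overhead assumed by the inlining heuristics;
   cgraph_estimate_size_after_inlining credits it back for every call that
   inlining removes.  */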
ae01b312 | 43 | |
44 | static void cgraph_expand_functions PARAMS ((void)); | |
45 | static void cgraph_mark_functions_to_output PARAMS ((void)); | |
46 | static void cgraph_expand_function PARAMS ((struct cgraph_node *)); | |
47 | static tree record_call_1 PARAMS ((tree *, int *, void *)); | |
80a85d8a | 48 | static void cgraph_mark_local_functions PARAMS ((void)); |
961e3b13 | 49 | static void cgraph_optimize_function PARAMS ((struct cgraph_node *)); |
ae01b312 | 50 | |
d7c6d889 | 51 | /* Statistics we collect about inlining algorithm. */ |
52 | static int ncalls_inlined; | |
53 | static int nfunctions_inlined; | |
54 | static int initial_insns; | |
55 | static int overall_insns; | |
56 | ||
ae01b312 | 57 | /* Analyze the function once it is parsed. Record it as finalized and
d01481af | 58 | mark it as needed when it must be output. BODY is unused here. */ |
ae01b312 | 59 | |
60 | void | |
61 | cgraph_finalize_function (decl, body) | |
62 | tree decl; | |
63 | tree body ATTRIBUTE_UNUSED; | |
64 | { | |
65 | struct cgraph_node *node = cgraph_node (decl); | |
66 | ||
67 | node->decl = decl; | |
79bb87b4 | 68 | node->local.finalized = true; |
ae01b312 | 69 | |
3d7bfc56 | 70 | if (/* Externally visible functions must be output. The exception is |
71 | COMDAT functions, which must be output only when they are needed. |
d01481af | 72 | Deferred functions and external functions (GCC extension |
3d7bfc56 | 73 | "extern inline") are handled similarly. */ |
74 | (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl)) | |
75 | /* ??? Constructors and destructors not called otherwise can be inlined | |
76 | into a single construction/destruction function per section to save some | |
77 | resources. For now just mark them as reachable. */ | |
78 | || DECL_STATIC_CONSTRUCTOR (decl) | |
79 | || DECL_STATIC_DESTRUCTOR (decl) | |
80 | /* A function whose name is output to the assembler file must be produced. | |
81 | The name may be assembled later, after the function is finalized, | |
82 | in which case assemble_name notices the fact. */ | |
83 | || (DECL_ASSEMBLER_NAME_SET_P (decl) | |
40109983 | 84 | && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) |
85 | || lookup_attribute ("used", DECL_ATTRIBUTES (decl))) | |
3d7bfc56 | 86 | { |
87 | cgraph_mark_needed_node (node, 1); | |
88 | } | |
89 | ||
ae01b312 | 90 | (*debug_hooks->deferred_inline_function) (decl); |
91 | } | |
92 | ||
ae01b312 | 93 | /* Walk the tree and record all calls, taken function addresses and needed variables. Called via walk_tree. */ |
94 | static tree | |
95 | record_call_1 (tp, walk_subtrees, data) | |
96 | tree *tp; | |
97 | int *walk_subtrees; | |
98 | void *data; | |
99 | { | |
229dcfae | 100 | if (TREE_CODE (*tp) == VAR_DECL && TREE_STATIC (*tp)) |
101 | cgraph_varpool_mark_needed_node (cgraph_varpool_node (*tp)); | |
ae01b312 | 102 | /* Record references to functions whose addresses are taken. This makes |
103 | those functions reachable unconditionally. */ | |
229dcfae | 104 | else if (TREE_CODE (*tp) == ADDR_EXPR) |
ae01b312 | 105 | { |
106 | tree decl = TREE_OPERAND (*tp, 0); | |
107 | if (TREE_CODE (decl) == FUNCTION_DECL) | |
108 | cgraph_mark_needed_node (cgraph_node (decl), 1); | |
109 | } | |
110 | else if (TREE_CODE (*tp) == CALL_EXPR) | |
111 | { | |
3d7bfc56 | 112 | tree decl = get_callee_fndecl (*tp); |
113 | if (decl && TREE_CODE (decl) == FUNCTION_DECL) | |
ae01b312 | 114 | { |
115 | if (DECL_BUILT_IN (decl)) | |
116 | return NULL; | |
117 | cgraph_record_call (data, decl); | |
e6d2b2d8 | 118 | |
119 | /* When we see a function call, we don't want to look at the | |
120 | function reference in the ADDR_EXPR that is hanging from | |
121 | the CALL_EXPR we're examining here, because we would | |
122 | conclude incorrectly that the function's address could be | |
123 | taken by something that is not a function call. So walk only | |
124 | the function's argument list and skip the other subtrees. */ | |
125 | ||
ae01b312 | 126 | walk_tree (&TREE_OPERAND (*tp, 1), record_call_1, data, NULL); |
127 | *walk_subtrees = 0; | |
128 | } | |
129 | } | |
130 | return NULL; | |
131 | } | |
132 | ||
e6d2b2d8 | 133 | /* Create cgraph edges for function calls inside BODY from DECL. */ |
ae01b312 | 134 | |
135 | void | |
136 | cgraph_create_edges (decl, body) | |
137 | tree decl; | |
138 | tree body; | |
139 | { | |
e6d2b2d8 | 140 | /* The nodes we're interested in are never shared, so walk |
141 | the tree ignoring duplicates. */ | |
142 | walk_tree_without_duplicates (&body, record_call_1, decl); | |
ae01b312 | 143 | } |
144 | ||
145 | /* Analyze the whole compilation unit once it is parsed completely. */ | |
146 | ||
147 | void | |
148 | cgraph_finalize_compilation_unit () | |
149 | { | |
150 | struct cgraph_node *node; | |
151 | struct cgraph_edge *edge; | |
152 | ||
229dcfae | 153 | cgraph_varpool_assemble_pending_decls (); |
d7c6d889 | 154 | if (!quiet_flag) |
155 | fprintf (stderr, "\nAnalyzing compilation unit\n"); | |
229dcfae | 156 | |
f79b6507 | 157 | timevar_push (TV_CGRAPH); |
158 | if (cgraph_dump_file) | |
ae01b312 | 159 | { |
f79b6507 | 160 | fprintf (cgraph_dump_file, "\nInitial entry points:"); |
3d7bfc56 | 161 | for (node = cgraph_nodes; node; node = node->next) |
162 | if (node->needed && DECL_SAVED_TREE (node->decl)) | |
f79b6507 | 163 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); |
164 | fprintf (cgraph_dump_file, "\n"); | |
ae01b312 | 165 | } |
166 | ||
e6d2b2d8 | 167 | /* Propagate the reachability flag and lower the representation of all |
168 | reachable functions. In the future, lowering will introduce new functions | |
169 | and new entry points on the way (by template instantiation and virtual | |
170 | method table generation, for instance). */ | |
3d7bfc56 | 171 | while (cgraph_nodes_queue) |
ae01b312 | 172 | { |
3d7bfc56 | 173 | tree decl = cgraph_nodes_queue->decl; |
174 | ||
175 | node = cgraph_nodes_queue; | |
d87976fb | 176 | cgraph_nodes_queue = cgraph_nodes_queue->next_needed; |
ae01b312 | 177 | |
ae01b312 | 178 | if (node->lowered || !node->reachable || !DECL_SAVED_TREE (decl)) |
179 | abort (); | |
180 | ||
3d7bfc56 | 181 | if (lang_hooks.callgraph.lower_function) |
182 | (*lang_hooks.callgraph.lower_function) (decl); | |
183 | ||
e8bdbc3d | 184 | current_function_decl = node->decl; |
79bb87b4 | 185 | |
ae01b312 | 186 | /* At the moment the frontend automatically emits all nested functions. */ |
187 | if (node->nested) | |
188 | { | |
189 | struct cgraph_node *node2; | |
190 | ||
191 | for (node2 = node->nested; node2; node2 = node2->next_nested) | |
192 | if (!node2->reachable) | |
193 | cgraph_mark_needed_node (node2, 0); | |
194 | } | |
195 | ||
d7c6d889 | 196 | /* First kill forward declaration so reverse inlining works properly. */ |
ae01b312 | 197 | cgraph_create_edges (decl, DECL_SAVED_TREE (decl)); |
198 | ||
d7c6d889 | 199 | node->local.inlinable = tree_inlinable_function_p (decl, 1); |
200 | if (!DECL_ESTIMATED_INSNS (decl)) | |
201 | DECL_ESTIMATED_INSNS (decl) | |
202 | = (*lang_hooks.tree_inlining.estimate_num_insns) (decl); | |
203 | node->local.self_insns = DECL_ESTIMATED_INSNS (decl); | |
204 | if (node->local.inlinable) | |
205 | node->local.disgread_inline_limits | |
206 | = (*lang_hooks.tree_inlining.disregard_inline_limits) (decl); | |
207 | ||
ae01b312 | 208 | for (edge = node->callees; edge; edge = edge->next_callee) |
209 | { | |
210 | if (!edge->callee->reachable) | |
211 | cgraph_mark_needed_node (edge->callee, 0); | |
212 | } | |
213 | node->lowered = true; | |
229dcfae | 214 | cgraph_varpool_assemble_pending_decls (); |
ae01b312 | 215 | } |
3d7bfc56 | 216 | /* Collect entry points to the unit. */ |
217 | ||
f79b6507 | 218 | if (cgraph_dump_file) |
3d7bfc56 | 219 | { |
f79b6507 | 220 | fprintf (cgraph_dump_file, "\nUnit entry points:"); |
3d7bfc56 | 221 | for (node = cgraph_nodes; node; node = node->next) |
222 | if (node->needed && DECL_SAVED_TREE (node->decl)) | |
f79b6507 | 223 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); |
224 | fprintf (cgraph_dump_file, "\n"); | |
3d7bfc56 | 225 | } |
e6d2b2d8 | 226 | |
f79b6507 | 227 | if (cgraph_dump_file) |
228 | fprintf (cgraph_dump_file, "\nReclaiming functions:"); | |
ae01b312 | 229 | |
230 | for (node = cgraph_nodes; node; node = node->next) | |
231 | { | |
232 | tree decl = node->decl; | |
233 | ||
234 | if (!node->reachable && DECL_SAVED_TREE (decl)) | |
235 | { | |
961e3b13 | 236 | cgraph_remove_node (node); |
f79b6507 | 237 | if (cgraph_dump_file) |
238 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); | |
ae01b312 | 239 | } |
240 | } | |
f79b6507 | 241 | if (cgraph_dump_file) |
242 | fprintf (cgraph_dump_file, "\n"); | |
ae01b312 | 243 | ggc_collect (); |
f79b6507 | 244 | timevar_pop (TV_CGRAPH); |
ae01b312 | 245 | } |
246 | ||
247 | /* Figure out what functions we want to assemble. */ | |
248 | ||
249 | static void | |
250 | cgraph_mark_functions_to_output () | |
251 | { | |
252 | struct cgraph_node *node; | |
253 | ||
ae01b312 | 254 | for (node = cgraph_nodes; node; node = node->next) |
255 | { | |
256 | tree decl = node->decl; | |
d7c6d889 | 257 | struct cgraph_edge *e; |
258 | if (node->output) | |
259 | abort (); | |
260 | ||
261 | for (e = node->callers; e; e = e->next_caller) | |
262 | if (!e->inline_call) | |
263 | break; | |
ae01b312 | 264 | |
e6d2b2d8 | 265 | /* We need to output all local functions that are used and not |
266 | always inlined, as well as those that are reachable from | |
267 | outside the current compilation unit. */ | |
ae01b312 | 268 | if (DECL_SAVED_TREE (decl) |
269 | && (node->needed | |
d7c6d889 | 270 | || (e && node->reachable)) |
ae01b312 | 271 | && !TREE_ASM_WRITTEN (decl) && !node->origin |
272 | && !DECL_EXTERNAL (decl)) | |
273 | node->output = 1; | |
274 | } | |
275 | } | |
276 | ||
961e3b13 | 277 | /* Optimize the function before expansion. */ |
e6d2b2d8 | 278 | |
961e3b13 | 279 | static void |
280 | cgraph_optimize_function (node) | |
281 | struct cgraph_node *node; | |
282 | { | |
283 | tree decl = node->decl; | |
284 | ||
f79b6507 | 285 | timevar_push (TV_INTEGRATION); |
d7c6d889 | 286 | /* optimize_inline_calls avoids inlining of current_function_decl. */ |
287 | current_function_decl = 0; | |
961e3b13 | 288 | if (flag_inline_trees) |
289 | optimize_inline_calls (decl); | |
290 | if (node->nested) | |
291 | { | |
292 | for (node = node->nested; node; node = node->next_nested) | |
293 | cgraph_optimize_function (node); | |
294 | } | |
f79b6507 | 295 | timevar_pop (TV_INTEGRATION); |
961e3b13 | 296 | } |
297 | ||
ae01b312 | 298 | /* Expand function specified by NODE. */ |
e6d2b2d8 | 299 | |
ae01b312 | 300 | static void |
301 | cgraph_expand_function (node) | |
302 | struct cgraph_node *node; | |
303 | { | |
304 | tree decl = node->decl; | |
d7c6d889 | 305 | struct cgraph_edge *e; |
ae01b312 | 306 | |
307 | announce_function (decl); | |
961e3b13 | 308 | |
309 | cgraph_optimize_function (node); | |
80a85d8a | 310 | |
e6d2b2d8 | 311 | /* Generate RTL for the body of DECL. Nested functions are expanded |
312 | via lang_expand_decl_stmt. */ | |
ae01b312 | 313 | (*lang_hooks.callgraph.expand_function) (decl); |
961e3b13 | 314 | |
d7c6d889 | 315 | for (e = node->callers; e; e = e->next_caller) |
316 | if (e->inline_call) | |
317 | break; | |
318 | if (!e) | |
ae01b312 | 319 | DECL_SAVED_TREE (decl) = NULL; |
320 | current_function_decl = NULL; | |
321 | } | |
322 | ||
d7c6d889 | 323 | /* Fill the array ORDER with all callgraph nodes in reverse topological |
324 | order and return the number of nodes stored. */ | |
325 | static int | |
326 | cgraph_postorder (struct cgraph_node **order) | |
ae01b312 | 327 | { |
328 | struct cgraph_node *node, *node2; | |
ae01b312 | 329 | int stack_size = 0; |
330 | int order_pos = 0; | |
331 | struct cgraph_edge *edge, last; | |
ae01b312 | 332 | |
d7c6d889 | 333 | struct cgraph_node **stack = |
334 | xcalloc (sizeof (struct cgraph_node *), cgraph_n_nodes); | |
ae01b312 | 335 | |
e6d2b2d8 | 336 | /* We have to deal with cycles nicely, so use a depth first traversal |
337 | algorithm. Ignore the fact that some functions won't need | |
338 | to be output and put them into the order as well, so we get dependencies | |
339 | right through inlined functions. */ | |
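/* Implementation note: the AUX pointer of each node serves as its traversal
   state.  It points to the next caller edge still to be visited, with the
   address of the local variable LAST acting as a sentinel meaning "all
   callers done"; once a node reaches the sentinel it is appended to ORDER.
   An explicit stack keeps the walk iterative rather than recursive.  */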
ae01b312 | 340 | for (node = cgraph_nodes; node; node = node->next) |
341 | node->aux = NULL; | |
342 | for (node = cgraph_nodes; node; node = node->next) | |
3d7bfc56 | 343 | if (!node->aux) |
ae01b312 | 344 | { |
345 | node2 = node; | |
346 | if (!node->callers) | |
347 | node->aux = &last; | |
348 | else | |
349 | node->aux = node->callers; | |
350 | while (node2) | |
351 | { | |
352 | while (node2->aux != &last) | |
353 | { | |
354 | edge = node2->aux; | |
355 | if (edge->next_caller) | |
356 | node2->aux = edge->next_caller; | |
357 | else | |
358 | node2->aux = &last; | |
359 | if (!edge->caller->aux) | |
360 | { | |
361 | if (!edge->caller->callers) | |
362 | edge->caller->aux = &last; | |
363 | else | |
364 | edge->caller->aux = edge->caller->callers; | |
365 | stack[stack_size++] = node2; | |
366 | node2 = edge->caller; | |
367 | break; | |
368 | } | |
369 | } | |
370 | if (node2->aux == &last) | |
371 | { | |
372 | order[order_pos++] = node2; | |
373 | if (stack_size) | |
374 | node2 = stack[--stack_size]; | |
375 | else | |
376 | node2 = NULL; | |
377 | } | |
378 | } | |
379 | } | |
d7c6d889 | 380 | free (stack); |
381 | return order_pos; | |
382 | } | |
383 | ||
384 | #define INLINED_TIMES(node) ((size_t)(node)->aux) | |
385 | #define SET_INLINED_TIMES(node,times) ((node)->aux = (void *)(times)) | |
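/* These macros reuse the AUX pointer of a cgraph node to hold a small
   integer: the number of times the node will end up inlined into the
   function currently being considered.  The value is only meaningful between
   a call to cgraph_inlined_into/cgraph_inlined_callees and the point where
   the output flags are cleared again.  */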
386 | ||
387 | /* Return the list of nodes we decided to inline NODE into, set their output | |
388 | flag and compute INLINED_TIMES. | |
389 | ||
390 | We do simple backtracking to get INLINED_TIMES right. This should not be | |
391 | expensive as we limit the amount of inlining. Alternatively we could first | |
392 | discover the set of nodes, topologically sort them and propagate | |
393 | INLINED_TIMES. */ | |
394 | ||
395 | static int | |
396 | cgraph_inlined_into (struct cgraph_node *node, struct cgraph_node **array) | |
397 | { | |
398 | int nfound = 0; | |
399 | struct cgraph_edge **stack; | |
400 | struct cgraph_edge *e, *e1; | |
401 | int sp; | |
402 | int i; | |
403 | ||
404 | /* Fast path: since we traverse in mostly topological order, we will likely | |
405 | find no edges. */ | |
406 | for (e = node->callers; e; e = e->next_caller) | |
407 | if (e->inline_call) | |
408 | break; | |
409 | ||
410 | if (!e) | |
411 | return 0; | |
412 | ||
413 | /* Allocate stack for back-tracking up callgraph. */ | |
414 | stack = xmalloc ((cgraph_n_nodes + 1) * sizeof (struct cgraph_edge *)); | |
415 | sp = 0; | |
416 | ||
417 | /* Push the first edge on to the stack. */ | |
418 | stack[sp++] = e; | |
419 | ||
420 | while (sp) | |
ae01b312 | 421 | { |
d7c6d889 | 422 | struct cgraph_node *caller; |
423 | ||
424 | /* Look at the edge on the top of the stack. */ | |
425 | e = stack[sp - 1]; | |
426 | caller = e->caller; | |
427 | ||
428 | /* Check if the caller destination has been visited yet. */ | |
429 | if (!caller->output) | |
ae01b312 | 430 | { |
d7c6d889 | 431 | array[nfound++] = e->caller; |
432 | /* Mark that we have visited the destination. */ | |
433 | caller->output = true; | |
434 | SET_INLINED_TIMES (caller, 0); | |
435 | } | |
436 | SET_INLINED_TIMES (caller, INLINED_TIMES (caller) + 1); | |
437 | ||
438 | for (e1 = caller->callers; e1; e1 = e1->next_caller) | |
439 | if (e1->inline_call) | |
440 | break; | |
441 | if (e1) | |
442 | stack[sp++] = e1; | |
443 | else | |
444 | { | |
445 | while (true) | |
446 | { | |
447 | for (e1 = e->next_caller; e1; e1 = e1->next_caller) | |
448 | if (e1->inline_call) | |
449 | break; | |
450 | ||
451 | if (e1) | |
452 | { | |
453 | stack[sp - 1] = e1; | |
454 | break; | |
455 | } | |
456 | else | |
457 | { | |
458 | sp--; | |
459 | if (!sp) | |
460 | break; | |
461 | e = stack[sp - 1]; | |
462 | } | |
463 | } | |
ae01b312 | 464 | } |
465 | } | |
d7c6d889 | 466 | |
ae01b312 | 467 | free (stack); |
d7c6d889 | 468 | |
469 | ||
470 | if (cgraph_dump_file) | |
471 | { | |
472 | fprintf (cgraph_dump_file, "Found inline predecesors of %s:", | |
473 | cgraph_node_name (node)); | |
474 | for (i = 0; i < nfound; i++) | |
475 | { | |
476 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (array[i])); | |
477 | if (INLINED_TIMES (array[i]) != 1) | |
478 | fprintf (cgraph_dump_file, " (%i times)", | |
479 | INLINED_TIMES (array[i])); | |
480 | } | |
481 | fprintf (cgraph_dump_file, "\n"); | |
482 | } | |
483 | ||
484 | return nfound; | |
ae01b312 | 485 | } |
486 | ||
d7c6d889 | 487 | /* Return the list of nodes we decided to inline into NODE, set their output |
488 | flag and compute INLINED_TIMES. | |
489 | ||
490 | This function is identical to cgraph_inlined_into with the roles of | |
491 | callers and callees swapped. */ | |
492 | ||
493 | static int | |
494 | cgraph_inlined_callees (struct cgraph_node *node, struct cgraph_node **array) | |
495 | { | |
496 | int nfound = 0; | |
497 | struct cgraph_edge **stack; | |
498 | struct cgraph_edge *e, *e1; | |
499 | int sp; | |
500 | int i; | |
501 | ||
502 | /* Fast path: since we traverse in mostly topological order, we will likely | |
503 | find no edges. */ | |
504 | for (e = node->callees; e; e = e->next_callee) | |
505 | if (e->inline_call) | |
506 | break; | |
507 | ||
508 | if (!e) | |
509 | return 0; | |
510 | ||
511 | /* Allocate stack for back-tracking up callgraph. */ | |
512 | stack = xmalloc ((cgraph_n_nodes + 1) * sizeof (struct cgraph_edge *)); | |
513 | sp = 0; | |
514 | ||
515 | /* Push the first edge on to the stack. */ | |
516 | stack[sp++] = e; | |
517 | ||
518 | while (sp) | |
519 | { | |
520 | struct cgraph_node *callee; | |
521 | ||
522 | /* Look at the edge on the top of the stack. */ | |
523 | e = stack[sp - 1]; | |
524 | callee = e->callee; | |
525 | ||
526 | /* Check if the callee destination has been visited yet. */ | |
527 | if (!callee->output) | |
528 | { | |
529 | array[nfound++] = e->callee; | |
530 | /* Mark that we have visited the destination. */ | |
531 | callee->output = true; | |
532 | SET_INLINED_TIMES (callee, 0); | |
533 | } | |
534 | SET_INLINED_TIMES (callee, INLINED_TIMES (callee) + 1); | |
535 | ||
536 | for (e1 = callee->callees; e1; e1 = e1->next_callee) | |
537 | if (e1->inline_call) | |
538 | break; | |
539 | if (e1) | |
540 | stack[sp++] = e1; | |
541 | else | |
542 | { | |
543 | while (true) | |
544 | { | |
545 | for (e1 = e->next_callee; e1; e1 = e1->next_callee) | |
546 | if (e1->inline_call) | |
547 | break; | |
548 | ||
549 | if (e1) | |
550 | { | |
551 | stack[sp - 1] = e1; | |
552 | break; | |
553 | } | |
554 | else | |
555 | { | |
556 | sp--; | |
557 | if (!sp) | |
558 | break; | |
559 | e = stack[sp - 1]; | |
560 | } | |
561 | } | |
562 | } | |
563 | } | |
564 | ||
565 | free (stack); | |
566 | ||
567 | if (cgraph_dump_file) | |
568 | { | |
569 | fprintf (cgraph_dump_file, "Found inline successors of %s:", | |
570 | cgraph_node_name (node)); | |
571 | for (i = 0; i < nfound; i++) | |
572 | { | |
573 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (array[i])); | |
574 | if (INLINED_TIMES (array[i]) != 1) | |
575 | fprintf (cgraph_dump_file, " (%i times)", | |
576 | INLINED_TIMES (array[i])); | |
577 | } | |
578 | fprintf (cgraph_dump_file, "\n"); | |
579 | } | |
580 | ||
581 | return nfound; | |
582 | } | |
583 | ||
584 | /* Estimate the size of function TO after inlining WHAT into it TIMES times. */ | |
585 | ||
586 | static int | |
587 | cgraph_estimate_size_after_inlining (int times, | |
588 | struct cgraph_node *to, | |
589 | struct cgraph_node *what) | |
590 | { | |
591 | return (what->global.insns - INSNS_PER_CALL) *times + to->global.insns; | |
592 | } | |
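/* For example, inlining a 30-insn callee twice into a 50-insn caller is
   estimated as (30 - 10) * 2 + 50 = 90 insns: each inlined copy is credited
   with saving the INSNS_PER_CALL overhead of the call it replaces.  */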
593 | ||
594 | /* Estimate the growth caused by inlining NODE into all of its callers. */ | |
595 | ||
596 | static int | |
597 | cgraph_estimate_growth (struct cgraph_node *node) | |
598 | { | |
599 | int growth = 0; | |
600 | int calls_saved = 0; | |
601 | int clones_added = 0; | |
602 | struct cgraph_edge *e; | |
603 | ||
604 | for (e = node->callers; e; e = e->next_caller) | |
605 | if (!e->inline_call) | |
606 | { | |
607 | growth += ((cgraph_estimate_size_after_inlining (1, e->caller, node) | |
608 | - | |
609 | e->caller->global.insns) *e->caller->global.cloned_times); | |
610 | calls_saved += e->caller->global.cloned_times; | |
611 | clones_added += e->caller->global.cloned_times; | |
612 | } | |
613 | ||
614 | /* ??? This is wrong for self-recursive functions and for cases where we | |
615 | decide not to inline for other reasons, but it is not a big deal: in that | |
616 | case we keep the body around, but we also avoid some inlining. */ | |
617 | if (!node->needed && !node->origin && !DECL_EXTERNAL (node->decl)) | |
618 | growth -= node->global.insns, clones_added--; | |
619 | ||
620 | if (!calls_saved) | |
621 | calls_saved = 1; | |
622 | ||
623 | return growth; | |
624 | } | |
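/* For example, a static 30-insn function with two not-yet-inlined callers
   (each cloned once) yields growth = 2 * (30 - 10) - 30 = 10: two inlined
   copies are added and the standalone body, no longer referenced, is
   subtracted.  */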
625 | ||
626 | /* Mark the calls from TO to WHAT as inlined and update insn size estimates | |
627 | of TO and of every node in the INLINED array that TO is already inlined into. */ | |
80a85d8a | 628 | |
629 | static void | |
d7c6d889 | 630 | cgraph_mark_inline (struct cgraph_node *to, |
631 | struct cgraph_node *what, | |
632 | struct cgraph_node **inlined, int ninlined, | |
633 | struct cgraph_node **inlined_callees, | |
634 | int ninlined_callees) | |
635 | { | |
636 | int i; | |
637 | int times = 0; | |
638 | int clones = 0; | |
639 | struct cgraph_edge *e; | |
640 | bool called = false; | |
641 | int new_insns; | |
642 | ||
643 | for (e = what->callers; e; e = e->next_caller) | |
644 | { | |
645 | if (e->caller == to) | |
646 | { | |
647 | if (e->inline_call) | |
648 | abort (); | |
649 | e->inline_call = true; | |
650 | times++; | |
651 | clones += e->caller->global.cloned_times; | |
652 | } | |
653 | else if (!e->inline_call) | |
654 | called = true; | |
655 | } | |
656 | if (!times) | |
657 | abort (); | |
658 | ncalls_inlined += times; | |
659 | ||
660 | new_insns = cgraph_estimate_size_after_inlining (times, to, what); | |
661 | if (to->global.will_be_output) | |
662 | overall_insns += new_insns - to->global.insns; | |
663 | to->global.insns = new_insns; | |
664 | ||
665 | to->global.calls += (what->global.calls - 1) *times; | |
666 | if (!called && !what->needed && !what->origin | |
667 | && !DECL_EXTERNAL (what->decl)) | |
668 | { | |
669 | if (!what->global.will_be_output) | |
670 | abort (); | |
671 | clones--; | |
672 | nfunctions_inlined++; | |
673 | what->global.will_be_output = 0; | |
674 | overall_insns -= what->global.insns; | |
675 | } | |
676 | what->global.cloned_times += clones; | |
677 | if (to->global.calls < 0) | |
678 | abort (); | |
679 | for (i = 0; i < ninlined; i++) | |
680 | { | |
681 | new_insns = | |
682 | cgraph_estimate_size_after_inlining (INLINED_TIMES (inlined[i]) * | |
683 | times, inlined[i], what); | |
684 | if (inlined[i]->global.will_be_output) | |
685 | overall_insns += new_insns - inlined[i]->global.insns; | |
686 | inlined[i]->global.insns = new_insns; | |
687 | inlined[i]->global.calls += | |
688 | (what->global.calls - 1) *INLINED_TIMES (inlined[i]) * times; | |
689 | if (inlined[i]->global.calls < 0) | |
690 | abort (); | |
691 | } | |
692 | for (i = 0; i < ninlined_callees; i++) | |
693 | { | |
694 | inlined_callees[i]->global.cloned_times += | |
695 | INLINED_TIMES (inlined_callees[i]) * clones; | |
696 | } | |
697 | } | |
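/* Bookkeeping summary: overall_insns tracks the estimated size of everything
   that will still be output, global.cloned_times counts how many copies of a
   body will exist after inlining, and global.will_be_output is cleared once
   the last non-inlined call to an otherwise unneeded local function has been
   inlined.  */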
698 | ||
699 | /* Return false when inlining WHAT into TO is not a good idea because it would | |
700 | cause too large a growth of the function body. */ | |
701 | ||
702 | static bool | |
703 | cgraph_check_inline_limits (struct cgraph_node *to, | |
704 | struct cgraph_node *what, | |
705 | struct cgraph_node **inlined, int ninlined) | |
80a85d8a | 706 | { |
d7c6d889 | 707 | int i; |
708 | int times = 0; | |
709 | struct cgraph_edge *e; | |
710 | int newsize; | |
711 | int limit; | |
712 | ||
713 | for (e = to->callees; e; e = e->next_callee) | |
714 | if (e->callee == what) | |
715 | times++; | |
716 | ||
717 | /* When inlining a large function body called once into a small function, | |
718 | take the inlined function as the base for limiting the growth. */ | |
719 | if (to->local.self_insns > what->local.self_insns) | |
720 | limit = to->local.self_insns; | |
721 | else | |
722 | limit = what->local.self_insns; | |
723 | ||
724 | limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100; | |
725 | ||
726 | newsize = cgraph_estimate_size_after_inlining (times, to, what); | |
727 | if (newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS) | |
728 | && newsize > limit) | |
729 | return false; | |
730 | for (i = 0; i < ninlined; i++) | |
731 | { | |
732 | newsize = | |
733 | cgraph_estimate_size_after_inlining (INLINED_TIMES (inlined[i]) * | |
734 | times, inlined[i], what); | |
735 | if (newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS) | |
736 | && newsize > | |
737 | inlined[i]->local.self_insns * | |
738 | (100 + PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH)) / 100) | |
739 | return false; | |
740 | } | |
741 | return true; | |
742 | } | |
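/* For example, with, say, --param large-function-growth=100, inlining into a
   200-insn function is allowed until the estimated result exceeds
   200 + 200 * 100 / 100 = 400 insns; even then a call is refused only if the
   result is also above the large-function-insns threshold.  */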
743 | ||
744 | /* Return true when function N is small enough to be inlined. */ | |
745 | ||
746 | static bool | |
747 | cgraph_default_inline_p (struct cgraph_node *n) | |
748 | { | |
749 | if (!DECL_INLINE (n->decl) || !DECL_SAVED_TREE (n->decl)) | |
750 | return false; | |
751 | if (DID_INLINE_FUNC (n->decl)) | |
752 | return n->global.insns < MAX_INLINE_INSNS_AUTO; | |
753 | else | |
754 | return n->global.insns < MAX_INLINE_INSNS_SINGLE; | |
755 | } | |
756 | ||
757 | /* We use a greedy algorithm for inlining small functions: all inline | |
758 | candidates are put into a prioritized heap keyed by the estimated growth | |
759 | of the overall number of instructions, and the estimates are updated as we inline. | |
760 | ||
761 | INLINED and INLINED_CALLEES are just pointers to arrays large enough | |
762 | to be passed to cgraph_inlined_into and cgraph_inlined_callees. */ | |
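/* A Fibonacci heap is used here so that extracting the candidate with the
   smallest estimated growth and re-keying candidates whose estimates have
   changed (via fibheap_replace_key below) both stay cheap as the number of
   candidates grows.  */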
763 | ||
764 | static void | |
765 | cgraph_decide_inlining_of_small_functions (struct cgraph_node **inlined, | |
766 | struct cgraph_node | |
767 | **inlined_callees) | |
768 | { | |
769 | int i; | |
80a85d8a | 770 | struct cgraph_node *node; |
d7c6d889 | 771 | fibheap_t heap = fibheap_new (); |
772 | struct fibnode **heap_node = | |
773 | xcalloc (sizeof (struct fibnode *), cgraph_max_uid); | |
774 | int ninlined, ninlined_callees; | |
775 | int max_insns = ((HOST_WIDEST_INT) initial_insns | |
776 | * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100); | |
80a85d8a | 777 | |
d7c6d889 | 778 | /* Put all inline candidates into the heap. */ |
80a85d8a | 779 | |
80a85d8a | 780 | for (node = cgraph_nodes; node; node = node->next) |
781 | { | |
d7c6d889 | 782 | struct cgraph_edge *e; |
783 | ||
784 | if (!node->local.inlinable || !node->callers | |
785 | || !cgraph_default_inline_p (node)) | |
786 | continue; | |
787 | ||
788 | /* Rule out always_inline functions we dealt with earlier. */ | |
789 | for (e = node->callers; e; e = e->next_caller) | |
790 | if (e->inline_call) | |
791 | break; | |
792 | if (e) | |
793 | continue; | |
794 | heap_node[node->uid] = | |
795 | fibheap_insert (heap, cgraph_estimate_growth (node), node); | |
80a85d8a | 796 | } |
d7c6d889 | 797 | |
f79b6507 | 798 | if (cgraph_dump_file) |
d7c6d889 | 799 | fprintf (cgraph_dump_file, "\n\nDeciding on inlining: "); |
800 | while ((node = fibheap_extract_min (heap)) && overall_insns <= max_insns) | |
801 | { | |
802 | struct cgraph_edge *e; | |
803 | int old_insns = overall_insns; | |
804 | ||
805 | heap_node[node->uid] = NULL; | |
806 | if (cgraph_dump_file) | |
807 | fprintf (cgraph_dump_file, "Considering %s %i insns, growth %i.\n", | |
808 | cgraph_node_name (node), node->global.insns, | |
809 | cgraph_estimate_growth (node)); | |
810 | if (!cgraph_default_inline_p (node)) | |
811 | { | |
812 | if (cgraph_dump_file) | |
813 | fprintf (cgraph_dump_file, "Function too large.\n"); | |
814 | continue; | |
815 | } | |
816 | ninlined_callees = cgraph_inlined_callees (node, inlined_callees); | |
817 | for (e = node->callers; e; e = e->next_caller) | |
818 | if (!e->inline_call && e->caller != node) | |
819 | { | |
820 | ninlined = cgraph_inlined_into (e->caller, inlined); | |
821 | if (e->callee->output | |
822 | || !cgraph_check_inline_limits (e->caller, node, inlined, | |
823 | ninlined)) | |
824 | { | |
825 | for (i = 0; i < ninlined; i++) | |
826 | inlined[i]->output = 0, node->aux = 0; | |
827 | if (cgraph_dump_file) | |
828 | fprintf (cgraph_dump_file, "Not inlining into %s\n", | |
829 | cgraph_node_name (e->caller)); | |
830 | continue; | |
831 | } | |
832 | cgraph_mark_inline (e->caller, node, inlined, ninlined, | |
833 | inlined_callees, ninlined_callees); | |
834 | if (heap_node[e->caller->uid]) | |
835 | fibheap_replace_key (heap, heap_node[e->caller->uid], | |
836 | cgraph_estimate_growth (e->caller)); | |
837 | ||
838 | /* The sizes of the functions we inlined into have changed, so update | |
839 | their keys. */ | |
840 | for (i = 0; i < ninlined; i++) | |
841 | { | |
842 | inlined[i]->output = 0, node->aux = 0; | |
843 | if (heap_node[inlined[i]->uid]) | |
844 | fibheap_replace_key (heap, heap_node[inlined[i]->uid], | |
845 | cgraph_estimate_growth (inlined[i])); | |
846 | } | |
847 | } | |
848 | ||
849 | /* Similarly, all functions called by the function we just inlined | |
850 | are now called more times; update their keys. */ | |
851 | ||
852 | for (e = node->callees; e; e = e->next_callee) | |
853 | if (!e->inline_call && heap_node[e->callee->uid]) | |
854 | fibheap_replace_key (heap, heap_node[e->callee->uid], | |
855 | cgraph_estimate_growth (e->callee)); | |
856 | ||
857 | for (i = 0; i < ninlined_callees; i++) | |
858 | { | |
859 | struct cgraph_edge *e; | |
860 | ||
861 | for (e = inlined_callees[i]->callees; e; e = e->next_callee) | |
862 | if (!e->inline_call && heap_node[e->callee->uid]) | |
863 | fibheap_replace_key (heap, heap_node[e->callee->uid], | |
864 | cgraph_estimate_growth (e->callee)); | |
865 | ||
866 | inlined_callees[i]->output = 0, node->aux = 0; | |
867 | } | |
868 | if (cgraph_dump_file) | |
869 | fprintf (cgraph_dump_file, | |
870 | "Created %i clones, Num insns:%i (%+i), %.2f%%.\n\n", | |
871 | node->global.cloned_times - 1, | |
872 | overall_insns, overall_insns - old_insns, | |
873 | overall_insns * 100.0 / initial_insns); | |
874 | } | |
875 | if (cgraph_dump_file && !fibheap_empty (heap)) | |
876 | fprintf (cgraph_dump_file, "inline-unit-growth limit reached.\n"); | |
877 | fibheap_delete (heap); | |
878 | free (heap_node); | |
80a85d8a | 879 | } |
880 | ||
d7c6d889 | 881 | /* Decide on the inlining. We do so in topological order to keep the |
882 | expense of updating the data structures low. */ | |
961e3b13 | 883 | |
884 | static void | |
d7c6d889 | 885 | cgraph_decide_inlining (void) |
961e3b13 | 886 | { |
d7c6d889 | 887 | struct cgraph_node *node; |
888 | int nnodes; | |
889 | struct cgraph_node **order = | |
890 | xcalloc (sizeof (struct cgraph_node *), cgraph_n_nodes); | |
891 | struct cgraph_node **inlined = | |
892 | xcalloc (sizeof (struct cgraph_node *), cgraph_n_nodes); | |
893 | struct cgraph_node **inlined_callees = | |
894 | xcalloc (sizeof (struct cgraph_node *), cgraph_n_nodes); | |
895 | int ninlined; | |
896 | int ninlined_callees; | |
897 | int i, y; | |
961e3b13 | 898 | |
d7c6d889 | 899 | for (node = cgraph_nodes; node; node = node->next) |
900 | { | |
901 | int ncalls = 0; | |
902 | struct cgraph_edge *e; | |
903 | ||
904 | node->global.insns = node->local.self_insns; | |
905 | for (e = node->callees; e; e = e->next_callee) | |
906 | ncalls++; | |
907 | node->global.calls = ncalls; | |
908 | if (!DECL_EXTERNAL (node->decl)) | |
909 | { | |
910 | node->global.cloned_times = 1; | |
911 | initial_insns += node->local.self_insns; | |
912 | node->global.will_be_output = true; | |
913 | } | |
914 | } | |
915 | overall_insns = initial_insns; | |
916 | ||
917 | nnodes = cgraph_postorder (order); | |
961e3b13 | 918 | |
961e3b13 | 919 | for (node = cgraph_nodes; node; node = node->next) |
d7c6d889 | 920 | node->aux = 0; |
921 | ||
922 | if (cgraph_dump_file) | |
923 | fprintf (cgraph_dump_file, "\n\nDeciding on always_inline functions:\n"); | |
924 | ||
925 | /* In the first pass mark all always_inline edges. Do this first, so that | |
926 | none of our other decisions can make it impossible. */ | |
927 | for (i = nnodes - 1; i >= 0; i--) | |
928 | { | |
929 | struct cgraph_edge *e; | |
930 | ||
931 | node = order[i]; | |
932 | ||
933 | for (e = node->callees; e; e = e->next_callee) | |
934 | if (e->callee->local.disgread_inline_limits) | |
935 | break; | |
936 | if (!e) | |
937 | continue; | |
938 | if (cgraph_dump_file) | |
939 | fprintf (cgraph_dump_file, | |
940 | "Considering %s %i insns (always inline)\n", | |
941 | cgraph_node_name (node), node->global.insns); | |
942 | ninlined = cgraph_inlined_into (order[i], inlined); | |
943 | for (; e; e = e->next_callee) | |
944 | { | |
945 | if (e->inline_call || !e->callee->local.disgread_inline_limits) | |
946 | continue; | |
947 | if (e->callee->output || e->callee == node) | |
948 | continue; | |
949 | ninlined_callees = | |
950 | cgraph_inlined_callees (e->callee, inlined_callees); | |
951 | cgraph_mark_inline (node, e->callee, inlined, ninlined, | |
952 | inlined_callees, ninlined_callees); | |
953 | for (y = 0; y < ninlined_callees; y++) | |
954 | inlined_callees[y]->output = 0, node->aux = 0; | |
955 | if (cgraph_dump_file) | |
956 | fprintf (cgraph_dump_file, "Inlined %i times. Now %i insns\n\n", | |
957 | node->global.cloned_times, overall_insns); | |
958 | } | |
959 | for (y = 0; y < ninlined; y++) | |
960 | inlined[y]->output = 0, node->aux = 0; | |
961 | } | |
962 | ||
963 | cgraph_decide_inlining_of_small_functions (inlined, inlined_callees); | |
964 | ||
965 | if (cgraph_dump_file) | |
966 | fprintf (cgraph_dump_file, "\n\nFunctions to inline once:\n"); | |
967 | ||
968 | /* And finally, inline functions that are called exactly once into their sole caller. */ | |
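/* A candidate qualifies when it has exactly one caller that has not already
   inlined it, is not otherwise needed, is inlinable and is neither external
   nor COMDAT; we additionally verify that the caller chain is not itself
   duplicated, so the inlined body really disappears from the output.  */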
969 | ||
970 | for (i = nnodes - 1; i >= 0; i--) | |
961e3b13 | 971 | { |
d7c6d889 | 972 | node = order[i]; |
973 | ||
961e3b13 | 974 | if (node->callers && !node->callers->next_caller && !node->needed |
d7c6d889 | 975 | && node->local.inlinable && !node->callers->inline_call |
976 | && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl)) | |
961e3b13 | 977 | { |
978 | bool ok = true; | |
d7c6d889 | 979 | struct cgraph_node *node1; |
961e3b13 | 980 | |
981 | /* Verify that we won't duplicate the caller. */ | |
982 | for (node1 = node->callers->caller; | |
d7c6d889 | 983 | node1->callers && node1->callers->inline_call |
984 | && ok; node1 = node1->callers->caller) | |
961e3b13 | 985 | if (node1->callers->next_caller || node1->needed) |
986 | ok = false; | |
987 | if (ok) | |
988 | { | |
f79b6507 | 989 | if (cgraph_dump_file) |
d7c6d889 | 990 | fprintf (cgraph_dump_file, |
991 | "Considering %s %i insns (called once)\n", | |
992 | cgraph_node_name (node), node->global.insns); | |
993 | ninlined = cgraph_inlined_into (node->callers->caller, inlined); | |
994 | if (cgraph_check_inline_limits | |
995 | (node->callers->caller, node, inlined, ninlined)) | |
996 | { | |
997 | ninlined_callees = | |
998 | cgraph_inlined_callees (node, inlined_callees); | |
999 | cgraph_mark_inline (node->callers->caller, node, inlined, | |
1000 | ninlined, inlined_callees, | |
1001 | ninlined_callees); | |
1002 | for (y = 0; y < ninlined_callees; y++) | |
1003 | inlined_callees[y]->output = 0, node->aux = 0; | |
1004 | if (cgraph_dump_file) | |
1005 | fprintf (cgraph_dump_file, "Inlined. Now %i insns\n\n", overall_insns); | |
1006 | } | |
1007 | for (y = 0; y < ninlined; y++) | |
1008 | inlined[y]->output = 0, node->aux = 0; | |
961e3b13 | 1009 | } |
1010 | } | |
1011 | } | |
d7c6d889 | 1012 | |
f79b6507 | 1013 | if (cgraph_dump_file) |
d7c6d889 | 1014 | fprintf (cgraph_dump_file, |
1015 | "\nInlined %i calls, elliminated %i functions, %i insns turned to %i insns.\n", | |
1016 | ncalls_inlined, nfunctions_inlined, initial_insns, | |
1017 | overall_insns); | |
1018 | free (order); | |
1019 | free (inlined); | |
1020 | free (inlined_callees); | |
961e3b13 | 1021 | } |
1022 | ||
d7c6d889 | 1023 | /* Return true when CALLEE_DECL should be inlined into CALLER_DECL. */ |
1024 | ||
1025 | bool | |
1026 | cgraph_inline_p (tree caller_decl, tree callee_decl) | |
1027 | { | |
1028 | struct cgraph_node *caller = cgraph_node (caller_decl); | |
1029 | struct cgraph_node *callee = cgraph_node (callee_decl); | |
1030 | struct cgraph_edge *e; | |
1031 | ||
1032 | for (e = caller->callees; e; e = e->next_callee) | |
1033 | if (e->callee == callee) | |
1034 | return e->inline_call; | |
1035 | /* We do not record builtins in the callgraph. Perhaps it would make more | |
1036 | sense to do so and then prune out those not overwritten by an explicit | |
1037 | function body. */ | |
1038 | return false; | |
1039 | } | |
1040 | /* Expand all functions that must be output. | |
1041 | ||
1042 | Attempt to topologically sort the nodes so that a function is output when | |
1043 | all called functions are already assembled to allow data to be | |
1044 | propagated across the callgraph. Use a stack to get smaller distance | |
1045 | between a function and its callees (later we may choose to use a more | |
1046 | sophisticated algorithm for function reordering; we will likely want | |
1047 | to use subsections to make the output functions appear in top-down | |
1048 | order). */ | |
1049 | ||
1050 | static void | |
1051 | cgraph_expand_functions () | |
1052 | { | |
1053 | struct cgraph_node *node; | |
1054 | struct cgraph_node **order = | |
1055 | xcalloc (sizeof (struct cgraph_node *), cgraph_n_nodes); | |
1056 | int order_pos = 0; | |
1057 | int i; | |
1058 | ||
1059 | cgraph_mark_functions_to_output (); | |
1060 | ||
1061 | order_pos = cgraph_postorder (order); | |
1062 | ||
1063 | for (i = order_pos - 1; i >= 0; i--) | |
1064 | { | |
1065 | node = order[i]; | |
1066 | if (node->output) | |
1067 | { | |
1068 | if (!node->reachable) | |
1069 | abort (); | |
1070 | node->output = 0; | |
1071 | cgraph_expand_function (node); | |
1072 | } | |
1073 | } | |
1074 | free (order); | |
1075 | } | |
1076 | ||
1077 | /* Mark all local functions. | |
1078 | ||
1079 | A local function is one whose calls can occur only in the | |
1080 | current compilation unit, so we may change its calling convention. | |
1081 | We simply mark all static functions whose address is not taken | |
1082 | as local. */ | |
1083 | ||
1084 | static void | |
1085 | cgraph_mark_local_functions () | |
1086 | { | |
1087 | struct cgraph_node *node; | |
1088 | ||
1089 | if (cgraph_dump_file) | |
1090 | fprintf (cgraph_dump_file, "Marking local functions:"); | |
1091 | ||
1092 | /* Figure out which functions are local. */ | |
1093 | for (node = cgraph_nodes; node; node = node->next) | |
1094 | { | |
1095 | node->local.local = (!node->needed | |
1096 | && DECL_SAVED_TREE (node->decl) | |
1097 | && !TREE_PUBLIC (node->decl)); | |
1098 | if (cgraph_dump_file && node->local.local) | |
1099 | fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); | |
1100 | } | |
1101 | if (cgraph_dump_file) | |
1102 | fprintf (cgraph_dump_file, "\n"); | |
1103 | } | |
80a85d8a | 1104 | |
ae01b312 | 1105 | /* Perform simple optimizations based on the callgraph. */ |
1106 | ||
1107 | void | |
1108 | cgraph_optimize () | |
1109 | { | |
f79b6507 | 1110 | timevar_push (TV_CGRAPHOPT); |
d7c6d889 | 1111 | if (!quiet_flag) |
1112 | fprintf (stderr, "Performing intraprocedural optimizations\n"); | |
f79b6507 | 1113 | if (cgraph_dump_file) |
1114 | { | |
1115 | fprintf (cgraph_dump_file, "Initial callgraph:"); | |
1116 | dump_cgraph (cgraph_dump_file); | |
1117 | } | |
80a85d8a | 1118 | cgraph_mark_local_functions (); |
1119 | ||
d7c6d889 | 1120 | cgraph_decide_inlining (); |
961e3b13 | 1121 | |
80a85d8a | 1122 | cgraph_global_info_ready = true; |
f79b6507 | 1123 | if (cgraph_dump_file) |
1124 | { | |
1125 | fprintf (cgraph_dump_file, "Optimized callgraph:"); | |
1126 | dump_cgraph (cgraph_dump_file); | |
1127 | } | |
1128 | timevar_pop (TV_CGRAPHOPT); | |
ae01b312 | 1129 | if (!quiet_flag) |
d7c6d889 | 1130 | fprintf (stderr, "Assembling functions:"); |
ae01b312 | 1131 | |
d7c6d889 | 1132 | /* Output everything. */ |
ae01b312 | 1133 | cgraph_expand_functions (); |
f79b6507 | 1134 | if (cgraph_dump_file) |
1135 | { | |
1136 | fprintf (cgraph_dump_file, "Final callgraph:"); | |
1137 | dump_cgraph (cgraph_dump_file); | |
1138 | } | |
ae01b312 | 1139 | } |