/* Utilities for ipa analysis.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}

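/* Context shared by the recursive SCC walk in searchc: the DFS stack, the
   output array and its fill pointer, the splay tree of nodes still marked
   new (not yet visited), the running DFS counter, and a copy of the REDUCE
   flag passed to ipa_reduced_postorder.  */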
struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  The traversal state is passed
   explicitly in ENV because the function is recursive and C has no
   nested functions; it should only be called from itself or from
   ipa_reduced_postorder.  V is the node to start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail >= AVAIL_INTERPOSABLE))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }


  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with at least AVAIL_INTERPOSABLE availability are included in the
   order.  IGNORE_EDGE, if non-NULL, is a hook that may mark some edges as
   insignificant for the topological sort.  */
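
/* A typical caller (an illustrative sketch only; the local names below are
   hypothetical) allocates ORDER itself, walks the result, and releases the
   per-node aux data afterwards:

     cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < n; i++)
       process (order[i]);
     ipa_free_postorder_info ();
     free (order);  */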

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail >= AVAIL_INTERPOSABLE)
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer
   of call graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */
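
/* The returned vector owns its storage, so a caller is expected to release
   it when done; an illustrative sketch (process () is a placeholder):

     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (node);
     unsigned int i;
     cgraph_node *n;
     FOR_EACH_VEC_ELT (cycle, i, n)
       process (n);
     cycle.release ();  */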

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}

/* Return true iff CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

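/* One frame of the explicit DFS stack used by ipa_reverse_postorder: the
   node being visited, the next caller edge to scan, and the index of the
   next referring alias to scan.  */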
struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                              (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                                                       stack[stack_size].ref,
                                                       ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}


/* Given a memory reference T, return the variable at the bottom of the
   access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */
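
/* For example, for a reference such as a.b[i] this strips the ARRAY_REF and
   COMPONENT_REF and returns the declaration of `a'; for *ptr it returns
   `ptr' itself.  */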

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}

/* Scale the counts of the calls in NODE to reflect that the count of NODE
   itself changed from ORIG_COUNT to its current value.  */
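
/* For instance, if NODE's count dropped from an ORIG_COUNT of 1000 to a
   current NODE->count of 500, every callee and indirect-call edge count is
   halved.  */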

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */
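
/* In outline: scalar properties (frequency, time profile, profile id) are
   merged first; then, if both gimple bodies are available and their CFGs
   match block for block, SRC's basic block counts and edge probabilities
   are folded into DST and the call edge counts are recomputed from the
   merged blocks; otherwise only DST's outgoing IPA profile is rescaled via
   scale_ipa_profile_for_fn.  */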

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to set the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is shared, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol
     table temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl
         data and link it with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into the body of
         DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
         streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          /* Either sum the profiles if both are IPA and not global0, or
             pick the more informative one (that is, a nonzero IPA count
             if the other is uninitialized, guessed or global0).  */
          if (!dstbb->count.ipa ().initialized_p ()
              || (dstbb->count.ipa () == profile_count::zero ()
                  && (srcbb->count.ipa ().initialized_p ()
                      && !(srcbb->count.ipa () == profile_count::zero ()))))
            {
              dstbb->count = srcbb->count;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else if (srcbb->count.ipa ().initialized_p ()
                   && !(srcbb->count.ipa () == profile_count::zero ()))
            {
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  dste->probability =
                    dste->probability
                      * dstbb->count.probability_in (dstbb->count
                                                     + srcbb->count)
                    + srce->probability
                      * srcbb->count.probability_in (dstbb->count
                                                     + srcbb->count);
                }
              dstbb->count += srcbb->count;
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were before.  This is not
             really correct because in the other copy the speculation may
             differ; but it is probably not worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should mismatch earlier if there is no matching
                     indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update CFG profile, but we can scale IPA profile.  CFG
     will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}