/* Utilities for ipa analysis.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}


struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};

/* This is an implementation of Tarjan's strongly connected component
   finder as presented in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  ENV carries the traversal state
   and would be unnecessary if C had nested functions; this function
   should only be called from itself or from ipa_reduced_postorder.
   V is the node to start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* mark node as old */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail >= AVAIL_INTERPOSABLE))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }


  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topologically sort the call graph by the caller relation.  Put the
   result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with AVAIL_INTERPOSABLE or better availability are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges
   insignificant for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail > AVAIL_INTERPOSABLE
          || avail == AVAIL_INTERPOSABLE)
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
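
/* Illustrative usage sketch: a typical way an IPA pass might drive the
   reduced postorder.  The per-SCC callback process_scc_root is a
   hypothetical name; everything else is declared in this file or in
   cgraph.h.

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int order_pos = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < order_pos; i++)
       process_scc_root (order[i]);
     ipa_free_postorder_info ();
     free (order);  */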

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
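
/* Illustrative sketch: walking every member of the cycle represented by a
   reduced node obtained from ipa_reduced_postorder.  Both reduced_node and
   visit_member are hypothetical names.

     unsigned int ix;
     cgraph_node *member;
     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (reduced_node);
     FOR_EACH_VEC_ELT (cycle, ix, member)
       visit_member (member);
     cycle.release ();  */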

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array order with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                              (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                           stack[stack_size].ref,
                           ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
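
/* Illustrative usage sketch: visiting all functions in an order that
   respects the caller/callee dependencies described above.  handle_node is
   a hypothetical callback.

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reverse_postorder (order);
     for (int i = 0; i < n; i++)
       handle_node (order[i]);
     free (order);  */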



/* Given a memory reference T, return the variable at the bottom of the
   access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
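
/* For instance, for a reference such as a.b[i].c this walks down to the
   VAR_DECL for a, while for *p (a MEM_REF) it stops at and returns the
   pointer p itself, since an SSA name or decl already satisfies
   SSA_VAR_P.  */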

/* Scale the counts of calls in NODE, which were consistent with the old
   count ORIG_COUNT, so that they match the current NODE->count.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}
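
/* Rough worked example (ignoring the adjustments adjust_for_ipa_scaling
   makes for zero or uninitialized counts): if ORIG_COUNT was 1000 and
   NODE->count is now 250, every callee and indirect-call edge count is
   multiplied by 250/1000, i.e. reduced to a quarter.  */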

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to mark the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If declaration is merged, we need to duplicate it to be able
     to load body that is being replaced.  This makes symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl data
         and link these with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with new declaration, so LTO streamer
         can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic block mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          /* Either sum the profiles if both are IPA and not global0, or
             pick more informative one (that is nonzero IPA if other is
             uninitialized, guessed or global0).  */
          if (!dstbb->count.ipa ().initialized_p ()
              || (dstbb->count.ipa () == profile_count::zero ()
                  && (srcbb->count.ipa ().initialized_p ()
                      && !(srcbb->count.ipa () == profile_count::zero ()))))
            {
              dstbb->count = srcbb->count;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else if (srcbb->count.ipa ().initialized_p ()
                   && !(srcbb->count.ipa () == profile_count::zero ()))
            {
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  dste->probability =
                    dste->probability * dstbb->count.probability_in (dstbb->count + srcbb->count)
                    + srce->probability * srcbb->count.probability_in (dstbb->count + srcbb->count);
                }
              dstbb->count += srcbb->count;
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but probably it is not really worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should have flagged a mismatch earlier if there is
                     no matching indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update CFG profile, but we can scale IPA profile.  CFG
     will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}
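
/* Worked example for the weighted probability merge above (assumed
   numbers): if the DST copy of a block has count 30 with a successor edge
   probability of 75% and the SRC copy has count 10 with probability 35%,
   the merged edge gets 0.75 * 30/40 + 0.35 * 10/40 = 65% and the merged
   block count becomes 40; edge probabilities are averaged with the two
   block counts as weights.  */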

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
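
/* Illustrative sketch: a caller would typically test a call statement in
   the current function roughly like this (call_stmt is a hypothetical
   gcall, handle_self_recursion a hypothetical handler):

     tree callee = gimple_call_fndecl (call_stmt);
     if (callee && recursive_call_p (current_function_decl, callee))
       handle_self_recursion ();  */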