/* Utilities for ipa analysis.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}


struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  bool allow_overwritable;
  int count;
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1975) pages 192-193.  This version
   has been customized for cgraph_nodes.  The env parameter is because
   it is recursive and there are no nested functions here.  This
   function should only be called from itself or
   ipa_reduced_postorder.  ENV is a stack env and would be
   unnecessary if C had nested functions.  V is the node to start
   searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* mark node as old */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->uid);

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail > AVAIL_INTERPOSABLE
              || (env->allow_overwritable && avail == AVAIL_INTERPOSABLE)))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }


  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce, bool allow_overwritable,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;
  env.allow_overwritable = allow_overwritable;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail > AVAIL_INTERPOSABLE
          || (allow_overwritable
              && (avail == AVAIL_INTERPOSABLE)))
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->uid,
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}

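/* A minimal usage sketch: a caller would typically allocate ORDER with one
   slot per call graph node, run the reduced postorder, walk the result and
   then drop the aux data, e.g.

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, false, NULL);
     for (int i = 0; i < n; i++)
       ... process order[i], expanding cycles via ipa_get_nodes_in_cycle ...
     ipa_free_postorder_info ();
     free (order);  */
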
/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}

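/* A minimal usage sketch: when ipa_reduced_postorder was run with REDUCE
   set, each entry of the resulting order stands for a whole strongly
   connected component; its members can be visited roughly like this:

     unsigned j;
     cgraph_node *member;
     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (order[i]);
     FOR_EACH_VEC_ELT (cycle, j, member)
       ... process member ...
     cycle.release ();  */
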
/* Return true iff CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                                                   stack[stack_size].ref,
                                                   ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}

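/* A minimal usage sketch: a caller typically allocates one slot per call
   graph node and then walks the returned prefix:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reverse_postorder (order);
     for (int i = 0; i < n; i++)
       ... visit order[i] ...
     free (order);  */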


/* Given a memory reference T, return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}

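/* An illustrative note: for a dereference like *p_1 (a MEM_REF whose
   operand 0 is the SSA pointer), get_base_address stops at the MEM_REF
   itself, while get_base_var keeps taking operand 0 and hands back the
   SSA name or declaration underneath.  A hypothetical caller:

     tree base = get_base_var (ref);
     if (DECL_P (base) || TREE_CODE (base) == SSA_NAME)
       ... analyze the underlying variable or pointer ...  */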

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;
  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* FIXME when we merge in unknown profile, we ought to set counts as
     unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* This is ugly.  We need to get both function bodies into memory.
     If declaration is merged, we need to duplicate it to be able
     to load body that is being replaced.  This makes symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl, so we want to remove its file decl
         data and link these with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with new declaration, so LTO streamer
         can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          /* Either sum the profiles if both are IPA and not global0, or
             pick the more informative one (that is, nonzero IPA if the
             other is uninitialized, guessed or global0).  */
          if (!dstbb->count.ipa ().initialized_p ()
              || (dstbb->count.ipa () == profile_count::zero ()
                  && (srcbb->count.ipa ().initialized_p ()
                      && !(srcbb->count.ipa () == profile_count::zero ()))))
            {
              dstbb->count = srcbb->count;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else if (srcbb->count.ipa ().initialized_p ()
                   && !(srcbb->count.ipa () == profile_count::zero ()))
            {
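              /* Both blocks carry real IPA counts: form the merged successor
                 probabilities as the count-weighted average of the two
                 distributions, then sum the block counts below.  */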
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  dste->probability =
                    dste->probability
                      * dstbb->count.probability_in (dstbb->count
                                                     + srcbb->count)
                    + srce->probability
                      * srcbb->count.probability_in (dstbb->count
                                                     + srcbb->count);
                }
              dstbb->count += srcbb->count;
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but probably it is not really worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should mismatch earlier if there is no matching
                     indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      ipa_update_overall_fn_summary (dst);
    }
  /* TODO: if there is no match, we can scale up.  */
  src->decl = oldsrcdecl;
}

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we still can consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
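
/* A minimal usage sketch (hypothetical caller): while scanning a call
   statement STMT in the body of FUNC, a pass might check

     tree callee = gimple_call_fndecl (stmt);
     if (callee && recursive_call_p (func, callee))
       ... treat the call as self-recursion ...

   gimple_call_fndecl returns NULL_TREE for indirect calls, hence the
   NULL check before asking about recursion.  */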