/* Utilities for ipa analysis.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}

struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  bool allow_overwritable;
  int count;
};

/* This is an implementation of Tarjan's strongly connected component
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1975), pages 192-193.  This version
   has been customized for cgraph_nodes.  The env parameter is because
   it is recursive and there are no nested functions here.  This
   function should only be called from itself or
   ipa_reduced_postorder.  ENV is a stack env and would be
   unnecessary if C had nested functions.  V is the node to start
   searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail > AVAIL_INTERPOSABLE
              || (env->allow_overwritable && avail == AVAIL_INTERPOSABLE)))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }

  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce, bool allow_overwritable,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;
  env.allow_overwritable = allow_overwritable;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail > AVAIL_INTERPOSABLE
          || (allow_overwritable
              && (avail == AVAIL_INTERPOSABLE)))
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
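
/* A minimal usage sketch (hypothetical caller, not part of this file):
   the usual way an IPA pass drives the reduced postorder is to allocate
   an ORDER array sized by the number of cgraph nodes, walk the reduced
   SCCs, and then release both the aux info and the array.  */

static void
walk_reduced_sccs (void)
{
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *,
                                         symtab->cgraph_count);
  int n = ipa_reduced_postorder (order, true, false, NULL);

  for (int i = 0; i < n; i++)
    {
      /* order[i] stands for one strongly connected component; its
         members are chained through the aux info and can be obtained
         with ipa_get_nodes_in_cycle (order[i]).  */
    }

  ipa_free_postorder_info ();
  free (order);
}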

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
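
/* A minimal sketch (hypothetical helper, not part of this file): iterate
   over all real call graph nodes that were collapsed into one reduced
   node by ipa_reduced_postorder with REDUCE set, then release the
   vector.  */

static void
dump_cycle_members (FILE *out, struct cgraph_node *reduced_node)
{
  vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (reduced_node);
  unsigned int i;
  cgraph_node *member;

  FOR_EACH_VEC_ELT (cycle, i, member)
    member->dump (out);
  cycle.release ();
}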

/* Return true iff CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                             stack[stack_size].ref, ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
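
/* A minimal usage sketch (hypothetical caller, not part of this file):
   a pass that wants to visit every function in the order produced by
   ipa_reverse_postorder allocates the array the same way as for
   ipa_reduced_postorder; no aux cleanup beyond freeing the array is
   needed, because the function clears the aux pointers itself.  */

static void
visit_in_reverse_postorder (void)
{
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *,
                                         symtab->cgraph_count);
  int n = ipa_reverse_postorder (order);

  for (int i = 0; i < n; i++)
    {
      /* Process order[i] here.  */
    }
  free (order);
}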

/* Given a memory reference T, return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
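
/* A minimal illustrative sketch (hypothetical helper, not part of this
   file): for a reference tree such as the one built for "s.a[i].b",
   get_base_var keeps taking operand 0 until it reaches the VAR_DECL for
   "s", so a simple "is this access rooted in DECL" test can be written
   as below.  */

static bool
ref_based_on_decl_p (tree ref, tree decl)
{
  return get_base_var (ref) == decl;
}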

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to set the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl data
         and link these with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into the body
         of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
         streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          /* Either sum the profiles if both are IPA and not global0, or
             pick the more informative one (that is, a nonzero IPA profile
             if the other is uninitialized, guessed or global0).  */
          if (!dstbb->count.ipa ().initialized_p ()
              || (dstbb->count.ipa () == profile_count::zero ()
                  && (srcbb->count.ipa ().initialized_p ()
                      && !(srcbb->count.ipa () == profile_count::zero ()))))
            {
              dstbb->count = srcbb->count;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else if (srcbb->count.ipa ().initialized_p ()
                   && !(srcbb->count.ipa () == profile_count::zero ()))
            {
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  dste->probability =
                    dste->probability
                      * dstbb->count.probability_in (dstbb->count
                                                     + srcbb->count)
                    + srce->probability
                      * srcbb->count.probability_in (dstbb->count
                                                     + srcbb->count);
                }
              dstbb->count += srcbb->count;
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but probably it is not worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should have mismatched earlier if there is no
                     matching indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update the summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update the CFG profile, but we can scale the IPA profile.
     The CFG will be scaled according to dst->count after IPA passes.  */
  else
    {
      profile_count to = dst->count;
      profile_count::adjust_for_ipa_scaling (&to, &orig_count);
      struct cgraph_edge *e;

      for (e = dst->callees; e; e = e->next_callee)
        e->count = e->count.apply_scale (to, orig_count);
      for (e = dst->indirect_calls; e; e = e->next_callee)
        e->count = e->count.apply_scale (to, orig_count);
    }
  src->decl = oldsrcdecl;
}
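
/* A minimal usage sketch (hypothetical caller, not part of this file):
   a pass that decides DUPLICATE can be represented by KEPT folds the
   duplicate's profile into the kept node before dropping the duplicate,
   letting ipa_merge_profiles release the duplicate body.  */

static void
fold_profile_into_kept_copy (struct cgraph_node *kept,
                             struct cgraph_node *duplicate)
{
  ipa_merge_profiles (kept, duplicate, false);
}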

/* Return true if the call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function, or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
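
/* A minimal usage sketch (hypothetical helper, not part of this file):
   given a GIMPLE call statement in the function currently being
   compiled, test whether it is a self-recursive call.  */

static bool
stmt_is_self_recursive_call_p (gimple *stmt)
{
  tree callee = gimple_call_fndecl (stmt);
  return callee && recursive_call_p (current_function_decl, callee);
}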