/* Utilities for ipa analysis.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "gimple.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "langhooks.h"
#include "pointer-set.h"
#include "splay-tree.h"
#include "ggc.h"
#include "ipa-utils.h"
#include "ipa-reference.h"
#include "flags.h"
#include "diagnostic.h"
#include "lto-streamer.h"
#include "ipa-inline.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    dump_cgraph_node (out, order[i]);
  fprintf (out, "\n");
  fflush (out);
}

struct searchc_env {
  struct cgraph_node **stack;
  int stack_size;
  struct cgraph_node **result;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  bool allow_overwritable;
  int count;
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  This function should only be
   called from itself or from ipa_reduced_postorder.  ENV is a stack env
   and would be unnecessary if C had nested functions.  V is the node to
   start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->uid);

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = cgraph_function_or_thunk_node (edge->callee, &avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail > AVAIL_OVERWRITABLE
              || (env->allow_overwritable && avail == AVAIL_OVERWRITABLE)))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else
            if ((w_info->dfn_number < v_info->dfn_number)
                && (w_info->on_stack))
              v_info->low_link =
                (w_info->dfn_number < v_info->low_link) ?
                w_info->dfn_number : v_info->low_link;
        }
    }

  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
        x = env->stack[--(env->stack_size)];
        x_info = (struct ipa_dfs_info *) x->aux;
        x_info->on_stack = false;
        x_info->scc_no = v_info->dfn_number;

        if (env->reduce)
          {
            x_info->next_cycle = last;
            last = x;
          }
        else
          env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce, bool allow_overwritable,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;
  env.allow_overwritable = allow_overwritable;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = cgraph_function_body_availability (node);

      if (avail > AVAIL_OVERWRITABLE
          || (allow_overwritable
              && (avail == AVAIL_OVERWRITABLE)))
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->uid,
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}
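
/* A minimal usage sketch, not part of this file: a pass that wants to
   walk the reduced SCCs allocates the order array itself, runs the
   reduced postorder, and releases the aux info afterwards.  process_scc
   is a hypothetical placeholder for the pass's own per-SCC work:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
     int n = ipa_reduced_postorder (order, true, false, NULL);
     int i;
     for (i = 0; i < n; i++)
       process_scc (order[i]);
     ipa_free_postorder_info ();
     free (order);  */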

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node_ptr>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node_ptr> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
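
/* A sketch of consuming the cycle vector, assuming ORDER was produced
   by ipa_reduced_postorder with REDUCE set; process_node is a
   hypothetical callback.  The caller owns the vector and must release
   it:

     vec<cgraph_node_ptr> cycle = ipa_get_nodes_in_cycle (order[i]);
     unsigned j;
     struct cgraph_node *n;
     FOR_EACH_VEC_ELT (cycle, j, n)
       process_node (n);
     cycle.release ();  */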

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cgraph_function_node (cs->callee, NULL);

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, cgraph_n_nodes);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !cgraph_only_called_directly_p (node))))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                              (cgraph_function_node (edge->callee, NULL)->decl))
                        node2 = NULL;
                    }
                  for (; ipa_ref_list_referring_iterate (&stack[stack_size].node->ref_list,
                                                         stack[stack_size].ref,
                                                         ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = ipa_ref_referring_node (ref);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
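
/* Illustrative usage, a sketch under the same conventions as the
   postorder example above; do_something_with is hypothetical.  The
   caller supplies and frees the array:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
     int n = ipa_reverse_postorder (order);
     int i;
     for (i = 0; i < n; i++)
       do_something_with (order[i]);
     free (order);  */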


/* Given a memory reference T, return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  For example, for the reference a.b[i] this returns a,
   and for *p it returns p.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}


/* Create a new cgraph node set.  */

cgraph_node_set
cgraph_node_set_new (void)
{
  cgraph_node_set new_node_set;

  new_node_set = XCNEW (struct cgraph_node_set_def);
  new_node_set->map = pointer_map_create ();
  new_node_set->nodes.create (0);
  return new_node_set;
}


/* Add cgraph_node NODE to cgraph_node_set SET.  */

void
cgraph_node_set_add (cgraph_node_set set, struct cgraph_node *node)
{
  void **slot;

  slot = pointer_map_insert (set->map, node);

  if (*slot)
    {
      int index = (size_t) *slot - 1;
      gcc_checking_assert ((set->nodes[index]
                            == node));
      return;
    }

  *slot = (void *)(size_t) (set->nodes.length () + 1);

  /* Insert into node vector.  */
  set->nodes.safe_push (node);
}


/* Remove cgraph_node NODE from cgraph_node_set SET.  */

void
cgraph_node_set_remove (cgraph_node_set set, struct cgraph_node *node)
{
  void **slot, **last_slot;
  int index;
  struct cgraph_node *last_node;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    return;

  index = (size_t) *slot - 1;
  gcc_checking_assert (set->nodes[index]
                       == node);

  /* Remove from vector.  We do this by swapping node with the last element
     of the vector.  */
  last_node = set->nodes.pop ();
  if (last_node != node)
    {
      last_slot = pointer_map_contains (set->map, last_node);
      gcc_checking_assert (last_slot && *last_slot);
      *last_slot = (void *)(size_t) (index + 1);

      /* Move the last element to the original spot of NODE.  */
      set->nodes[index] = last_node;
    }

  /* Remove element from hash table.  */
  *slot = NULL;
}


/* Find NODE in SET and return an iterator to it if found.  A null iterator
   is returned if NODE is not in SET.  */

cgraph_node_set_iterator
cgraph_node_set_find (cgraph_node_set set, struct cgraph_node *node)
{
  void **slot;
  cgraph_node_set_iterator csi;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    csi.index = (unsigned) ~0;
  else
    csi.index = (size_t)*slot - 1;
  csi.set = set;

  return csi;
}


/* Dump content of SET to file F.  */

void
dump_cgraph_node_set (FILE *f, cgraph_node_set set)
{
  cgraph_node_set_iterator iter;

  for (iter = csi_start (set); !csi_end_p (iter); csi_next (&iter))
    {
      struct cgraph_node *node = csi_node (iter);
      fprintf (f, " %s/%i", node->name (), node->order);
    }
  fprintf (f, "\n");
}


/* Dump content of SET to stderr.  */

DEBUG_FUNCTION void
debug_cgraph_node_set (cgraph_node_set set)
{
  dump_cgraph_node_set (stderr, set);
}


/* Free cgraph node set.  */

void
free_cgraph_node_set (cgraph_node_set set)
{
  set->nodes.release ();
  pointer_map_destroy (set->map);
  free (set);
}
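
/* A minimal sketch of the node-set API; NODE and its origin are
   hypothetical:

     cgraph_node_set set = cgraph_node_set_new ();
     cgraph_node_set_add (set, node);
     if (!csi_end_p (cgraph_node_set_find (set, node)))
       dump_cgraph_node_set (stderr, set);
     cgraph_node_set_remove (set, node);
     free_cgraph_node_set (set);  */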


/* Create a new varpool node set.  */

varpool_node_set
varpool_node_set_new (void)
{
  varpool_node_set new_node_set;

  new_node_set = XCNEW (struct varpool_node_set_def);
  new_node_set->map = pointer_map_create ();
  new_node_set->nodes.create (0);
  return new_node_set;
}


/* Add varpool_node NODE to varpool_node_set SET.  */

void
varpool_node_set_add (varpool_node_set set, struct varpool_node *node)
{
  void **slot;

  slot = pointer_map_insert (set->map, node);

  if (*slot)
    {
      int index = (size_t) *slot - 1;
      gcc_checking_assert ((set->nodes[index]
                            == node));
      return;
    }

  *slot = (void *)(size_t) (set->nodes.length () + 1);

  /* Insert into node vector.  */
  set->nodes.safe_push (node);
}


/* Remove varpool_node NODE from varpool_node_set SET.  */

void
varpool_node_set_remove (varpool_node_set set, struct varpool_node *node)
{
  void **slot, **last_slot;
  int index;
  struct varpool_node *last_node;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    return;

  index = (size_t) *slot - 1;
  gcc_checking_assert (set->nodes[index]
                       == node);

  /* Remove from vector.  We do this by swapping node with the last element
     of the vector.  */
  last_node = set->nodes.pop ();
  if (last_node != node)
    {
      last_slot = pointer_map_contains (set->map, last_node);
      gcc_checking_assert (last_slot && *last_slot);
      *last_slot = (void *)(size_t) (index + 1);

      /* Move the last element to the original spot of NODE.  */
      set->nodes[index] = last_node;
    }

  /* Remove element from hash table.  */
  *slot = NULL;
}


/* Find NODE in SET and return an iterator to it if found.  A null iterator
   is returned if NODE is not in SET.  */

varpool_node_set_iterator
varpool_node_set_find (varpool_node_set set, struct varpool_node *node)
{
  void **slot;
  varpool_node_set_iterator vsi;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    vsi.index = (unsigned) ~0;
  else
    vsi.index = (size_t)*slot - 1;
  vsi.set = set;

  return vsi;
}


/* Dump content of SET to file F.  */

void
dump_varpool_node_set (FILE *f, varpool_node_set set)
{
  varpool_node_set_iterator iter;

  for (iter = vsi_start (set); !vsi_end_p (iter); vsi_next (&iter))
    {
      struct varpool_node *node = vsi_node (iter);
      fprintf (f, " %s", node->name ());
    }
  fprintf (f, "\n");
}


/* Free varpool node set.  */

void
free_varpool_node_set (varpool_node_set set)
{
  set->nodes.release ();
  pointer_map_destroy (set->map);
  free (set);
}


/* Dump content of SET to stderr.  */

DEBUG_FUNCTION void
debug_varpool_node_set (varpool_node_set set)
{
  dump_varpool_node_set (stderr, set);
}


/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Destroy SRC's body on the way.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;
  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;
  if (!dst->count)
    return;
  if (cgraph_dump_file)
    {
      fprintf (cgraph_dump_file, "Merging profiles of %s/%i to %s/%i\n",
               xstrdup (src->name ()), src->order,
               xstrdup (dst->name ()), dst->order);
    }
  dst->count += src->count;

  /* This is ugly.  We need to get both function bodies into memory.
     If declaration is merged, we need to duplicate it to be able
     to load body that is being replaced.  This makes symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      void **slot;
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl, so we want to remove its file decl
         data and link these with the new decl.  */
      temp.fn_decl = src->decl;
      slot = htab_find_slot (src->lto_file_data->function_decl_states,
                             &temp, NO_INSERT);
      state = (lto_in_decl_state *)*slot;
      htab_clear_slot (src->lto_file_data->function_decl_states, slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with new declaration, so LTO streamer
         can look it up.  */
      state->fn_decl = src->decl;
      slot = htab_find_slot (src->lto_file_data->function_decl_states,
                             state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  cgraph_get_body (src);
  cgraph_get_body (dst);
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_function (srccfun)
      != n_basic_blocks_for_function (dstcfun))
    {
      if (cgraph_dump_file)
        fprintf (cgraph_dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_function (srccfun)
           != last_basic_block_for_function (dstcfun))
    {
      if (cgraph_dump_file)
        fprintf (cgraph_dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FUNCTION (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (cgraph_dump_file)
                fprintf (cgraph_dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (cgraph_dump_file)
                fprintf (cgraph_dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (cgraph_dump_file)
                    fprintf (cgraph_dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FUNCTION (dstcfun, srcbb->index);
          dstbb->count += srcbb->count;
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              dste->count += srce->count;
            }
        }
      push_cfun (dstcfun);
      counts_to_freqs ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          gcc_assert (!e->speculative);
          e->count = gimple_bb (e->call_stmt)->count;
          e->frequency = compute_call_stmt_bb_frequency
                           (dst->decl, gimple_bb (e->call_stmt));
        }
      for (e = dst->indirect_calls; e; e = e->next_callee)
        {
          gcc_assert (!e->speculative);
          e->count = gimple_bb (e->call_stmt)->count;
          e->frequency = compute_call_stmt_bb_frequency
                           (dst->decl, gimple_bb (e->call_stmt));
        }
      cgraph_release_function_body (src);
      inline_update_overall_summary (dst);
    }
  /* TODO: if there is no match, we can scale up.  */
  src->decl = oldsrcdecl;
}
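
/* Illustrative call site (hypothetical names): when two definitions of
   one function meet, for example during LTO symbol merging, the profile
   of the body about to be discarded is folded into the survivor first:

     ipa_merge_profiles (kept_node, discarded_node);  */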

/* Return true if call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_get_create_node (dest);
  struct cgraph_node *cnode = cgraph_get_create_node (func);

  return symtab_semantically_equivalent_p (dest_node,
                                           cnode);
}
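
/* A minimal sketch of the intended query; STMT and the surrounding walk
   are hypothetical.  While scanning the body of the current function, a
   pass can test a call statement for self-recursion like so:

     tree callee = gimple_call_fndecl (stmt);
     if (callee
         && recursive_call_p (current_function_decl, callee))
       ...  */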