]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/ipa-utils.c
Daily bump.
[thirdparty/gcc.git] / gcc / ipa-utils.c
CommitLineData
ea900239 1/* Utilities for ipa analysis.
66647d44 2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
ea900239
DB
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9dcd6f09 9Software Foundation; either version 3, or (at your option) any later
ea900239
DB
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
9dcd6f09
NC
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
ea900239
DB
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "tree.h"
26#include "tree-flow.h"
27#include "tree-inline.h"
7ee2468b 28#include "dumpfile.h"
ea900239
DB
29#include "langhooks.h"
30#include "pointer-set.h"
ea264ca5 31#include "splay-tree.h"
ea900239
DB
32#include "ggc.h"
33#include "ipa-utils.h"
34#include "ipa-reference.h"
726a989a 35#include "gimple.h"
ea900239 36#include "cgraph.h"
ea900239 37#include "flags.h"
ea900239
DB
38#include "diagnostic.h"
39#include "langhooks.h"
40
/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
		 const char * note,
		 struct cgraph_node** order,
		 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    /* Dump each node to OUT, not the global dump_file: the header and
       the trailing fflush already use OUT, and callers expect the
       whole dump to land in the stream they passed in.  */
    dump_cgraph_node (out, order[i]);
  fprintf (out, "\n");
  fflush (out);
}
59
60\f
/* Shared state for searchc.  Bundled into a struct because searchc is
   recursive and C has no nested functions (see comment on searchc).  */
struct searchc_env {
  struct cgraph_node **stack;		/* DFS stack of nodes still in an open SCC.  */
  int stack_size;			/* Number of entries on STACK.  */
  struct cgraph_node **result;		/* Output array for the postorder.  */
  int order_pos;			/* Next free slot in RESULT.  */
  splay_tree nodes_marked_new;		/* Not-yet-visited nodes, keyed by uid.  */
  bool reduce;				/* If true, emit one entry per SCC and chain
					   SCC members via ipa_dfs_info::next_cycle.  */
  bool allow_overwritable;		/* Also walk AVAIL_OVERWRITABLE bodies.  */
  int count;				/* DFS visit counter used for dfn numbers.  */
};
71
/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho Hopcraft and Ullman's The Design and
   Analysis of Computer Programs (1975) pages 192-193.  This version
   has been customized for cgraph_nodes.  The env parameter is because
   it is recursive and there are no nested functions here.  This
   function should only be called from itself or
   ipa_reduced_postorder.  ENV is a stack env and would be
   unnecessary if C had nested functions.  V is the node to start
   searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
	 bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->symbol.aux;

  /* mark node as old */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->uid);

  /* Assign the DFS number and initialize low_link to it; push V onto
     the SCC stack.  */
  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = cgraph_function_or_thunk_node (edge->callee, &avail);

      /* Skip unresolvable callees and edges the caller asked us to ignore.  */
      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      /* Only follow callees that were seeded with dfs info (aux set) and
	 whose body availability passes the env's threshold.  */
      if (w->symbol.aux
	  && (avail > AVAIL_OVERWRITABLE
	      || (env->allow_overwritable && avail == AVAIL_OVERWRITABLE)))
	{
	  w_info = (struct ipa_dfs_info *) w->symbol.aux;
	  if (w_info->new_node)
	    {
	      /* Tree edge: recurse, then pull up W's low link.  */
	      searchc (env, w, ignore_edge);
	      v_info->low_link =
		(v_info->low_link < w_info->low_link) ?
		v_info->low_link : w_info->low_link;
	    }
	  else
	    /* Back/cross edge into the current stack: W's dfn number
	       bounds V's low link.  */
	    if ((w_info->dfn_number < v_info->dfn_number)
		&& (w_info->on_stack))
	      v_info->low_link =
		(w_info->dfn_number < v_info->low_link) ?
		w_info->dfn_number : v_info->low_link;
	}
    }


  /* V is the root of an SCC exactly when its low link equals its own
     dfn number; pop the whole component off the stack.  */
  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
	x = env->stack[--(env->stack_size)];
	x_info = (struct ipa_dfs_info *) x->symbol.aux;
	x_info->on_stack = false;
	x_info->scc_no = v_info->dfn_number;

	if (env->reduce)
	  {
	    /* Reduced mode: chain the members of the SCC together and
	       emit only the representative (V) below.  */
	    x_info->next_cycle = last;
	    last = x;
	  }
	else
	  env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
	env->result[env->order_pos++] = v;
    }
}
154
/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
   IGNORE_EDGE, if non-NULL is a hook that may make some edges insignificant
   for the topological sort.   */

int
ipa_reduced_postorder (struct cgraph_node **order,
		       bool reduce, bool allow_overwritable,
		       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;
  env.allow_overwritable = allow_overwritable;

  /* Seed every eligible defined function with a fresh (or recycled)
     ipa_dfs_info hung off its aux pointer, and register it as
     "new" (unvisited) in the splay tree, keyed by uid.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = cgraph_function_body_availability (node);

      if (avail > AVAIL_OVERWRITABLE
	  || (allow_overwritable
	      && (avail == AVAIL_OVERWRITABLE)))
	{
	  /* Reuse the info if it is already there.  */
	  struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->symbol.aux;
	  if (!info)
	    info = XCNEW (struct ipa_dfs_info);
	  info->new_node = true;
	  info->on_stack = false;
	  info->next_cycle = NULL;
	  node->symbol.aux = info;

	  splay_tree_insert (env.nodes_marked_new,
			     (splay_tree_key)node->uid,
			     (splay_tree_value)node);
	}
      else
	/* Ineligible nodes get a NULL aux so searchc skips them.  */
	node->symbol.aux = NULL;
    }
  /* Repeatedly start a DFS from the smallest-uid unvisited node;
     searchc removes each node from the tree as it is reached.  */
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  /* NOTE: the ipa_dfs_info structures remain attached to the nodes'
     aux pointers; the caller must eventually run
     ipa_free_postorder_info to release them.  */
  return env.order_pos;
}
218
af8bca3c
MJ
219/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
220 graph nodes. */
221
222void
223ipa_free_postorder_info (void)
224{
225 struct cgraph_node *node;
65c70e6b 226 FOR_EACH_DEFINED_FUNCTION (node)
af8bca3c
MJ
227 {
228 /* Get rid of the aux information. */
960bfb69 229 if (node->symbol.aux)
af8bca3c 230 {
960bfb69
JH
231 free (node->symbol.aux);
232 node->symbol.aux = NULL;
af8bca3c
MJ
233 }
234 }
235}
236
df92c640
SB
237/* Get the set of nodes for the cycle in the reduced call graph starting
238 from NODE. */
239
240VEC (cgraph_node_p, heap) *
241ipa_get_nodes_in_cycle (struct cgraph_node *node)
242{
243 VEC (cgraph_node_p, heap) *v = NULL;
244 struct ipa_dfs_info *node_dfs_info;
245 while (node)
246 {
247 VEC_safe_push (cgraph_node_p, heap, v, node);
248 node_dfs_info = (struct ipa_dfs_info *) node->symbol.aux;
249 node = node_dfs_info->next_cycle;
250 }
251 return v;
252}
253
8775a18b
JH
/* One frame of the explicit DFS stack used by ipa_reverse_postorder.  */
struct postorder_stack
{
  struct cgraph_node *node;	/* Node whose callers/refs are being walked.  */
  struct cgraph_edge *edge;	/* Next caller edge to examine.  */
  int ref;			/* Next referring ipa_ref index to examine.  */
};
260
af8bca3c 261/* Fill array order with all nodes with output flag set in the reverse
39e2db00
JH
262 topological order. Return the number of elements in the array.
263 FIXME: While walking, consider aliases, too. */
af8bca3c
MJ
264
265int
266ipa_reverse_postorder (struct cgraph_node **order)
267{
268 struct cgraph_node *node, *node2;
269 int stack_size = 0;
270 int order_pos = 0;
8775a18b 271 struct cgraph_edge *edge;
af8bca3c 272 int pass;
8775a18b 273 struct ipa_ref *ref;
af8bca3c 274
8775a18b
JH
275 struct postorder_stack *stack =
276 XCNEWVEC (struct postorder_stack, cgraph_n_nodes);
af8bca3c
MJ
277
278 /* We have to deal with cycles nicely, so use a depth first traversal
279 output algorithm. Ignore the fact that some functions won't need
280 to be output and put them into order as well, so we get dependencies
281 right through inline functions. */
65c70e6b 282 FOR_EACH_FUNCTION (node)
960bfb69 283 node->symbol.aux = NULL;
af8bca3c 284 for (pass = 0; pass < 2; pass++)
65c70e6b 285 FOR_EACH_FUNCTION (node)
960bfb69 286 if (!node->symbol.aux
af8bca3c 287 && (pass
960bfb69 288 || (!node->symbol.address_taken
af8bca3c 289 && !node->global.inlined_to
8775a18b
JH
290 && !node->alias && !node->thunk.thunk_p
291 && !cgraph_only_called_directly_p (node))))
af8bca3c 292 {
8775a18b
JH
293 stack_size = 0;
294 stack[stack_size].node = node;
295 stack[stack_size].edge = node->callers;
296 stack[stack_size].ref = 0;
960bfb69 297 node->symbol.aux = (void *)(size_t)1;
8775a18b 298 while (stack_size >= 0)
af8bca3c 299 {
8775a18b 300 while (true)
af8bca3c 301 {
8775a18b
JH
302 node2 = NULL;
303 while (stack[stack_size].edge && !node2)
af8bca3c 304 {
8775a18b 305 edge = stack[stack_size].edge;
af8bca3c 306 node2 = edge->caller;
8775a18b
JH
307 stack[stack_size].edge = edge->next_caller;
308 /* Break possible cycles involving always-inline
309 functions by ignoring edges from always-inline
310 functions to non-always-inline functions. */
960bfb69 311 if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->symbol.decl)
8775a18b 312 && !DECL_DISREGARD_INLINE_LIMITS
960bfb69 313 (cgraph_function_node (edge->callee, NULL)->symbol.decl))
8775a18b
JH
314 node2 = NULL;
315 }
5932a4d4 316 for (;ipa_ref_list_referring_iterate (&stack[stack_size].node->symbol.ref_list,
8775a18b
JH
317 stack[stack_size].ref,
318 ref) && !node2;
319 stack[stack_size].ref++)
320 {
321 if (ref->use == IPA_REF_ALIAS)
5932a4d4 322 node2 = ipa_ref_referring_node (ref);
8775a18b
JH
323 }
324 if (!node2)
325 break;
960bfb69 326 if (!node2->symbol.aux)
8775a18b
JH
327 {
328 stack[++stack_size].node = node2;
329 stack[stack_size].edge = node2->callers;
330 stack[stack_size].ref = 0;
960bfb69 331 node2->symbol.aux = (void *)(size_t)1;
af8bca3c
MJ
332 }
333 }
8775a18b 334 order[order_pos++] = stack[stack_size--].node;
af8bca3c
MJ
335 }
336 }
337 free (stack);
65c70e6b 338 FOR_EACH_FUNCTION (node)
960bfb69 339 node->symbol.aux = NULL;
af8bca3c
MJ
340 return order_pos;
341}
342
343
ea900239
DB
344
345/* Given a memory reference T, will return the variable at the bottom
073a8998 346 of the access. Unlike get_base_address, this will recurse through
ea900239
DB
347 INDIRECT_REFS. */
348
349tree
350get_base_var (tree t)
351{
b8698a0f 352 while (!SSA_VAR_P (t)
ea900239
DB
353 && (!CONSTANT_CLASS_P (t))
354 && TREE_CODE (t) != LABEL_DECL
355 && TREE_CODE (t) != FUNCTION_DECL
3baf459d
DN
356 && TREE_CODE (t) != CONST_DECL
357 && TREE_CODE (t) != CONSTRUCTOR)
ea900239
DB
358 {
359 t = TREE_OPERAND (t, 0);
360 }
361 return t;
b8698a0f 362}
ea900239 363
1cb1a99f
JH
364
365/* Create a new cgraph node set. */
366
367cgraph_node_set
368cgraph_node_set_new (void)
369{
370 cgraph_node_set new_node_set;
371
372 new_node_set = XCNEW (struct cgraph_node_set_def);
373 new_node_set->map = pointer_map_create ();
374 new_node_set->nodes = NULL;
375 return new_node_set;
376}
377
378
379/* Add cgraph_node NODE to cgraph_node_set SET. */
380
381void
382cgraph_node_set_add (cgraph_node_set set, struct cgraph_node *node)
383{
384 void **slot;
385
386 slot = pointer_map_insert (set->map, node);
387
388 if (*slot)
389 {
390 int index = (size_t) *slot - 1;
391 gcc_checking_assert ((VEC_index (cgraph_node_ptr, set->nodes, index)
392 == node));
393 return;
394 }
395
396 *slot = (void *)(size_t) (VEC_length (cgraph_node_ptr, set->nodes) + 1);
397
398 /* Insert into node vector. */
399 VEC_safe_push (cgraph_node_ptr, heap, set->nodes, node);
400}
401
402
/* Remove cgraph_node NODE from cgraph_node_set SET.  No-op if NODE is
   not in SET.  */

void
cgraph_node_set_remove (cgraph_node_set set, struct cgraph_node *node)
{
  void **slot, **last_slot;
  int index;
  struct cgraph_node *last_node;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    return;

  /* The map stores 1-based vector positions so that 0 means "absent".  */
  index = (size_t) *slot - 1;
  gcc_checking_assert (VEC_index (cgraph_node_ptr, set->nodes, index)
		       == node);

  /* Remove from vector. We do this by swapping node with the last element
     of the vector.  */
  last_node = VEC_pop (cgraph_node_ptr, set->nodes);
  if (last_node != node)
    {
      /* Re-point the moved element's map entry at its new position.  */
      last_slot = pointer_map_contains (set->map, last_node);
      gcc_checking_assert (last_slot && *last_slot);
      *last_slot = (void *)(size_t) (index + 1);

      /* Move the last element to the original spot of NODE.  */
      VEC_replace (cgraph_node_ptr, set->nodes, index, last_node);
    }

  /* Remove element from hash table.  */
  *slot = NULL;
}
436
437
438/* Find NODE in SET and return an iterator to it if found. A null iterator
439 is returned if NODE is not in SET. */
440
441cgraph_node_set_iterator
442cgraph_node_set_find (cgraph_node_set set, struct cgraph_node *node)
443{
444 void **slot;
445 cgraph_node_set_iterator csi;
446
447 slot = pointer_map_contains (set->map, node);
448 if (slot == NULL || !*slot)
449 csi.index = (unsigned) ~0;
450 else
451 csi.index = (size_t)*slot - 1;
452 csi.set = set;
453
454 return csi;
455}
456
457
458/* Dump content of SET to file F. */
459
460void
461dump_cgraph_node_set (FILE *f, cgraph_node_set set)
462{
463 cgraph_node_set_iterator iter;
464
465 for (iter = csi_start (set); !csi_end_p (iter); csi_next (&iter))
466 {
467 struct cgraph_node *node = csi_node (iter);
468 fprintf (f, " %s/%i", cgraph_node_name (node), node->uid);
469 }
470 fprintf (f, "\n");
471}
472
473
474/* Dump content of SET to stderr. */
475
476DEBUG_FUNCTION void
477debug_cgraph_node_set (cgraph_node_set set)
478{
479 dump_cgraph_node_set (stderr, set);
480}
481
482
/* Free cgraph node set SET.  (The original comment said "varpool",
   a copy-paste slip from the varpool variant below.)  */

void
free_cgraph_node_set (cgraph_node_set set)
{
  VEC_free (cgraph_node_ptr, heap, set->nodes);
  pointer_map_destroy (set->map);
  free (set);
}
492
493
494/* Create a new varpool node set. */
495
496varpool_node_set
497varpool_node_set_new (void)
498{
499 varpool_node_set new_node_set;
500
501 new_node_set = XCNEW (struct varpool_node_set_def);
502 new_node_set->map = pointer_map_create ();
503 new_node_set->nodes = NULL;
504 return new_node_set;
505}
506
507
508/* Add varpool_node NODE to varpool_node_set SET. */
509
510void
511varpool_node_set_add (varpool_node_set set, struct varpool_node *node)
512{
513 void **slot;
514
515 slot = pointer_map_insert (set->map, node);
516
517 if (*slot)
518 {
519 int index = (size_t) *slot - 1;
520 gcc_checking_assert ((VEC_index (varpool_node_ptr, set->nodes, index)
521 == node));
522 return;
523 }
524
525 *slot = (void *)(size_t) (VEC_length (varpool_node_ptr, set->nodes) + 1);
526
527 /* Insert into node vector. */
528 VEC_safe_push (varpool_node_ptr, heap, set->nodes, node);
529}
530
531
/* Remove varpool_node NODE from varpool_node_set SET.  No-op if NODE
   is not in SET.  */

void
varpool_node_set_remove (varpool_node_set set, struct varpool_node *node)
{
  void **slot, **last_slot;
  int index;
  struct varpool_node *last_node;

  slot = pointer_map_contains (set->map, node);
  if (slot == NULL || !*slot)
    return;

  /* The map stores 1-based vector positions so that 0 means "absent".  */
  index = (size_t) *slot - 1;
  gcc_checking_assert (VEC_index (varpool_node_ptr, set->nodes, index)
		       == node);

  /* Remove from vector. We do this by swapping node with the last element
     of the vector.  */
  last_node = VEC_pop (varpool_node_ptr, set->nodes);
  if (last_node != node)
    {
      /* Re-point the moved element's map entry at its new position.  */
      last_slot = pointer_map_contains (set->map, last_node);
      gcc_checking_assert (last_slot && *last_slot);
      *last_slot = (void *)(size_t) (index + 1);

      /* Move the last element to the original spot of NODE.  */
      VEC_replace (varpool_node_ptr, set->nodes, index, last_node);
    }

  /* Remove element from hash table.  */
  *slot = NULL;
}
565
566
567/* Find NODE in SET and return an iterator to it if found. A null iterator
568 is returned if NODE is not in SET. */
569
570varpool_node_set_iterator
571varpool_node_set_find (varpool_node_set set, struct varpool_node *node)
572{
573 void **slot;
574 varpool_node_set_iterator vsi;
575
576 slot = pointer_map_contains (set->map, node);
577 if (slot == NULL || !*slot)
578 vsi.index = (unsigned) ~0;
579 else
580 vsi.index = (size_t)*slot - 1;
581 vsi.set = set;
582
583 return vsi;
584}
585
586
587/* Dump content of SET to file F. */
588
589void
590dump_varpool_node_set (FILE *f, varpool_node_set set)
591{
592 varpool_node_set_iterator iter;
593
594 for (iter = vsi_start (set); !vsi_end_p (iter); vsi_next (&iter))
595 {
596 struct varpool_node *node = vsi_node (iter);
597 fprintf (f, " %s", varpool_node_name (node));
598 }
599 fprintf (f, "\n");
600}
601
602
603/* Free varpool node set. */
604
605void
606free_varpool_node_set (varpool_node_set set)
607{
608 VEC_free (varpool_node_ptr, heap, set->nodes);
609 pointer_map_destroy (set->map);
610 free (set);
611}
612
613
614/* Dump content of SET to stderr. */
615
616DEBUG_FUNCTION void
617debug_varpool_node_set (varpool_node_set set)
618{
619 dump_varpool_node_set (stderr, set);
620}