/* Function summary pass.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Analysis of function bodies used by inter-procedural passes

   We estimate for each function
   - function body size and size after specializing into given context
   - average function execution time in a given context
   - function frame size
   For each call
   - call statement size, time and how often the parameters change

   ipa_fn_summary data structures store above information locally (i.e.
   parameters of the function itself) and globally (i.e. parameters of
   the function created by applying all the inline decisions already
   present in the callgraph).

   We provide access to the ipa_fn_summary data structure and
   basic logic updating the parameters when inlining is performed.

   The summaries are context sensitive.  Context means
     1) partial assignment of known constant values of operands
     2) whether function is inlined into the call or not.
   It is easy to add more variants.  To represent function size and time
   that depends on context (i.e. it is known to be optimized away when
   context is known either by inlining or from IP-CP and cloning),
   we use predicates.

   estimate_edge_size_and_time can be used to query function size/time in
   the given context.  ipa_merge_fn_summary_after_inlining merges
   properties of caller and callee after inlining.

   Finally pass_inline_parameters is exported.  This is used to drive
   computation of function parameters used by the early inliner.  The
   IPA inliner performs analysis via its analyze_function method.  */

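/* Illustrative sketch (hypothetical user code, not part of this file):
   for

     int f (int n)
     {
       if (n != 4)
	 return expensive (n);
       return 0;
     }

   the size and time of the call to expensive are accounted under a
   predicate on "n != 4", so a copy specialized for n == 4 (by inlining
   or IP-CP) can drop that cost entirely.  */
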
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cilk.h"
#include "cfgexpand.h"
#include "gimplify.h"

/* Summaries.  */
function_summary <ipa_fn_summary *> *ipa_fn_summaries;
call_summary <ipa_call_summary *> *ipa_call_summaries;

/* Edge predicates go here.  */
static object_allocator<predicate> edge_predicate_pool ("edge predicates");


/* Dump IPA hints.  */
void
ipa_dump_hints (FILE *f, ipa_hints hints)
{
  if (!hints)
    return;
  fprintf (f, "IPA hints:");
  if (hints & INLINE_HINT_indirect_call)
    {
      hints &= ~INLINE_HINT_indirect_call;
      fprintf (f, " indirect_call");
    }
  if (hints & INLINE_HINT_loop_iterations)
    {
      hints &= ~INLINE_HINT_loop_iterations;
      fprintf (f, " loop_iterations");
    }
  if (hints & INLINE_HINT_loop_stride)
    {
      hints &= ~INLINE_HINT_loop_stride;
      fprintf (f, " loop_stride");
    }
  if (hints & INLINE_HINT_same_scc)
    {
      hints &= ~INLINE_HINT_same_scc;
      fprintf (f, " same_scc");
    }
  if (hints & INLINE_HINT_in_scc)
    {
      hints &= ~INLINE_HINT_in_scc;
      fprintf (f, " in_scc");
    }
  if (hints & INLINE_HINT_cross_module)
    {
      hints &= ~INLINE_HINT_cross_module;
      fprintf (f, " cross_module");
    }
  if (hints & INLINE_HINT_declared_inline)
    {
      hints &= ~INLINE_HINT_declared_inline;
      fprintf (f, " declared_inline");
    }
  if (hints & INLINE_HINT_array_index)
    {
      hints &= ~INLINE_HINT_array_index;
      fprintf (f, " array_index");
    }
  if (hints & INLINE_HINT_known_hot)
    {
      hints &= ~INLINE_HINT_known_hot;
      fprintf (f, " known_hot");
    }
  gcc_assert (!hints);
}


/* Record SIZE and TIME to SUMMARY.
   The accounted code will be executed when EXEC_PRED is true.
   When NONCONST_PRED is false the code will evaluate to constant and
   will get optimized out in specialized clones of the function.  */

void
ipa_fn_summary::account_size_time (int size, sreal time,
				   const predicate &exec_pred,
				   const predicate &nonconst_pred_in)
{
  size_time_entry *e;
  bool found = false;
  int i;
  predicate nonconst_pred;

  if (exec_pred == false)
    return;

  nonconst_pred = nonconst_pred_in & exec_pred;

  if (nonconst_pred == false)
    return;

  /* We need to create the initial empty unconditional clause, but otherwise
     we don't need to account empty times and sizes.  */
  if (!size && time == 0 && size_time_table)
    return;

  gcc_assert (time >= 0);

  for (i = 0; vec_safe_iterate (size_time_table, i, &e); i++)
    if (e->exec_predicate == exec_pred
	&& e->nonconst_predicate == nonconst_pred)
      {
	found = true;
	break;
      }
  if (i == 256)
    {
      i = 0;
      found = true;
      e = &(*size_time_table)[0];
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "\t\tReached limit on number of entries, "
		 "ignoring the predicate.");
    }
  if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size))
    {
      fprintf (dump_file,
	       "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
	       ((double) size) / ipa_fn_summary::size_scale,
	       (time.to_double ()), found ? "" : "new ");
      exec_pred.dump (dump_file, conds, 0);
      if (exec_pred != nonconst_pred)
	{
	  fprintf (dump_file, " nonconst:");
	  nonconst_pred.dump (dump_file, conds);
	}
      else
	fprintf (dump_file, "\n");
    }
  if (!found)
    {
      struct size_time_entry new_entry;
      new_entry.size = size;
      new_entry.time = time;
      new_entry.exec_predicate = exec_pred;
      new_entry.nonconst_predicate = nonconst_pred;
      vec_safe_push (size_time_table, new_entry);
    }
  else
    {
      e->size += size;
      e->time += time;
    }
}
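
/* Illustrative sketch (assumed units and predicate names, not real
   measurements): a statement guarded by "param0 < 10" whose result
   constant-folds once param0 is known might be recorded as

     account_size_time (50, 2, pred_lt10, pred_lt10);

   i.e. 50 size units and 2 time units that are executed, and stay
   non-constant, only in contexts where "param0 < 10" cannot be proven
   false.  */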

/* We proved E to be unreachable, redirect it to __builtin_unreachable.  */

static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
{
  struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
  struct cgraph_node *target = cgraph_node::get_create
		  (builtin_decl_implicit (BUILT_IN_UNREACHABLE));

  if (e->speculative)
    e = e->resolve_speculation (target->decl);
  else if (!e->callee)
    e->make_direct (target);
  else
    e->redirect_callee (target);
  struct ipa_call_summary *es = ipa_call_summaries->get (e);
  e->inline_failed = CIF_UNREACHABLE;
  e->frequency = 0;
  e->count = 0;
  es->call_stmt_size = 0;
  es->call_stmt_time = 0;
  if (callee)
    callee->remove_symbol_and_inline_clones ();
  return e;
}

/* Set predicate for edge E.  */

static void
edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
{
  /* If the edge is determined to be never executed, redirect it
     to BUILT_IN_UNREACHABLE to make it clear to IPA passes the call will
     be optimized out.  */
  if (predicate && *predicate == false
      /* When handling speculative edges, we need to do the redirection
	 just once.  Do it always on the direct edge, so we do not
	 attempt to resolve speculation while duplicating the edge.  */
      && (!e->speculative || e->callee))
    e = redirect_to_unreachable (e);

  struct ipa_call_summary *es = ipa_call_summaries->get (e);
  if (predicate && *predicate != true)
    {
      if (!es->predicate)
	es->predicate = edge_predicate_pool.allocate ();
      *es->predicate = *predicate;
    }
  else
    {
      if (es->predicate)
	edge_predicate_pool.remove (es->predicate);
      es->predicate = NULL;
    }
}

/* Set predicate for hint *P.  */

static void
set_hint_predicate (predicate **p, predicate new_predicate)
{
  if (new_predicate == false || new_predicate == true)
    {
      if (*p)
	edge_predicate_pool.remove (*p);
      *p = NULL;
    }
  else
    {
      if (!*p)
	*p = edge_predicate_pool.allocate ();
      **p = new_predicate;
    }
}


/* Compute what conditions may or may not hold given information about
   parameters.  RET_CLAUSE returns truths that may hold in a specialized copy,
   while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
   copy when called in a given context.  It is a bitmask of conditions.  Bit
   0 means that condition is known to be false, while bit 1 means that condition
   may or may not be true.  These differ - for example the NOT_INLINED condition
   is always false in the second, and builtin_constant_p tests cannot use
   the fact that a parameter is indeed a constant.

   KNOWN_VALS is partial mapping of parameters of NODE to constant values.
   KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
   Return clause of possible truths.  When INLINE_P is true, assume that we are
   inlining.

   ERROR_MARK means compile time invariant.  */

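/* Illustrative sketch (hypothetical conditions): if condition 0 is
   "param0 != 4" and condition 1 tests param1, then with a known value 4
   for param0 and nothing known about param1, bit
   (0 + predicate::first_dynamic_condition) stays clear while bit
   (1 + predicate::first_dynamic_condition) is set in both returned
   clauses, meaning the second condition may or may not be true.  */
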
static void
evaluate_conditions_for_known_args (struct cgraph_node *node,
				    bool inline_p,
				    vec<tree> known_vals,
				    vec<ipa_agg_jump_function_p> known_aggs,
				    clause_t *ret_clause,
				    clause_t *ret_nonspec_clause)
{
  clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
  clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
  int i;
  struct condition *c;

  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
    {
      tree val;
      tree res;

      /* We allow call stmt to have fewer arguments than the callee function
	 (especially for K&R style programs).  So bound check here (we assume
	 known_aggs vector, if non-NULL, has the same length as
	 known_vals).  */
      gcc_checking_assert (!known_aggs.exists ()
			   || (known_vals.length () == known_aggs.length ()));
      if (c->operand_num >= (int) known_vals.length ())
	{
	  clause |= 1 << (i + predicate::first_dynamic_condition);
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}

      if (c->agg_contents)
	{
	  struct ipa_agg_jump_function *agg;

	  if (c->code == predicate::changed
	      && !c->by_ref
	      && (known_vals[c->operand_num] == error_mark_node))
	    continue;

	  if (known_aggs.exists ())
	    {
	      agg = known_aggs[c->operand_num];
	      val = ipa_find_agg_cst_for_param (agg, known_vals[c->operand_num],
						c->offset, c->by_ref);
	    }
	  else
	    val = NULL_TREE;
	}
      else
	{
	  val = known_vals[c->operand_num];
	  if (val == error_mark_node && c->code != predicate::changed)
	    val = NULL_TREE;
	}

      if (!val)
	{
	  clause |= 1 << (i + predicate::first_dynamic_condition);
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}
      if (c->code == predicate::changed)
	{
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}

      if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
	{
	  clause |= 1 << (i + predicate::first_dynamic_condition);
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}
      if (c->code == predicate::is_not_constant)
	{
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}

      val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
      res = val
	? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
	: NULL;

      if (res && integer_zerop (res))
	continue;

      clause |= 1 << (i + predicate::first_dynamic_condition);
      nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
    }
  *ret_clause = clause;
  if (ret_nonspec_clause)
    *ret_nonspec_clause = nonspec_clause;
}


/* Work out what conditions might be true at invocation of E.  */

void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
			      clause_t *clause_ptr,
			      clause_t *nonspec_clause_ptr,
			      vec<tree> *known_vals_ptr,
			      vec<ipa_polymorphic_call_context>
			      *known_contexts_ptr,
			      vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  struct ipa_fn_summary *info = ipa_fn_summaries->get (callee);
  vec<tree> known_vals = vNULL;
  vec<ipa_agg_jump_function_p> known_aggs = vNULL;

  if (clause_ptr)
    *clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
  if (known_vals_ptr)
    known_vals_ptr->create (0);
  if (known_contexts_ptr)
    known_contexts_ptr->create (0);

  if (ipa_node_params_sum
      && !e->call_stmt_cannot_inline_p
      && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
    {
      struct ipa_node_params *parms_info;
      struct ipa_edge_args *args = IPA_EDGE_REF (e);
      struct ipa_call_summary *es = ipa_call_summaries->get (e);
      int i, count = ipa_get_cs_argument_count (args);

      if (e->caller->global.inlined_to)
	parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
      else
	parms_info = IPA_NODE_REF (e->caller);

      if (count && (info->conds || known_vals_ptr))
	known_vals.safe_grow_cleared (count);
      if (count && (info->conds || known_aggs_ptr))
	known_aggs.safe_grow_cleared (count);
      if (count && known_contexts_ptr)
	known_contexts_ptr->safe_grow_cleared (count);

      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
	  tree cst = ipa_value_from_jfunc (parms_info, jf);

	  if (!cst && e->call_stmt
	      && i < (int)gimple_call_num_args (e->call_stmt))
	    {
	      cst = gimple_call_arg (e->call_stmt, i);
	      if (!is_gimple_min_invariant (cst))
		cst = NULL;
	    }
	  if (cst)
	    {
	      gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
	      if (known_vals.exists ())
		known_vals[i] = cst;
	    }
	  else if (inline_p && !es->param[i].change_prob)
	    known_vals[i] = error_mark_node;

	  if (known_contexts_ptr)
	    (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
							       i, jf);
	  /* TODO: When IPA-CP starts propagating and merging aggregate jump
	     functions, use its knowledge of the caller too, just like the
	     scalar case above.  */
	  known_aggs[i] = &jf->agg;
	}
    }
  else if (e->call_stmt && !e->call_stmt_cannot_inline_p
	   && ((clause_ptr && info->conds) || known_vals_ptr))
    {
      int i, count = (int)gimple_call_num_args (e->call_stmt);

      if (count && (info->conds || known_vals_ptr))
	known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
	{
	  tree cst = gimple_call_arg (e->call_stmt, i);
	  if (!is_gimple_min_invariant (cst))
	    cst = NULL;
	  if (cst)
	    known_vals[i] = cst;
	}
    }

  evaluate_conditions_for_known_args (callee, inline_p,
				      known_vals, known_aggs, clause_ptr,
				      nonspec_clause_ptr);

  if (known_vals_ptr)
    *known_vals_ptr = known_vals;
  else
    known_vals.release ();

  if (known_aggs_ptr)
    *known_aggs_ptr = known_aggs;
  else
    known_aggs.release ();
}


/* Allocate the function summary.  */

static void
ipa_fn_summary_alloc (void)
{
  gcc_checking_assert (!ipa_fn_summaries);
  ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
  ipa_call_summaries = new ipa_call_summary_t (symtab, false);
}

/* We are called multiple times for a given function; clear
   data from the previous run so they are not accumulated.  */

void
ipa_call_summary::reset ()
{
  call_stmt_size = call_stmt_time = 0;
  if (predicate)
    edge_predicate_pool.remove (predicate);
  predicate = NULL;
  param.release ();
}

/* We are called multiple times for a given function; clear
   data from the previous run so they are not accumulated.  */

void
ipa_fn_summary::reset (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  self_size = 0;
  estimated_stack_size = 0;
  estimated_self_stack_size = 0;
  stack_frame_offset = 0;
  size = 0;
  time = 0;
  growth = 0;
  scc_no = 0;
  if (loop_iterations)
    {
      edge_predicate_pool.remove (loop_iterations);
      loop_iterations = NULL;
    }
  if (loop_stride)
    {
      edge_predicate_pool.remove (loop_stride);
      loop_stride = NULL;
    }
  if (array_index)
    {
      edge_predicate_pool.remove (array_index);
      array_index = NULL;
    }
  vec_free (conds);
  vec_free (size_time_table);
  for (e = node->callees; e; e = e->next_callee)
    ipa_call_summaries->get (e)->reset ();
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->get (e)->reset ();
  fp_expressions = false;
}

/* Hook that is called by cgraph.c when a node is removed.  */

void
ipa_fn_summary_t::remove (cgraph_node *node, ipa_fn_summary *info)
{
  info->reset (node);
}

/* Same as remap_predicate_after_duplication but handle hint predicate *P.
   Additionally, take care of allocating a new memory slot for the updated
   predicate and set it to NULL when it becomes true or false (and thus
   uninteresting).  */

static void
remap_hint_predicate_after_duplication (predicate **p,
					clause_t possible_truths)
{
  predicate new_predicate;

  if (!*p)
    return;

  new_predicate = (*p)->remap_after_duplication (possible_truths);
  /* We do not want to free the previous predicate; it is used by the node
     origin.  */
  *p = NULL;
  set_hint_predicate (p, new_predicate);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */

void
ipa_fn_summary_t::duplicate (cgraph_node *src,
			     cgraph_node *dst,
			     ipa_fn_summary *,
			     ipa_fn_summary *info)
{
  memcpy (info, ipa_fn_summaries->get (src), sizeof (ipa_fn_summary));
  /* TODO: as an optimization, we may avoid copying conditions
     that are known to be false or true.  */
  info->conds = vec_safe_copy (info->conds);

  /* When there are any replacements in the function body, see if we can figure
     out that something was optimized out.  */
  if (ipa_node_params_sum && dst->clone.tree_map)
    {
      vec<size_time_entry, va_gc> *entry = info->size_time_table;
      /* Use SRC parm info since it may not be copied yet.  */
      struct ipa_node_params *parms_info = IPA_NODE_REF (src);
      vec<tree> known_vals = vNULL;
      int count = ipa_get_param_count (parms_info);
      int i, j;
      clause_t possible_truths;
      predicate true_pred = true;
      size_time_entry *e;
      int optimized_out_size = 0;
      bool inlined_to_p = false;
      struct cgraph_edge *edge, *next;

      info->size_time_table = 0;
      known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
	{
	  struct ipa_replace_map *r;

	  for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
	    {
	      if (((!r->old_tree && r->parm_num == i)
		   || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
		  && r->replace_p && !r->ref_p)
		{
		  known_vals[i] = r->new_tree;
		  break;
		}
	    }
	}
      evaluate_conditions_for_known_args (dst, false,
					  known_vals,
					  vNULL,
					  &possible_truths,
					  /* We are going to specialize,
					     so ignore nonspec truths.  */
					  NULL);
      known_vals.release ();

      info->account_size_time (0, 0, true_pred, true_pred);

      /* Remap size_time vectors.
	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as an optimization, we can also eliminate conditions known
	 to be true.  */
      for (i = 0; vec_safe_iterate (entry, i, &e); i++)
	{
	  predicate new_exec_pred;
	  predicate new_nonconst_pred;
	  new_exec_pred = e->exec_predicate.remap_after_duplication
				 (possible_truths);
	  new_nonconst_pred = e->nonconst_predicate.remap_after_duplication
				 (possible_truths);
	  if (new_exec_pred == false || new_nonconst_pred == false)
	    optimized_out_size += e->size;
	  else
	    info->account_size_time (e->size, e->time, new_exec_pred,
				     new_nonconst_pred);
	}

      /* Remap edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->callees; edge; edge = next)
	{
	  predicate new_predicate;
	  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
	  next = edge->next_callee;

	  if (!edge->inline_failed)
	    inlined_to_p = true;
	  if (!es->predicate)
	    continue;
	  new_predicate = es->predicate->remap_after_duplication
				 (possible_truths);
	  if (new_predicate == false && *es->predicate != false)
	    optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
	  edge_set_predicate (edge, &new_predicate);
	}

      /* Remap indirect edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->indirect_calls; edge; edge = next)
	{
	  predicate new_predicate;
	  struct ipa_call_summary *es = ipa_call_summaries->get (edge);
	  next = edge->next_callee;

	  gcc_checking_assert (edge->inline_failed);
	  if (!es->predicate)
	    continue;
	  new_predicate = es->predicate->remap_after_duplication
				 (possible_truths);
	  if (new_predicate == false && *es->predicate != false)
	    optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
	  edge_set_predicate (edge, &new_predicate);
	}
      remap_hint_predicate_after_duplication (&info->loop_iterations,
					      possible_truths);
      remap_hint_predicate_after_duplication (&info->loop_stride,
					      possible_truths);
      remap_hint_predicate_after_duplication (&info->array_index,
					      possible_truths);

      /* If the inliner or someone after the inliner ever starts producing
	 non-trivial clones, we will get trouble with lack of information
	 about updating self sizes, because size vectors already contain
	 sizes of the callees.  */
      gcc_assert (!inlined_to_p || !optimized_out_size);
    }
  else
    {
      info->size_time_table = vec_safe_copy (info->size_time_table);
      if (info->loop_iterations)
	{
	  predicate p = *info->loop_iterations;
	  info->loop_iterations = NULL;
	  set_hint_predicate (&info->loop_iterations, p);
	}
      if (info->loop_stride)
	{
	  predicate p = *info->loop_stride;
	  info->loop_stride = NULL;
	  set_hint_predicate (&info->loop_stride, p);
	}
      if (info->array_index)
	{
	  predicate p = *info->array_index;
	  info->array_index = NULL;
	  set_hint_predicate (&info->array_index, p);
	}
    }
  if (!dst->global.inlined_to)
    ipa_update_overall_fn_summary (dst);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */

void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
			       struct cgraph_edge *dst,
			       struct ipa_call_summary *srcinfo,
			       struct ipa_call_summary *info)
{
  *info = *srcinfo;
  info->predicate = NULL;
  edge_set_predicate (dst, srcinfo->predicate);
  info->param = srcinfo->param.copy ();
  if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
    {
      info->call_stmt_size -= (eni_size_weights.indirect_call_cost
			       - eni_size_weights.call_cost);
      info->call_stmt_time -= (eni_time_weights.indirect_call_cost
			       - eni_time_weights.call_cost);
    }
}


/* Keep edge cache consistent across edge removal.  */

void
ipa_call_summary_t::remove (struct cgraph_edge *,
			    struct ipa_call_summary *sum)
{
  sum->reset ();
}


/* Dump edge summaries associated with NODE and recursively with all clones.
   Indent by INDENT.  */

static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
		       struct ipa_fn_summary *info)
{
  struct cgraph_edge *edge;
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
      struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
      int i;

      fprintf (f,
	       "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
	       " time: %2i callee size:%2i stack:%2i",
	       indent, "", callee->name (), callee->order,
	       !edge->inline_failed
	       ? "inlined" : cgraph_inline_failed_string (edge->inline_failed),
	       indent, "", es->loop_depth, edge->frequency,
	       es->call_stmt_size, es->call_stmt_time,
	       (int) ipa_fn_summaries->get (callee)->size / ipa_fn_summary::size_scale,
	       (int) ipa_fn_summaries->get (callee)->estimated_stack_size);

      if (es->predicate)
	{
	  fprintf (f, " predicate: ");
	  es->predicate->dump (f, info->conds);
	}
      else
	fprintf (f, "\n");
      if (es->param.exists ())
	for (i = 0; i < (int) es->param.length (); i++)
	  {
	    int prob = es->param[i].change_prob;

	    if (!prob)
	      fprintf (f, "%*s op%i is compile time invariant\n",
		       indent + 2, "", i);
	    else if (prob != REG_BR_PROB_BASE)
	      fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
		       prob * 100.0 / REG_BR_PROB_BASE);
	  }
      if (!edge->inline_failed)
	{
	  fprintf (f, "%*sStack frame offset %i, callee self size %i,"
		   " callee size %i\n",
		   indent + 2, "",
		   (int) ipa_fn_summaries->get (callee)->stack_frame_offset,
		   (int) ipa_fn_summaries->get (callee)->estimated_self_stack_size,
		   (int) ipa_fn_summaries->get (callee)->estimated_stack_size);
	  dump_ipa_call_summary (f, indent + 2, callee, info);
	}
    }
  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
      fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
	       " time: %2i",
	       indent, "",
	       es->loop_depth,
	       edge->frequency, es->call_stmt_size, es->call_stmt_time);
      if (es->predicate)
	{
	  fprintf (f, "predicate: ");
	  es->predicate->dump (f, info->conds);
	}
      else
	fprintf (f, "\n");
    }
}


void
ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
  if (node->definition)
    {
      struct ipa_fn_summary *s = ipa_fn_summaries->get (node);
      size_time_entry *e;
      int i;
      fprintf (f, "IPA function summary for %s/%i", node->name (),
	       node->order);
      if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
	fprintf (f, " always_inline");
      if (s->inlinable)
	fprintf (f, " inlinable");
      if (s->contains_cilk_spawn)
	fprintf (f, " contains_cilk_spawn");
      if (s->fp_expressions)
	fprintf (f, " fp_expression");
      fprintf (f, "\n global time: %f\n", s->time.to_double ());
      fprintf (f, " self size: %i\n", s->self_size);
      fprintf (f, " global size: %i\n", s->size);
      fprintf (f, " min size: %i\n", s->min_size);
      fprintf (f, " self stack: %i\n",
	       (int) s->estimated_self_stack_size);
      fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
      if (s->growth)
	fprintf (f, " estimated growth:%i\n", (int) s->growth);
      if (s->scc_no)
	fprintf (f, " In SCC: %i\n", (int) s->scc_no);
      for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
	{
	  fprintf (f, " size:%f, time:%f",
		   (double) e->size / ipa_fn_summary::size_scale,
		   e->time.to_double ());
	  if (e->exec_predicate != true)
	    {
	      fprintf (f, ", executed if:");
	      e->exec_predicate.dump (f, s->conds, 0);
	    }
	  if (e->exec_predicate != e->nonconst_predicate)
	    {
	      fprintf (f, ", nonconst if:");
	      e->nonconst_predicate.dump (f, s->conds, 0);
	    }
	  fprintf (f, "\n");
	}
      if (s->loop_iterations)
	{
	  fprintf (f, " loop iterations:");
	  s->loop_iterations->dump (f, s->conds);
	}
      if (s->loop_stride)
	{
	  fprintf (f, " loop stride:");
	  s->loop_stride->dump (f, s->conds);
	}
      if (s->array_index)
	{
	  fprintf (f, " array index:");
	  s->array_index->dump (f, s->conds);
	}
      fprintf (f, " calls:\n");
      dump_ipa_call_summary (f, 4, node, s);
      fprintf (f, "\n");
    }
}

DEBUG_FUNCTION void
ipa_debug_fn_summary (struct cgraph_node *node)
{
  ipa_dump_fn_summary (stderr, node);
}

void
ipa_dump_fn_summaries (FILE *f)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      ipa_dump_fn_summary (f, node);
}

/* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
   boolean variable pointed to by DATA.  */

static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
	       void *data)
{
  bool *b = (bool *) data;
  *b = true;
  return true;
}

/* If OP refers to the value of a function parameter, return the
   corresponding parameter.  If SIZE_P is non-NULL, the size of the memory
   load (or of the SSA_NAME of the PARM_DECL) is stored to *SIZE_P in that
   case too.  */

static tree
unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  /* SSA_NAME referring to parm default def?  */
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    {
      if (size_p)
	*size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
      return SSA_NAME_VAR (op);
    }
  /* Non-SSA parm reference?  */
  if (TREE_CODE (op) == PARM_DECL)
    {
      bool modified = false;

      ao_ref refd;
      ao_ref_init (&refd, op);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
			  NULL);
      if (!modified)
	{
	  if (size_p)
	    *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
	  return op;
	}
    }
  return NULL_TREE;
}

/* If OP refers to the value of a function parameter, return the
   corresponding parameter.  Also traverse chains of SSA register
   assignments.  If SIZE_P is non-NULL, the size of the memory load (or of
   the SSA_NAME of the PARM_DECL) is stored to *SIZE_P in that case too.  */

static tree
unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);
  if (res)
    return res;

  if (TREE_CODE (op) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (op)
      && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
    return unmodified_parm (SSA_NAME_DEF_STMT (op),
			    gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
			    size_p);
  return NULL_TREE;
}

/* If OP refers to a value of a function parameter or value loaded from an
   aggregate passed to a parameter (either by value or reference), return TRUE
   and store the number of the parameter to *INDEX_P, the access size into
   *SIZE_P, and information whether and how it has been loaded from an
   aggregate into *AGGPOS.  INFO describes the function parameters, STMT is the
   statement in which OP is used or loaded.  */

static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
				  gimple *stmt, tree op, int *index_p,
				  HOST_WIDE_INT *size_p,
				  struct agg_position_info *aggpos)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);

  gcc_checking_assert (aggpos);
  if (res)
    {
      *index_p = ipa_get_param_decl_index (fbi->info, res);
      if (*index_p < 0)
	return false;
      aggpos->agg_contents = false;
      aggpos->by_ref = false;
      return true;
    }

  if (TREE_CODE (op) == SSA_NAME)
    {
      if (SSA_NAME_IS_DEFAULT_DEF (op)
	  || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
	return false;
      stmt = SSA_NAME_DEF_STMT (op);
      op = gimple_assign_rhs1 (stmt);
      if (!REFERENCE_CLASS_P (op))
	return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
						 aggpos);
    }

  aggpos->agg_contents = true;
  return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
				 stmt, op, index_p, &aggpos->offset,
				 size_p, &aggpos->by_ref);
}
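
/* Illustrative sketch (hypothetical user code): in

     int getx (struct point *p) { return p->x; }

   the load "p->x" is recognized as an aggregate item of parameter 0
   passed by reference, with aggpos->offset holding the field offset, so
   it can later be matched against known aggregate contents of the
   argument.  */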

/* See if statement might disappear after inlining.
   0 - means not eliminated
   1 - half of the statements go away
   2 - for sure it is eliminated.
   We are not terribly sophisticated, basically looking for simple abstraction
   penalty wrappers.  */

static int
eliminated_by_inlining_prob (gimple *stmt)
{
  enum gimple_code code = gimple_code (stmt);
  enum tree_code rhs_code;

  if (!optimize)
    return 0;

  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return 0;

      rhs_code = gimple_assign_rhs_code (stmt);

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are often free after
	 inlining due to SRA and further combining.
	 Assume that half of the statements go away.  */
      if (CONVERT_EXPR_CODE_P (rhs_code)
	  || rhs_code == VIEW_CONVERT_EXPR
	  || rhs_code == ADDR_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = get_base_address (rhs);
	  tree inner_lhs = get_base_address (lhs);
	  bool rhs_free = false;
	  bool lhs_free = false;

	  if (!inner_rhs)
	    inner_rhs = rhs;
	  if (!inner_lhs)
	    inner_lhs = lhs;

	  /* Reads of parameter are expected to be free.  */
	  if (unmodified_parm (stmt, inner_rhs, NULL))
	    rhs_free = true;
	  /* Match expressions of form &this->field.  Those will most likely
	     combine with something upstream after inlining.  */
	  else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
	    {
	      tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
	      if (TREE_CODE (op) == PARM_DECL)
		rhs_free = true;
	      else if (TREE_CODE (op) == MEM_REF
		       && unmodified_parm (stmt, TREE_OPERAND (op, 0), NULL))
		rhs_free = true;
	    }

	  /* When parameter is not SSA register because its address is taken
	     and it is just copied into one, the statement will be completely
	     free after inlining (we will copy propagate backward).  */
	  if (rhs_free && is_gimple_reg (lhs))
	    return 2;

	  /* Reads of parameters passed by reference
	     expected to be free (i.e. optimized out after inlining).  */
	  if (TREE_CODE (inner_rhs) == MEM_REF
	      && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
	    rhs_free = true;

	  /* Copying parameter passed by reference into gimple register is
	     probably also going to copy propagate, but we can't be quite
	     sure.  */
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;

	  /* Writes to parameters, parameters passed by value and return value
	     (either directly or passed via invisible reference) are free.

	     TODO: We ought to handle testcase like
	     struct a {int a,b;};
	     struct a
	     retrurnsturct (void)
	     {
	       struct a a ={1,2};
	       return a;
	     }

	     This translates into:

	     retrurnsturct ()
	     {
	       int a$b;
	       int a$a;
	       struct a a;
	       struct a D.2739;

	       <bb 2>:
	       D.2739.a = 1;
	       D.2739.b = 2;
	       return D.2739;

	     }
	     For that we either need to copy ipa-split logic detecting writes
	     to return value.  */
	  if (TREE_CODE (inner_lhs) == PARM_DECL
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == MEM_REF
		  && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0), NULL)
		      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
			  && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
			  && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
						      (inner_lhs,
						       0))) == RESULT_DECL))))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return 1;
	}
      return 0;
    default:
      return 0;
    }
}


/* If BB ends with a conditional that we can turn into predicates, attach the
   corresponding predicates to the CFG edges.  */

static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				   struct ipa_fn_summary *summary,
				   basic_block bb)
{
  gimple *last;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  enum tree_code code, inverted_code;
  edge e;
  edge_iterator ei;
  gimple *set_stmt;
  tree op2;

  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return;
  if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
    return;
  op = gimple_cond_lhs (last);
  /* TODO: handle conditionals like
     var = op0 < 4;
     if (var != 0).  */
  if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
    {
      code = gimple_cond_code (last);
      inverted_code = invert_tree_comparison (code, HONOR_NANS (op));

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
				      ? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
	     comparisons that are not EQ/NE instead of returning the proper
	     unordered one.  Be sure it is not confused with NON_CONSTANT.  */
	  if (this_code != ERROR_MARK)
	    {
	      predicate p
		= add_condition (summary, index, size, &aggpos, this_code,
				 unshare_expr_without_location
				 (gimple_cond_rhs (last)));
	      e->aux = edge_predicate_pool.allocate ();
	      *(predicate *) e->aux = p;
	    }
	}
    }

  if (TREE_CODE (op) != SSA_NAME)
    return;
  /* Special case
     if (builtin_constant_p (op))
       constant_code
     else
       nonconstant_code.
     Here we can predicate nonconstant_code.  We can't
     really handle constant_code since we have no predicate
     for this and also the constant code is not known to be
     optimized away when the inliner doesn't see that the operand is
     constant.  Other optimizers might think otherwise.  */
  if (gimple_cond_code (last) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (last)))
    return;
  set_stmt = SSA_NAME_DEF_STMT (op);
  if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
      || gimple_call_num_args (set_stmt) != 1)
    return;
  op2 = gimple_call_arg (set_stmt, 0);
  if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
					 &aggpos))
    return;
  FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
    {
      predicate p = add_condition (summary, index, size, &aggpos,
				   predicate::is_not_constant, NULL_TREE);
      e->aux = edge_predicate_pool.allocate ();
      *(predicate *) e->aux = p;
    }
}
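
/* Illustrative sketch (hypothetical user code): for

     if (param < 10)
       foo ();
     else
       bar ();

   the true edge receives a predicate built from "param < 10" and the
   false edge its inversion, so the costs of foo and bar can be
   discounted in contexts where the comparison result is known.  */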


/* If BB ends with a switch that we can turn into predicates, attach the
   corresponding predicates to the CFG edges.  */

static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				     struct ipa_fn_summary *summary,
				     basic_block bb)
{
  gimple *lastg;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  edge e;
  edge_iterator ei;
  size_t n;
  size_t case_idx;

  lastg = last_stmt (bb);
  if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
    return;
  gswitch *last = as_a <gswitch *> (lastg);
  op = gimple_switch_index (last);
  if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size, &aggpos))
    return;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      e->aux = edge_predicate_pool.allocate ();
      *(predicate *) e->aux = false;
    }
  n = gimple_switch_num_labels (last);
  for (case_idx = 0; case_idx < n; ++case_idx)
    {
      tree cl = gimple_switch_label (last, case_idx);
      tree min, max;
      predicate p;

      e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* For default we might want to construct predicate that none
	 of cases is met, but it is a bit hard to do without having
	 negations of conditionals handy.  */
      if (!min && !max)
	p = true;
      else if (!max)
	p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
			   unshare_expr_without_location (min));
      else
	{
	  predicate p1, p2;
	  p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
			      unshare_expr_without_location (min));
	  p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
			      unshare_expr_without_location (max));
	  p = p1 & p2;
	}
      *(struct predicate *) e->aux
	= p.or_with (summary->conds, *(struct predicate *) e->aux);
    }
}


/* For each BB in NODE attach to its AUX pointer the predicate under
   which it is executable.  */

static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
		       struct cgraph_node *node,
		       struct ipa_fn_summary *summary)
{
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  bool done = false;
  basic_block bb;

  FOR_EACH_BB_FN (bb, my_function)
    {
      set_cond_stmt_execution_predicate (fbi, summary, bb);
      set_switch_stmt_execution_predicate (fbi, summary, bb);
    }

  /* Entry block is always executable.  */
  ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = edge_predicate_pool.allocate ();
  *(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true;

  /* A simple dataflow propagation of predicates forward in the CFG.
     TODO: work in reverse postorder.  */
  while (!done)
    {
      done = true;
      FOR_EACH_BB_FN (bb, my_function)
	{
	  predicate p = false;
	  edge e;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      if (e->src->aux)
		{
		  predicate this_bb_predicate
		    = *(predicate *) e->src->aux;
		  if (e->aux)
		    this_bb_predicate &= (*(struct predicate *) e->aux);
		  p = p.or_with (summary->conds, this_bb_predicate);
		  if (p == true)
		    break;
		}
	    }
	  if (p == false)
	    gcc_checking_assert (!bb->aux);
	  else
	    {
	      if (!bb->aux)
		{
		  done = false;
		  bb->aux = edge_predicate_pool.allocate ();
		  *((predicate *) bb->aux) = p;
		}
	      else if (p != *(predicate *) bb->aux)
		{
		  /* This OR operation is needed to ensure monotone data flow
		     in the case we hit the limit on number of clauses and the
		     and/or operations above give approximate answers.  */
		  p = p.or_with (summary->conds, *(predicate *)bb->aux);
		  if (p != *(predicate *) bb->aux)
		    {
		      done = false;
		      *((predicate *) bb->aux) = p;
		    }
		}
	    }
	}
    }
}
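
/* Illustrative sketch: a block's predicate is the OR over its incoming
   edges of the predecessor's predicate AND-ed with the edge predicate.
   For a half diamond

     if (a)  { x (); }  y ();

   the block calling x is reached under the condition attached to the
   true edge, while the join block running y is reached under the OR of
   both edge conditions.  */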


/* Return predicate specifying when the result of EXPR might not be
   a compile time constant.  */

static predicate
will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
				    struct ipa_fn_summary *summary,
				    tree expr,
				    vec<predicate> nonconstant_names)
{
  tree parm;
  int index;
  HOST_WIDE_INT size;

  while (UNARY_CLASS_P (expr))
    expr = TREE_OPERAND (expr, 0);

  parm = unmodified_parm (NULL, expr, &size);
  if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
    return add_condition (summary, index, size, NULL, predicate::changed,
			  NULL_TREE);
  if (is_gimple_min_invariant (expr))
    return false;
  if (TREE_CODE (expr) == SSA_NAME)
    return nonconstant_names[SSA_NAME_VERSION (expr)];
  if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
    {
      predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      if (p1 == true)
	return p1;

      predicate p2;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      return p1.or_with (summary->conds, p2);
    }
  else if (TREE_CODE (expr) == COND_EXPR)
    {
      predicate p1 = will_be_nonconstant_expr_predicate
	(info, summary, TREE_OPERAND (expr, 0),
	 nonconstant_names);
      if (p1 == true)
	return p1;

      predicate p2;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 1),
					       nonconstant_names);
      if (p2 == true)
	return p2;
      p1 = p1.or_with (summary->conds, p2);
      p2 = will_be_nonconstant_expr_predicate (info, summary,
					       TREE_OPERAND (expr, 2),
					       nonconstant_names);
      return p2.or_with (summary->conds, p1);
    }
  else
    {
      debug_tree (expr);
      gcc_unreachable ();
    }
  return false;
}


/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.  */

static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
			       struct ipa_fn_summary *summary,
			       gimple *stmt,
			       vec<predicate> nonconstant_names)
{
  predicate p = true;
  ssa_op_iter iter;
  tree use;
  predicate op_non_const;
  bool is_load;
  int base_index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;

  /* What statements might be optimized away
     when their arguments are constant.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && (gimple_code (stmt) != GIMPLE_CALL
	  || !(gimple_call_flags (stmt) & ECF_CONST)))
    return p;

  /* Stores will stay anyway.  */
  if (gimple_store_p (stmt))
    return p;

  is_load = gimple_assign_load_p (stmt);

  /* Loads can be optimized when the value is known.  */
  if (is_load)
    {
      tree op;
      gcc_assert (gimple_assign_single_p (stmt));
      op = gimple_assign_rhs1 (stmt);
      if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index, &size,
					     &aggpos))
	return p;
    }
  else
    base_index = -1;

  /* See if we understand all operands before we start
     adding conditionals.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree parm = unmodified_parm (stmt, use, NULL);
      /* For arguments we can build a condition.  */
      if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
	continue;
      if (TREE_CODE (use) != SSA_NAME)
	return p;
      /* If we know when operand is constant,
	 we still can say something useful.  */
      if (nonconstant_names[SSA_NAME_VERSION (use)] != true)
	continue;
      return p;
    }

  if (is_load)
    op_non_const =
      add_condition (summary, base_index, size, &aggpos, predicate::changed,
		     NULL);
  else
    op_non_const = false;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      HOST_WIDE_INT size;
      tree parm = unmodified_parm (stmt, use, &size);
      int index;

      if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
	{
	  if (index != base_index)
	    p = add_condition (summary, index, size, NULL, predicate::changed,
			       NULL_TREE);
	  else
	    continue;
	}
      else
	p = nonconstant_names[SSA_NAME_VERSION (use)];
      op_non_const = p.or_with (summary->conds, op_non_const);
    }
  if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
      && gimple_op (stmt, 0)
      && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
    nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
      = op_non_const;
  return op_non_const;
}

struct record_modified_bb_info
{
  bitmap bb_set;
  gimple *stmt;
};

/* Value is initialized in INIT_BB and used in USE_BB.  We want to compute
   the probability with which it changes between executions of USE_BB.
   INIT_BB->frequency/USE_BB->frequency is an estimate, but if INIT_BB
   is in a different loop nest, we can do better.
   This is all just an estimate.  In theory we look for a minimal cut
   separating INIT_BB and USE_BB, but we only want to anticipate loop
   invariant motion anyway.  */

static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
  struct loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
  if (l && l->header->frequency < init_bb->frequency)
    return l->header;
  return init_bb;
}

/* Callback of walk_aliased_vdefs.  Records basic blocks where the value may be
   set except for info->stmt.  */

static bool
record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
  struct record_modified_bb_info *info =
    (struct record_modified_bb_info *) data;
  if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
    return false;
  bitmap_set_bit (info->bb_set,
		  SSA_NAME_IS_DEFAULT_DEF (vdef)
		  ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
		  : get_minimal_bb
			 (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
			  gimple_bb (info->stmt))->index);
  return false;
}

/* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
   will change since last invocation of STMT.

   Value 0 is reserved for compile time invariants.
   For common parameters it is REG_BR_PROB_BASE.  For loop invariants it
   ought to be REG_BR_PROB_BASE / estimated_iters.  */

static int
param_change_prob (gimple *stmt, int i)
{
  tree op = gimple_call_arg (stmt, i);
  basic_block bb = gimple_bb (stmt);

  if (TREE_CODE (op) == WITH_SIZE_EXPR)
    op = TREE_OPERAND (op, 0);

  tree base = get_base_address (op);

  /* Global invariants never change.  */
  if (is_gimple_min_invariant (base))
    return 0;

  /* We would have to do non-trivial analysis to really work out what
     is the probability of the value changing (i.e. when the init statement
     is in a sibling loop of the call).

     We make a conservative estimate: when the call is executed N times more
     often than the statement defining the value, we take the frequency
     1/N.  */
  if (TREE_CODE (base) == SSA_NAME)
    {
      int init_freq;

      if (!bb->frequency)
	return REG_BR_PROB_BASE;

      if (SSA_NAME_IS_DEFAULT_DEF (base))
	init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	init_freq = get_minimal_bb
		      (gimple_bb (SSA_NAME_DEF_STMT (base)),
		       gimple_bb (stmt))->frequency;

      if (!init_freq)
	init_freq = 1;
      if (init_freq < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }
  else
    {
      ao_ref refd;
      int max;
      struct record_modified_bb_info info;
      bitmap_iterator bi;
      unsigned index;
      tree init = ctor_for_folding (base);

      if (init != error_mark_node)
	return 0;
      if (!bb->frequency)
	return REG_BR_PROB_BASE;
      ao_ref_init (&refd, op);
      info.stmt = stmt;
      info.bb_set = BITMAP_ALLOC (NULL);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
			  NULL);
      if (bitmap_bit_p (info.bb_set, bb->index))
	{
	  BITMAP_FREE (info.bb_set);
	  return REG_BR_PROB_BASE;
	}

      /* Assume that every memory is initialized at entry.
	 TODO: Can we easily determine if the value is always defined
	 and thus we may skip the entry block?  */
      if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
	max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
      else
	max = 1;

      EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
	max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);

      BITMAP_FREE (info.bb_set);
      if (max < bb->frequency)
	return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
      else
	return REG_BR_PROB_BASE;
    }
}
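
/* Illustrative sketch (assumed frequencies): when the value is defined
   once before a loop (frequency 1000) and the call sits in the loop body
   (frequency 10000), GCOV_COMPUTE_SCALE yields roughly
   REG_BR_PROB_BASE / 10, i.e. the parameter is expected to change on
   about one in ten invocations of the call.  */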

/* Find whether a basic block BB is the final block of a (half) diamond CFG
   sub-graph and if the predicate the condition depends on is known.  If so,
   return true and store the predicate in *P.  */

static bool
phi_result_unknown_predicate (struct ipa_node_params *info,
			      ipa_fn_summary *summary, basic_block bb,
			      predicate *p,
			      vec<predicate> nonconstant_names)
{
  edge e;
  edge_iterator ei;
  basic_block first_bb = NULL;
  gimple *stmt;

  if (single_pred_p (bb))
    {
      *p = false;
      return true;
    }

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (single_succ_p (e->src))
	{
	  if (!single_pred_p (e->src))
	    return false;
	  if (!first_bb)
	    first_bb = single_pred (e->src);
	  else if (single_pred (e->src) != first_bb)
	    return false;
	}
      else
	{
	  if (!first_bb)
	    first_bb = e->src;
	  else if (e->src != first_bb)
	    return false;
	}
    }

  if (!first_bb)
    return false;

  stmt = last_stmt (first_bb);
  if (!stmt
      || gimple_code (stmt) != GIMPLE_COND
      || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
    return false;

  *p = will_be_nonconstant_expr_predicate (info, summary,
					   gimple_cond_lhs (stmt),
					   nonconstant_names);
  if (*p == true)
    return false;
  else
    return true;
}

/* Given a PHI statement in a function described by inline properties SUMMARY
   and *P being the predicate describing whether the selected PHI argument is
   known, store a predicate for the result of the PHI statement into
   NONCONSTANT_NAMES, if possible.  */

static void
predicate_for_phi_result (struct ipa_fn_summary *summary, gphi *phi,
			  predicate *p,
			  vec<predicate> nonconstant_names)
{
  unsigned i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg (phi, i)->def;
      if (!is_gimple_min_invariant (arg))
	{
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  *p = p->or_with (summary->conds,
			   nonconstant_names[SSA_NAME_VERSION (arg)]);
	  if (*p == true)
	    return;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\t\tphi predicate: ");
      p->dump (dump_file, summary->conds);
    }
  nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}

/* Return predicate specifying when the array index in access OP becomes
   non-constant.  */

static predicate
array_index_predicate (ipa_fn_summary *info,
		       vec<predicate> nonconstant_names, tree op)
{
  predicate p = false;
  while (handled_component_p (op))
    {
      if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
	{
	  if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
	    p = p.or_with (info->conds,
			   nonconstant_names[SSA_NAME_VERSION
					     (TREE_OPERAND (op, 1))]);
	}
      op = TREE_OPERAND (op, 0);
    }
  return p;
}
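
/* Illustrative sketch (hypothetical code): in "a[i] = 0;" with i being
   an SSA name, the returned predicate is the one recorded for i in
   NONCONSTANT_NAMES, so the access counts as a non-constant array index
   exactly when i itself may be non-constant.  */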

/* For a typical usage of __builtin_expect (a<b, 1), we
   may introduce an extra relation stmt:
   With the builtin, we have
     t1 = a <= b;
     t2 = (long int) t1;
     t3 = __builtin_expect (t2, 1);
     if (t3 != 0)
       goto ...
   Without the builtin, we have
     if (a<=b)
       goto...
   This affects the size/time estimation and may have
   an impact on the earlier inlining.
   Here we find this pattern and fix it up later.  */

static gimple *
find_foldable_builtin_expect (basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);
      if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
	  || gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
	{
	  tree var = gimple_call_lhs (stmt);
	  tree arg = gimple_call_arg (stmt, 0);
	  use_operand_p use_p;
	  gimple *use_stmt;
	  bool match = false;
	  bool done = false;

	  if (!var || !arg)
	    continue;
	  gcc_assert (TREE_CODE (var) == SSA_NAME);

	  while (TREE_CODE (arg) == SSA_NAME)
	    {
	      gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
	      if (!is_gimple_assign (stmt_tmp))
		break;
	      switch (gimple_assign_rhs_code (stmt_tmp))
		{
		case LT_EXPR:
		case LE_EXPR:
		case GT_EXPR:
		case GE_EXPR:
		case EQ_EXPR:
		case NE_EXPR:
		  match = true;
		  done = true;
		  break;
		CASE_CONVERT:
		  break;
		default:
		  done = true;
		  break;
		}
	      if (done)
		break;
	      arg = gimple_assign_rhs1 (stmt_tmp);
	    }

	  if (match && single_imm_use (var, &use_p, &use_stmt)
	      && gimple_code (use_stmt) == GIMPLE_COND)
	    return use_stmt;
	}
    }
  return NULL;
}
1908
1909/* Return true when the basic block contains only clobbers followed by RESX.
1910   Such BBs are kept around to make removal of dead stores possible with
1911   presence of EH and will be optimized out by optimize_clobbers later in the
1912   game.
1913
1914   NEED_EH is used to recurse in case the clobber has non-EH predecessors
1915   that can be clobber-only, too.  When it is false, the RESX is not necessary
1916   at the end of the basic block.  */
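/* A hypothetical example of a matched block in GIMPLE dump syntax:

       <bb 7>:
	 x ={v} {CLOBBER};
	 y ={v} {CLOBBER};
	 resx 1;

   reachable only over EH edges or from other clobber-only blocks.  */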
1917
1918static bool
1919clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
1920{
1921 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1922 edge_iterator ei;
1923 edge e;
1924
1925 if (need_eh)
1926 {
1927 if (gsi_end_p (gsi))
1928 return false;
1929 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
1930 return false;
1931 gsi_prev (&gsi);
1932 }
1933 else if (!single_succ_p (bb))
1934 return false;
1935
1936 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
1937 {
1938 gimple *stmt = gsi_stmt (gsi);
1939 if (is_gimple_debug (stmt))
1940 continue;
1941 if (gimple_clobber_p (stmt))
1942 continue;
1943 if (gimple_code (stmt) == GIMPLE_LABEL)
1944 break;
1945 return false;
1946 }
1947
1948   /* See if all predecessors are either throws or clobber-only BBs.  */
1949 FOR_EACH_EDGE (e, ei, bb->preds)
1950 if (!(e->flags & EDGE_EH)
1951 && !clobber_only_eh_bb_p (e->src, false))
1952 return false;
1953
1954 return true;
1955}
1956
1957/* Return true if STMT computes a floating-point expression that may be
1958   affected by -ffast-math and similar flags.  */
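/* E.g. (illustrative): x_1 = y_2 * z_3 with x_1 of type double satisfies the
   test below, because a defined or used SSA operand has FLOAT_TYPE_P type,
   while purely integral statements do not.  */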
1959
1960static bool
1961fp_expression_p (gimple *stmt)
1962{
1963 ssa_op_iter i;
1964 tree op;
1965
1966 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
1967 if (FLOAT_TYPE_P (TREE_TYPE (op)))
1968 return true;
1969 return false;
1970}
1971
1972/* Analyze function body for NODE.
1973   EARLY indicates a run from the early optimization pipeline.  */
1974
1975static void
1976 analyze_function_body (struct cgraph_node *node, bool early)
1977{
1978 sreal time = 0;
1979 /* Estimate static overhead for function prologue/epilogue and alignment. */
1980 int size = 2;
1981 /* Benefits are scaled by probability of elimination that is in range
1982 <0,2>. */
1983 basic_block bb;
1984 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1985 int freq;
1986   struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
1987 predicate bb_predicate;
1988 struct ipa_func_body_info fbi;
1989 vec<predicate> nonconstant_names = vNULL;
1990 int nblocks, n;
1991 int *order;
1992 predicate array_index = true;
1993 gimple *fix_builtin_expect_stmt;
1994
1995 gcc_assert (my_function && my_function->cfg);
1996 gcc_assert (cfun == my_function);
1997
1998   memset (&fbi, 0, sizeof (fbi));
1999 info->conds = NULL;
2000 info->size_time_table = NULL;
2001
2002 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2003 so we can produce proper inline hints.
2004
2005 When optimizing and analyzing for early inliner, initialize node params
2006 so we can produce correct BB predicates. */
2007
2008 if (opt_for_fn (node->decl, optimize))
2009 {
2010 calculate_dominance_info (CDI_DOMINATORS);
2011 if (!early)
2012 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2013 else
2014 {
2015 ipa_check_create_node_params ();
2016 ipa_initialize_node_params (node);
2017 }
2018
2019 if (ipa_node_params_sum)
2020 {
2021 fbi.node = node;
2022 fbi.info = IPA_NODE_REF (node);
2023 fbi.bb_infos = vNULL;
2024 fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
2025	  fbi.param_count = count_formal_params (node->decl);
2026 nonconstant_names.safe_grow_cleared
2027 (SSANAMES (my_function)->length ());
2028 }
2029 }
2030
2031 if (dump_file)
2032 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2033 node->name ());
2034
2035   /* When we run into the maximal number of entries, we assign everything to
2036      the constant truth case.  Be sure to have it in the list.  */
2037 bb_predicate = true;
2038 info->account_size_time (0, 0, bb_predicate, bb_predicate);
2039
2040 bb_predicate = predicate::not_inlined ();
2041   info->account_size_time (2 * ipa_fn_summary::size_scale, 0, bb_predicate,
2042 bb_predicate);
2043
2044 if (fbi.info)
2045 compute_bb_predicates (&fbi, node, info);
2046 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2047 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2048 for (n = 0; n < nblocks; n++)
2049 {
2050 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2051 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2052 if (clobber_only_eh_bb_p (bb))
2053 {
2054 if (dump_file && (dump_flags & TDF_DETAILS))
2055 fprintf (dump_file, "\n Ignoring BB %i;"
2056 " it will be optimized away by cleanup_clobbers\n",
2057 bb->index);
2058 continue;
2059 }
2060
2061      /* TODO: Obviously predicates can be propagated down the CFG.  */
2062 if (fbi.info)
2063 {
2064 if (bb->aux)
2065 bb_predicate = *(predicate *) bb->aux;
2066 else
2067 bb_predicate = false;
2068 }
2069 else
2070 bb_predicate = true;
2071
2072 if (dump_file && (dump_flags & TDF_DETAILS))
2073 {
2074 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2075 bb_predicate.dump (dump_file, info->conds);
2076 }
2077
2078 if (fbi.info && nonconstant_names.exists ())
2079 {
2080 predicate phi_predicate;
2081 bool first_phi = true;
2082
2083 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2084 gsi_next (&bsi))
2085 {
2086 if (first_phi
2087 && !phi_result_unknown_predicate (fbi.info, info, bb,
2088 &phi_predicate,
2089 nonconstant_names))
2090 break;
2091 first_phi = false;
2092 if (dump_file && (dump_flags & TDF_DETAILS))
2093 {
2094 fprintf (dump_file, " ");
2095 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0);
2096 }
2097 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2098 nonconstant_names);
2099 }
2100 }
2101
2102 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2103
2104 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2105 gsi_next (&bsi))
2106 {
2107 gimple *stmt = gsi_stmt (bsi);
2108 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2109 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2110 int prob;
2111 predicate will_be_nonconstant;
2112
2113	  /* This relation stmt should be folded after we remove the
2114	     builtin_expect call.  Adjust the cost here.  */
2115 if (stmt == fix_builtin_expect_stmt)
2116 {
2117 this_size--;
2118 this_time--;
2119 }
2120
2121 if (dump_file && (dump_flags & TDF_DETAILS))
2122 {
2123 fprintf (dump_file, " ");
2124 print_gimple_stmt (dump_file, stmt, 0);
2125 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2126 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2127 this_time);
2128 }
2129
2130 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2131 {
2132 predicate this_array_index;
2133 this_array_index =
2134 array_index_predicate (info, nonconstant_names,
2135 gimple_assign_rhs1 (stmt));
2136 if (this_array_index != false)
2137 array_index &= this_array_index;
2138 }
2139 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2140 {
2141 predicate this_array_index;
2142 this_array_index =
2143 array_index_predicate (info, nonconstant_names,
2144 gimple_get_lhs (stmt));
2145 if (this_array_index != false)
2146 array_index &= this_array_index;
2147 }
2148
2149
2150 if (is_gimple_call (stmt)
2151 && !gimple_call_internal_p (stmt))
2152 {
2153 struct cgraph_edge *edge = node->get_edge (stmt);
2154 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2155
2156 /* Special case: results of BUILT_IN_CONSTANT_P will be always
2157 resolved as constant. We however don't want to optimize
2158 out the cgraph edges. */
2159 if (nonconstant_names.exists ()
2160 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2161 && gimple_call_lhs (stmt)
2162 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2163 {
2164 predicate false_p = false;
2165 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2166 = false_p;
2167 }
2168 if (ipa_node_params_sum)
2169 {
2170 int count = gimple_call_num_args (stmt);
2171 int i;
2172
2173 if (count)
2174 es->param.safe_grow_cleared (count);
2175 for (i = 0; i < count; i++)
2176 {
2177 int prob = param_change_prob (stmt, i);
2178 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2179 es->param[i].change_prob = prob;
2180 }
2181 }
2182
2183 es->call_stmt_size = this_size;
2184 es->call_stmt_time = this_time;
2185 es->loop_depth = bb_loop_depth (bb);
2186 edge_set_predicate (edge, &bb_predicate);
2187 }
2188
2189	  /* TODO: When a conditional jump or switch is known to be constant, but
2190	     we did not translate it into the predicates, we really can account
2191	     just the maximum of the possible paths.  */
2192 if (fbi.info)
2193 will_be_nonconstant
2194 = will_be_nonconstant_predicate (&fbi, info,
2195 stmt, nonconstant_names);
2196 else
2197 will_be_nonconstant = true;
2198 if (this_time || this_size)
2199 {
2200 this_time *= freq;
2201
2202 prob = eliminated_by_inlining_prob (stmt);
2203 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2204 fprintf (dump_file,
2205 "\t\t50%% will be eliminated by inlining\n");
2206 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2207 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2208
2209 struct predicate p = bb_predicate & will_be_nonconstant;
2210
2211	      /* We can ignore a statement when we have proved it is never going
2212		 to be executed, but we cannot do that for call statements
2213		 because edges are accounted for specially.  */
2214
2215 if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false)
2216 {
2217 time += this_time;
2218 size += this_size;
2219 }
2220
2221 /* We account everything but the calls. Calls have their own
2222 size/time info attached to cgraph edges. This is necessary
2223 in order to make the cost disappear after inlining. */
2224 if (!is_gimple_call (stmt))
2225 {
2226 if (prob)
2227 {
2228 predicate ip = bb_predicate & predicate::not_inlined ();
2229 info->account_size_time (this_size * prob,
2230 (sreal)(this_time * prob)
2231 / (CGRAPH_FREQ_BASE * 2), ip,
2232 p);
2233 }
2234 if (prob != 2)
2235 info->account_size_time (this_size * (2 - prob),
2236 (sreal)(this_time * (2 - prob))
2237 / (CGRAPH_FREQ_BASE * 2),
2238 bb_predicate,
2239 p);
2240 }
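	      /* Worked example (illustrative): with prob == 1, i.e. a 50%
		 chance of elimination, half of the size/time is accounted
		 under BB_PREDICATE & not_inlined (paid only in the offline
		 copy) and the other half under BB_PREDICATE itself;
		 prob == 2 drops the unconditional part entirely.  */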
2241
2242 if (!info->fp_expressions && fp_expression_p (stmt))
2243 {
2244 info->fp_expressions = true;
2245 if (dump_file)
2246 fprintf (dump_file, " fp_expression set\n");
2247 }
2248
2249 gcc_assert (time >= 0);
2250 gcc_assert (size >= 0);
2251 }
2252 }
2253 }
2254   set_hint_predicate (&ipa_fn_summaries->get (node)->array_index, array_index);
2255 time = time / CGRAPH_FREQ_BASE;
2256 free (order);
2257
2258 if (nonconstant_names.exists () && !early)
2259 {
2260 struct loop *loop;
2261 predicate loop_iterations = true;
2262 predicate loop_stride = true;
2263
2264 if (dump_file && (dump_flags & TDF_DETAILS))
2265 flow_loops_dump (dump_file, NULL, 0);
2266 scev_initialize ();
2267 FOR_EACH_LOOP (loop, 0)
2268 {
2269 vec<edge> exits;
2270 edge ex;
2271 unsigned int j;
2272 struct tree_niter_desc niter_desc;
2273 bb_predicate = *(predicate *) loop->header->aux;
2274
2275 exits = get_loop_exit_edges (loop);
2276 FOR_EACH_VEC_ELT (exits, j, ex)
2277 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2278 && !is_gimple_min_invariant (niter_desc.niter))
2279 {
2280 predicate will_be_nonconstant
2281 = will_be_nonconstant_expr_predicate (fbi.info, info,
2282 niter_desc.niter,
2283 nonconstant_names);
2284 if (will_be_nonconstant != true)
2285 will_be_nonconstant = bb_predicate & will_be_nonconstant;
2286 if (will_be_nonconstant != true
2287 && will_be_nonconstant != false)
2288	      /* This is slightly imprecise.  We may want to represent each
2289		 loop with an independent predicate.  */
2290 loop_iterations &= will_be_nonconstant;
2291 }
2292 exits.release ();
2293 }
2294
2295 /* To avoid quadratic behavior we analyze stride predicates only
2296 with respect to the containing loop. Thus we simply iterate
2297 over all defs in the outermost loop body. */
2298 for (loop = loops_for_fn (cfun)->tree_root->inner;
2299 loop != NULL; loop = loop->next)
2300 {
2301 basic_block *body = get_loop_body (loop);
2302 for (unsigned i = 0; i < loop->num_nodes; i++)
2303 {
2304 gimple_stmt_iterator gsi;
2305 bb_predicate = *(predicate *) body[i]->aux;
2306 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2307 gsi_next (&gsi))
2308 {
2309 gimple *stmt = gsi_stmt (gsi);
2310
2311 if (!is_gimple_assign (stmt))
2312 continue;
2313
2314 tree def = gimple_assign_lhs (stmt);
2315 if (TREE_CODE (def) != SSA_NAME)
2316 continue;
2317
2318 affine_iv iv;
2319 if (!simple_iv (loop_containing_stmt (stmt),
2320 loop_containing_stmt (stmt),
2321 def, &iv, true)
2322 || is_gimple_min_invariant (iv.step))
2323 continue;
2324
2325 predicate will_be_nonconstant
2326 = will_be_nonconstant_expr_predicate (fbi.info, info,
2327 iv.step,
2328 nonconstant_names);
2329 if (will_be_nonconstant != true)
2330 will_be_nonconstant = bb_predicate & will_be_nonconstant;
2331 if (will_be_nonconstant != true
2332 && will_be_nonconstant != false)
2333		    /* This is slightly imprecise.  We may want to represent
2334		       each loop with an independent predicate.  */
2335 loop_stride = loop_stride & will_be_nonconstant;
2336 }
2337 }
2338 free (body);
2339 }
2340      set_hint_predicate (&ipa_fn_summaries->get (node)->loop_iterations,
2341			  loop_iterations);
2342      set_hint_predicate (&ipa_fn_summaries->get (node)->loop_stride,
2343			  loop_stride);
2344 scev_finalize ();
2345 }
2346 FOR_ALL_BB_FN (bb, my_function)
2347 {
2348 edge e;
2349 edge_iterator ei;
2350
2351 if (bb->aux)
2352 edge_predicate_pool.remove ((predicate *)bb->aux);
2353 bb->aux = NULL;
2354 FOR_EACH_EDGE (e, ei, bb->succs)
2355 {
2356 if (e->aux)
2357 edge_predicate_pool.remove ((predicate *) e->aux);
2358 e->aux = NULL;
2359 }
2360 }
2361 ipa_fn_summaries->get (node)->time = time;
2362 ipa_fn_summaries->get (node)->self_size = size;
2363 nonconstant_names.release ();
2364 ipa_release_body_info (&fbi);
2365 if (opt_for_fn (node->decl, optimize))
2366 {
2367 if (!early)
2368 loop_optimizer_finalize ();
2369 else if (!ipa_edge_args_sum)
2370 ipa_free_all_node_params ();
2371 free_dominance_info (CDI_DOMINATORS);
2372 }
2373 if (dump_file)
2374 {
2375 fprintf (dump_file, "\n");
2376       ipa_dump_fn_summary (dump_file, node);
2377 }
2378}
2379
2380
2381/* Compute function summary.
2382 EARLY is true when we compute parameters during early opts. */
2383
2384void
2385 compute_fn_summary (struct cgraph_node *node, bool early)
2386{
2387 HOST_WIDE_INT self_stack_size;
2388 struct cgraph_edge *e;
2389   struct ipa_fn_summary *info;
2390
2391 gcc_assert (!node->global.inlined_to);
2392
2393 if (!ipa_fn_summaries)
2394 ipa_fn_summary_alloc ();
2395
2396   info = ipa_fn_summaries->get (node);
2397 info->reset (node);
2398
2399 /* Estimate the stack size for the function if we're optimizing. */
2400 self_stack_size = optimize && !node->thunk.thunk_p
2401 ? estimated_stack_frame_size (node) : 0;
2402 info->estimated_self_stack_size = self_stack_size;
2403 info->estimated_stack_size = self_stack_size;
2404 info->stack_frame_offset = 0;
2405
2406 if (node->thunk.thunk_p)
2407 {
2408 struct ipa_call_summary *es = ipa_call_summaries->get (node->callees);
2409 predicate t = true;
2410
2411 node->local.can_change_signature = false;
2412 es->call_stmt_size = eni_size_weights.call_cost;
2413 es->call_stmt_time = eni_time_weights.call_cost;
2414      info->account_size_time (ipa_fn_summary::size_scale * 2, 2, t, t);
2415      t = predicate::not_inlined ();
2416      info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
2417      ipa_update_overall_fn_summary (node);
2418 info->self_size = info->size;
2419      /* We cannot inline instrumentation clones.  */
2420 if (node->thunk.add_pointer_bounds_args)
2421 {
2422 info->inlinable = false;
2423 node->callees->inline_failed = CIF_CHKP;
2424 }
2425 else
2426 info->inlinable = true;
2427 }
2428 else
2429 {
2430      /* Even is_gimple_min_invariant relies on current_function_decl.  */
2431 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2432
2433 /* Can this function be inlined at all? */
2434 if (!opt_for_fn (node->decl, optimize)
2435 && !lookup_attribute ("always_inline",
2436 DECL_ATTRIBUTES (node->decl)))
2437 info->inlinable = false;
2438 else
2439 info->inlinable = tree_inlinable_function_p (node->decl);
2440
2441 info->contains_cilk_spawn = fn_contains_cilk_spawn_p (cfun);
2442
2443 /* Type attributes can use parameter indices to describe them. */
2444 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2445 node->local.can_change_signature = false;
2446 else
2447 {
2448 /* Otherwise, inlinable functions always can change signature. */
2449 if (info->inlinable)
2450 node->local.can_change_signature = true;
2451 else
2452 {
2453	      /* Functions calling builtin_apply cannot change their signature.  */
2454 for (e = node->callees; e; e = e->next_callee)
2455 {
2456 tree cdecl = e->callee->decl;
2457 if (DECL_BUILT_IN (cdecl)
2458 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2459 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2460 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2461 break;
2462 }
2463 node->local.can_change_signature = !e;
2464 }
2465 }
2466      /* Functions called by an instrumentation thunk can't change signature
2467	 because instrumentation thunk modification is not supported.  */
2468 if (node->local.can_change_signature)
2469 for (e = node->callers; e; e = e->next_caller)
2470 if (e->caller->thunk.thunk_p
2471 && e->caller->thunk.add_pointer_bounds_args)
2472 {
2473 node->local.can_change_signature = false;
2474 break;
2475 }
2476       analyze_function_body (node, early);
2477 pop_cfun ();
2478 }
2479 for (e = node->callees; e; e = e->next_callee)
2480 if (e->callee->comdat_local_p ())
2481 break;
2482 node->calls_comdat_local = (e != NULL);
2483
2484 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2485 info->size = info->self_size;
2486 info->stack_frame_offset = 0;
2487 info->estimated_stack_size = info->estimated_self_stack_size;
2488
2489   /* The code above should compute exactly the same result as
2490      ipa_update_overall_fn_summary, but because the computation happens in
2491      a different order, roundoff errors result in slight changes.  */
2492   ipa_update_overall_fn_summary (node);
2493 gcc_assert (info->size == info->self_size);
2494}
2495
2496
2497/* Compute parameters of functions used by inliner using
2498 current_function_decl. */
2499
2500static unsigned int
2501 compute_fn_summary_for_current (void)
2502 {
2503   compute_fn_summary (cgraph_node::get (current_function_decl), true);
2504 return 0;
2505}
2506
2507namespace {
2508
2509const pass_data pass_data_inline_parameters =
2510{
2511 GIMPLE_PASS, /* type */
2512 "inline_param", /* name */
2513 OPTGROUP_INLINE, /* optinfo_flags */
2514 TV_INLINE_PARAMETERS, /* tv_id */
2515 0, /* properties_required */
2516 0, /* properties_provided */
2517 0, /* properties_destroyed */
2518 0, /* todo_flags_start */
2519 0, /* todo_flags_finish */
2520};
2521
2522class pass_inline_parameters : public gimple_opt_pass
2523{
2524public:
2525 pass_inline_parameters (gcc::context *ctxt)
2526 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
2527 {}
2528
2529 /* opt_pass methods: */
2530 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
2531 virtual unsigned int execute (function *)
2532 {
2533     return compute_fn_summary_for_current ();
2534 }
2535
2536}; // class pass_inline_parameters
2537
2538} // anon namespace
2539
2540gimple_opt_pass *
2541make_pass_inline_parameters (gcc::context *ctxt)
2542{
2543 return new pass_inline_parameters (ctxt);
2544}
2545
2546
2547/* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS,
2548   KNOWN_CONTEXTS and KNOWN_AGGS.  */
2549
2550static bool
2551estimate_edge_devirt_benefit (struct cgraph_edge *ie,
2552 int *size, int *time,
2553 vec<tree> known_vals,
2554 vec<ipa_polymorphic_call_context> known_contexts,
2555 vec<ipa_agg_jump_function_p> known_aggs)
2556{
2557 tree target;
2558 struct cgraph_node *callee;
2559   struct ipa_fn_summary *isummary;
2560 enum availability avail;
2561 bool speculative;
2562
2563 if (!known_vals.exists () && !known_contexts.exists ())
2564 return false;
2565 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
2566 return false;
2567
2568 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
2569 known_aggs, &speculative);
2570 if (!target || speculative)
2571 return false;
2572
2573 /* Account for difference in cost between indirect and direct calls. */
2574 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
2575 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
2576 gcc_checking_assert (*time >= 0);
2577 gcc_checking_assert (*size >= 0);
2578
2579 callee = cgraph_node::get (target);
2580 if (!callee || !callee->definition)
2581 return false;
2582 callee = callee->function_symbol (&avail);
2583 if (avail < AVAIL_AVAILABLE)
2584 return false;
2585   isummary = ipa_fn_summaries->get (callee);
2586 return isummary->inlinable;
2587}
2588
2589/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
2590 handle edge E with probability PROB.
2591 Set HINTS if edge may be devirtualized.
2592 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
2593 site. */
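/* An illustrative sketch of the scaling done below: a call with
   call_stmt_time T on an edge with frequency F executed with probability
   PROB contributes

       T * F * PROB / (CGRAPH_FREQ_BASE * REG_BR_PROB_BASE)

   to the time estimate; the PROB factor drops out when
   PROB == REG_BR_PROB_BASE.  */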
2594
2595static inline void
2596estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
2597 sreal *time,
2598 int prob,
2599 vec<tree> known_vals,
2600 vec<ipa_polymorphic_call_context> known_contexts,
2601 vec<ipa_agg_jump_function_p> known_aggs,
2602			     ipa_hints *hints)
2603{
2604 struct ipa_call_summary *es = ipa_call_summaries->get (e);
2605 int call_size = es->call_stmt_size;
2606 int call_time = es->call_stmt_time;
2607 int cur_size;
2608 if (!e->callee
2609 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
2610 known_vals, known_contexts, known_aggs)
2611 && hints && e->maybe_hot_p ())
2612 *hints |= INLINE_HINT_indirect_call;
2613   cur_size = call_size * ipa_fn_summary::size_scale;
2614 *size += cur_size;
2615 if (min_size)
2616 *min_size += cur_size;
2617 if (prob == REG_BR_PROB_BASE)
2618 *time += ((sreal)(call_time * e->frequency)) / CGRAPH_FREQ_BASE;
2619 else
2620 *time += ((sreal)call_time) * (prob * e->frequency)
2621 / (CGRAPH_FREQ_BASE * REG_BR_PROB_BASE);
2622}
2623
2624
2625
2626/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
2627 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
2628 describe context of the call site. */
2629
2630static void
2631estimate_calls_size_and_time (struct cgraph_node *node, int *size,
2632 int *min_size, sreal *time,
2633			      ipa_hints *hints,
2634 clause_t possible_truths,
2635 vec<tree> known_vals,
2636 vec<ipa_polymorphic_call_context> known_contexts,
2637 vec<ipa_agg_jump_function_p> known_aggs)
2638{
2639 struct cgraph_edge *e;
2640 for (e = node->callees; e; e = e->next_callee)
2641 {
2642 struct ipa_call_summary *es = ipa_call_summaries->get (e);
2643
2644      /* Do not care about zero-sized builtins.  */
2645 if (e->inline_failed && !es->call_stmt_size)
2646 {
2647 gcc_checking_assert (!es->call_stmt_time);
2648 continue;
2649 }
2650 if (!es->predicate
2651 || es->predicate->evaluate (possible_truths))
2652 {
2653 if (e->inline_failed)
2654 {
2655	      /* Predicates of calls shall not use NOT_CHANGED codes,
2656		 so we do not need to compute probabilities.  */
2657 estimate_edge_size_and_time (e, size,
2658 es->predicate ? NULL : min_size,
2659 time, REG_BR_PROB_BASE,
2660 known_vals, known_contexts,
2661 known_aggs, hints);
2662 }
2663 else
2664 estimate_calls_size_and_time (e->callee, size, min_size, time,
2665 hints,
2666 possible_truths,
2667 known_vals, known_contexts,
2668 known_aggs);
2669 }
2670 }
2671 for (e = node->indirect_calls; e; e = e->next_callee)
2672 {
2673 struct ipa_call_summary *es = ipa_call_summaries->get (e);
2674 if (!es->predicate
2675 || es->predicate->evaluate (possible_truths))
2676 estimate_edge_size_and_time (e, size,
2677 es->predicate ? NULL : min_size,
2678 time, REG_BR_PROB_BASE,
2679 known_vals, known_contexts, known_aggs,
2680 hints);
2681 }
2682}
2683
2684
2685/* Estimate size and time needed to execute NODE assuming
2686 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
2687   information about NODE's arguments.  If non-NULL, also use the probability
2688   information present in the INLINE_PARAM_SUMMARY vector.
2689   Additionally determine the hints implied by the context.  Finally compute
2690   the minimal size needed for the call that is independent of the call
2691   context and can be used for fast estimates.  Return the values in RET_SIZE,
2692   RET_MIN_SIZE, RET_TIME and RET_HINTS.  */
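/* A minimal usage sketch (illustrative; assumes NODE was already analyzed
   and no argument information is known):

     clause_t all = ~(clause_t) (1 << predicate::false_condition);
     int size;
     sreal time, nonspec_time;
     estimate_node_size_and_time (node, all, all, vNULL, vNULL, vNULL,
				  &size, NULL, &time, &nonspec_time,
				  NULL, vNULL);  */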
2693
2694void
2695estimate_node_size_and_time (struct cgraph_node *node,
2696 clause_t possible_truths,
2697 clause_t nonspec_possible_truths,
2698 vec<tree> known_vals,
2699 vec<ipa_polymorphic_call_context> known_contexts,
2700 vec<ipa_agg_jump_function_p> known_aggs,
2701 int *ret_size, int *ret_min_size,
2702 sreal *ret_time,
2703 sreal *ret_nonspecialized_time,
2704			     ipa_hints *ret_hints,
2705 vec<inline_param_summary>
2706 inline_param_summary)
2707{
2708   struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
2709 size_time_entry *e;
2710 int size = 0;
2711 sreal time = 0;
2712 int min_size = 0;
2713   ipa_hints hints = 0;
2714 int i;
2715
2716 if (dump_file && (dump_flags & TDF_DETAILS))
2717 {
2718 bool found = false;
2719 fprintf (dump_file, " Estimating body: %s/%i\n"
2720 " Known to be false: ", node->name (),
2721 node->order);
2722
2723 for (i = predicate::not_inlined_condition;
2724 i < (predicate::first_dynamic_condition
2725 + (int) vec_safe_length (info->conds)); i++)
2726 if (!(possible_truths & (1 << i)))
2727 {
2728 if (found)
2729 fprintf (dump_file, ", ");
2730 found = true;
2731 dump_condition (dump_file, info->conds, i);
2732 }
2733 }
2734
2735 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
2736 known_vals, known_contexts, known_aggs);
2737 sreal nonspecialized_time = time;
2738
2739 for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
2740 {
2741 bool nonconst = e->nonconst_predicate.evaluate (possible_truths);
2742 bool exec = e->exec_predicate.evaluate (nonspec_possible_truths);
2743 gcc_assert (!nonconst || exec);
2744 if (exec)
2745 {
2746 gcc_checking_assert (e->time >= 0);
2747 gcc_checking_assert (time >= 0);
2748
2749 /* We compute specialized size only because size of nonspecialized
2750 copy is context independent.
2751
2752 The difference between nonspecialized execution and specialized is
2753 that nonspecialized is not going to have optimized out computations
2754 known to be constant in a specialized setting. */
2755 if (nonconst)
2756 size += e->size;
2757 nonspecialized_time += e->time;
2758 if (!nonconst)
2759 ;
2760 else if (!inline_param_summary.exists ())
2761 {
2762 if (nonconst)
2763 time += e->time;
2764 }
2765 else
2766 {
2767 int prob = e->nonconst_predicate.probability
2768 (info->conds, possible_truths,
2769 inline_param_summary);
2770 gcc_checking_assert (prob >= 0);
2771 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
2772 time += e->time * prob / REG_BR_PROB_BASE;
2773 }
2774 gcc_checking_assert (time >= 0);
2775 }
2776 }
2777 gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
2778 gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
2779 min_size = (*info->size_time_table)[0].size;
2780 gcc_checking_assert (size >= 0);
2781 gcc_checking_assert (time >= 0);
2782   /* nonspecialized_time should always be bigger than specialized time.
2783      Roundoff issues however may get in the way.  */
2784 gcc_checking_assert ((nonspecialized_time - time) >= -1);
2785
2786   /* Roundoff issues may make specialized time bigger than nonspecialized
2787      time.  We do not really want that to happen because some heuristics
2788      may get confused by seeing negative speedups.  */
2789 if (time > nonspecialized_time)
2790 time = nonspecialized_time;
2791
2792 if (info->loop_iterations
2793 && !info->loop_iterations->evaluate (possible_truths))
2794 hints |= INLINE_HINT_loop_iterations;
2795 if (info->loop_stride
2796 && !info->loop_stride->evaluate (possible_truths))
2797 hints |= INLINE_HINT_loop_stride;
2798 if (info->array_index
2799 && !info->array_index->evaluate (possible_truths))
2800 hints |= INLINE_HINT_array_index;
2801 if (info->scc_no)
2802 hints |= INLINE_HINT_in_scc;
2803 if (DECL_DECLARED_INLINE_P (node->decl))
2804 hints |= INLINE_HINT_declared_inline;
2805
2806   size = RDIV (size, ipa_fn_summary::size_scale);
2807   min_size = RDIV (min_size, ipa_fn_summary::size_scale);
2808
2809 if (dump_file && (dump_flags & TDF_DETAILS))
2810 fprintf (dump_file, "\n size:%i time:%f nonspec time:%f\n", (int) size,
2811 time.to_double (), nonspecialized_time.to_double ());
2812 if (ret_time)
2813 *ret_time = time;
2814 if (ret_nonspecialized_time)
2815 *ret_nonspecialized_time = nonspecialized_time;
2816 if (ret_size)
2817 *ret_size = size;
2818 if (ret_min_size)
2819 *ret_min_size = min_size;
2820 if (ret_hints)
2821 *ret_hints = hints;
2822 return;
2823}
2824
2825
2826/* Estimate size and time needed to execute callee of EDGE assuming that
2827 parameters known to be constant at caller of EDGE are propagated.
2828 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
2829 and types for parameters. */
2830
2831void
2832estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
2833 vec<tree> known_vals,
2834 vec<ipa_polymorphic_call_context>
2835 known_contexts,
2836 vec<ipa_agg_jump_function_p> known_aggs,
2837 int *ret_size, sreal *ret_time,
2838 sreal *ret_nonspec_time,
2839				   ipa_hints *hints)
2840{
2841 clause_t clause, nonspec_clause;
2842
2843 evaluate_conditions_for_known_args (node, false, known_vals, known_aggs,
2844 &clause, &nonspec_clause);
2845 estimate_node_size_and_time (node, clause, nonspec_clause,
2846 known_vals, known_contexts,
2847 known_aggs, ret_size, NULL, ret_time,
2848 ret_nonspec_time, hints, vNULL);
2849}
2850
2851
2852/* Update summary information of inline clones after inlining.
2853 Compute peak stack usage. */
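/* E.g. (illustrative frame sizes): if A (32 bytes) inlines B (16 bytes)
   which inlines C (8 bytes), B's stack_frame_offset becomes 32, C's becomes
   32 + 16 = 48, and the estimated peak recorded on A is 48 + 8 = 56.  */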
2854
2855static void
2856inline_update_callee_summaries (struct cgraph_node *node, int depth)
2857{
2858 struct cgraph_edge *e;
2859   struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (node);
2860   struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (node->callers->caller);
2861 HOST_WIDE_INT peak;
2862
2863 callee_info->stack_frame_offset
2864 = caller_info->stack_frame_offset
2865 + caller_info->estimated_self_stack_size;
2866 peak = callee_info->stack_frame_offset
2867 + callee_info->estimated_self_stack_size;
2868   if (ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
2869     ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
2870 ipa_propagate_frequency (node);
2871 for (e = node->callees; e; e = e->next_callee)
2872 {
2873 if (!e->inline_failed)
2874 inline_update_callee_summaries (e->callee, depth);
2875 ipa_call_summaries->get (e)->loop_depth += depth;
2876 }
2877 for (e = node->indirect_calls; e; e = e->next_callee)
2878 ipa_call_summaries->get (e)->loop_depth += depth;
2879}
2880
2881/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
2882   When function A is inlined into B and A calls C with a parameter that
2883   changes with probability PROB1, and that parameter is known to be a
2884   passthrough of an argument of B that changes with probability PROB2, the
2885   probability of change is now PROB1*PROB2.  */
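/* Worked example (illustrative): with PROB1 == REG_BR_PROB_BASE / 2 and
   PROB2 == REG_BR_PROB_BASE / 2, combine_probabilities yields roughly
   REG_BR_PROB_BASE / 4, i.e. a 25% chance of change; a zero result from two
   nonzero inputs is rounded up to 1 below.  */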
2886
2887static void
2888remap_edge_change_prob (struct cgraph_edge *inlined_edge,
2889 struct cgraph_edge *edge)
2890{
2891 if (ipa_node_params_sum)
2892 {
2893 int i;
2894 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
2895 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2896 struct ipa_call_summary *inlined_es
2897 = ipa_call_summaries->get (inlined_edge);
2898
2899 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
2900 {
2901 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
2902 if (jfunc->type == IPA_JF_PASS_THROUGH
2903 || jfunc->type == IPA_JF_ANCESTOR)
2904 {
2905 int id = jfunc->type == IPA_JF_PASS_THROUGH
2906 ? ipa_get_jf_pass_through_formal_id (jfunc)
2907 : ipa_get_jf_ancestor_formal_id (jfunc);
2908 if (id < (int) inlined_es->param.length ())
2909 {
2910 int prob1 = es->param[i].change_prob;
2911 int prob2 = inlined_es->param[id].change_prob;
2912 int prob = combine_probabilities (prob1, prob2);
2913
2914 if (prob1 && prob2 && !prob)
2915 prob = 1;
2916
2917 es->param[i].change_prob = prob;
2918 }
2919 }
2920 }
2921 }
2922}
2923
2924/* Update edge summaries of NODE after INLINED_EDGE has been inlined.
2925
2926 Remap predicates of callees of NODE. Rest of arguments match
2927 remap_predicate.
2928
2929 Also update change probabilities. */
2930
2931static void
2932remap_edge_summaries (struct cgraph_edge *inlined_edge,
2933 struct cgraph_node *node,
2934		      struct ipa_fn_summary *info,
2935		      struct ipa_fn_summary *callee_info,
2936 vec<int> operand_map,
2937 vec<int> offset_map,
2938 clause_t possible_truths,
2939 predicate *toplev_predicate)
2940{
2941 struct cgraph_edge *e, *next;
2942 for (e = node->callees; e; e = next)
2943 {
2944 struct ipa_call_summary *es = ipa_call_summaries->get (e);
2945 predicate p;
2946 next = e->next_callee;
2947
2948 if (e->inline_failed)
2949 {
2950 remap_edge_change_prob (inlined_edge, e);
2951
2952 if (es->predicate)
2953 {
2954 p = es->predicate->remap_after_inlining
2955 (info, callee_info, operand_map,
2956 offset_map, possible_truths,
2957 *toplev_predicate);
2958 edge_set_predicate (e, &p);
2959 }
2960 else
2961 edge_set_predicate (e, toplev_predicate);
2962 }
2963 else
2964 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
2965 operand_map, offset_map, possible_truths,
2966 toplev_predicate);
2967 }
2968 for (e = node->indirect_calls; e; e = next)
2969 {
2970 struct ipa_call_summary *es = ipa_call_summaries->get (e);
2971 predicate p;
2972 next = e->next_callee;
2973
2974 remap_edge_change_prob (inlined_edge, e);
2975 if (es->predicate)
2976 {
2977 p = es->predicate->remap_after_inlining
2978 (info, callee_info, operand_map, offset_map,
2979 possible_truths, *toplev_predicate);
2980 edge_set_predicate (e, &p);
2981 }
2982 else
2983 edge_set_predicate (e, toplev_predicate);
2984 }
2985}
2986
2987/* Same as remap_predicate, but set result into hint *HINT. */
2988
2989static void
2990 remap_hint_predicate (struct ipa_fn_summary *info,
2991		      struct ipa_fn_summary *callee_info,
2992 predicate **hint,
2993 vec<int> operand_map,
2994 vec<int> offset_map,
2995 clause_t possible_truths,
2996 predicate *toplev_predicate)
2997{
2998 predicate p;
2999
3000 if (!*hint)
3001 return;
3002 p = (*hint)->remap_after_inlining
3003 (info, callee_info,
3004 operand_map, offset_map,
3005 possible_truths, *toplev_predicate);
3006 if (p != false && p != true)
3007 {
3008 if (!*hint)
3009 set_hint_predicate (hint, p);
3010 else
3011 **hint &= p;
3012 }
3013}
3014
3015/* We inlined EDGE. Update summary of the function we inlined into. */
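/* E.g. (illustrative): if the edge's jump function says the callee's
   parameter 0 is a NOP pass-through of the caller's parameter 2, then
   operand_map[0] == 2 below and predicates on the callee's parameter 0 are
   rewritten to test the caller's parameter 2.  */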
3016
3017void
3018 ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
3019 {
3020   struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
3021 struct cgraph_node *to = (edge->caller->global.inlined_to
3022 ? edge->caller->global.inlined_to : edge->caller);
3023   struct ipa_fn_summary *info = ipa_fn_summaries->get (to);
3024 clause_t clause = 0; /* not_inline is known to be false. */
3025 size_time_entry *e;
3026 vec<int> operand_map = vNULL;
3027 vec<int> offset_map = vNULL;
3028 int i;
3029 predicate toplev_predicate;
3030 predicate true_p = true;
3031 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
3032
3033 if (es->predicate)
3034 toplev_predicate = *es->predicate;
3035 else
3036 toplev_predicate = true;
3037
3038 info->fp_expressions |= callee_info->fp_expressions;
3039
3040 if (callee_info->conds)
3041 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL, NULL);
3042 if (ipa_node_params_sum && callee_info->conds)
3043 {
3044 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3045 int count = ipa_get_cs_argument_count (args);
3046 int i;
3047
3048 if (count)
3049 {
3050 operand_map.safe_grow_cleared (count);
3051 offset_map.safe_grow_cleared (count);
3052 }
3053 for (i = 0; i < count; i++)
3054 {
3055 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3056 int map = -1;
3057
3058 /* TODO: handle non-NOPs when merging. */
3059 if (jfunc->type == IPA_JF_PASS_THROUGH)
3060 {
3061 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3062 map = ipa_get_jf_pass_through_formal_id (jfunc);
3063 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3064 offset_map[i] = -1;
3065 }
3066 else if (jfunc->type == IPA_JF_ANCESTOR)
3067 {
3068 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3069 if (offset >= 0 && offset < INT_MAX)
3070 {
3071 map = ipa_get_jf_ancestor_formal_id (jfunc);
3072 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3073 offset = -1;
3074 offset_map[i] = offset;
3075 }
3076 }
3077 operand_map[i] = map;
3078 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3079 }
3080 }
3081 for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
3082 {
3083 predicate p;
3084 p = e->exec_predicate.remap_after_inlining
3085 (info, callee_info, operand_map,
3086 offset_map, clause,
3087 toplev_predicate);
3088 predicate nonconstp;
3089 nonconstp = e->nonconst_predicate.remap_after_inlining
3090 (info, callee_info, operand_map,
3091 offset_map, clause,
3092 toplev_predicate);
3093 if (p != false && nonconstp != false)
3094 {
3095 sreal add_time = ((sreal)e->time * edge->frequency) / CGRAPH_FREQ_BASE;
3096 int prob = e->nonconst_predicate.probability (callee_info->conds,
3097 clause, es->param);
3098 add_time = add_time * prob / REG_BR_PROB_BASE;
3099 if (prob != REG_BR_PROB_BASE
3100 && dump_file && (dump_flags & TDF_DETAILS))
3101 {
3102 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3103 (double) prob / REG_BR_PROB_BASE);
3104 }
3105 info->account_size_time (e->size, add_time, p, nonconstp);
3106 }
3107 }
3108 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3109 offset_map, clause, &toplev_predicate);
3110 remap_hint_predicate (info, callee_info,
3111 &callee_info->loop_iterations,
3112 operand_map, offset_map, clause, &toplev_predicate);
3113 remap_hint_predicate (info, callee_info,
3114 &callee_info->loop_stride,
3115 operand_map, offset_map, clause, &toplev_predicate);
3116 remap_hint_predicate (info, callee_info,
3117 &callee_info->array_index,
3118 operand_map, offset_map, clause, &toplev_predicate);
3119
3120 inline_update_callee_summaries (edge->callee,
3121 ipa_call_summaries->get (edge)->loop_depth);
3122
3123   /* We do not maintain predicates of inlined edges; free the predicate.  */
3124 edge_set_predicate (edge, &true_p);
3125 /* Similarly remove param summaries. */
3126 es->param.release ();
3127 operand_map.release ();
3128 offset_map.release ();
3129}
3130
3131/* For performance reasons ipa_merge_fn_summary_after_inlining does not update
3132   the overall size and time.  Recompute them here.  */
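/* The final rounding below is (size + size_scale / 2) / size_scale; e.g.
   assuming size_scale == 2, an accumulated size of 7 becomes (7 + 1) / 2
   == 4 (illustrative values only).  */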
3133
3134void
3135 ipa_update_overall_fn_summary (struct cgraph_node *node)
3136 {
3137   struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
3138 size_time_entry *e;
3139 int i;
3140
3141 info->size = 0;
3142 info->time = 0;
3143 for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
3144 {
3145 info->size += e->size;
3146 info->time += e->time;
3147 }
3148 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3149 &info->time, NULL,
3150 ~(clause_t) (1 << predicate::false_condition),
3151 vNULL, vNULL, vNULL);
3152   info->size = (info->size + ipa_fn_summary::size_scale / 2) / ipa_fn_summary::size_scale;
3153}
3154
3155
3156/* This function performs intraprocedural analysis in NODE that is required to
3157 inline indirect calls. */
3158
3159static void
3160inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
3161{
3162 ipa_analyze_node (node);
3163 if (dump_file && (dump_flags & TDF_DETAILS))
3164 {
3165 ipa_print_node_params (dump_file, node);
3166 ipa_print_node_jump_functions (dump_file, node);
3167 }
3168}
3169
3170
3171/* Analyze the function body and note its size and time.  */
3172
3173void
3174inline_analyze_function (struct cgraph_node *node)
3175{
3176 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
3177
3178 if (dump_file)
3179 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
3180 node->name (), node->order);
3181 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
3182 inline_indirect_intraprocedural_analysis (node);
3183   compute_fn_summary (node, false);
3184 if (!optimize)
3185 {
3186 struct cgraph_edge *e;
3187 for (e = node->callees; e; e = e->next_callee)
3188 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
3189 for (e = node->indirect_calls; e; e = e->next_callee)
3190 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
3191 }
3192
3193 pop_cfun ();
3194}
3195
3196
3197/* Called when new function is inserted to callgraph late. */
3198
3199void
3200 ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
3201{
3202 inline_analyze_function (node);
3203}
3204
3205/* Compute function summaries of all functions in the call graph.  */
3206
3207void
3208inline_generate_summary (void)
3209{
3210 struct cgraph_node *node;
3211
3212 FOR_EACH_DEFINED_FUNCTION (node)
3213 if (DECL_STRUCT_FUNCTION (node->decl))
3214 node->local.versionable = tree_versionable_function_p (node->decl);
3215
3216 /* When not optimizing, do not bother to analyze. Inlining is still done
3217 because edge redirection needs to happen there. */
3218 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
3219 return;
3220
3221   ipa_fn_summary_alloc ();
3222
3223   ipa_fn_summaries->enable_insertion_hook ();
3224
3225 ipa_register_cgraph_hooks ();
3226 inline_free_summary ();
3227
3228 FOR_EACH_DEFINED_FUNCTION (node)
3229 if (!node->alias)
3230 inline_analyze_function (node);
3231}
3232
3233
3234/* Read the inline summary for edge E from IB.  */
3235
3236static void
3237read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e)
3238{
3239 struct ipa_call_summary *es = ipa_call_summaries->get (e);
3240 predicate p;
3241 int length, i;
3242
3243 es->call_stmt_size = streamer_read_uhwi (ib);
3244 es->call_stmt_time = streamer_read_uhwi (ib);
3245 es->loop_depth = streamer_read_uhwi (ib);
3246 p.stream_in (ib);
3247 edge_set_predicate (e, &p);
3248 length = streamer_read_uhwi (ib);
3249 if (length)
3250 {
3251 es->param.safe_grow_cleared (length);
3252 for (i = 0; i < length; i++)
3253 es->param[i].change_prob = streamer_read_uhwi (ib);
3254 }
3255}
3256
3257
3258/* Stream in inline summaries from the section. */
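/* For orientation, the per-node record read below consists of: the symtab
   encoder index, the stack size, the self size, the time, a bitpack of the
   inlinable/contains_cilk_spawn/fp_expressions flags, the condition table,
   the size/time entry table, the three hint predicates (loop_iterations,
   loop_stride, array_index) and the call summaries of all outgoing
   edges.  */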
3259
3260static void
3261inline_read_section (struct lto_file_decl_data *file_data, const char *data,
3262 size_t len)
3263{
3264 const struct lto_function_header *header =
3265 (const struct lto_function_header *) data;
3266 const int cfg_offset = sizeof (struct lto_function_header);
3267 const int main_offset = cfg_offset + header->cfg_size;
3268 const int string_offset = main_offset + header->main_size;
3269 struct data_in *data_in;
3270 unsigned int i, count2, j;
3271 unsigned int f_count;
3272
3273 lto_input_block ib ((const char *) data + main_offset, header->main_size,
3274 file_data->mode_table);
3275
3276 data_in =
3277 lto_data_in_create (file_data, (const char *) data + string_offset,
3278 header->string_size, vNULL);
3279 f_count = streamer_read_uhwi (&ib);
3280 for (i = 0; i < f_count; i++)
3281 {
3282 unsigned int index;
3283 struct cgraph_node *node;
3284       struct ipa_fn_summary *info;
3285 lto_symtab_encoder_t encoder;
3286 struct bitpack_d bp;
3287 struct cgraph_edge *e;
3288 predicate p;
3289
3290 index = streamer_read_uhwi (&ib);
3291 encoder = file_data->symtab_node_encoder;
3292 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
3293 index));
3294       info = ipa_fn_summaries->get (node);
3295
3296 info->estimated_stack_size
3297 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
3298 info->size = info->self_size = streamer_read_uhwi (&ib);
3299 info->time = sreal::stream_in (&ib);
3300
3301 bp = streamer_read_bitpack (&ib);
3302 info->inlinable = bp_unpack_value (&bp, 1);
3303 info->contains_cilk_spawn = bp_unpack_value (&bp, 1);
3304 info->fp_expressions = bp_unpack_value (&bp, 1);
3305
3306 count2 = streamer_read_uhwi (&ib);
3307 gcc_assert (!info->conds);
3308 for (j = 0; j < count2; j++)
3309 {
3310 struct condition c;
3311 c.operand_num = streamer_read_uhwi (&ib);
3312 c.size = streamer_read_uhwi (&ib);
3313 c.code = (enum tree_code) streamer_read_uhwi (&ib);
3314 c.val = stream_read_tree (&ib, data_in);
3315 bp = streamer_read_bitpack (&ib);
3316 c.agg_contents = bp_unpack_value (&bp, 1);
3317 c.by_ref = bp_unpack_value (&bp, 1);
3318 if (c.agg_contents)
3319 c.offset = streamer_read_uhwi (&ib);
3320 vec_safe_push (info->conds, c);
3321 }
3322 count2 = streamer_read_uhwi (&ib);
3323 gcc_assert (!info->size_time_table);
3324 for (j = 0; j < count2; j++)
3325 {
3326 struct size_time_entry e;
3327
3328 e.size = streamer_read_uhwi (&ib);
3329 e.time = sreal::stream_in (&ib);
3330 e.exec_predicate.stream_in (&ib);
3331 e.nonconst_predicate.stream_in (&ib);
3332
3333 vec_safe_push (info->size_time_table, e);
3334 }
3335
3336 p.stream_in (&ib);
3337 set_hint_predicate (&info->loop_iterations, p);
3338 p.stream_in (&ib);
3339 set_hint_predicate (&info->loop_stride, p);
3340 p.stream_in (&ib);
3341 set_hint_predicate (&info->array_index, p);
3342 for (e = node->callees; e; e = e->next_callee)
3343 read_ipa_call_summary (&ib, e);
3344 for (e = node->indirect_calls; e; e = e->next_callee)
3345 read_ipa_call_summary (&ib, e);
3346 }
3347
3348   lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data,
3349 len);
3350 lto_data_in_delete (data_in);
3351}
3352
3353
3354/* Read the inline summary.  Jump functions are shared between ipa-cp
3355   and the inliner, so when ipa-cp is active, we don't need to write them
3356   twice.  */
3357
3358void
3359inline_read_summary (void)
3360{
3361 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
3362 struct lto_file_decl_data *file_data;
3363 unsigned int j = 0;
3364
3365   ipa_fn_summary_alloc ();
3366
3367 while ((file_data = file_data_vec[j++]))
3368 {
3369 size_t len;
3370 const char *data = lto_get_section_data (file_data,
3371					     LTO_section_ipa_fn_summary,
3372 NULL, &len);
3373 if (data)
3374 inline_read_section (file_data, data, len);
3375 else
3376	/* Fatal error here.  We do not want to support compiling ltrans units
3377	   with a different version of the compiler or different flags than
3378	   the WPA unit, so this should never happen.  */
3379 fatal_error (input_location,
3380 "ipa inline summary is missing in input file");
3381 }
3382 if (optimize)
3383 {
3384 ipa_register_cgraph_hooks ();
3385 if (!flag_ipa_cp)
3386 ipa_prop_read_jump_functions ();
3387 }
3388
3389   gcc_assert (ipa_fn_summaries);
3390   ipa_fn_summaries->enable_insertion_hook ();
3391}
3392
3393
3394/* Write inline summary for edge E to OB. */
3395
3396static void
3397write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
3398{
3399 struct ipa_call_summary *es = ipa_call_summaries->get (e);
3400 int i;
3401
3402 streamer_write_uhwi (ob, es->call_stmt_size);
3403 streamer_write_uhwi (ob, es->call_stmt_time);
3404 streamer_write_uhwi (ob, es->loop_depth);
3405 if (es->predicate)
3406 es->predicate->stream_out (ob);
3407 else
3408 streamer_write_uhwi (ob, 0);
3409 streamer_write_uhwi (ob, es->param.length ());
3410 for (i = 0; i < (int) es->param.length (); i++)
3411 streamer_write_uhwi (ob, es->param[i].change_prob);
3412}
3413
3414
3415/* Write the inline summary for each node in SET.
3416   Jump functions are shared between ipa-cp and the inliner, so when ipa-cp
3417   is active, we don't need to write them twice.  */
3418
3419void
3420inline_write_summary (void)
3421{
3422   struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
3423 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
3424 unsigned int count = 0;
3425 int i;
3426
3427 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
3428 {
3429 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
3430 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
3431 if (cnode && cnode->definition && !cnode->alias)
3432 count++;
3433 }
3434 streamer_write_uhwi (ob, count);
3435
3436 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
3437 {
3438 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
3439 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
3440 if (cnode && cnode->definition && !cnode->alias)
3441 {
3442	  struct ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
3443 struct bitpack_d bp;
3444 struct cgraph_edge *edge;
3445 int i;
3446 size_time_entry *e;
3447 struct condition *c;
3448
3449 streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
3450 streamer_write_hwi (ob, info->estimated_self_stack_size);
3451 streamer_write_hwi (ob, info->self_size);
3452 info->time.stream_out (ob);
3453 bp = bitpack_create (ob->main_stream);
3454 bp_pack_value (&bp, info->inlinable, 1);
3455 bp_pack_value (&bp, info->contains_cilk_spawn, 1);
3456 bp_pack_value (&bp, info->fp_expressions, 1);
3457 streamer_write_bitpack (&bp);
3458 streamer_write_uhwi (ob, vec_safe_length (info->conds));
3459 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
3460 {
3461 streamer_write_uhwi (ob, c->operand_num);
3462 streamer_write_uhwi (ob, c->size);
3463 streamer_write_uhwi (ob, c->code);
3464 stream_write_tree (ob, c->val, true);
3465 bp = bitpack_create (ob->main_stream);
3466 bp_pack_value (&bp, c->agg_contents, 1);
3467 bp_pack_value (&bp, c->by_ref, 1);
3468 streamer_write_bitpack (&bp);
3469 if (c->agg_contents)
3470 streamer_write_uhwi (ob, c->offset);
3471 }
3472 streamer_write_uhwi (ob, vec_safe_length (info->size_time_table));
3473 for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
3474 {
3475 streamer_write_uhwi (ob, e->size);
3476 e->time.stream_out (ob);
3477 e->exec_predicate.stream_out (ob);
3478 e->nonconst_predicate.stream_out (ob);
3479 }
3480 if (info->loop_iterations)
3481 info->loop_iterations->stream_out (ob);
3482 else
3483 streamer_write_uhwi (ob, 0);
3484 if (info->loop_stride)
3485 info->loop_stride->stream_out (ob);
3486 else
3487 streamer_write_uhwi (ob, 0);
3488 if (info->array_index)
3489 info->array_index->stream_out (ob);
3490 else
3491 streamer_write_uhwi (ob, 0);
3492 for (edge = cnode->callees; edge; edge = edge->next_callee)
3493 write_ipa_call_summary (ob, edge);
3494 for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
3495 write_ipa_call_summary (ob, edge);
3496 }
3497 }
3498 streamer_write_char_stream (ob->main_stream, 0);
3499 produce_asm (ob, NULL);
3500 destroy_output_block (ob);
3501
3502 if (optimize && !flag_ipa_cp)
3503 ipa_prop_write_jump_functions ();
3504}
3505
3506
3507/* Release inline summary. */
3508
3509void
3510inline_free_summary (void)
3511{
3512 struct cgraph_node *node;
3513 if (!ipa_call_summaries)
3514 return;
3515 FOR_EACH_DEFINED_FUNCTION (node)
3516 if (!node->alias)
3517       ipa_fn_summaries->get (node)->reset (node);
3518   ipa_fn_summaries->release ();
3519   ipa_fn_summaries = NULL;
3520 ipa_call_summaries->release ();
3521 delete ipa_call_summaries;
3522 ipa_call_summaries = NULL;
3523 edge_predicate_pool.release ();
3524}