/* Interprocedural constant propagation
- Copyright (C) 2005-2013 Free Software Foundation, Inc.
+ Copyright (C) 2005-2014 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <RAZYA@il.ibm.com> and Martin Jambor
<mjambor@suse.cz>
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "target.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "vec.h"
+#include "hash-map.h"
+#include "is-a.h"
+#include "plugin-api.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "tm.h"
+#include "hard-reg-set.h"
+#include "input.h"
+#include "function.h"
+#include "ipa-ref.h"
+#include "cgraph.h"
+#include "alloc-pool.h"
#include "ipa-prop.h"
#include "bitmap.h"
#include "tree-pass.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
-struct ipcp_value;
+template <typename valtype> class ipcp_value;
/* Describes a particular source for an IPA-CP value. */
-struct ipcp_value_source
+template <typename valtype>
+class ipcp_value_source
{
+public:
/* Aggregate offset of the source, negative if the source is scalar value of
the argument itself. */
HOST_WIDE_INT offset;
/* The incoming edge that brought the value. */
- struct cgraph_edge *cs;
+ cgraph_edge *cs;
/* If the jump function that resulted into his value was a pass-through or an
ancestor, this is the ipcp_value of the caller from which the described
value has been derived. Otherwise it is NULL. */
- struct ipcp_value *val;
+ ipcp_value<valtype> *val;
/* Next pointer in a linked list of sources of a value. */
- struct ipcp_value_source *next;
+ ipcp_value_source *next;
/* If the jump function that resulted into his value was a pass-through or an
ancestor, this is the index of the parameter of the caller the jump
function references. */
int index;
};
+/* Common ancestor for all ipcp_value instantiations. */
+
+class ipcp_value_base
+{
+public:
+ /* Time benefit and size cost that specializing the function for this value
+ would bring about in this function alone. */
+ int local_time_benefit, local_size_cost;
+ /* Time benefit and size cost that specializing the function for this value
+     can bring about in its callees (transitively). */
+ int prop_time_benefit, prop_size_cost;
+};
+
/* Describes one particular value stored in struct ipcp_lattice. */
-struct ipcp_value
+template <typename valtype>
+class ipcp_value : public ipcp_value_base
{
- /* The actual value for the given parameter. This is either an IPA invariant
- or a TREE_BINFO describing a type that can be used for
- devirtualization. */
- tree value;
+public:
+ /* The actual value for the given parameter. */
+ valtype value;
/* The list of sources from which this value originates. */
- struct ipcp_value_source *sources;
+ ipcp_value_source <valtype> *sources;
/* Next pointers in a linked list of all values in a lattice. */
- struct ipcp_value *next;
+ ipcp_value *next;
/* Next pointers in a linked list of values in a strongly connected component
of values. */
- struct ipcp_value *scc_next;
+ ipcp_value *scc_next;
/* Next pointers in a linked list of SCCs of values sorted topologically
according their sources. */
- struct ipcp_value *topo_next;
+ ipcp_value *topo_next;
/* A specialized node created for this value, NULL if none has been (so far)
created. */
- struct cgraph_node *spec_node;
+ cgraph_node *spec_node;
/* Depth first search number and low link for topological sorting of
values. */
int dfs, low_link;
- /* Time benefit and size cost that specializing the function for this value
- would bring about in this function alone. */
- int local_time_benefit, local_size_cost;
- /* Time benefit and size cost that specializing the function for this value
- can bring about in it's callees (transitively). */
- int prop_time_benefit, prop_size_cost;
/* True if this valye is currently on the topo-sort stack. */
bool on_stack;
+
+ void add_source (cgraph_edge *cs, ipcp_value *src_val, int src_idx,
+ HOST_WIDE_INT offset);
};
/* Lattice describing potential values of a formal parameter of a function, or
by a lattice with the bottom flag set. In that case, values and
contains_variable flag should be disregarded. */
-struct ipcp_lattice
+template <typename valtype>
+class ipcp_lattice
{
+public:
/* The list of known values and types in this lattice. Note that values are
not deallocated if a lattice is set to bottom because there may be value
sources referencing them. */
- struct ipcp_value *values;
+ ipcp_value<valtype> *values;
/* Number of known values and types in this lattice. */
int values_count;
/* The lattice contains a variable component (in addition to values). */
/* The value of the lattice is bottom (i.e. variable and unusable for any
propagation). */
bool bottom;
+
+ inline bool is_single_const ();
+ inline bool set_to_bottom ();
+ inline bool set_contains_variable ();
+ bool add_value (valtype newval, cgraph_edge *cs,
+ ipcp_value<valtype> *src_val = NULL,
+ int src_idx = 0, HOST_WIDE_INT offset = -1);
+ void print (FILE * f, bool dump_sources, bool dump_benefits);
};
-/* Lattice with an offset to describe a part of an aggregate. */
+/* Lattice of tree values with an offset to describe a part of an
+ aggregate. */
-struct ipcp_agg_lattice : public ipcp_lattice
+class ipcp_agg_lattice : public ipcp_lattice<tree>
{
+public:
/* Offset that is being described by this lattice. */
HOST_WIDE_INT offset;
/* Size so that we don't have to re-compute it every time we traverse the
aggregates that are passed in the parameter or by a reference in a parameter
plus some other useful flags. */
-struct ipcp_param_lattices
+class ipcp_param_lattices
{
+public:
/* Lattice describing the value of the parameter itself. */
- struct ipcp_lattice itself;
+ ipcp_lattice<tree> itself;
+ /* Lattice describing the polymorphic contexts of a parameter. */
+ ipcp_lattice<ipa_polymorphic_call_context> ctxlat;
/* Lattices describing aggregate parts. */
- struct ipcp_agg_lattice *aggs;
+ ipcp_agg_lattice *aggs;
/* Number of aggregate lattices */
int aggs_count;
/* True if aggregate data were passed by reference (as opposed to by
/* Allocation pools for values and their sources in ipa-cp. */
-alloc_pool ipcp_values_pool;
+alloc_pool ipcp_cst_values_pool;
+alloc_pool ipcp_poly_ctx_values_pool;
alloc_pool ipcp_sources_pool;
alloc_pool ipcp_agg_lattice_pool;
static long overall_size, max_new_size;
-/* Head of the linked list of topologically sorted values. */
-
-static struct ipcp_value *values_topo;
-
/* Return the param lattices structure corresponding to the Ith formal
parameter of the function described by INFO. */
static inline struct ipcp_param_lattices *
/* Return the lattice corresponding to the scalar value of the Ith formal
parameter of the function described by INFO. */
-static inline struct ipcp_lattice *
+static inline ipcp_lattice<tree> *
ipa_get_scalar_lat (struct ipa_node_params *info, int i)
{
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
return &plats->itself;
}
+/* Return the lattice corresponding to the scalar value of the Ith formal
+ parameter of the function described by INFO. */
+static inline ipcp_lattice<ipa_polymorphic_call_context> *
+ipa_get_poly_ctx_lat (struct ipa_node_params *info, int i)
+{
+ struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ return &plats->ctxlat;
+}
+
/* Return whether LAT is a lattice with a single constant and without an
undefined value. */
-static inline bool
-ipa_lat_is_single_const (struct ipcp_lattice *lat)
+template <typename valtype>
+inline bool
+ipcp_lattice<valtype>::is_single_const ()
{
- if (lat->bottom
- || lat->contains_variable
- || lat->values_count != 1)
+ if (bottom || contains_variable || values_count != 1)
return false;
else
return true;
static void
print_ipcp_constant_value (FILE * f, tree v)
{
- if (TREE_CODE (v) == TREE_BINFO)
- {
- fprintf (f, "BINFO ");
- print_generic_expr (f, BINFO_TYPE (v), 0);
- }
- else if (TREE_CODE (v) == ADDR_EXPR
+ if (TREE_CODE (v) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (v, 0)) == CONST_DECL)
{
fprintf (f, "& ");
print_generic_expr (f, v, 0);
}
-/* Print a lattice LAT to F. */
+/* Print V which is extracted from a value in a lattice to F. */
static void
-print_lattice (FILE * f, struct ipcp_lattice *lat,
- bool dump_sources, bool dump_benefits)
+print_ipcp_constant_value (FILE * f, ipa_polymorphic_call_context v)
+{
+ v.dump(f, false);
+}
+
+/* Print a lattice LAT to F. */
+
+template <typename valtype>
+void
+ipcp_lattice<valtype>::print (FILE * f, bool dump_sources, bool dump_benefits)
{
- struct ipcp_value *val;
+ ipcp_value<valtype> *val;
bool prev = false;
- if (lat->bottom)
+ if (bottom)
{
fprintf (f, "BOTTOM\n");
return;
}
- if (!lat->values_count && !lat->contains_variable)
+ if (!values_count && !contains_variable)
{
fprintf (f, "TOP\n");
return;
}
- if (lat->contains_variable)
+ if (contains_variable)
{
fprintf (f, "VARIABLE");
prev = true;
fprintf (f, "\n");
}
- for (val = lat->values; val; val = val->next)
+ for (val = values; val; val = val->next)
{
if (dump_benefits && prev)
fprintf (f, " ");
if (dump_sources)
{
- struct ipcp_value_source *s;
+ ipcp_value_source<valtype> *s;
fprintf (f, " [from:");
for (s = val->sources; s; s = s->next)
struct ipcp_agg_lattice *aglat;
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
fprintf (f, " param [%d]: ", i);
- print_lattice (f, &plats->itself, dump_sources, dump_benefits);
-
+ plats->itself.print (f, dump_sources, dump_benefits);
+ fprintf (f, " ctxs: ");
+ plats->ctxlat.print (f, dump_sources, dump_benefits);
if (plats->virt_call)
fprintf (f, " virt_call flag set\n");
{
fprintf (f, " %soffset " HOST_WIDE_INT_PRINT_DEC ": ",
plats->aggs_by_ref ? "ref " : "", aglat->offset);
- print_lattice (f, aglat, dump_sources, dump_benefits);
+ aglat->print (f, dump_sources, dump_benefits);
}
}
}
reason = "alias or thunk";
else if (!node->local.versionable)
reason = "not a tree_versionable_function";
- else if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
+ else if (node->get_availability () <= AVAIL_INTERPOSABLE)
reason = "insufficient body availability";
+ else if (!opt_for_fn (node->decl, optimize)
+ || !opt_for_fn (node->decl, flag_ipa_cp))
+ reason = "non-optimized function";
+ else if (lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (node->decl)))
+ {
+ /* Ideally we should clone the SIMD clones themselves and create
+ vector copies of them, so IPA-cp and SIMD clones can happily
+ coexist, but that may not be worth the effort. */
+ reason = "function has SIMD clones";
+ }
+ /* Don't clone decls local to a comdat group; it breaks comdat-local
+ semantics, and for C++ decloned constructors inlining is always better
+ anyway. */
+ else if (node->comdat_local_p ())
+ reason = "comdat-local function";
if (reason && dump_file && !node->alias && !node->thunk.thunk_p)
fprintf (dump_file, "Function %s/%i is not versionable, reason: %s.\n",
for (cs = node->callers; cs; cs = cs->next_caller)
if (cs->caller->thunk.thunk_p)
- cgraph_for_node_and_aliases (cs->caller, gather_caller_stats,
- stats, false);
+ cs->caller->call_for_symbol_thunks_and_aliases (gather_caller_stats,
+ stats, false);
else
{
stats->count_sum += cs->count;
stats->freq_sum += cs->frequency;
stats->n_calls++;
- if (cgraph_maybe_hot_edge_p (cs))
+ if (cs->maybe_hot_p ())
stats->n_hot_calls ++;
}
return false;
{
struct caller_statistics stats;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (node));
+ gcc_checking_assert (node->has_gimple_body_p ());
if (!flag_ipa_cp_clone)
{
}
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (node, gather_caller_stats, &stats, false);
+ node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats, false);
if (inline_summary (node)->self_size < stats.n_calls)
{
return true;
}
+template <typename valtype>
+class value_topo_info
+{
+public:
+ /* Head of the linked list of topologically sorted values. */
+ ipcp_value<valtype> *values_topo;
+ /* Stack for creating SCCs, represented by a linked list too. */
+ ipcp_value<valtype> *stack;
+ /* Counter driving the DFS numbering used by the add_val method. */
+ int dfs_counter;
+
+ value_topo_info () : values_topo (NULL), stack (NULL), dfs_counter (0)
+ {}
+ void add_val (ipcp_value<valtype> *cur_val);
+ void propagate_effects ();
+};
+
/* Arrays representing a topological ordering of call graph nodes and a stack
- of noes used during constant propagation. */
+ of nodes used during constant propagation and also data required to perform
+ topological sort of values and propagation of benefits in the determined
+ order. */
-struct topo_info
+class ipa_topo_info
{
+public:
+ /* Array with obtained topological order of cgraph nodes. */
struct cgraph_node **order;
+ /* Stack of cgraph nodes used during propagation within SCC until all values
+ in the SCC stabilize. */
struct cgraph_node **stack;
int nnodes, stack_top;
+
+ value_topo_info<tree> constants;
+ value_topo_info<ipa_polymorphic_call_context> contexts;
+
+ ipa_topo_info () : order(NULL), stack(NULL), nnodes(0), stack_top(0),
+ constants ()
+ {}
};
/* Allocate the arrays in TOPO and topologically sort the nodes into order. */
static void
-build_toporder_info (struct topo_info *topo)
+build_toporder_info (struct ipa_topo_info *topo)
{
- topo->order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
- topo->stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
- topo->stack_top = 0;
+ topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
+ topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
+
+ gcc_checking_assert (topo->stack_top == 0);
topo->nnodes = ipa_reduced_postorder (topo->order, true, true, NULL);
}
TOPO. */
static void
-free_toporder_info (struct topo_info *topo)
+free_toporder_info (struct ipa_topo_info *topo)
{
ipa_free_postorder_info ();
free (topo->order);
/* Add NODE to the stack in TOPO, unless it is already there. */
static inline void
-push_node_to_stack (struct topo_info *topo, struct cgraph_node *node)
+push_node_to_stack (struct ipa_topo_info *topo, struct cgraph_node *node)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
if (info->node_enqueued)
is empty. */
static struct cgraph_node *
-pop_node_from_stack (struct topo_info *topo)
+pop_node_from_stack (struct ipa_topo_info *topo)
{
if (topo->stack_top)
{
/* Set lattice LAT to bottom and return true if it previously was not set as
such. */
-static inline bool
-set_lattice_to_bottom (struct ipcp_lattice *lat)
+template <typename valtype>
+inline bool
+ipcp_lattice<valtype>::set_to_bottom ()
{
- bool ret = !lat->bottom;
- lat->bottom = true;
+ bool ret = !bottom;
+ bottom = true;
return ret;
}
/* Mark lattice as containing an unknown value and return true if it previously
was not marked as such. */
-static inline bool
-set_lattice_contains_variable (struct ipcp_lattice *lat)
+template <typename valtype>
+inline bool
+ipcp_lattice<valtype>::set_contains_variable ()
{
- bool ret = !lat->contains_variable;
- lat->contains_variable = true;
+ bool ret = !contains_variable;
+ contains_variable = true;
return ret;
}
static inline bool
set_all_contains_variable (struct ipcp_param_lattices *plats)
{
- bool ret = !plats->itself.contains_variable || !plats->aggs_contain_variable;
- plats->itself.contains_variable = true;
- plats->aggs_contain_variable = true;
+ bool ret;
+ ret = plats->itself.set_contains_variable ();
+ ret |= plats->ctxlat.set_contains_variable ();
+ ret |= set_agg_lats_contain_variable (plats);
return ret;
}
bool disable = false, variable = false;
int i;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (node));
- if (!node->local.local)
+ gcc_checking_assert (node->has_gimple_body_p ());
+ if (!cgraph_local_p (node))
{
/* When cloning is allowed, we can assume that externally visible
functions are not called. We will compensate this by cloning
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
if (disable)
{
- set_lattice_to_bottom (&plats->itself);
+ plats->itself.set_to_bottom ();
+ plats->ctxlat.set_to_bottom ();
set_agg_lats_to_bottom (plats);
}
else
{
tree restype, res;
- if (TREE_CODE (input) == TREE_BINFO)
- {
- if (ipa_get_jf_pass_through_type_preserved (jfunc))
- {
- gcc_checking_assert (ipa_get_jf_pass_through_operation (jfunc)
- == NOP_EXPR);
- return input;
- }
- return NULL_TREE;
- }
-
+ gcc_checking_assert (is_gimple_ip_invariant (input));
if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
return input;
- gcc_checking_assert (is_gimple_ip_invariant (input));
if (TREE_CODE_CLASS (ipa_get_jf_pass_through_operation (jfunc))
== tcc_comparison)
restype = boolean_type_node;
static tree
ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
{
- if (TREE_CODE (input) == TREE_BINFO)
- {
- if (!ipa_get_jf_ancestor_type_preserved (jfunc))
- return NULL;
- return get_binfo_at_offset (input,
- ipa_get_jf_ancestor_offset (jfunc),
- ipa_get_jf_ancestor_type (jfunc));
- }
- else if (TREE_CODE (input) == ADDR_EXPR)
+ gcc_checking_assert (TREE_CODE (input) != TREE_BINFO);
+ if (TREE_CODE (input) == ADDR_EXPR)
{
tree t = TREE_OPERAND (input, 0);
t = build_ref_for_offset (EXPR_LOCATION (t), t,
ipa_get_jf_ancestor_offset (jfunc),
- ipa_get_jf_ancestor_type (jfunc), NULL, false);
+ ptr_type_node, NULL, false);
return build_fold_addr_expr (t);
}
else
return NULL_TREE;
}
-/* Determine whether JFUNC evaluates to a known value (that is either a
- constant or a binfo) and if so, return it. Otherwise return NULL. INFO
- describes the caller node so that pass-through jump functions can be
+/* Determine whether JFUNC evaluates to a single known constant value and if
+ so, return it. Otherwise return NULL. INFO describes the caller node or
+ the one it is inlined to, so that pass-through jump functions can be
evaluated. */
tree
{
if (jfunc->type == IPA_JF_CONST)
return ipa_get_jf_constant (jfunc);
- else if (jfunc->type == IPA_JF_KNOWN_TYPE)
- return ipa_binfo_from_known_type_jfunc (jfunc);
else if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
idx = ipa_get_jf_ancestor_formal_id (jfunc);
if (info->ipcp_orig_node)
- input = info->known_vals[idx];
+ input = info->known_csts[idx];
else
{
- struct ipcp_lattice *lat;
+ ipcp_lattice<tree> *lat;
if (!info->lattices)
{
return NULL_TREE;
}
lat = ipa_get_scalar_lat (info, idx);
- if (!ipa_lat_is_single_const (lat))
+ if (!lat->is_single_const ())
return NULL_TREE;
input = lat->values->value;
}
return NULL_TREE;
}
+/* Determine whether JFUNC evaluates to a single known polymorphic context, given
+ that INFO describes the caller node or the one it is inlined to, CS is the
+ call graph edge corresponding to JFUNC and CSIDX index of the described
+ parameter. */
+
+ipa_polymorphic_call_context
+ipa_context_from_jfunc (ipa_node_params *info, cgraph_edge *cs, int csidx,
+ ipa_jump_func *jfunc)
+{
+ ipa_edge_args *args = IPA_EDGE_REF (cs);
+ ipa_polymorphic_call_context ctx;
+ ipa_polymorphic_call_context *edge_ctx
+ = cs ? ipa_get_ith_polymorhic_call_context (args, csidx) : NULL;
+
+ if (edge_ctx && !edge_ctx->useless_p ())
+ ctx = *edge_ctx;
+
+ if (jfunc->type == IPA_JF_PASS_THROUGH
+ || jfunc->type == IPA_JF_ANCESTOR)
+ {
+ ipa_polymorphic_call_context srcctx;
+ int srcidx;
+ if (jfunc->type == IPA_JF_PASS_THROUGH)
+ {
+ if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR
+ || !ipa_get_jf_pass_through_type_preserved (jfunc))
+ return ctx;
+ srcidx = ipa_get_jf_pass_through_formal_id (jfunc);
+ }
+ else
+ {
+ if (!ipa_get_jf_ancestor_type_preserved (jfunc))
+ return ctx;
+ srcidx = ipa_get_jf_ancestor_formal_id (jfunc);
+ }
+ if (info->ipcp_orig_node)
+ {
+ if (info->known_contexts.exists ())
+ srcctx = info->known_contexts[srcidx];
+ }
+ else
+ {
+ if (!info->lattices)
+ {
+ gcc_checking_assert (!flag_ipa_cp);
+ return ctx;
+ }
+ ipcp_lattice<ipa_polymorphic_call_context> *lat;
+ lat = ipa_get_poly_ctx_lat (info, srcidx);
+ if (!lat->is_single_const ())
+ return ctx;
+ srcctx = lat->values->value;
+ }
+ if (srcctx.useless_p ())
+ return ctx;
+ if (jfunc->type == IPA_JF_ANCESTOR)
+ srcctx.offset_by (ipa_get_jf_ancestor_offset (jfunc));
+ ctx.combine_with (srcctx);
+ }
+
+ return ctx;
+}
/* If checking is enabled, verify that no lattice is in the TOP state, i.e. not
bottom, not containing a variable component and without any known value at
for (i = 0; i < count; i++)
{
- struct ipcp_lattice *lat = ipa_get_scalar_lat (info, i);
+ ipcp_lattice<tree> *lat = ipa_get_scalar_lat (info, i);
if (!lat->bottom
&& !lat->contains_variable
{
if (dump_file)
{
+ symtab_node::dump_table (dump_file);
fprintf (dump_file, "\nIPA lattices after constant "
- "propagation:\n");
+ "propagation, before gcc_unreachable:\n");
print_all_lattices (dump_file, true, false);
}
if (x == y)
return true;
- if (TREE_CODE (x) == TREE_BINFO || TREE_CODE (y) == TREE_BINFO)
- return false;
-
if (TREE_CODE (x) == ADDR_EXPR
&& TREE_CODE (y) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (x, 0)) == CONST_DECL
return operand_equal_p (x, y, 0);
}
-/* Add a new value source to VAL, marking that a value comes from edge CS and
- (if the underlying jump function is a pass-through or an ancestor one) from
- a caller value SRC_VAL of a caller parameter described by SRC_INDEX. OFFSET
- is negative if the source was the scalar value of the parameter itself or
- the offset within an aggregate. */
+/* Return true iff X and Y should be considered equal contexts by IPA-CP. */
-static void
-add_value_source (struct ipcp_value *val, struct cgraph_edge *cs,
- struct ipcp_value *src_val, int src_idx, HOST_WIDE_INT offset)
+static bool
+values_equal_for_ipcp_p (ipa_polymorphic_call_context x,
+ ipa_polymorphic_call_context y)
{
- struct ipcp_value_source *src;
+ return x.equal_to (y);
+}
+
- src = (struct ipcp_value_source *) pool_alloc (ipcp_sources_pool);
+/* Add a new value source to the value represented by THIS, marking that a
+ value comes from edge CS and (if the underlying jump function is a
+ pass-through or an ancestor one) from a caller value SRC_VAL of a caller
+ parameter described by SRC_INDEX. OFFSET is negative if the source was the
+ scalar value of the parameter itself or the offset within an aggregate. */
+
+template <typename valtype>
+void
+ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
+ int src_idx, HOST_WIDE_INT offset)
+{
+ ipcp_value_source<valtype> *src;
+
+ src = new (pool_alloc (ipcp_sources_pool)) ipcp_value_source<valtype>;
src->offset = offset;
src->cs = cs;
src->val = src_val;
src->index = src_idx;
- src->next = val->sources;
- val->sources = src;
+ src->next = sources;
+ sources = src;
}
-/* Try to add NEWVAL to LAT, potentially creating a new struct ipcp_value for
- it. CS, SRC_VAL SRC_INDEX and OFFSET are meant for add_value_source and
- have the same meaning. */
+/* Allocate a new ipcp_value holding a tree constant, initialize its value to
+ SOURCE and clear all other fields. */
-static bool
-add_value_to_lattice (struct ipcp_lattice *lat, tree newval,
- struct cgraph_edge *cs, struct ipcp_value *src_val,
- int src_idx, HOST_WIDE_INT offset)
+static ipcp_value<tree> *
+allocate_and_init_ipcp_value (tree source)
{
- struct ipcp_value *val;
+ ipcp_value<tree> *val;
+
+ val = new (pool_alloc (ipcp_cst_values_pool)) ipcp_value<tree>;
+ memset (val, 0, sizeof (*val));
+ val->value = source;
+ return val;
+}
+
+/* Allocate a new ipcp_value holding a polymorphic context, initialize its
+ value to SOURCE and clear all other fields. */
+
+static ipcp_value<ipa_polymorphic_call_context> *
+allocate_and_init_ipcp_value (ipa_polymorphic_call_context source)
+{
+ ipcp_value<ipa_polymorphic_call_context> *val;
+
+ val = new (pool_alloc (ipcp_poly_ctx_values_pool))
+ ipcp_value<ipa_polymorphic_call_context>;
+ memset (val, 0, sizeof (*val));
+ val->value = source;
+ return val;
+}
- if (lat->bottom)
+/* Try to add NEWVAL to LAT, potentially creating a new ipcp_value for it. CS,
+ SRC_VAL SRC_INDEX and OFFSET are meant for add_source and have the same
+ meaning. OFFSET -1 means the source is scalar and not a part of an
+ aggregate. */
+
+template <typename valtype>
+bool
+ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
+ ipcp_value<valtype> *src_val,
+ int src_idx, HOST_WIDE_INT offset)
+{
+ ipcp_value<valtype> *val;
+
+ if (bottom)
return false;
- for (val = lat->values; val; val = val->next)
+ for (val = values; val; val = val->next)
if (values_equal_for_ipcp_p (val->value, newval))
{
if (ipa_edge_within_scc (cs))
{
- struct ipcp_value_source *s;
+ ipcp_value_source<valtype> *s;
for (s = val->sources; s ; s = s->next)
if (s->cs == cs)
break;
return false;
}
- add_value_source (val, cs, src_val, src_idx, offset);
+ val->add_source (cs, src_val, src_idx, offset);
return false;
}
- if (lat->values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
+ if (values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
{
/* We can only free sources, not the values themselves, because sources
of other values in this this SCC might point to them. */
- for (val = lat->values; val; val = val->next)
+ for (val = values; val; val = val->next)
{
while (val->sources)
{
- struct ipcp_value_source *src = val->sources;
+ ipcp_value_source<valtype> *src = val->sources;
val->sources = src->next;
pool_free (ipcp_sources_pool, src);
}
}
- lat->values = NULL;
- return set_lattice_to_bottom (lat);
+ values = NULL;
+ return set_to_bottom ();
}
- lat->values_count++;
- val = (struct ipcp_value *) pool_alloc (ipcp_values_pool);
- memset (val, 0, sizeof (*val));
-
- add_value_source (val, cs, src_val, src_idx, offset);
- val->value = newval;
- val->next = lat->values;
- lat->values = val;
+ values_count++;
+ val = allocate_and_init_ipcp_value (newval);
+ val->add_source (cs, src_val, src_idx, offset);
+ val->next = values;
+ values = val;
return true;
}
-/* Like above but passes a special value of offset to distinguish that the
- origin is the scalar value of the parameter rather than a part of an
- aggregate. */
-
-static inline bool
-add_scalar_value_to_lattice (struct ipcp_lattice *lat, tree newval,
- struct cgraph_edge *cs,
- struct ipcp_value *src_val, int src_idx)
-{
- return add_value_to_lattice (lat, newval, cs, src_val, src_idx, -1);
-}
-
/* Propagate values through a pass-through jump function JFUNC associated with
edge CS, taking values from SRC_LAT and putting them into DEST_LAT. SRC_IDX
is the index of the source parameter. */
static bool
-propagate_vals_accross_pass_through (struct cgraph_edge *cs,
- struct ipa_jump_func *jfunc,
- struct ipcp_lattice *src_lat,
- struct ipcp_lattice *dest_lat,
+propagate_vals_accross_pass_through (cgraph_edge *cs,
+ ipa_jump_func *jfunc,
+ ipcp_lattice<tree> *src_lat,
+ ipcp_lattice<tree> *dest_lat,
int src_idx)
{
- struct ipcp_value *src_val;
+ ipcp_value<tree> *src_val;
bool ret = false;
/* Do not create new values when propagating within an SCC because if there
number of them and we would just make lattices bottom. */
if ((ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
&& ipa_edge_within_scc (cs))
- ret = set_lattice_contains_variable (dest_lat);
+ ret = dest_lat->set_contains_variable ();
else
for (src_val = src_lat->values; src_val; src_val = src_val->next)
{
tree cstval = ipa_get_jf_pass_through_result (jfunc, src_val->value);
if (cstval)
- ret |= add_scalar_value_to_lattice (dest_lat, cstval, cs, src_val,
- src_idx);
+ ret |= dest_lat->add_value (cstval, cs, src_val, src_idx);
else
- ret |= set_lattice_contains_variable (dest_lat);
+ ret |= dest_lat->set_contains_variable ();
}
return ret;
static bool
propagate_vals_accross_ancestor (struct cgraph_edge *cs,
struct ipa_jump_func *jfunc,
- struct ipcp_lattice *src_lat,
- struct ipcp_lattice *dest_lat,
+ ipcp_lattice<tree> *src_lat,
+ ipcp_lattice<tree> *dest_lat,
int src_idx)
{
- struct ipcp_value *src_val;
+ ipcp_value<tree> *src_val;
bool ret = false;
if (ipa_edge_within_scc (cs))
- return set_lattice_contains_variable (dest_lat);
+ return dest_lat->set_contains_variable ();
for (src_val = src_lat->values; src_val; src_val = src_val->next)
{
tree t = ipa_get_jf_ancestor_result (jfunc, src_val->value);
if (t)
- ret |= add_scalar_value_to_lattice (dest_lat, t, cs, src_val, src_idx);
+ ret |= dest_lat->add_value (t, cs, src_val, src_idx);
else
- ret |= set_lattice_contains_variable (dest_lat);
+ ret |= dest_lat->set_contains_variable ();
}
return ret;
static bool
propagate_scalar_accross_jump_function (struct cgraph_edge *cs,
struct ipa_jump_func *jfunc,
- struct ipcp_lattice *dest_lat)
+ ipcp_lattice<tree> *dest_lat)
{
if (dest_lat->bottom)
return false;
- if (jfunc->type == IPA_JF_CONST
- || jfunc->type == IPA_JF_KNOWN_TYPE)
+ if (jfunc->type == IPA_JF_CONST)
{
- tree val;
-
- if (jfunc->type == IPA_JF_KNOWN_TYPE)
- {
- val = ipa_binfo_from_known_type_jfunc (jfunc);
- if (!val)
- return set_lattice_contains_variable (dest_lat);
- }
- else
- val = ipa_get_jf_constant (jfunc);
- return add_scalar_value_to_lattice (dest_lat, val, cs, NULL, 0);
+ tree val = ipa_get_jf_constant (jfunc);
+ return dest_lat->add_value (val, cs, NULL, 0);
}
else if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
- struct ipcp_lattice *src_lat;
+ ipcp_lattice<tree> *src_lat;
int src_idx;
bool ret;
src_lat = ipa_get_scalar_lat (caller_info, src_idx);
if (src_lat->bottom)
- return set_lattice_contains_variable (dest_lat);
+ return dest_lat->set_contains_variable ();
/* If we would need to clone the caller and cannot, do not propagate. */
if (!ipcp_versionable_function_p (cs->caller)
&& (src_lat->contains_variable
|| (src_lat->values_count > 1)))
- return set_lattice_contains_variable (dest_lat);
+ return dest_lat->set_contains_variable ();
if (jfunc->type == IPA_JF_PASS_THROUGH)
ret = propagate_vals_accross_pass_through (cs, jfunc, src_lat,
src_idx);
if (src_lat->contains_variable)
- ret |= set_lattice_contains_variable (dest_lat);
+ ret |= dest_lat->set_contains_variable ();
return ret;
}
/* TODO: We currently do not handle member method pointers in IPA-CP (we only
use it for indirect inlining), we should propagate them too. */
- return set_lattice_contains_variable (dest_lat);
+ return dest_lat->set_contains_variable ();
+}
+
+/* Propagate scalar values across jump function JFUNC that is associated with
+ edge CS and describes argument IDX and put the values into DEST_LAT. */
+
+static bool
+propagate_context_accross_jump_function (cgraph_edge *cs,
+ ipa_jump_func *jfunc, int idx,
+ ipcp_lattice<ipa_polymorphic_call_context> *dest_lat)
+{
+ ipa_edge_args *args = IPA_EDGE_REF (cs);
+ if (dest_lat->bottom)
+ return false;
+ bool ret = false;
+ bool added_sth = false;
+
+ ipa_polymorphic_call_context edge_ctx, *edge_ctx_ptr
+ = ipa_get_ith_polymorhic_call_context (args, idx);
+
+ if (edge_ctx_ptr)
+ {
+ edge_ctx = *edge_ctx_ptr;
+ edge_ctx.clear_speculation ();
+ }
+
+ if (jfunc->type == IPA_JF_PASS_THROUGH
+ || jfunc->type == IPA_JF_ANCESTOR)
+ {
+ struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ int src_idx;
+ ipcp_lattice<ipa_polymorphic_call_context> *src_lat;
+
+ /* TODO: Once we figure out how to propagate speculations, it will
+ probably be a good idea to switch to speculation if type_preserved is
+ not set instead of punting. */
+ if (jfunc->type == IPA_JF_PASS_THROUGH)
+ {
+ if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR
+ || !ipa_get_jf_pass_through_type_preserved (jfunc))
+ goto prop_fail;
+ src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
+ }
+ else
+ {
+ if (!ipa_get_jf_ancestor_type_preserved (jfunc))
+ goto prop_fail;
+ src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
+ }
+
+ src_lat = ipa_get_poly_ctx_lat (caller_info, src_idx);
+ /* If we would need to clone the caller and cannot, do not propagate. */
+ if (!ipcp_versionable_function_p (cs->caller)
+ && (src_lat->contains_variable
+ || (src_lat->values_count > 1)))
+ goto prop_fail;
+ if (src_lat->contains_variable)
+ ret |= dest_lat->set_contains_variable ();
+
+ ipcp_value<ipa_polymorphic_call_context> *src_val;
+ for (src_val = src_lat->values; src_val; src_val = src_val->next)
+ {
+ ipa_polymorphic_call_context cur = src_val->value;
+ if (jfunc->type == IPA_JF_ANCESTOR)
+ cur.offset_by (ipa_get_jf_ancestor_offset (jfunc));
+ /* TODO: Perhaps attempt to look up some used OTR type? */
+ cur.clear_speculation ();
+ if (!edge_ctx.useless_p ())
+ cur.combine_with (edge_ctx);
+ if (!cur.useless_p ())
+ {
+ ret |= dest_lat->add_value (cur, cs, src_val, src_idx);
+ added_sth = true;
+ }
+ }
+
+ }
+
+ prop_fail:
+ if (!added_sth)
+ {
+ if (!edge_ctx.useless_p ())
+ ret |= dest_lat->add_value (edge_ctx, cs);
+ else
+ ret |= dest_lat->set_contains_variable ();
+ }
+
+ return ret;
}
/* If DEST_PLATS already has aggregate items, check that aggs_by_ref matches
set_agg_lats_to_bottom (dest_plats);
return false;
}
- *change |= set_lattice_contains_variable (**aglat);
+ *change |= (**aglat)->set_contains_variable ();
*aglat = &(**aglat)->next;
}
bool ret = false;
while (aglat)
{
- ret |= set_lattice_contains_variable (aglat);
+ ret |= aglat->set_contains_variable ();
aglat = aglat->next;
}
return ret;
dst_aglat = &(*dst_aglat)->next;
if (src_aglat->bottom)
{
- ret |= set_lattice_contains_variable (new_al);
+ ret |= new_al->set_contains_variable ();
continue;
}
if (src_aglat->contains_variable)
- ret |= set_lattice_contains_variable (new_al);
- for (struct ipcp_value *val = src_aglat->values;
+ ret |= new_al->set_contains_variable ();
+ for (ipcp_value<tree> *val = src_aglat->values;
val;
val = val->next)
- ret |= add_value_to_lattice (new_al, val->value, cs, val, src_idx,
- src_aglat->offset);
+ ret |= new_al->add_value (val->value, cs, val, src_idx,
+ src_aglat->offset);
}
else if (dest_plats->aggs_bottom)
return true;
if (merge_agg_lats_step (dest_plats, item->offset, val_size,
&aglat, pre_existing, &ret))
{
- ret |= add_value_to_lattice (*aglat, item->value, cs, NULL, 0, 0);
+ ret |= (*aglat)->add_value (item->value, cs, NULL, 0, 0);
aglat = &(*aglat)->next;
}
else if (dest_plats->aggs_bottom)
bool ret = false;
int i, args_count, parms_count;
- callee = cgraph_function_node (cs->callee, &availability);
+ callee = cs->callee->function_symbol (&availability);
if (!callee->definition)
return false;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (callee));
+ gcc_checking_assert (callee->has_gimple_body_p ());
callee_info = IPA_NODE_REF (callee);
args = IPA_EDGE_REF (cs);
args_count = ipa_get_cs_argument_count (args);
parms_count = ipa_get_param_count (callee_info);
+ if (parms_count == 0)
+ return false;
+
+ /* No propagation through instrumentation thunks is available yet.
+ It should be possible with proper mapping of call args and
+ instrumented callee params in the propagation loop below. But
+ this case mostly occurs when legacy code calls instrumented code
+ and it is not a primary target for optimizations.
+ We detect instrumentation thunks in aliases and thunks chain by
+ checking instrumentation_clone flag for chain source and target.
+ Going through instrumentation thunks we always have it changed
+ from 0 to 1 and all other nodes do not change it. */
+ if (!cs->callee->instrumentation_clone
+ && callee->instrumentation_clone)
+ {
+ for (i = 0; i < parms_count; i++)
+ ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
+ i));
+ return ret;
+ }
/* If this call goes through a thunk we must not propagate to the first (0th)
parameter. However, we might need to uncover a thunk from below a series
of aliases first. */
alias_or_thunk = cs->callee;
while (alias_or_thunk->alias)
- alias_or_thunk = cgraph_alias_target (alias_or_thunk);
+ alias_or_thunk = alias_or_thunk->get_alias_target ();
if (alias_or_thunk->thunk.thunk_p)
{
ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
struct ipcp_param_lattices *dest_plats;
dest_plats = ipa_get_parm_lattices (callee_info, i);
- if (availability == AVAIL_OVERWRITABLE)
+ if (availability == AVAIL_INTERPOSABLE)
ret |= set_all_contains_variable (dest_plats);
else
{
ret |= propagate_scalar_accross_jump_function (cs, jump_func,
&dest_plats->itself);
+ ret |= propagate_context_accross_jump_function (cs, jump_func, i,
+ &dest_plats->ctxlat);
ret |= propagate_aggs_accross_jump_function (cs, jump_func,
dest_plats);
}
}
/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
- (which can contain both constants and binfos), KNOWN_BINFOS, KNOWN_AGGS or
- AGG_REPS return the destination. The latter three can be NULL. If AGG_REPS
- is not NULL, KNOWN_AGGS is ignored. */
+ KNOWN_CONTEXTS, KNOWN_AGGS or AGG_REPS return the destination. The latter
+ three can be NULL. If AGG_REPS is not NULL, KNOWN_AGGS is ignored. */
static tree
ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
- vec<tree> known_vals,
- vec<tree> known_binfos,
+ vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_jump_function_p> known_aggs,
struct ipa_agg_replacement_value *agg_reps)
{
int param_index = ie->indirect_info->param_index;
- HOST_WIDE_INT token, anc_offset;
- tree otr_type;
+ HOST_WIDE_INT anc_offset;
tree t;
- tree target;
+ tree target = NULL;
if (param_index == -1
- || known_vals.length () <= (unsigned int) param_index)
+ || known_csts.length () <= (unsigned int) param_index)
return NULL_TREE;
if (!ie->indirect_info->polymorphic)
t = NULL;
}
else
- t = known_vals[param_index];
+ t = known_csts[param_index];
if (t &&
TREE_CODE (t) == ADDR_EXPR
return NULL_TREE;
}
+ if (!flag_devirtualize)
+ return NULL_TREE;
+
gcc_assert (!ie->indirect_info->agg_contents);
- token = ie->indirect_info->otr_token;
anc_offset = ie->indirect_info->offset;
- otr_type = ie->indirect_info->otr_type;
- t = known_vals[param_index];
- if (!t && known_binfos.length () > (unsigned int) param_index)
- t = known_binfos[param_index];
- if (!t)
- return NULL_TREE;
+ t = NULL;
- if (TREE_CODE (t) != TREE_BINFO)
+ /* Try to work out the value of the virtual table pointer from aggregate
+ replacements. */
+ if (!t && agg_reps && !ie->indirect_info->by_ref
+ && !ie->indirect_info->vptr_changed)
{
- tree binfo;
- binfo = gimple_extract_devirt_binfo_from_cst
- (t, ie->indirect_info->otr_type);
- if (!binfo)
- return NULL_TREE;
- binfo = get_binfo_at_offset (binfo, anc_offset, otr_type);
- if (!binfo)
- return NULL_TREE;
- target = gimple_get_virt_method_for_binfo (token, binfo);
+ while (agg_reps)
+ {
+ if (agg_reps->index == param_index
+ && agg_reps->offset == ie->indirect_info->offset
+ && agg_reps->by_ref)
+ {
+ t = agg_reps->value;
+ break;
+ }
+ agg_reps = agg_reps->next;
+ }
}
- else
+
+ /* Try to work out the value of the virtual table pointer from known
+ aggregate values. */
+ if (!t && known_aggs.length () > (unsigned int) param_index
+ && !ie->indirect_info->by_ref
+ && !ie->indirect_info->vptr_changed)
{
- tree binfo;
+ struct ipa_agg_jump_function *agg;
+ agg = known_aggs[param_index];
+ t = ipa_find_agg_cst_for_param (agg, ie->indirect_info->offset,
+ true);
+ }
- binfo = get_binfo_at_offset (t, anc_offset, otr_type);
- if (!binfo)
- return NULL_TREE;
- target = gimple_get_virt_method_for_binfo (token, binfo);
+ /* If we found the virtual table pointer, lookup the target. */
+ if (t)
+ {
+ tree vtable;
+ unsigned HOST_WIDE_INT offset;
+ if (vtable_pointer_value_to_vtable (t, &vtable, &offset))
+ {
+ target = gimple_get_virt_method_for_vtable (ie->indirect_info->otr_token,
+ vtable, offset);
+ if (target)
+ {
+ if ((TREE_CODE (TREE_TYPE (target)) == FUNCTION_TYPE
+ && DECL_FUNCTION_CODE (target) == BUILT_IN_UNREACHABLE)
+ || !possible_polymorphic_call_target_p
+ (ie, cgraph_node::get (target)))
+ target = ipa_impossible_devirt_target (ie, target);
+ return target;
+ }
+ }
}
-#ifdef ENABLE_CHECKING
- if (target)
- gcc_assert (possible_polymorphic_call_target_p
- (ie, cgraph_get_node (target)));
-#endif
+
+ /* Do we know the constant value of pointer? */
+ if (!t)
+ t = known_csts[param_index];
+
+ gcc_checking_assert (!t || TREE_CODE (t) != TREE_BINFO);
+
+ ipa_polymorphic_call_context context;
+ if (known_contexts.length () > (unsigned int) param_index)
+ {
+ context = known_contexts[param_index];
+ if (t)
+ {
+ ipa_polymorphic_call_context ctx2 = ipa_polymorphic_call_context
+ (t, ie->indirect_info->otr_type, anc_offset);
+ if (!ctx2.useless_p ())
+ context.combine_with (ctx2, ie->indirect_info->otr_type);
+ }
+ }
+ else if (t)
+ context = ipa_polymorphic_call_context (t, ie->indirect_info->otr_type,
+ anc_offset);
+ else
+ return NULL_TREE;
+
+ vec <cgraph_node *>targets;
+ bool final;
+
+ targets = possible_polymorphic_call_targets
+ (ie->indirect_info->otr_type,
+ ie->indirect_info->otr_token,
+ context, &final);
+ if (!final || targets.length () > 1)
+ return NULL_TREE;
+ if (targets.length () == 1)
+ target = targets[0]->decl;
+ else
+ target = ipa_impossible_devirt_target (ie, NULL_TREE);
+
+ if (target && !possible_polymorphic_call_target_p (ie,
+ cgraph_node::get (target)))
+ target = ipa_impossible_devirt_target (ie, target);
return target;
}
-/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
- (which can contain both constants and binfos), KNOWN_BINFOS (which can be
- NULL) or KNOWN_AGGS (which also can be NULL) return the destination. */
+/* If an indirect edge IE can be turned into a direct one based on KNOWN_CSTS,
+ KNOWN_CONTEXTS (which can be vNULL) or KNOWN_AGGS (which also can be vNULL)
+ return the destination. */
tree
ipa_get_indirect_edge_target (struct cgraph_edge *ie,
- vec<tree> known_vals,
- vec<tree> known_binfos,
+ vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_jump_function_p> known_aggs)
{
- return ipa_get_indirect_edge_target_1 (ie, known_vals, known_binfos,
+ return ipa_get_indirect_edge_target_1 (ie, known_csts, known_contexts,
known_aggs, NULL);
}
/* Calculate devirtualization time bonus for NODE, assuming we know KNOWN_CSTS
- and KNOWN_BINFOS. */
+ and KNOWN_CONTEXTS. */
static int
devirtualization_time_bonus (struct cgraph_node *node,
vec<tree> known_csts,
- vec<tree> known_binfos,
+ vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_jump_function_p> known_aggs)
{
struct cgraph_edge *ie;
{
struct cgraph_node *callee;
struct inline_summary *isummary;
+ enum availability avail;
tree target;
- target = ipa_get_indirect_edge_target (ie, known_csts, known_binfos,
+ target = ipa_get_indirect_edge_target (ie, known_csts, known_contexts,
known_aggs);
if (!target)
continue;
/* Only bare minimum benefit for clearly un-inlineable targets. */
res += 1;
- callee = cgraph_get_node (target);
+ callee = cgraph_node::get (target);
if (!callee || !callee->definition)
continue;
+ callee = callee->function_symbol (&avail);
+ if (avail < AVAIL_AVAILABLE)
+ continue;
isummary = inline_summary (callee);
if (!isummary->inlinable)
continue;
if (max_count)
{
int factor = (count_sum * 1000) / max_count;
- HOST_WIDEST_INT evaluation = (((HOST_WIDEST_INT) time_benefit * factor)
+ int64_t evaluation = (((int64_t) time_benefit * factor)
/ size_cost);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
"size: %i, count_sum: " HOST_WIDE_INT_PRINT_DEC
- ") -> evaluation: " HOST_WIDEST_INT_PRINT_DEC
+ ") -> evaluation: " "%"PRId64
", threshold: %i\n",
time_benefit, size_cost, (HOST_WIDE_INT) count_sum,
evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
}
else
{
- HOST_WIDEST_INT evaluation = (((HOST_WIDEST_INT) time_benefit * freq_sum)
+ int64_t evaluation = (((int64_t) time_benefit * freq_sum)
/ size_cost);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
"size: %i, freq_sum: %i) -> evaluation: "
- HOST_WIDEST_INT_PRINT_DEC ", threshold: %i\n",
+ "%"PRId64 ", threshold: %i\n",
time_benefit, size_cost, freq_sum, evaluation,
PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
/* Return all context independent values from aggregate lattices in PLATS in a
vector. Return NULL if there are none. */
-static vec<ipa_agg_jf_item_t, va_gc> *
+static vec<ipa_agg_jf_item, va_gc> *
context_independent_aggregate_values (struct ipcp_param_lattices *plats)
{
- vec<ipa_agg_jf_item_t, va_gc> *res = NULL;
+ vec<ipa_agg_jf_item, va_gc> *res = NULL;
if (plats->aggs_bottom
|| plats->aggs_contain_variable
for (struct ipcp_agg_lattice *aglat = plats->aggs;
aglat;
aglat = aglat->next)
- if (ipa_lat_is_single_const (aglat))
+ if (aglat->is_single_const ())
{
struct ipa_agg_jf_item item;
item.offset = aglat->offset;
return res;
}
-/* Allocate KNOWN_CSTS, KNOWN_BINFOS and, if non-NULL, KNOWN_AGGS and populate
- them with values of parameters that are known independent of the context.
- INFO describes the function. If REMOVABLE_PARAMS_COST is non-NULL, the
- movement cost of all removable parameters will be stored in it. */
+/* Allocate KNOWN_CSTS, KNOWN_CONTEXTS and, if non-NULL, KNOWN_AGGS and
+ populate them with values of parameters that are known independent of the
+ context. INFO describes the function. If REMOVABLE_PARAMS_COST is
+ non-NULL, the movement cost of all removable parameters will be stored in
+ it. */
static bool
gather_context_independent_values (struct ipa_node_params *info,
- vec<tree> *known_csts,
- vec<tree> *known_binfos,
- vec<ipa_agg_jump_function_t> *known_aggs,
- int *removable_params_cost)
+ vec<tree> *known_csts,
+ vec<ipa_polymorphic_call_context>
+ *known_contexts,
+ vec<ipa_agg_jump_function> *known_aggs,
+ int *removable_params_cost)
{
int i, count = ipa_get_param_count (info);
bool ret = false;
known_csts->create (0);
- known_binfos->create (0);
+ known_contexts->create (0);
known_csts->safe_grow_cleared (count);
- known_binfos->safe_grow_cleared (count);
+ known_contexts->safe_grow_cleared (count);
if (known_aggs)
{
known_aggs->create (0);
for (i = 0; i < count ; i++)
{
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- struct ipcp_lattice *lat = &plats->itself;
+ ipcp_lattice<tree> *lat = &plats->itself;
- if (ipa_lat_is_single_const (lat))
+ if (lat->is_single_const ())
{
- struct ipcp_value *val = lat->values;
- if (TREE_CODE (val->value) != TREE_BINFO)
- {
- (*known_csts)[i] = val->value;
- if (removable_params_cost)
- *removable_params_cost
- += estimate_move_cost (TREE_TYPE (val->value));
- ret = true;
- }
- else if (plats->virt_call)
- {
- (*known_binfos)[i] = val->value;
- ret = true;
- }
- else if (removable_params_cost
- && !ipa_is_param_used (info, i))
- *removable_params_cost += ipa_get_param_move_cost (info, i);
+ ipcp_value<tree> *val = lat->values;
+ gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
+ (*known_csts)[i] = val->value;
+ if (removable_params_cost)
+ *removable_params_cost
+ += estimate_move_cost (TREE_TYPE (val->value), false);
+ ret = true;
}
else if (removable_params_cost
&& !ipa_is_param_used (info, i))
*removable_params_cost
+= ipa_get_param_move_cost (info, i);
+ ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
+ if (ctxlat->is_single_const ())
+ {
+ (*known_contexts)[i] = ctxlat->values->value;
+ ret = true;
+ }
+
if (known_aggs)
{
- vec<ipa_agg_jf_item_t, va_gc> *agg_items;
+ vec<ipa_agg_jf_item, va_gc> *agg_items;
struct ipa_agg_jump_function *ajf;
agg_items = context_independent_aggregate_values (plats);
issue. */
static vec<ipa_agg_jump_function_p>
-agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function_t> known_aggs)
+agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function> known_aggs)
{
vec<ipa_agg_jump_function_p> ret;
struct ipa_agg_jump_function *ajf;
return ret;
}
+/* Perform time and size measurement of NODE with the context given in
+ KNOWN_CSTS, KNOWN_CONTEXTS and KNOWN_AGGS, calculate the benefit and cost
+ given BASE_TIME of the node without specialization, REMOVABLE_PARAMS_COST of
+ all context-independent removable parameters and EST_MOVE_COST of estimated
+ movement of the considered parameter and store it into VAL. */
+
+static void
+perform_estimation_of_a_value (cgraph_node *node, vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context> known_contexts,
+ vec<ipa_agg_jump_function_p> known_aggs_ptrs,
+ int base_time, int removable_params_cost,
+ int est_move_cost, ipcp_value_base *val)
+{
+ int time, size, time_benefit;
+ inline_hints hints;
+
+ /* Ask the inliner heuristics how large and how fast a clone specialized
+ for this particular combination of known values would be. */
+ estimate_ipcp_clone_size_and_time (node, known_csts, known_contexts,
+ known_aggs_ptrs, &size, &time,
+ &hints);
+ /* Benefit is the time saved over the unspecialized BASE_TIME, plus
+ bonuses for expected devirtualization and inline hints, plus the cost
+ of parameters that would become removable and of moving this one. */
+ time_benefit = base_time - time
+ + devirtualization_time_bonus (node, known_csts, known_contexts,
+ known_aggs_ptrs)
+ + hint_time_bonus (hints)
+ + removable_params_cost + est_move_cost;
+
+ gcc_checking_assert (size >=0);
+ /* The inliner-heuristics based estimates may think that in certain
+ contexts some functions do not have any size at all but we want
+ all specializations to have at least a tiny cost, not least not to
+ divide by zero. */
+ if (size == 0)
+ size = 1;
+
+ /* Store the estimates into VAL for the later cloning decision stage. */
+ val->local_time_benefit = time_benefit;
+ val->local_size_cost = size;
+}
+
/* Iterate over known values of parameters of NODE and estimate the local
effects in terms of time and size they have. */
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
- vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs;
+ vec<tree> known_csts;
+ vec<ipa_polymorphic_call_context> known_contexts;
+ vec<ipa_agg_jump_function> known_aggs;
vec<ipa_agg_jump_function_p> known_aggs_ptrs;
bool always_const;
int base_time = inline_summary (node)->time;
node->name (), node->order, base_time);
always_const = gather_context_independent_values (info, &known_csts,
- &known_binfos, &known_aggs,
+ &known_contexts, &known_aggs,
&removable_params_cost);
known_aggs_ptrs = agg_jmp_p_vec_for_t_vec (known_aggs);
if (always_const)
int time, size;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (node, gather_caller_stats, &stats, false);
- estimate_ipcp_clone_size_and_time (node, known_csts, known_binfos,
+ node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
+ estimate_ipcp_clone_size_and_time (node, known_csts, known_contexts,
known_aggs_ptrs, &size, &time, &hints);
- time -= devirtualization_time_bonus (node, known_csts, known_binfos,
+ time -= devirtualization_time_bonus (node, known_csts, known_contexts,
known_aggs_ptrs);
time -= hint_time_bonus (hints);
time -= removable_params_cost;
"time_benefit: %i\n", size, base_time - time);
if (size <= 0
- || cgraph_will_be_removed_from_program_if_no_direct_calls (node))
+ || node->will_be_removed_from_program_if_no_direct_calls_p ())
{
info->do_clone_for_all_contexts = true;
base_time = time;
for (i = 0; i < count ; i++)
{
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- struct ipcp_lattice *lat = &plats->itself;
- struct ipcp_value *val;
- int emc;
+ ipcp_lattice<tree> *lat = &plats->itself;
+ ipcp_value<tree> *val;
if (lat->bottom
|| !lat->values
- || known_csts[i]
- || known_binfos[i])
+ || known_csts[i])
continue;
for (val = lat->values; val; val = val->next)
{
- int time, size, time_benefit;
- inline_hints hints;
+ gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
+ known_csts[i] = val->value;
- if (TREE_CODE (val->value) != TREE_BINFO)
- {
- known_csts[i] = val->value;
- known_binfos[i] = NULL_TREE;
- emc = estimate_move_cost (TREE_TYPE (val->value));
- }
- else if (plats->virt_call)
+ int emc = estimate_move_cost (TREE_TYPE (val->value), true);
+ perform_estimation_of_a_value (node, known_csts, known_contexts,
+ known_aggs_ptrs, base_time,
+ removable_params_cost, emc, val);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- known_csts[i] = NULL_TREE;
- known_binfos[i] = val->value;
- emc = 0;
+ fprintf (dump_file, " - estimates for value ");
+ print_ipcp_constant_value (dump_file, val->value);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, info, i);
+ fprintf (dump_file, ": time_benefit: %i, size: %i\n",
+ val->local_time_benefit, val->local_size_cost);
}
- else
- continue;
+ }
+ known_csts[i] = NULL_TREE;
+ }
- estimate_ipcp_clone_size_and_time (node, known_csts, known_binfos,
- known_aggs_ptrs, &size, &time,
- &hints);
- time_benefit = base_time - time
- + devirtualization_time_bonus (node, known_csts, known_binfos,
- known_aggs_ptrs)
- + hint_time_bonus (hints)
- + removable_params_cost + emc;
-
- gcc_checking_assert (size >=0);
- /* The inliner-heuristics based estimates may think that in certain
- contexts some functions do not have any size at all but we want
- all specializations to have at least a tiny cost, not least not to
- divide by zero. */
- if (size == 0)
- size = 1;
+ for (i = 0; i < count; i++)
+ {
+ struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+
+ if (!plats->virt_call)
+ continue;
+
+ ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
+ ipcp_value<ipa_polymorphic_call_context> *val;
+
+ if (ctxlat->bottom
+ || !ctxlat->values
+ || !known_contexts[i].useless_p ())
+ continue;
+
+ for (val = ctxlat->values; val; val = val->next)
+ {
+ known_contexts[i] = val->value;
+ perform_estimation_of_a_value (node, known_csts, known_contexts,
+ known_aggs_ptrs, base_time,
+ removable_params_cost, 0, val);
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, " - estimates for value ");
+ fprintf (dump_file, " - estimates for polymorphic context ");
print_ipcp_constant_value (dump_file, val->value);
fprintf (dump_file, " for ");
ipa_dump_param (dump_file, info, i);
fprintf (dump_file, ": time_benefit: %i, size: %i\n",
- time_benefit, size);
+ val->local_time_benefit, val->local_size_cost);
}
-
- val->local_time_benefit = time_benefit;
- val->local_size_cost = size;
}
- known_binfos[i] = NULL_TREE;
- known_csts[i] = NULL_TREE;
+ known_contexts[i] = ipa_polymorphic_call_context ();
}
for (i = 0; i < count ; i++)
ajf = &known_aggs[i];
for (aglat = plats->aggs; aglat; aglat = aglat->next)
{
- struct ipcp_value *val;
+ ipcp_value<tree> *val;
if (aglat->bottom || !aglat->values
/* If the following is true, the one value is in known_aggs. */
|| (!plats->aggs_contain_variable
- && ipa_lat_is_single_const (aglat)))
+ && aglat->is_single_const ()))
continue;
for (val = aglat->values; val; val = val->next)
{
- int time, size, time_benefit;
struct ipa_agg_jf_item item;
- inline_hints hints;
item.offset = aglat->offset;
item.value = val->value;
vec_safe_push (ajf->items, item);
- estimate_ipcp_clone_size_and_time (node, known_csts, known_binfos,
- known_aggs_ptrs, &size, &time,
- &hints);
- time_benefit = base_time - time
- + devirtualization_time_bonus (node, known_csts, known_binfos,
- known_aggs_ptrs)
- + hint_time_bonus (hints);
- gcc_checking_assert (size >=0);
- if (size == 0)
- size = 1;
+ perform_estimation_of_a_value (node, known_csts, known_contexts,
+ known_aggs_ptrs, base_time,
+ removable_params_cost, 0, val);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " for ");
ipa_dump_param (dump_file, info, i);
fprintf (dump_file, "[%soffset: " HOST_WIDE_INT_PRINT_DEC
- "]: time_benefit: %i, size: %i\n",
- plats->aggs_by_ref ? "ref " : "",
- aglat->offset, time_benefit, size);
+ "]: time_benefit: %i, size: %i\n",
+ plats->aggs_by_ref ? "ref " : "",
+ aglat->offset,
+ val->local_time_benefit, val->local_size_cost);
}
- val->local_time_benefit = time_benefit;
- val->local_size_cost = size;
ajf->items->pop ();
}
}
vec_free (known_aggs[i].items);
known_csts.release ();
- known_binfos.release ();
+ known_contexts.release ();
known_aggs.release ();
known_aggs_ptrs.release ();
}
/* Add value CUR_VAL and all yet-unsorted values it is dependent on to the
topological sort of values. */
-static void
-add_val_to_toposort (struct ipcp_value *cur_val)
+template <typename valtype>
+void
+value_topo_info<valtype>::add_val (ipcp_value<valtype> *cur_val)
{
- static int dfs_counter = 0;
- static struct ipcp_value *stack;
- struct ipcp_value_source *src;
+ ipcp_value_source<valtype> *src;
if (cur_val->dfs)
return;
{
if (src->val->dfs == 0)
{
- add_val_to_toposort (src->val);
+ add_val (src->val);
if (src->val->low_link < cur_val->low_link)
cur_val->low_link = src->val->low_link;
}
if (cur_val->dfs == cur_val->low_link)
{
- struct ipcp_value *v, *scc_list = NULL;
+ ipcp_value<valtype> *v, *scc_list = NULL;
do
{
they are not there yet. */
static void
-add_all_node_vals_to_toposort (struct cgraph_node *node)
+add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count ; i++)
{
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- struct ipcp_lattice *lat = &plats->itself;
+ ipcp_lattice<tree> *lat = &plats->itself;
struct ipcp_agg_lattice *aglat;
- struct ipcp_value *val;
if (!lat->bottom)
- for (val = lat->values; val; val = val->next)
- add_val_to_toposort (val);
+ {
+ ipcp_value<tree> *val;
+ for (val = lat->values; val; val = val->next)
+ topo->constants.add_val (val);
+ }
if (!plats->aggs_bottom)
for (aglat = plats->aggs; aglat; aglat = aglat->next)
if (!aglat->bottom)
- for (val = aglat->values; val; val = val->next)
- add_val_to_toposort (val);
+ {
+ ipcp_value<tree> *val;
+ for (val = aglat->values; val; val = val->next)
+ topo->constants.add_val (val);
+ }
+
+ ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
+ if (!ctxlat->bottom)
+ {
+ ipcp_value<ipa_polymorphic_call_context> *ctxval;
+ for (ctxval = ctxlat->values; ctxval; ctxval = ctxval->next)
+ topo->contexts.add_val (ctxval);
+ }
}
}
connected components. */
static void
-propagate_constants_topo (struct topo_info *topo)
+propagate_constants_topo (struct ipa_topo_info *topo)
{
int i;
{
unsigned j;
struct cgraph_node *v, *node = topo->order[i];
- vec<cgraph_node_ptr> cycle_nodes = ipa_get_nodes_in_cycle (node);
+ vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (node);
/* First, iteratively propagate within the strongly connected component
until all lattices stabilize. */
FOR_EACH_VEC_ELT (cycle_nodes, j, v)
- if (cgraph_function_with_gimple_body_p (v))
+ if (v->has_gimple_body_p ())
push_node_to_stack (topo, v);
v = pop_node_from_stack (topo);
the local effects of the discovered constants and all valid values to
their topological sort. */
FOR_EACH_VEC_ELT (cycle_nodes, j, v)
- if (cgraph_function_with_gimple_body_p (v))
+ if (v->has_gimple_body_p ())
{
struct cgraph_edge *cs;
estimate_local_effects (v);
- add_all_node_vals_to_toposort (v);
+ add_all_node_vals_to_toposort (v, topo);
for (cs = v->callees; cs; cs = cs->next_callee)
if (!ipa_edge_within_scc (cs))
propagate_constants_accross_call (cs);
/* Propagate the estimated effects of individual values along the topological
from the dependent values to those they depend on. */
-static void
-propagate_effects (void)
+template <typename valtype>
+void
+value_topo_info<valtype>::propagate_effects ()
{
- struct ipcp_value *base;
+ ipcp_value<valtype> *base;
for (base = values_topo; base; base = base->topo_next)
{
- struct ipcp_value_source *src;
- struct ipcp_value *val;
+ ipcp_value_source<valtype> *src;
+ ipcp_value<valtype> *val;
int time = 0, size = 0;
for (val = base; val; val = val->scc_next)
for (val = base; val; val = val->scc_next)
for (src = val->sources; src; src = src->next)
if (src->val
- && cgraph_maybe_hot_edge_p (src->cs))
+ && src->cs->maybe_hot_p ())
{
src->val->prop_time_benefit = safe_add (time,
src->val->prop_time_benefit);
}
-/* Propagate constants, binfos and their effects from the summaries
- interprocedurally. */
+/* Propagate constants, polymorphic contexts and their effects from the
+ summaries interprocedurally. */
static void
-ipcp_propagate_stage (struct topo_info *topo)
+ipcp_propagate_stage (struct ipa_topo_info *topo)
{
struct cgraph_node *node;
struct ipa_node_params *info = IPA_NODE_REF (node);
determine_versionability (node);
- if (cgraph_function_with_gimple_body_p (node))
+ if (node->has_gimple_body_p ())
{
info->lattices = XCNEWVEC (struct ipcp_param_lattices,
ipa_get_param_count (info));
#ifdef ENABLE_CHECKING
ipcp_verify_propagated_values ();
#endif
- propagate_effects ();
+ topo->constants.propagate_effects ();
+ topo->contexts.propagate_effects ();
if (dump_file)
{
}
/* Discover newly direct outgoing edges from NODE which is a new clone with
- known KNOWN_VALS and make them direct. */
+ known KNOWN_CSTS and make them direct. */
static void
ipcp_discover_new_direct_edges (struct cgraph_node *node,
- vec<tree> known_vals,
+ vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context>
+ known_contexts,
struct ipa_agg_replacement_value *aggvals)
{
struct cgraph_edge *ie, *next_ie;
tree target;
next_ie = ie->next_callee;
- target = ipa_get_indirect_edge_target_1 (ie, known_vals, vNULL, vNULL,
- aggvals);
+ target = ipa_get_indirect_edge_target_1 (ie, known_csts, known_contexts,
+ vNULL, aggvals);
if (target)
{
bool agg_contents = ie->indirect_info->agg_contents;
bool polymorphic = ie->indirect_info->polymorphic;
- bool param_index = ie->indirect_info->param_index;
+ int param_index = ie->indirect_info->param_index;
struct cgraph_edge *cs = ipa_make_edge_direct_to_target (ie, target);
found = true;
fprintf (dump_file, " controlled uses count of param "
"%i bumped down to %i\n", param_index, c);
if (c == 0
- && (to_del = ipa_find_reference (node,
- cs->callee,
- NULL, 0)))
+ && (to_del = node->find_reference (cs->callee, NULL, 0)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " and even removing its "
"cloning-created reference\n");
- ipa_remove_reference (to_del);
+ to_del->remove_reference ();
}
}
}
/* Vector of pointers which for linked lists of clones of an original crgaph
edge. */
-static vec<cgraph_edge_p> next_edge_clone;
+static vec<cgraph_edge *> next_edge_clone;
+static vec<cgraph_edge *> prev_edge_clone;
static inline void
-grow_next_edge_clone_vector (void)
+grow_edge_clone_vectors (void)
{
if (next_edge_clone.length ()
- <= (unsigned) cgraph_edge_max_uid)
- next_edge_clone.safe_grow_cleared (cgraph_edge_max_uid + 1);
+ <= (unsigned) symtab->edges_max_uid)
+ next_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
+ if (prev_edge_clone.length ()
+ <= (unsigned) symtab->edges_max_uid)
+ prev_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
}
/* Edge duplication hook to grow the appropriate linked list in
static void
ipcp_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
- __attribute__((unused)) void *data)
+ void *)
{
- grow_next_edge_clone_vector ();
- next_edge_clone[dst->uid] = next_edge_clone[src->uid];
+ grow_edge_clone_vectors ();
+
+ struct cgraph_edge *old_next = next_edge_clone[src->uid];
+ if (old_next)
+ prev_edge_clone[old_next->uid] = dst;
+ prev_edge_clone[dst->uid] = src;
+
+ next_edge_clone[dst->uid] = old_next;
next_edge_clone[src->uid] = dst;
}
+/* Hook that is called by cgraph.c when an edge is removed.  Unlinks CS from
+ the doubly-linked list of edge clones so no stale pointer to it remains. */
+
+static void
+ipcp_edge_removal_hook (struct cgraph_edge *cs, void *)
+{
+ /* Ensure the clone-tracking vectors are large enough to be indexed by
+ CS's uid before reading from them. */
+ grow_edge_clone_vectors ();
+
+ /* Splice CS out: connect its predecessor and successor (if any) in the
+ list of clones of its original edge directly to each other. */
+ struct cgraph_edge *prev = prev_edge_clone[cs->uid];
+ struct cgraph_edge *next = next_edge_clone[cs->uid];
+ if (prev)
+ next_edge_clone[prev->uid] = next;
+ if (next)
+ prev_edge_clone[next->uid] = prev;
+}
+
/* See if NODE is a clone with a known aggregate value at a given OFFSET of a
parameter with the given INDEX. */
static tree
-get_clone_agg_value (struct cgraph_node *node, HOST_WIDEST_INT offset,
+get_clone_agg_value (struct cgraph_node *node, HOST_WIDE_INT offset,
int index)
{
struct ipa_agg_replacement_value *aggval;
static bool
cgraph_edge_brings_value_p (struct cgraph_edge *cs,
- struct ipcp_value_source *src)
+ ipcp_value_source<tree> *src)
{
struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
- struct ipa_node_params *dst_info = IPA_NODE_REF (cs->callee);
+ cgraph_node *real_dest = cs->callee->function_symbol ();
+ struct ipa_node_params *dst_info = IPA_NODE_REF (real_dest);
if ((dst_info->ipcp_orig_node && !dst_info->is_all_contexts_clone)
|| caller_info->node_dead)
{
tree t;
if (src->offset == -1)
- t = caller_info->known_vals[src->index];
+ t = caller_info->known_csts[src->index];
else
t = get_clone_agg_value (cs->caller, src->offset, src->index);
return (t != NULL_TREE
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
src->index);
if (src->offset == -1)
- return (ipa_lat_is_single_const (&plats->itself)
+ return (plats->itself.is_single_const ()
&& values_equal_for_ipcp_p (src->val->value,
plats->itself.values->value));
else
return false;
for (aglat = plats->aggs; aglat; aglat = aglat->next)
if (aglat->offset == src->offset)
- return (ipa_lat_is_single_const (aglat)
+ return (aglat->is_single_const ()
&& values_equal_for_ipcp_p (src->val->value,
aglat->values->value));
}
}
}
+/* Return true if edge CS does bring about the value described by SRC.
+ This is the overload for polymorphic call context values. */
+
+static bool
+cgraph_edge_brings_value_p (struct cgraph_edge *cs,
+ ipcp_value_source<ipa_polymorphic_call_context>
+ *src)
+{
+ struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ cgraph_node *real_dest = cs->callee->function_symbol ();
+ struct ipa_node_params *dst_info = IPA_NODE_REF (real_dest);
+
+ /* Do not consider edges into specialized clones (other than the
+ all-contexts clone) or out of callers already known to be dead. */
+ if ((dst_info->ipcp_orig_node && !dst_info->is_all_contexts_clone)
+ || caller_info->node_dead)
+ return false;
+ /* A value with no source value does not depend on anything in the
+ caller, so any live edge brings it. */
+ if (!src->val)
+ return true;
+
+ /* In an already specialized caller the known context is recorded
+ directly; check it against the source value. */
+ if (caller_info->ipcp_orig_node)
+ return (caller_info->known_contexts.length () > (unsigned) src->index)
+ && values_equal_for_ipcp_p (src->val->value,
+ caller_info->known_contexts[src->index]);
+
+ /* Otherwise consult the caller's context lattice: the edge brings the
+ value only if the lattice has settled on exactly that one value. */
+ struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
+ src->index);
+ return plats->ctxlat.is_single_const ()
+ && values_equal_for_ipcp_p (src->val->value,
+ plats->ctxlat.values->value);
+}
+
/* Get the next clone in the linked list of clones of an edge. */
static inline struct cgraph_edge *
edge frequency and their number into *FREQUENCY and *CALLER_COUNT
respectively. */
+template <typename valtype>
static bool
-get_info_about_necessary_edges (struct ipcp_value *val, int *freq_sum,
+get_info_about_necessary_edges (ipcp_value<valtype> *val, int *freq_sum,
gcov_type *count_sum, int *caller_count)
{
- struct ipcp_value_source *src;
+ ipcp_value_source<valtype> *src;
int freq = 0, count = 0;
gcov_type cnt = 0;
bool hot = false;
count++;
freq += cs->frequency;
cnt += cs->count;
- hot |= cgraph_maybe_hot_edge_p (cs);
+ hot |= cs->maybe_hot_p ();
}
cs = get_next_cgraph_edge_clone (cs);
}
/* Return a vector of incoming edges that do bring value VAL. It is assumed
their number is known and equal to CALLER_COUNT. */
-static vec<cgraph_edge_p>
-gather_edges_for_value (struct ipcp_value *val, int caller_count)
+template <typename valtype>
+static vec<cgraph_edge *>
+gather_edges_for_value (ipcp_value<valtype> *val, int caller_count)
{
- struct ipcp_value_source *src;
- vec<cgraph_edge_p> ret;
+ ipcp_value_source<valtype> *src;
+ vec<cgraph_edge *> ret;
ret.create (caller_count);
for (src = val->sources; src; src = src->next)
struct ipa_replace_map *replace_map;
- replace_map = ggc_alloc_ipa_replace_map ();
+ replace_map = ggc_alloc<ipa_replace_map> ();
if (dump_file)
{
fprintf (dump_file, " replacing ");
return;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (orig_node, gather_caller_stats, &stats, false);
+ orig_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
orig_sum = stats.count_sum;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (new_node, gather_caller_stats, &stats, false);
+ new_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
new_sum = stats.count_sum;
if (orig_node_count < orig_sum + new_sum)
dump_profile_updates (orig_node, new_node);
}
-/* Create a specialized version of NODE with known constants and types of
- parameters in KNOWN_VALS and redirect all edges in CALLERS to it. */
+/* Create a specialized version of NODE with known constants in KNOWN_CSTS,
+ known contexts in KNOWN_CONTEXTS and known aggregate values in AGGVALS and
+ redirect all edges in CALLERS to it. */
static struct cgraph_node *
create_specialized_node (struct cgraph_node *node,
- vec<tree> known_vals,
+ vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context> known_contexts,
struct ipa_agg_replacement_value *aggvals,
- vec<cgraph_edge_p> callers)
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
- vec<ipa_replace_map_p, va_gc> *replace_trees = NULL;
+ vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
struct ipa_agg_replacement_value *av;
struct cgraph_node *new_node;
int i, count = ipa_get_param_count (info);
args_to_skip = BITMAP_GGC_ALLOC ();
for (i = 0; i < count; i++)
{
- tree t = known_vals[i];
+ tree t = known_csts[i];
- if ((t && TREE_CODE (t) != TREE_BINFO)
- || !ipa_is_param_used (info, i))
+ if (t || !ipa_is_param_used (info, i))
bitmap_set_bit (args_to_skip, i);
}
}
for (i = 0; i < count ; i++)
{
- tree t = known_vals[i];
- if (t && TREE_CODE (t) != TREE_BINFO)
+ tree t = known_csts[i];
+ if (t)
{
struct ipa_replace_map *replace_map;
+ gcc_checking_assert (TREE_CODE (t) != TREE_BINFO);
replace_map = get_replacement_map (info, t, i);
if (replace_map)
vec_safe_push (replace_trees, replace_map);
}
}
- new_node = cgraph_create_virtual_clone (node, callers, replace_trees,
- args_to_skip, "constprop");
+ new_node = node->create_virtual_clone (callers, replace_trees,
+ args_to_skip, "constprop");
ipa_set_node_agg_value_chain (new_node, aggvals);
for (av = aggvals; av; av = av->next)
- ipa_maybe_record_reference (new_node, av->value,
- IPA_REF_ADDR, NULL);
+ new_node->maybe_create_reference (av->value, IPA_REF_ADDR, NULL);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " the new node is %s/%i.\n",
new_node->name (), new_node->order);
+ if (known_contexts.exists ())
+ {
+ for (i = 0; i < count ; i++)
+ if (!known_contexts[i].useless_p ())
+ {
+ fprintf (dump_file, " known ctx %i is ", i);
+ known_contexts[i].dump (dump_file);
+ }
+ }
if (aggvals)
ipa_dump_agg_replacement_values (dump_file, aggvals);
}
- gcc_checking_assert (ipa_node_params_vector.exists ()
- && (ipa_node_params_vector.length ()
- > (unsigned) cgraph_max_uid));
+ ipa_check_create_node_params ();
update_profiling_info (node, new_node);
new_info = IPA_NODE_REF (new_node);
new_info->ipcp_orig_node = node;
- new_info->known_vals = known_vals;
+ new_info->known_csts = known_csts;
+ new_info->known_contexts = known_contexts;
- ipcp_discover_new_direct_edges (new_node, known_vals, aggvals);
+ ipcp_discover_new_direct_edges (new_node, known_csts, known_contexts, aggvals);
callers.release ();
return new_node;
}
/* Given a NODE, and a subset of its CALLERS, try to populate blanks slots in
- KNOWN_VALS with constants and types that are also known for all of the
- CALLERS. */
+ KNOWN_CSTS with constants that are also known for all of the CALLERS. */
static void
find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
- vec<tree> known_vals,
- vec<cgraph_edge_p> callers)
+ vec<tree> known_csts,
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
tree newval = NULL_TREE;
int j;
- if (ipa_get_scalar_lat (info, i)->bottom || known_vals[i])
+ if (ipa_get_scalar_lat (info, i)->bottom || known_csts[i])
continue;
FOR_EACH_VEC_ELT (callers, j, cs)
fprintf (dump_file, "\n");
}
- known_vals[i] = newval;
+ known_csts[i] = newval;
+ }
+ }
+}
+
+/* Given a NODE and a subset of its CALLERS, try to populate blank slots in
+ KNOWN_CONTEXTS with polymorphic contexts that are also known for all of the
+ CALLERS. */
+
+static void
+find_more_contexts_for_caller_subset (cgraph_node *node,
+ vec<ipa_polymorphic_call_context>
+ *known_contexts,
+ vec<cgraph_edge *> callers)
+{
+ ipa_node_params *info = IPA_NODE_REF (node);
+ int i, count = ipa_get_param_count (info);
+
+ for (i = 0; i < count ; i++)
+ {
+ cgraph_edge *cs;
+
+ if (ipa_get_poly_ctx_lat (info, i)->bottom
+ || (known_contexts->exists ()
+ && !(*known_contexts)[i].useless_p ()))
+ continue;
+
+ ipa_polymorphic_call_context newval;
+ bool found = false;
+ int j;
+
+ FOR_EACH_VEC_ELT (callers, j, cs)
+ {
+ if (i >= ipa_get_cs_argument_count (IPA_EDGE_REF (cs)))
+ return;
+ ipa_jump_func *jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs),
+ i);
+ ipa_polymorphic_call_context ctx;
+ ctx = ipa_context_from_jfunc (IPA_NODE_REF (cs->caller), cs, i,
+ jfunc);
+ ctx.clear_speculation ();
+ if (ctx.useless_p ()
+ || (found && !values_equal_for_ipcp_p (newval, ctx)))
+ {
+ found = false;
+ break;
+ }
+ else if (!found)
+ {
+ found = true;
+ newval = ctx;
+ }
+ }
+
+ if (found)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, " adding an extra known polymorphic "
+ "context ");
+ print_ipcp_constant_value (dump_file, newval);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, info, i);
+ fprintf (dump_file, "\n");
+ }
+
+ if (!known_contexts->exists ())
+ known_contexts->safe_grow_cleared (ipa_get_param_count (info));
+ (*known_contexts)[i] = newval;
}
+
}
}
/* Go through PLATS and create a vector of values consisting of values and
offsets (minus OFFSET) of lattices that contain only a single value. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
return vNULL;
for (struct ipcp_agg_lattice *aglat = plats->aggs; aglat; aglat = aglat->next)
- if (ipa_lat_is_single_const (aglat))
+ if (aglat->is_single_const ())
{
struct ipa_agg_jf_item ti;
ti.offset = aglat->offset - offset;
static void
intersect_with_plats (struct ipcp_param_lattices *plats,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipcp_agg_lattice *aglat;
/* Copy agggregate replacement values of NODE (which is an IPA-CP clone) to the
vector result while subtracting OFFSET from the individual value offsets. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
agg_replacements_to_vector (struct cgraph_node *node, int index,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *av;
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
for (av = ipa_get_agg_replacements_for_node (node); av; av = av->next)
if (av->index == index
static void
intersect_with_agg_replacements (struct cgraph_node *node, int index,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *srcvals;
copy all incoming values to it. If we determine we ended up with no values
whatsoever, return a released vector. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
- vec<ipa_agg_jf_item_t> inter)
+ vec<ipa_agg_jf_item> inter)
{
struct ipa_jump_func *jfunc;
jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), index);
intersect_with_agg_replacements (cs->caller, src_idx,
&inter, 0);
}
+ else
+ {
+ inter.release ();
+ return vNULL;
+ }
}
else
{
else
intersect_with_plats (src_plats, &inter, 0);
}
+ else
+ {
+ inter.release ();
+ return vNULL;
+ }
}
}
else if (jfunc->type == IPA_JF_ANCESTOR
else
{
inter.release ();
- return vec<ipa_agg_jf_item_t>();
+ return vec<ipa_agg_jf_item>();
}
return inter;
}
static struct ipa_agg_replacement_value *
find_aggregate_values_for_callers_subset (struct cgraph_node *node,
- vec<cgraph_edge_p> callers)
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *dest_info = IPA_NODE_REF (node);
- struct ipa_agg_replacement_value *res = NULL;
+ struct ipa_agg_replacement_value *res;
+ struct ipa_agg_replacement_value **tail = &res;
struct cgraph_edge *cs;
int i, j, count = ipa_get_param_count (dest_info);
for (i = 0; i < count ; i++)
{
struct cgraph_edge *cs;
- vec<ipa_agg_jf_item_t> inter = vNULL;
+ vec<ipa_agg_jf_item> inter = vNULL;
struct ipa_agg_jf_item *item;
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
int j;
if (!item->value)
continue;
- v = ggc_alloc_ipa_agg_replacement_value ();
+ v = ggc_alloc<ipa_agg_replacement_value> ();
v->index = i;
v->offset = item->offset;
v->value = item->value;
v->by_ref = plats->aggs_by_ref;
- v->next = res;
- res = v;
+ *tail = v;
+ tail = &v->next;
}
next_param:
if (inter.exists ())
inter.release ();
}
+ *tail = NULL;
return res;
}
/* Turn KNOWN_AGGS into a list of aggreate replacement values. */
static struct ipa_agg_replacement_value *
-known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function_t> known_aggs)
+known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function> known_aggs)
{
- struct ipa_agg_replacement_value *res = NULL;
+ struct ipa_agg_replacement_value *res;
+ struct ipa_agg_replacement_value **tail = &res;
struct ipa_agg_jump_function *aggjf;
struct ipa_agg_jf_item *item;
int i, j;
FOR_EACH_VEC_SAFE_ELT (aggjf->items, j, item)
{
struct ipa_agg_replacement_value *v;
- v = ggc_alloc_ipa_agg_replacement_value ();
+ v = ggc_alloc<ipa_agg_replacement_value> ();
v->index = i;
v->offset = item->offset;
v->value = item->value;
v->by_ref = aggjf->by_ref;
- v->next = res;
- res = v;
+ *tail = v;
+ tail = &v->next;
}
+ *tail = NULL;
return res;
}
struct ipa_jump_func *jump_func;
tree val, t;
- val = dest_info->known_vals[i];
+ val = dest_info->known_csts[i];
if (!val)
continue;
struct cgraph_node *node)
{
struct ipa_node_params *orig_caller_info = IPA_NODE_REF (cs->caller);
+ struct ipa_node_params *orig_node_info;
struct ipa_agg_replacement_value *aggval;
int i, ec, count;
if (aggval->index >= ec)
return false;
+ orig_node_info = IPA_NODE_REF (IPA_NODE_REF (node)->ipcp_orig_node);
if (orig_caller_info->ipcp_orig_node)
orig_caller_info = IPA_NODE_REF (orig_caller_info->ipcp_orig_node);
for (i = 0; i < count; i++)
{
- static vec<ipa_agg_jf_item_t> values = vec<ipa_agg_jf_item_t>();
+ static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
struct ipcp_param_lattices *plats;
bool interesting = false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (!interesting)
continue;
- plats = ipa_get_parm_lattices (orig_caller_info, aggval->index);
+ plats = ipa_get_parm_lattices (orig_node_info, aggval->index);
if (plats->aggs_bottom)
return false;
all other criteria such that they can be redirected the the special node.
This function can therefore redirect the final edge in a SCC. */
+template <typename valtype>
static void
-perhaps_add_new_callers (struct cgraph_node *node, struct ipcp_value *val)
+perhaps_add_new_callers (cgraph_node *node, ipcp_value<valtype> *val)
{
- struct ipcp_value_source *src;
+ ipcp_value_source<valtype> *src;
gcov_type redirected_sum = 0;
for (src = val->sources; src; src = src->next)
while (cs)
{
enum availability availability;
- struct cgraph_node *dst = cgraph_function_node (cs->callee,
- &availability);
+ struct cgraph_node *dst = cs->callee->function_symbol (&availability);
if ((dst == node || IPA_NODE_REF (dst)->is_all_contexts_clone)
- && availability > AVAIL_OVERWRITABLE
+ && availability > AVAIL_INTERPOSABLE
&& cgraph_edge_brings_value_p (cs, src))
{
if (cgraph_edge_brings_all_scalars_for_node (cs, val->spec_node)
xstrdup (val->spec_node->name ()),
val->spec_node->order);
- cgraph_redirect_edge_callee (cs, val->spec_node);
+ cs->redirect_callee (val->spec_node);
redirected_sum += cs->count;
}
}
update_specialized_profile (val->spec_node, node, redirected_sum);
}
+/* Return true if KNOWN_CONTEXTS contains at least one useful (i.e. not
+   useless_p) polymorphic call context.  */
-/* Copy KNOWN_BINFOS to KNOWN_VALS. */
+static bool
+known_contexts_useful_p (vec<ipa_polymorphic_call_context> known_contexts)
+{
+ ipa_polymorphic_call_context *ctx;
+ int i;
+
+ FOR_EACH_VEC_ELT (known_contexts, i, ctx)
+ if (!ctx->useless_p ())
+ return true;
+ return false;
+}
+
+/* Return a copy of KNOWN_CONTEXTS if it contains at least one useful
+   context, otherwise return vNULL.  */
+
+static vec<ipa_polymorphic_call_context>
+copy_useful_known_contexts (vec<ipa_polymorphic_call_context> known_contexts)
+{
+ if (known_contexts_useful_p (known_contexts))
+ return known_contexts.copy ();
+ else
+ return vNULL;
+}
+
+/* Copy KNOWN_CSTS and modify the copy according to VAL and INDEX. Replace
+ KNOWN_CONTEXTS with its copy if it contains at least one useful context,
+ otherwise with vNULL. */
static void
-move_binfos_to_values (vec<tree> known_vals,
- vec<tree> known_binfos)
+modify_known_vectors_with_val (vec<tree> *known_csts,
+ vec<ipa_polymorphic_call_context> *known_contexts,
+ ipcp_value<tree> *val,
+ int index)
{
- tree t;
- int i;
+ *known_csts = known_csts->copy ();
+ *known_contexts = copy_useful_known_contexts (*known_contexts);
+ (*known_csts)[index] = val->value;
+}
+
+/* Replace KNOWN_CSTS with its copy. Also copy KNOWN_CONTEXTS and modify the
+ copy according to VAL and INDEX. */
- for (i = 0; known_binfos.iterate (i, &t); i++)
- if (t)
- known_vals[i] = t;
+static void
+modify_known_vectors_with_val (vec<tree> *known_csts,
+ vec<ipa_polymorphic_call_context> *known_contexts,
+ ipcp_value<ipa_polymorphic_call_context> *val,
+ int index)
+{
+ *known_csts = known_csts->copy ();
+ *known_contexts = known_contexts->copy ();
+ (*known_contexts)[index] = val->value;
}
-/* Return true if there is a replacement equivalent to VALUE, INDEX and OFFSET
- among those in the AGGVALS list. */
+/* Return true if OFFSET indicates this was not an aggregate value or there is
+ a replacement equivalent to VALUE, INDEX and OFFSET among those in the
+ AGGVALS list. */
DEBUG_FUNCTION bool
-ipcp_val_in_agg_replacements_p (struct ipa_agg_replacement_value *aggvals,
- int index, HOST_WIDE_INT offset, tree value)
+ipcp_val_agg_replacement_ok_p (ipa_agg_replacement_value *aggvals,
+ int index, HOST_WIDE_INT offset, tree value)
{
+ if (offset == -1)
+ return true;
+
while (aggvals)
{
if (aggvals->index == index
return false;
}
+/* Return true if OFFSET is minus one, because the source of a polymorphic
+   call context can never be an aggregate value.  Overload used when checking
+   context values; the AGGVALS list is irrelevant and thus ignored.  */
+
+DEBUG_FUNCTION bool
+ipcp_val_agg_replacement_ok_p (ipa_agg_replacement_value *,
+ int , HOST_WIDE_INT offset,
+ ipa_polymorphic_call_context)
+{
+ return offset == -1;
+}
+
/* Decide wheter to create a special version of NODE for value VAL of parameter
at the given INDEX. If OFFSET is -1, the value is for the parameter itself,
otherwise it is stored at the given OFFSET of the parameter. KNOWN_CSTS,
- KNOWN_BINFOS and KNOWN_AGGS describe the other already known values. */
+ KNOWN_CONTEXTS and KNOWN_AGGS describe the other already known values. */
+template <typename valtype>
static bool
decide_about_value (struct cgraph_node *node, int index, HOST_WIDE_INT offset,
- struct ipcp_value *val, vec<tree> known_csts,
- vec<tree> known_binfos)
+ ipcp_value<valtype> *val, vec<tree> known_csts,
+ vec<ipa_polymorphic_call_context> known_contexts)
{
struct ipa_agg_replacement_value *aggvals;
int freq_sum, caller_count;
gcov_type count_sum;
- vec<cgraph_edge_p> callers;
- vec<tree> kv;
+ vec<cgraph_edge *> callers;
if (val->spec_node)
{
node->name (), node->order);
callers = gather_edges_for_value (val, caller_count);
- kv = known_csts.copy ();
- move_binfos_to_values (kv, known_binfos);
if (offset == -1)
- kv[index] = val->value;
- find_more_scalar_values_for_callers_subset (node, kv, callers);
+ modify_known_vectors_with_val (&known_csts, &known_contexts, val, index);
+ else
+ {
+ known_csts = known_csts.copy ();
+ known_contexts = copy_useful_known_contexts (known_contexts);
+ }
+ find_more_scalar_values_for_callers_subset (node, known_csts, callers);
+ find_more_contexts_for_caller_subset (node, &known_contexts, callers);
aggvals = find_aggregate_values_for_callers_subset (node, callers);
- gcc_checking_assert (offset == -1
- || ipcp_val_in_agg_replacements_p (aggvals, index,
- offset, val->value));
- val->spec_node = create_specialized_node (node, kv, aggvals, callers);
+ gcc_checking_assert (ipcp_val_agg_replacement_ok_p (aggvals, index,
+ offset, val->value));
+ val->spec_node = create_specialized_node (node, known_csts, known_contexts,
+ aggvals, callers);
overall_size += val->local_size_cost;
/* TODO: If for some lattice there is only one other known value
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
- vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs = vNULL;
+ vec<tree> known_csts;
+ vec<ipa_polymorphic_call_context> known_contexts;
+ vec<ipa_agg_jump_function> known_aggs = vNULL;
bool ret = false;
if (count == 0)
fprintf (dump_file, "\nEvaluating opportunities for %s/%i.\n",
node->name (), node->order);
- gather_context_independent_values (info, &known_csts, &known_binfos,
+ gather_context_independent_values (info, &known_csts, &known_contexts,
info->do_clone_for_all_contexts ? &known_aggs
: NULL, NULL);
for (i = 0; i < count ;i++)
{
struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- struct ipcp_lattice *lat = &plats->itself;
- struct ipcp_value *val;
+ ipcp_lattice<tree> *lat = &plats->itself;
+ ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
if (!lat->bottom
- && !known_csts[i]
- && !known_binfos[i])
- for (val = lat->values; val; val = val->next)
- ret |= decide_about_value (node, i, -1, val, known_csts,
- known_binfos);
+ && !known_csts[i])
+ {
+ ipcp_value<tree> *val;
+ for (val = lat->values; val; val = val->next)
+ ret |= decide_about_value (node, i, -1, val, known_csts,
+ known_contexts);
+ }
if (!plats->aggs_bottom)
{
struct ipcp_agg_lattice *aglat;
- struct ipcp_value *val;
+ ipcp_value<tree> *val;
for (aglat = plats->aggs; aglat; aglat = aglat->next)
if (!aglat->bottom && aglat->values
/* If the following is false, the one value is in
known_aggs. */
&& (plats->aggs_contain_variable
- || !ipa_lat_is_single_const (aglat)))
+ || !aglat->is_single_const ()))
for (val = aglat->values; val; val = val->next)
ret |= decide_about_value (node, i, aglat->offset, val,
- known_csts, known_binfos);
+ known_csts, known_contexts);
+ }
+
+ if (!ctxlat->bottom
+ && known_contexts[i].useless_p ())
+ {
+ ipcp_value<ipa_polymorphic_call_context> *val;
+ for (val = ctxlat->values; val; val = val->next)
+ ret |= decide_about_value (node, i, -1, val, known_csts,
+ known_contexts);
}
+
info = IPA_NODE_REF (node);
}
if (info->do_clone_for_all_contexts)
{
struct cgraph_node *clone;
- vec<cgraph_edge_p> callers;
+ vec<cgraph_edge *> callers;
if (dump_file)
fprintf (dump_file, " - Creating a specialized node of %s/%i "
"for all known contexts.\n", node->name (),
node->order);
- callers = collect_callers_of_node (node);
- move_binfos_to_values (known_csts, known_binfos);
- clone = create_specialized_node (node, known_csts,
+ callers = node->collect_callers ();
+
+ if (!known_contexts_useful_p (known_contexts))
+ {
+ known_contexts.release ();
+ known_contexts = vNULL;
+ }
+ clone = create_specialized_node (node, known_csts, known_contexts,
known_aggs_to_agg_replacement_list (known_aggs),
callers);
info = IPA_NODE_REF (node);
ret = true;
}
else
- known_csts.release ();
+ {
+ known_csts.release ();
+ known_contexts.release ();
+ }
- known_binfos.release ();
return ret;
}
struct cgraph_node *callee;
struct ipa_node_params *info;
- callee = cgraph_function_node (cs->callee, NULL);
+ callee = cs->callee->function_symbol (NULL);
info = IPA_NODE_REF (callee);
if (info->node_dead)
for (cs = node->callers; cs; cs = cs->next_caller)
if (cs->caller->thunk.thunk_p
- && cgraph_for_node_and_aliases (cs->caller,
- has_undead_caller_from_outside_scc_p,
- NULL, true))
+ && cs->caller->call_for_symbol_thunks_and_aliases
+ (has_undead_caller_from_outside_scc_p, NULL, true))
return true;
else if (!ipa_edge_within_scc (cs)
&& !IPA_NODE_REF (cs->caller)->node_dead)
{
struct cgraph_node *v;
for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
- if (cgraph_will_be_removed_from_program_if_no_direct_calls (v)
- && !cgraph_for_node_and_aliases (v,
- has_undead_caller_from_outside_scc_p,
- NULL, true))
+ if (v->will_be_removed_from_program_if_no_direct_calls_p ()
+ && !v->call_for_symbol_thunks_and_aliases
+ (has_undead_caller_from_outside_scc_p, NULL, true))
IPA_NODE_REF (v)->node_dead = 1;
for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
TOPO and make specialized clones if deemed beneficial. */
static void
-ipcp_decision_stage (struct topo_info *topo)
+ipcp_decision_stage (struct ipa_topo_info *topo)
{
int i;
struct cgraph_node *v;
iterate = false;
for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
- if (cgraph_function_with_gimple_body_p (v)
+ if (v->has_gimple_body_p ()
&& ipcp_versionable_function_p (v))
iterate |= decide_whether_version_node (v);
ipcp_driver (void)
{
struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
- struct topo_info topo;
+ struct cgraph_edge_hook_list *edge_removal_hook_holder;
+ struct ipa_topo_info topo;
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
- grow_next_edge_clone_vector ();
+ grow_edge_clone_vectors ();
edge_duplication_hook_holder =
- cgraph_add_edge_duplication_hook (&ipcp_edge_duplication_hook, NULL);
- ipcp_values_pool = create_alloc_pool ("IPA-CP values",
- sizeof (struct ipcp_value), 32);
+ symtab->add_edge_duplication_hook (&ipcp_edge_duplication_hook, NULL);
+ edge_removal_hook_holder =
+ symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
+
+ ipcp_cst_values_pool = create_alloc_pool ("IPA-CP constant values",
+ sizeof (ipcp_value<tree>), 32);
+ ipcp_poly_ctx_values_pool = create_alloc_pool
+ ("IPA-CP polymorphic contexts",
+ sizeof (ipcp_value<ipa_polymorphic_call_context>), 32);
ipcp_sources_pool = create_alloc_pool ("IPA-CP value sources",
- sizeof (struct ipcp_value_source), 64);
+ sizeof (ipcp_value_source<tree>), 64);
ipcp_agg_lattice_pool = create_alloc_pool ("IPA_CP aggregate lattices",
sizeof (struct ipcp_agg_lattice),
32);
/* Free all IPCP structures. */
free_toporder_info (&topo);
next_edge_clone.release ();
- cgraph_remove_edge_duplication_hook (edge_duplication_hook_holder);
+ symtab->remove_edge_removal_hook (edge_removal_hook_holder);
+ symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
ipa_free_all_structures_after_ipa_cp ();
if (dump_file)
fprintf (dump_file, "\nIPA constant propagation end\n");
ipa_prop_read_jump_functions ();
}
-/* Gate for IPCP optimization. */
-
-static bool
-cgraph_gate_cp (void)
-{
- /* FIXME: We should remove the optimize check after we ensure we never run
- IPA passes when not optimizing. */
- return flag_ipa_cp && optimize;
-}
-
namespace {
const pass_data pass_data_ipa_cp =
IPA_PASS, /* type */
"cp", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_IPA_CONSTANT_PROP, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
{}
/* opt_pass methods: */
- bool gate () { return cgraph_gate_cp (); }
- unsigned int execute () { return ipcp_driver (); }
+ virtual bool gate (function *)
+ {
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ return flag_ipa_cp && optimize;
+ }
+
+ virtual unsigned int execute (function *) { return ipcp_driver (); }
}; // class pass_ipa_cp
{
return new pass_ipa_cp (ctxt);
}
+
+/* Reset all state within ipa-cp.c so that we can rerun the compiler
+ within the same process. For use by toplev::finalize.  Zeroes the
+ file-scope accumulators used by the pass.  */
+
+void
+ipa_cp_c_finalize (void)
+{
+ max_count = 0;
+ overall_size = 0;
+ max_new_size = 0;
+}