/* Interprocedural analyses.
- Copyright (C) 2005-2015 Free Software Foundation, Inc.
+ Copyright (C) 2005-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "alias.h"
#include "backend.h"
+#include "rtl.h"
#include "tree.h"
#include "gimple.h"
-#include "rtl.h"
+#include "alloc-pool.h"
+#include "tree-pass.h"
#include "ssa.h"
-#include "options.h"
+#include "tree-streamer.h"
+#include "cgraph.h"
+#include "diagnostic.h"
#include "fold-const.h"
-#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
-#include "flags.h"
-#include "insn-config.h"
-#include "expmed.h"
-#include "dojump.h"
-#include "explow.h"
#include "calls.h"
-#include "emit-rtl.h"
-#include "varasm.h"
-#include "stmt.h"
-#include "expr.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
-#include "langhooks.h"
-#include "target.h"
-#include "cgraph.h"
-#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-cfg.h"
-#include "tree-into-ssa.h"
#include "tree-dfa.h"
-#include "tree-pass.h"
#include "tree-inline.h"
-#include "ipa-inline.h"
-#include "diagnostic.h"
+#include "ipa-fnsummary.h"
#include "gimple-pretty-print.h"
-#include "lto-streamer.h"
-#include "data-streamer.h"
-#include "tree-streamer.h"
#include "params.h"
#include "ipa-utils.h"
#include "dbgcnt.h"
#include "domwalk.h"
#include "builtins.h"
+#include "tree-cfgcleanup.h"
-/* Intermediate information that we get from alias analysis about a particular
- parameter in a particular basic_block. When a parameter or the memory it
- references is marked modified, we use that information in all dominatd
- blocks without cosulting alias analysis oracle. */
-
-struct param_aa_status
-{
- /* Set when this structure contains meaningful information. If not, the
- structure describing a dominating BB should be used instead. */
- bool valid;
-
- /* Whether we have seen something which might have modified the data in
- question. PARM is for the parameter itself, REF is for data it points to
- but using the alias type of individual accesses and PT is the same thing
- but for computing aggregate pass-through functions using a very inclusive
- ao_ref. */
- bool parm_modified, ref_modified, pt_modified;
-};
+/* Function summary where the parameter infos are actually stored. */
+ipa_node_params_t *ipa_node_params_sum = NULL;
-/* Information related to a given BB that used only when looking at function
- body. */
+function_summary <ipcp_transformation *> *ipcp_transformation_sum = NULL;
-struct ipa_bb_info
-{
- /* Call graph edges going out of this BB. */
- vec<cgraph_edge *> cg_edges;
- /* Alias analysis statuses of each formal parameter at this bb. */
- vec<param_aa_status> param_aa_statuses;
-};
+/* Edge summary for IPA-CP edge information. */
+ipa_edge_args_sum_t *ipa_edge_args_sum;
-/* Structure with global information that is only used when looking at function
- body. */
+/* Traits for a hash table for reusing already existing ipa_bits. */
-struct func_body_info
+struct ipa_bit_ggc_hash_traits : public ggc_cache_remove <ipa_bits *>
{
- /* The node that is being analyzed. */
- cgraph_node *node;
-
- /* Its info. */
- struct ipa_node_params *info;
+ typedef ipa_bits *value_type;
+ typedef ipa_bits *compare_type;
+ static hashval_t
+ hash (const ipa_bits *p)
+ {
+ hashval_t t = (hashval_t) p->value.to_shwi ();
+ return iterative_hash_host_wide_int (p->mask.to_shwi (), t);
+ }
+ static bool
+ equal (const ipa_bits *a, const ipa_bits *b)
+ {
+ return a->value == b->value && a->mask == b->mask;
+ }
+ static void
+ mark_empty (ipa_bits *&p)
+ {
+ p = NULL;
+ }
+ static bool
+ is_empty (const ipa_bits *p)
+ {
+ return p == NULL;
+ }
+ static bool
+ is_deleted (const ipa_bits *p)
+ {
+ return p == reinterpret_cast<const ipa_bits *> (1);
+ }
+ static void
+ mark_deleted (ipa_bits *&p)
+ {
+ p = reinterpret_cast<ipa_bits *> (1);
+ }
+};
- /* Information about individual BBs. */
- vec<ipa_bb_info> bb_infos;
+/* Hash table to avoid repeated allocations of equal ipa_bits. */
+static GTY ((cache)) hash_table<ipa_bit_ggc_hash_traits> *ipa_bits_hash_table;
- /* Number of parameters. */
- int param_count;
+/* Traits for a hash table for reusing value_ranges used for IPA. Note that
+ the equiv bitmap is not hashed and is expected to be NULL. */
- /* Number of statements already walked by when analyzing this function. */
- unsigned int aa_walked;
+struct ipa_vr_ggc_hash_traits : public ggc_cache_remove <value_range_base *>
+{
+ typedef value_range_base *value_type;
+ typedef value_range_base *compare_type;
+ static hashval_t
+ hash (const value_range_base *p)
+ {
+ inchash::hash hstate (p->kind ());
+ inchash::add_expr (p->min (), hstate);
+ inchash::add_expr (p->max (), hstate);
+ return hstate.end ();
+ }
+ static bool
+ equal (const value_range_base *a, const value_range_base *b)
+ {
+ return a->equal_p (*b);
+ }
+ static void
+ mark_empty (value_range_base *&p)
+ {
+ p = NULL;
+ }
+ static bool
+ is_empty (const value_range_base *p)
+ {
+ return p == NULL;
+ }
+ static bool
+ is_deleted (const value_range_base *p)
+ {
+ return p == reinterpret_cast<const value_range_base *> (1);
+ }
+ static void
+ mark_deleted (value_range_base *&p)
+ {
+ p = reinterpret_cast<value_range_base *> (1);
+ }
};
-/* Function summary where the parameter infos are actually stored. */
-ipa_node_params_t *ipa_node_params_sum = NULL;
-/* Vector of IPA-CP transformation data for each clone. */
-vec<ipcp_transformation_summary, va_gc> *ipcp_transformations;
-/* Vector where the parameter infos are actually stored. */
-vec<ipa_edge_args, va_gc> *ipa_edge_args_vector;
+/* Hash table to avoid repeated allocations of equal value_ranges. */
+static GTY ((cache)) hash_table<ipa_vr_ggc_hash_traits> *ipa_vr_hash_table;
/* Holders of ipa cgraph hooks: */
-static struct cgraph_edge_hook_list *edge_removal_hook_holder;
-static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
static struct cgraph_node_hook_list *function_insertion_hook_holder;
/* Description of a reference to an IPA constant. */
/* Allocation pool for reference descriptions. */
-static pool_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
- ("IPA-PROP ref descriptions", 32);
+static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
+ ("IPA-PROP ref descriptions");
/* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
with NODE should prevent us from analyzing it for the purposes of IPA-CP. */
to INFO. */
static int
-ipa_get_param_decl_index_1 (vec<ipa_param_descriptor> descriptors, tree ptree)
+ipa_get_param_decl_index_1 (vec<ipa_param_descriptor, va_gc> *descriptors,
+ tree ptree)
{
int i, count;
- count = descriptors.length ();
+ count = vec_safe_length (descriptors);
for (i = 0; i < count; i++)
- if (descriptors[i].decl == ptree)
+ if ((*descriptors)[i].decl_or_type == ptree)
return i;
return -1;
static void
ipa_populate_param_decls (struct cgraph_node *node,
- vec<ipa_param_descriptor> &descriptors)
+ vec<ipa_param_descriptor, va_gc> &descriptors)
{
tree fndecl;
tree fnargs;
param_num = 0;
for (parm = fnargs; parm; parm = DECL_CHAIN (parm))
{
- descriptors[param_num].decl = parm;
+ descriptors[param_num].decl_or_type = parm;
descriptors[param_num].move_cost = estimate_move_cost (TREE_TYPE (parm),
true);
param_num++;
ipa_dump_param (FILE *file, struct ipa_node_params *info, int i)
{
fprintf (file, "param #%i", i);
- if (info->descriptors[i].decl)
+ if ((*info->descriptors)[i].decl_or_type)
{
fprintf (file, " ");
- print_generic_expr (file, info->descriptors[i].decl, 0);
+ print_generic_expr (file, (*info->descriptors)[i].decl_or_type);
}
}
-/* Initialize the ipa_node_params structure associated with NODE
- to hold PARAM_COUNT parameters. */
+/* If necessary, allocate vector of parameter descriptors in info of NODE.
+ Return true if they were allocated, false if not. */
-void
+static bool
ipa_alloc_node_params (struct cgraph_node *node, int param_count)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
- if (!info->descriptors.exists () && param_count)
- info->descriptors.safe_grow_cleared (param_count);
+ if (!info->descriptors && param_count)
+ {
+ vec_safe_grow_cleared (info->descriptors, param_count);
+ return true;
+ }
+ else
+ return false;
}
/* Initialize the ipa_node_params structure associated with NODE by counting
{
struct ipa_node_params *info = IPA_NODE_REF (node);
- if (!info->descriptors.exists ())
- {
- ipa_alloc_node_params (node, count_formal_params (node->decl));
- ipa_populate_param_decls (node, info->descriptors);
- }
+ if (!info->descriptors
+ && ipa_alloc_node_params (node, count_formal_params (node->decl)))
+ ipa_populate_param_decls (node, *info->descriptors);
}
/* Print the jump functions associated with call graph edge CS to file F. */
{
tree val = jump_func->value.constant.value;
fprintf (f, "CONST: ");
- print_generic_expr (f, val, 0);
+ print_generic_expr (f, val);
if (TREE_CODE (val) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (val, 0)) == CONST_DECL)
{
fprintf (f, " -> ");
- print_generic_expr (f, DECL_INITIAL (TREE_OPERAND (val, 0)),
- 0);
+ print_generic_expr (f, DECL_INITIAL (TREE_OPERAND (val, 0)));
}
fprintf (f, "\n");
}
if (jump_func->value.pass_through.operation != NOP_EXPR)
{
fprintf (f, " ");
- print_generic_expr (f,
- jump_func->value.pass_through.operand, 0);
+ print_generic_expr (f, jump_func->value.pass_through.operand);
}
if (jump_func->value.pass_through.agg_preserved)
fprintf (f, ", agg_preserved");
else
{
fprintf (f, "cst: ");
- print_generic_expr (f, item->value, 0);
+ print_generic_expr (f, item->value);
}
fprintf (f, "\n");
}
ctx->dump (dump_file);
}
- if (jump_func->alignment.known)
+ if (jump_func->bits)
+ {
+ fprintf (f, " value: ");
+ print_hex (jump_func->bits->value, f);
+ fprintf (f, ", mask: ");
+ print_hex (jump_func->bits->mask, f);
+ fprintf (f, "\n");
+ }
+ else
+ fprintf (f, " Unknown bits\n");
+
+ if (jump_func->m_vr)
{
- fprintf (f, " Alignment: %u, misalignment: %u\n",
- jump_func->alignment.align,
- jump_func->alignment.misalign);
+ fprintf (f, " VR ");
+ fprintf (f, "%s[",
+ (jump_func->m_vr->kind () == VR_ANTI_RANGE) ? "~" : "");
+ print_decs (wi::to_wide (jump_func->m_vr->min ()), f);
+ fprintf (f, ", ");
+ print_decs (wi::to_wide (jump_func->m_vr->max ()), f);
+ fprintf (f, "]\n");
}
else
- fprintf (f, " Unknown alignment\n");
+ fprintf (f, " Unknown VR\n");
}
}
{
struct cgraph_edge *cs;
- fprintf (f, " Jump functions of caller %s/%i:\n", node->name (),
- node->order);
+ fprintf (f, " Jump functions of caller %s:\n", node->dump_name ());
for (cs = node->callees; cs; cs = cs->next_callee)
{
if (!ipa_edge_args_info_available_for_edge_p (cs))
continue;
- fprintf (f, " callsite %s/%i -> %s/%i : \n",
- xstrdup_for_dump (node->name ()), node->order,
- xstrdup_for_dump (cs->callee->name ()),
- cs->callee->order);
+ fprintf (f, " callsite %s -> %s : \n",
+ node->dump_name (),
+ cs->callee->dump_name ());
ipa_print_node_jump_functions_for_edge (f, cs);
}
ipa_set_jf_unknown (struct ipa_jump_func *jfunc)
{
jfunc->type = IPA_JF_UNKNOWN;
- jfunc->alignment.known = false;
+ jfunc->bits = NULL;
+ jfunc->m_vr = NULL;
}
/* Set JFUNC to be a copy of another jmp (to be used by jump function
ipa_set_jf_constant (struct ipa_jump_func *jfunc, tree constant,
struct cgraph_edge *cs)
{
- constant = unshare_expr (constant);
- if (constant && EXPR_P (constant))
- SET_EXPR_LOCATION (constant, UNKNOWN_LOCATION);
jfunc->type = IPA_JF_CONST;
jfunc->value.constant.value = unshare_expr_without_location (constant);
jfunc->value.pass_through.agg_preserved = agg_preserved;
}
+/* Set JFUNC to be an unary pass through jump function. */
+
+static void
+ipa_set_jf_unary_pass_through (struct ipa_jump_func *jfunc, int formal_id,
+ enum tree_code operation)
+{
+ jfunc->type = IPA_JF_PASS_THROUGH;
+ jfunc->value.pass_through.operand = NULL_TREE;
+ jfunc->value.pass_through.formal_id = formal_id;
+ jfunc->value.pass_through.operation = operation;
+ jfunc->value.pass_through.agg_preserved = false;
+}
/* Set JFUNC to be an arithmetic pass through jump function. */
static void
of this function body. */
static struct ipa_bb_info *
-ipa_get_bb_info (struct func_body_info *fbi, basic_block bb)
+ipa_get_bb_info (struct ipa_func_body_info *fbi, basic_block bb)
{
gcc_checking_assert (fbi);
return &fbi->bb_infos[bb->index];
*/
static bool
-stmt_may_be_vtbl_ptr_store (gimple stmt)
+stmt_may_be_vtbl_ptr_store (gimple *stmt)
{
if (is_gimple_call (stmt))
return false;
if (TREE_CODE (lhs) == COMPONENT_REF
&& !DECL_VIRTUAL_P (TREE_OPERAND (lhs, 1)))
return false;
- /* In the future we might want to use get_base_ref_and_offset to find
+ /* In the future we might want to use get_ref_base_and_extent to find
if there is a field corresponding to the offset and if so, proceed
almost like if it was a component ref. */
}
static bool
check_stmt_for_type_change (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
- gimple stmt = SSA_NAME_DEF_STMT (vdef);
+ gimple *stmt = SSA_NAME_DEF_STMT (vdef);
struct prop_type_change_info *tci = (struct prop_type_change_info *) data;
if (stmt_may_be_vtbl_ptr_store (stmt))
in between beggining of the function until CALL is invoked.
Generally functions are not allowed to change type of such instances,
- but they call destructors. We assume that methods can not destroy the THIS
+ but they call destructors. We assume that methods cannot destroy the THIS
pointer. Also as a special cases, constructor and destructors may change
type of the THIS pointer. */
static bool
-param_type_may_change_p (tree function, tree arg, gimple call)
+param_type_may_change_p (tree function, tree arg, gimple *call)
{
- /* Pure functions can not do any changes on the dynamic type;
+ /* Pure functions cannot do any changes on the dynamic type;
that require writting to memory. */
if (flags_from_decl_or_type (function) & (ECF_PURE | ECF_CONST))
return false;
that does the heavy work which is usually unnecesary. */
static bool
-detect_type_change_from_memory_writes (tree arg, tree base, tree comp_type,
- gcall *call, struct ipa_jump_func *jfunc,
+detect_type_change_from_memory_writes (ipa_func_body_info *fbi, tree arg,
+ tree base, tree comp_type, gcall *call,
+ struct ipa_jump_func *jfunc,
HOST_WIDE_INT offset)
{
struct prop_type_change_info tci;
ao_ref ao;
- bool entry_reached = false;
gcc_checking_assert (DECL_P (arg)
|| TREE_CODE (arg) == MEM_REF
tci.object = get_base_address (arg);
tci.type_maybe_changed = false;
- walk_aliased_vdefs (&ao, gimple_vuse (call), check_stmt_for_type_change,
- &tci, NULL, &entry_reached);
- if (!tci.type_maybe_changed)
+ int walked
+ = walk_aliased_vdefs (&ao, gimple_vuse (call), check_stmt_for_type_change,
+ &tci, NULL, NULL, fbi->aa_walk_budget + 1);
+
+ if (walked >= 0 && !tci.type_maybe_changed)
return false;
ipa_set_jf_unknown (jfunc);
returned by get_ref_base_and_extent, as is the offset. */
static bool
-detect_type_change (tree arg, tree base, tree comp_type, gcall *call,
- struct ipa_jump_func *jfunc, HOST_WIDE_INT offset)
+detect_type_change (ipa_func_body_info *fbi, tree arg, tree base,
+ tree comp_type, gcall *call, struct ipa_jump_func *jfunc,
+ HOST_WIDE_INT offset)
{
if (!flag_devirtualize)
return false;
TREE_OPERAND (base, 0),
call))
return false;
- return detect_type_change_from_memory_writes (arg, base, comp_type,
+ return detect_type_change_from_memory_writes (fbi, arg, base, comp_type,
call, jfunc, offset);
}
be zero). */
static bool
-detect_type_change_ssa (tree arg, tree comp_type,
+detect_type_change_ssa (ipa_func_body_info *fbi, tree arg, tree comp_type,
gcall *call, struct ipa_jump_func *jfunc)
{
gcc_checking_assert (TREE_CODE (arg) == SSA_NAME);
arg = build2 (MEM_REF, ptr_type_node, arg,
build_int_cst (ptr_type_node, 0));
- return detect_type_change_from_memory_writes (arg, arg, comp_type,
+ return detect_type_change_from_memory_writes (fbi, arg, arg, comp_type,
call, jfunc, 0);
}
return true;
}
-/* Return true if we have already walked so many statements in AA that we
- should really just start giving up. */
-
-static bool
-aa_overwalked (struct func_body_info *fbi)
-{
- gcc_checking_assert (fbi);
- return fbi->aa_walked > (unsigned) PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
-}
-
/* Find the nearest valid aa status for parameter specified by INDEX that
dominates BB. */
-static struct param_aa_status *
-find_dominating_aa_status (struct func_body_info *fbi, basic_block bb,
+static struct ipa_param_aa_status *
+find_dominating_aa_status (struct ipa_func_body_info *fbi, basic_block bb,
int index)
{
while (true)
structures and/or intialize the result with a dominating description as
necessary. */
-static struct param_aa_status *
-parm_bb_aa_status_for_bb (struct func_body_info *fbi, basic_block bb,
+static struct ipa_param_aa_status *
+parm_bb_aa_status_for_bb (struct ipa_func_body_info *fbi, basic_block bb,
int index)
{
gcc_checking_assert (fbi);
struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb);
if (bi->param_aa_statuses.is_empty ())
bi->param_aa_statuses.safe_grow_cleared (fbi->param_count);
- struct param_aa_status *paa = &bi->param_aa_statuses[index];
+ struct ipa_param_aa_status *paa = &bi->param_aa_statuses[index];
if (!paa->valid)
{
gcc_checking_assert (!paa->parm_modified
&& !paa->ref_modified
&& !paa->pt_modified);
- struct param_aa_status *dom_paa;
+ struct ipa_param_aa_status *dom_paa;
dom_paa = find_dominating_aa_status (fbi, bb, index);
if (dom_paa)
*paa = *dom_paa;
gathered but do not survive the summary building stage. */
static bool
-parm_preserved_before_stmt_p (struct func_body_info *fbi, int index,
- gimple stmt, tree parm_load)
+parm_preserved_before_stmt_p (struct ipa_func_body_info *fbi, int index,
+ gimple *stmt, tree parm_load)
{
- struct param_aa_status *paa;
+ struct ipa_param_aa_status *paa;
bool modified = false;
ao_ref refd;
- /* FIXME: FBI can be NULL if we are being called from outside
- ipa_node_analysis or ipcp_transform_function, which currently happens
- during inlining analysis. It would be great to extend fbi's lifetime and
- always have it. Currently, we are just not afraid of too much walking in
- that case. */
- if (fbi)
- {
- if (aa_overwalked (fbi))
- return false;
- paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index);
- if (paa->parm_modified)
- return false;
- }
- else
- paa = NULL;
+ tree base = get_base_address (parm_load);
+ gcc_assert (TREE_CODE (base) == PARM_DECL);
+ if (TREE_READONLY (base))
+ return true;
+
+ gcc_checking_assert (fbi);
+ paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index);
+ if (paa->parm_modified)
+ return false;
gcc_checking_assert (gimple_vuse (stmt) != NULL_TREE);
ao_ref_init (&refd, parm_load);
int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified,
- &modified, NULL);
- if (fbi)
- fbi->aa_walked += walked;
+ &modified, NULL, NULL,
+ fbi->aa_walk_budget + 1);
+ if (walked < 0)
+ {
+ modified = true;
+ if (fbi)
+ fbi->aa_walk_budget = 0;
+ }
+ else if (fbi)
+ fbi->aa_walk_budget -= walked;
if (paa && modified)
paa->parm_modified = true;
return !modified;
modified. Otherwise return -1. */
static int
-load_from_unmodified_param (struct func_body_info *fbi,
- vec<ipa_param_descriptor> descriptors,
- gimple stmt)
+load_from_unmodified_param (struct ipa_func_body_info *fbi,
+ vec<ipa_param_descriptor, va_gc> *descriptors,
+ gimple *stmt)
{
int index;
tree op1;
before reaching statement STMT. */
static bool
-parm_ref_data_preserved_p (struct func_body_info *fbi,
- int index, gimple stmt, tree ref)
+parm_ref_data_preserved_p (struct ipa_func_body_info *fbi,
+ int index, gimple *stmt, tree ref)
{
- struct param_aa_status *paa;
+ struct ipa_param_aa_status *paa;
bool modified = false;
ao_ref refd;
- /* FIXME: FBI can be NULL if we are being called from outside
- ipa_node_analysis or ipcp_transform_function, which currently happens
- during inlining analysis. It would be great to extend fbi's lifetime and
- always have it. Currently, we are just not afraid of too much walking in
- that case. */
- if (fbi)
- {
- if (aa_overwalked (fbi))
- return false;
- paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index);
- if (paa->ref_modified)
- return false;
- }
- else
- paa = NULL;
+ gcc_checking_assert (fbi);
+ paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (stmt), index);
+ if (paa->ref_modified)
+ return false;
gcc_checking_assert (gimple_vuse (stmt));
ao_ref_init (&refd, ref);
int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified,
- &modified, NULL);
- if (fbi)
- fbi->aa_walked += walked;
- if (paa && modified)
+ &modified, NULL, NULL,
+ fbi->aa_walk_budget + 1);
+ if (walked < 0)
+ {
+ modified = true;
+ fbi->aa_walk_budget = 0;
+ }
+ else
+ fbi->aa_walk_budget -= walked;
+ if (modified)
paa->ref_modified = true;
return !modified;
}
CALL into which it is passed. FBI describes the function body. */
static bool
-parm_ref_data_pass_through_p (struct func_body_info *fbi, int index,
- gimple call, tree parm)
+parm_ref_data_pass_through_p (struct ipa_func_body_info *fbi, int index,
+ gimple *call, tree parm)
{
bool modified = false;
ao_ref refd;
function because it is not goin to use it. But do not cache the result
either. Also, no such calculations for non-pointers. */
if (!gimple_vuse (call)
- || !POINTER_TYPE_P (TREE_TYPE (parm))
- || aa_overwalked (fbi))
+ || !POINTER_TYPE_P (TREE_TYPE (parm)))
return false;
- struct param_aa_status *paa = parm_bb_aa_status_for_bb (fbi, gimple_bb (call),
- index);
+ struct ipa_param_aa_status *paa = parm_bb_aa_status_for_bb (fbi,
+ gimple_bb (call),
+ index);
if (paa->pt_modified)
return false;
ao_ref_init_from_ptr_and_size (&refd, parm, NULL_TREE);
int walked = walk_aliased_vdefs (&refd, gimple_vuse (call), mark_modified,
- &modified, NULL);
- fbi->aa_walked += walked;
+ &modified, NULL, NULL,
+ fbi->aa_walk_budget + 1);
+ if (walked < 0)
+ {
+ fbi->aa_walk_budget = 0;
+ modified = true;
+ }
+ else
+ fbi->aa_walk_budget -= walked;
if (modified)
paa->pt_modified = true;
return !modified;
}
-/* Return true if we can prove that OP is a memory reference loading unmodified
- data from an aggregate passed as a parameter and if the aggregate is passed
- by reference, that the alias type of the load corresponds to the type of the
- formal parameter (so that we can rely on this type for TBAA in callers).
+/* Return true if we can prove that OP is a memory reference loading
+ data from an aggregate passed as a parameter.
+
+ The function works in two modes. If GUARANTEED_UNMODIFIED is NULL, it returns
+ false if it cannot prove that the value has not been modified before the
+ load in STMT. If GUARANTEED_UNMODIFIED is not NULL, it will return true even
+ if it cannot prove the value has not been modified, in that case it will
+ store false to *GUARANTEED_UNMODIFIED, otherwise it will store true there.
+
INFO and PARMS_AINFO describe parameters of the current function (but the
latter can be NULL), STMT is the load statement. If function returns true,
*INDEX_P, *OFFSET_P and *BY_REF is filled with the parameter index, offset
within the aggregate and whether it is a load from a value passed by
reference respectively. */
-static bool
-ipa_load_from_parm_agg_1 (struct func_body_info *fbi,
- vec<ipa_param_descriptor> descriptors,
- gimple stmt, tree op, int *index_p,
- HOST_WIDE_INT *offset_p, HOST_WIDE_INT *size_p,
- bool *by_ref_p)
+bool
+ipa_load_from_parm_agg (struct ipa_func_body_info *fbi,
+ vec<ipa_param_descriptor, va_gc> *descriptors,
+ gimple *stmt, tree op, int *index_p,
+ HOST_WIDE_INT *offset_p, HOST_WIDE_INT *size_p,
+ bool *by_ref_p, bool *guaranteed_unmodified)
{
int index;
- HOST_WIDE_INT size, max_size;
- tree base = get_ref_base_and_extent (op, offset_p, &size, &max_size);
+ HOST_WIDE_INT size;
+ bool reverse;
+ tree base = get_ref_base_and_extent_hwi (op, offset_p, &size, &reverse);
- if (max_size == -1 || max_size != size || *offset_p < 0)
+ if (!base)
return false;
if (DECL_P (base))
*by_ref_p = false;
if (size_p)
*size_p = size;
+ if (guaranteed_unmodified)
+ *guaranteed_unmodified = true;
return true;
}
return false;
gdp = &p;
*/
- gimple def = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
+ gimple *def = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
index = load_from_unmodified_param (fbi, descriptors, def);
}
- if (index >= 0
- && parm_ref_data_preserved_p (fbi, index, stmt, op))
+ if (index >= 0)
{
+ bool data_preserved = parm_ref_data_preserved_p (fbi, index, stmt, op);
+ if (!data_preserved && !guaranteed_unmodified)
+ return false;
+
*index_p = index;
*by_ref_p = true;
if (size_p)
*size_p = size;
+ if (guaranteed_unmodified)
+ *guaranteed_unmodified = data_preserved;
return true;
}
return false;
}
-/* Just like the previous function, just without the param_analysis_info
- pointer, for users outside of this file. */
-
-bool
-ipa_load_from_parm_agg (struct ipa_node_params *info, gimple stmt,
- tree op, int *index_p, HOST_WIDE_INT *offset_p,
- bool *by_ref_p)
-{
- return ipa_load_from_parm_agg_1 (NULL, info->descriptors, stmt, op, index_p,
- offset_p, NULL, by_ref_p);
-}
-
/* Given that an actual argument is an SSA_NAME (given in NAME) and is a result
of an assignment statement STMT, try to determine whether we are actually
handling any of the following cases and construct an appropriate jump
only needed for intraprocedural analysis. */
static void
-compute_complex_assign_jump_func (struct func_body_info *fbi,
+compute_complex_assign_jump_func (struct ipa_func_body_info *fbi,
struct ipa_node_params *info,
struct ipa_jump_func *jfunc,
- gcall *call, gimple stmt, tree name,
+ gcall *call, gimple *stmt, tree name,
tree param_type)
{
- HOST_WIDE_INT offset, size, max_size;
+ HOST_WIDE_INT offset, size;
tree op1, tc_ssa, base, ssa;
+ bool reverse;
int index;
op1 = gimple_assign_rhs1 (stmt);
if (index >= 0)
{
- tree op2 = gimple_assign_rhs2 (stmt);
-
- if (op2)
- {
- if (!is_gimple_ip_invariant (op2)
- || (TREE_CODE_CLASS (gimple_expr_code (stmt)) != tcc_comparison
- && !useless_type_conversion_p (TREE_TYPE (name),
- TREE_TYPE (op1))))
- return;
-
- ipa_set_jf_arith_pass_through (jfunc, index, op2,
- gimple_assign_rhs_code (stmt));
- }
- else if (gimple_assign_single_p (stmt))
+ switch (gimple_assign_rhs_class (stmt))
{
- bool agg_p = parm_ref_data_pass_through_p (fbi, index, call, tc_ssa);
- ipa_set_jf_simple_pass_through (jfunc, index, agg_p);
+ case GIMPLE_BINARY_RHS:
+ {
+ tree op2 = gimple_assign_rhs2 (stmt);
+ if (!is_gimple_ip_invariant (op2)
+ || ((TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
+ != tcc_comparison)
+ && !useless_type_conversion_p (TREE_TYPE (name),
+ TREE_TYPE (op1))))
+ return;
+
+ ipa_set_jf_arith_pass_through (jfunc, index, op2,
+ gimple_assign_rhs_code (stmt));
+ break;
+ }
+ case GIMPLE_SINGLE_RHS:
+ {
+ bool agg_p = parm_ref_data_pass_through_p (fbi, index, call,
+ tc_ssa);
+ ipa_set_jf_simple_pass_through (jfunc, index, agg_p);
+ break;
+ }
+ case GIMPLE_UNARY_RHS:
+ if (is_gimple_assign (stmt)
+ && gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
+ && ! CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
+ ipa_set_jf_unary_pass_through (jfunc, index,
+ gimple_assign_rhs_code (stmt));
+ default:;
}
return;
}
op1 = TREE_OPERAND (op1, 0);
if (TREE_CODE (TREE_TYPE (op1)) != RECORD_TYPE)
return;
- base = get_ref_base_and_extent (op1, &offset, &size, &max_size);
- if (TREE_CODE (base) != MEM_REF
- /* If this is a varying address, punt. */
- || max_size == -1
- || max_size != size)
+ base = get_ref_base_and_extent_hwi (op1, &offset, &size, &reverse);
+ offset_int mem_offset;
+ if (!base
+ || TREE_CODE (base) != MEM_REF
+ || !mem_ref_offset (base).is_constant (&mem_offset))
return;
- offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
+ offset += mem_offset.to_short_addr () * BITS_PER_UNIT;
ssa = TREE_OPERAND (base, 0);
if (TREE_CODE (ssa) != SSA_NAME
|| !SSA_NAME_IS_DEFAULT_DEF (ssa)
RHS stripped off the ADDR_EXPR is stored into *OBJ_P. */
static tree
-get_ancestor_addr_info (gimple assign, tree *obj_p, HOST_WIDE_INT *offset)
+get_ancestor_addr_info (gimple *assign, tree *obj_p, HOST_WIDE_INT *offset)
{
- HOST_WIDE_INT size, max_size;
+ HOST_WIDE_INT size;
tree expr, parm, obj;
+ bool reverse;
if (!gimple_assign_single_p (assign))
return NULL_TREE;
return NULL_TREE;
expr = TREE_OPERAND (expr, 0);
obj = expr;
- expr = get_ref_base_and_extent (expr, offset, &size, &max_size);
+ expr = get_ref_base_and_extent_hwi (expr, offset, &size, &reverse);
- if (TREE_CODE (expr) != MEM_REF
- /* If this is a varying address, punt. */
- || max_size == -1
- || max_size != size
- || *offset < 0)
+ offset_int mem_offset;
+ if (!expr
+ || TREE_CODE (expr) != MEM_REF
+ || !mem_ref_offset (expr).is_constant (&mem_offset))
return NULL_TREE;
parm = TREE_OPERAND (expr, 0);
if (TREE_CODE (parm) != SSA_NAME
|| TREE_CODE (SSA_NAME_VAR (parm)) != PARM_DECL)
return NULL_TREE;
- *offset += mem_ref_offset (expr).to_short_addr () * BITS_PER_UNIT;
+ *offset += mem_offset.to_short_addr () * BITS_PER_UNIT;
*obj_p = obj;
return expr;
}
return D.1879_6; */
static void
-compute_complex_ancestor_jump_func (struct func_body_info *fbi,
+compute_complex_ancestor_jump_func (struct ipa_func_body_info *fbi,
struct ipa_node_params *info,
struct ipa_jump_func *jfunc,
gcall *call, gphi *phi)
{
HOST_WIDE_INT offset;
- gimple assign, cond;
+ gimple *assign, *cond;
basic_block phi_bb, assign_bb, cond_bb;
tree tmp, parm, expr, obj;
int index, i;
{
while (TREE_CODE (rhs) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (rhs))
{
- gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
+ gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
if (gimple_assign_single_p (def_stmt))
rhs = gimple_assign_rhs1 (def_stmt);
bool check_ref, by_ref;
ao_ref r;
+ if (PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS) == 0)
+ return;
+
/* The function operates in three stages. First, we prepare check_ref, r,
arg_base and arg_offset based on what is actually passed as an actual
argument. */
if (TREE_CODE (arg) == SSA_NAME)
{
tree type_size;
- if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (arg_type))))
+ if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (arg_type)))
+ || !POINTER_TYPE_P (TREE_TYPE (arg)))
return;
check_ref = true;
arg_base = arg;
}
else if (TREE_CODE (arg) == ADDR_EXPR)
{
- HOST_WIDE_INT arg_max_size;
+ bool reverse;
arg = TREE_OPERAND (arg, 0);
- arg_base = get_ref_base_and_extent (arg, &arg_offset, &arg_size,
- &arg_max_size);
- if (arg_max_size == -1
- || arg_max_size != arg_size
- || arg_offset < 0)
+ arg_base = get_ref_base_and_extent_hwi (arg, &arg_offset,
+ &arg_size, &reverse);
+ if (!arg_base)
return;
if (DECL_P (arg_base))
{
}
else
{
- HOST_WIDE_INT arg_max_size;
+ bool reverse;
gcc_checking_assert (AGGREGATE_TYPE_P (TREE_TYPE (arg)));
by_ref = false;
check_ref = false;
- arg_base = get_ref_base_and_extent (arg, &arg_offset, &arg_size,
- &arg_max_size);
- if (arg_max_size == -1
- || arg_max_size != arg_size
- || arg_offset < 0)
+ arg_base = get_ref_base_and_extent_hwi (arg, &arg_offset,
+ &arg_size, &reverse);
+ if (!arg_base)
return;
ao_ref_init (&r, arg);
for (; !gsi_end_p (gsi); gsi_prev (&gsi))
{
struct ipa_known_agg_contents_list *n, **p;
- gimple stmt = gsi_stmt (gsi);
- HOST_WIDE_INT lhs_offset, lhs_size, lhs_max_size;
+ gimple *stmt = gsi_stmt (gsi);
+ HOST_WIDE_INT lhs_offset, lhs_size;
tree lhs, rhs, lhs_base;
+ bool reverse;
if (!stmt_may_clobber_ref_p_1 (stmt, &r))
continue;
|| contains_bitfld_component_ref_p (lhs))
break;
- lhs_base = get_ref_base_and_extent (lhs, &lhs_offset, &lhs_size,
- &lhs_max_size);
- if (lhs_max_size == -1
- || lhs_max_size != lhs_size)
+ lhs_base = get_ref_base_and_extent_hwi (lhs, &lhs_offset,
+ &lhs_size, &reverse);
+ if (!lhs_base)
break;
if (check_ref)
}
}
-static tree
+/* Return the Ith param type of callee associated with call graph
+ edge E. */
+
+tree
ipa_get_callee_param_type (struct cgraph_edge *e, int i)
{
int n;
return NULL;
}
+/* Return ipa_bits with VALUE and MASK values, which can be either a newly
+ allocated structure or a previously existing one shared with other jump
+ functions and/or transformation summaries. */
+
+ipa_bits *
+ipa_get_ipa_bits_for_value (const widest_int &value, const widest_int &mask)
+{
+ /* Build a temporary key on the stack so we can probe the hash table
+ without allocating anything first. */
+ ipa_bits tmp;
+ tmp.value = value;
+ tmp.mask = mask;
+
+ ipa_bits **slot = ipa_bits_hash_table->find_slot (&tmp, INSERT);
+ /* If an equal structure already exists, share it instead of creating a
+ duplicate. */
+ if (*slot)
+ return *slot;
+
+ /* Otherwise allocate a GC-managed copy and cache it in the slot for
+ future lookups. */
+ ipa_bits *res = ggc_alloc<ipa_bits> ();
+ res->value = value;
+ res->mask = mask;
+ *slot = res;
+
+ return res;
+}
+
+/* Assign to JF a pointer to ipa_bits structure with VALUE and MASK. Use hash
+ table in order to avoid creating multiple same ipa_bits structures. */
+
+static void
+ipa_set_jfunc_bits (ipa_jump_func *jf, const widest_int &value,
+ const widest_int &mask)
+{
+ /* The pointer stored here may be shared with other jump functions and
+ transformation summaries (see ipa_get_ipa_bits_for_value). */
+ jf->bits = ipa_get_ipa_bits_for_value (value, mask);
+}
+
+/* Return a pointer to a value_range just like *TMP, but either find it in
+ ipa_vr_hash_table or allocate it in GC memory. TMP->equiv must be NULL. */
+
+static value_range_base *
+ipa_get_value_range (value_range_base *tmp)
+{
+ value_range_base **slot = ipa_vr_hash_table->find_slot (tmp, INSERT);
+ /* Share a previously interned value_range if an equal one exists. */
+ if (*slot)
+ return *slot;
+
+ /* Otherwise intern a GC-allocated copy of *TMP. */
+ value_range_base *vr = ggc_alloc<value_range_base> ();
+ *vr = *tmp;
+ *slot = vr;
+
+ return vr;
+}
+
+/* Return a pointer to a value range consisting of TYPE, MIN, MAX and an empty
+ equiv set. Use hash table in order to avoid creating multiple same copies of
+ value_ranges. */
+
+static value_range_base *
+ipa_get_value_range (enum value_range_kind type, tree min, tree max)
+{
+ /* Delegate to the pointer overload, which performs the hash-table
+ lookup/interning. */
+ value_range_base tmp (type, min, max);
+ return ipa_get_value_range (&tmp);
+}
+
+/* Assign to JF a pointer to a value_range structure with TYPE, MIN and MAX and
+ a NULL equiv bitmap. Use hash table in order to avoid creating multiple
+ same value_range structures. */
+
+static void
+ipa_set_jfunc_vr (ipa_jump_func *jf, enum value_range_kind type,
+ tree min, tree max)
+{
+ /* The stored pointer may be shared between jump functions. */
+ jf->m_vr = ipa_get_value_range (type, min, max);
+}
+
+/* Assign to JF a pointer to a value_range just like TMP but either fetch a
+ copy from ipa_vr_hash_table or allocate a new one in GC memory. */
+
+static void
+ipa_set_jfunc_vr (ipa_jump_func *jf, value_range_base *tmp)
+{
+ /* The stored pointer may be shared between jump functions. */
+ jf->m_vr = ipa_get_value_range (tmp);
+}
+
/* Compute jump function for all arguments of callsite CS and insert the
information in the jump_functions array in the ipa_edge_args corresponding
to this callsite. */
static void
-ipa_compute_jump_functions_for_edge (struct func_body_info *fbi,
+ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
struct cgraph_edge *cs)
{
struct ipa_node_params *info = IPA_NODE_REF (cs->caller);
struct ipa_polymorphic_call_context context (cs->caller->decl,
arg, cs->call_stmt,
&instance);
- context.get_dynamic_type (instance, arg, NULL, cs->call_stmt);
+ context.get_dynamic_type (instance, arg, NULL, cs->call_stmt,
+ &fbi->aa_walk_budget);
*ipa_get_ith_polymorhic_call_context (args, n) = context;
if (!context.useless_p ())
useful_context = true;
}
- if (POINTER_TYPE_P (TREE_TYPE(arg)))
+ if (POINTER_TYPE_P (TREE_TYPE (arg)))
{
- unsigned HOST_WIDE_INT hwi_bitpos;
- unsigned align;
+ bool addr_nonzero = false;
+ bool strict_overflow = false;
+
+ if (TREE_CODE (arg) == SSA_NAME
+ && param_type
+ && get_ptr_nonnull (arg))
+ addr_nonzero = true;
+ else if (tree_single_nonzero_warnv_p (arg, &strict_overflow))
+ addr_nonzero = true;
- if (get_pointer_alignment_1 (arg, &align, &hwi_bitpos)
- && align % BITS_PER_UNIT == 0
- && hwi_bitpos % BITS_PER_UNIT == 0)
+ if (addr_nonzero)
+ {
+ tree z = build_int_cst (TREE_TYPE (arg), 0);
+ ipa_set_jfunc_vr (jfunc, VR_ANTI_RANGE, z, z);
+ }
+ else
+ gcc_assert (!jfunc->m_vr);
+ }
+ else
+ {
+ wide_int min, max;
+ value_range_kind type;
+ if (TREE_CODE (arg) == SSA_NAME
+ && param_type
+ && (type = get_range_info (arg, &min, &max))
+ && (type == VR_RANGE || type == VR_ANTI_RANGE))
{
- jfunc->alignment.known = true;
- jfunc->alignment.align = align / BITS_PER_UNIT;
- jfunc->alignment.misalign = hwi_bitpos / BITS_PER_UNIT;
+ value_range_base resvr;
+ value_range_base tmpvr (type,
+ wide_int_to_tree (TREE_TYPE (arg), min),
+ wide_int_to_tree (TREE_TYPE (arg), max));
+ extract_range_from_unary_expr (&resvr, NOP_EXPR, param_type,
+ &tmpvr, TREE_TYPE (arg));
+ if (!resvr.undefined_p () && !resvr.varying_p ())
+ ipa_set_jfunc_vr (jfunc, &resvr);
+ else
+ gcc_assert (!jfunc->m_vr);
}
else
- gcc_assert (!jfunc->alignment.known);
+ gcc_assert (!jfunc->m_vr);
+ }
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (arg))
+ && (TREE_CODE (arg) == SSA_NAME || TREE_CODE (arg) == INTEGER_CST))
+ {
+ if (TREE_CODE (arg) == SSA_NAME)
+ ipa_set_jfunc_bits (jfunc, 0,
+ widest_int::from (get_nonzero_bits (arg),
+ TYPE_SIGN (TREE_TYPE (arg))));
+ else
+ ipa_set_jfunc_bits (jfunc, wi::to_widest (arg), 0);
+ }
+ else if (POINTER_TYPE_P (TREE_TYPE (arg)))
+ {
+ unsigned HOST_WIDE_INT bitpos;
+ unsigned align;
+
+ get_pointer_alignment_1 (arg, &align, &bitpos);
+ widest_int mask = wi::bit_and_not
+ (wi::mask<widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false),
+ align / BITS_PER_UNIT - 1);
+ widest_int value = bitpos / BITS_PER_UNIT;
+ ipa_set_jfunc_bits (jfunc, value, mask);
}
else
- gcc_assert (!jfunc->alignment.known);
+ gcc_assert (!jfunc->bits);
- if (is_gimple_ip_invariant (arg))
+ if (is_gimple_ip_invariant (arg)
+ || (VAR_P (arg)
+ && is_global_var (arg)
+ && TREE_READONLY (arg)))
ipa_set_jf_constant (jfunc, arg, cs);
else if (!is_gimple_reg_type (TREE_TYPE (arg))
&& TREE_CODE (arg) == PARM_DECL)
}
else
{
- gimple stmt = SSA_NAME_DEF_STMT (arg);
+ gimple *stmt = SSA_NAME_DEF_STMT (arg);
if (is_gimple_assign (stmt))
compute_complex_assign_jump_func (fbi, info, jfunc,
call, stmt, arg, param_type);
}
}
- /* If ARG is pointer, we can not use its type to determine the type of aggregate
+ /* If ARG is pointer, we cannot use its type to determine the type of aggregate
passed (because type conversions are ignored in gimple). Usually we can
safely get type from function declaration, but in case of K&R prototypes or
variadic functions we can try our luck with type of the pointer passed.
from BB. */
static void
-ipa_compute_jump_functions_for_bb (struct func_body_info *fbi, basic_block bb)
+ipa_compute_jump_functions_for_bb (struct ipa_func_body_info *fbi, basic_block bb)
{
struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb);
int i;
field rather than the pfn. */
static tree
-ipa_get_stmt_member_ptr_load_param (gimple stmt, bool use_delta,
+ipa_get_stmt_member_ptr_load_param (gimple *stmt, bool use_delta,
HOST_WIDE_INT *offset_p)
{
tree rhs, rec, ref_field, ref_offset, fld, ptr_field, delta_field;
cs->indirect_info->param_index = param_index;
cs->indirect_info->agg_contents = 0;
cs->indirect_info->member_ptr = 0;
+ cs->indirect_info->guaranteed_unmodified = 0;
return cs;
}
passed by value or reference. */
static void
-ipa_analyze_indirect_call_uses (struct func_body_info *fbi, gcall *call,
+ipa_analyze_indirect_call_uses (struct ipa_func_body_info *fbi, gcall *call,
tree target)
{
struct ipa_node_params *info = fbi->info;
}
int index;
- gimple def = SSA_NAME_DEF_STMT (target);
+ gimple *def = SSA_NAME_DEF_STMT (target);
+ bool guaranteed_unmodified;
if (gimple_assign_single_p (def)
- && ipa_load_from_parm_agg_1 (fbi, info->descriptors, def,
- gimple_assign_rhs1 (def), &index, &offset,
- NULL, &by_ref))
+ && ipa_load_from_parm_agg (fbi, info->descriptors, def,
+ gimple_assign_rhs1 (def), &index, &offset,
+ NULL, &by_ref, &guaranteed_unmodified))
{
struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call);
cs->indirect_info->offset = offset;
cs->indirect_info->agg_contents = 1;
cs->indirect_info->by_ref = by_ref;
+ cs->indirect_info->guaranteed_unmodified = guaranteed_unmodified;
return;
}
tree n2 = PHI_ARG_DEF (def, 1);
if (!ipa_is_ssa_with_stmt_def (n1) || !ipa_is_ssa_with_stmt_def (n2))
return;
- gimple d1 = SSA_NAME_DEF_STMT (n1);
- gimple d2 = SSA_NAME_DEF_STMT (n2);
+ gimple *d1 = SSA_NAME_DEF_STMT (n1);
+ gimple *d2 = SSA_NAME_DEF_STMT (n2);
tree rec;
basic_block bb, virt_bb;
/* Third, let's see that the branching is done depending on the least
significant bit of the pfn. */
- gimple branch = last_stmt (bb);
+ gimple *branch = last_stmt (bb);
if (!branch || gimple_code (branch) != GIMPLE_COND)
return;
cs->indirect_info->offset = offset;
cs->indirect_info->agg_contents = 1;
cs->indirect_info->member_ptr = 1;
+ cs->indirect_info->guaranteed_unmodified = 1;
}
return;
statement. */
static void
-ipa_analyze_virtual_call_uses (struct func_body_info *fbi,
+ipa_analyze_virtual_call_uses (struct ipa_func_body_info *fbi,
gcall *call, tree target)
{
tree obj = OBJ_TYPE_REF_OBJECT (target);
anc_offset = 0;
index = ipa_get_param_decl_index (info, SSA_NAME_VAR (obj));
gcc_assert (index >= 0);
- if (detect_type_change_ssa (obj, obj_type_ref_class (target),
+ if (detect_type_change_ssa (fbi, obj, obj_type_ref_class (target),
call, &jfunc))
return;
}
else
{
struct ipa_jump_func jfunc;
- gimple stmt = SSA_NAME_DEF_STMT (obj);
+ gimple *stmt = SSA_NAME_DEF_STMT (obj);
tree expr;
expr = get_ancestor_addr_info (stmt, &obj, &anc_offset);
index = ipa_get_param_decl_index (info,
SSA_NAME_VAR (TREE_OPERAND (expr, 0)));
gcc_assert (index >= 0);
- if (detect_type_change (obj, expr, obj_type_ref_class (target),
+ if (detect_type_change (fbi, obj, expr, obj_type_ref_class (target),
call, &jfunc, anc_offset))
return;
}
containing intermediate information about each formal parameter. */
static void
-ipa_analyze_call_uses (struct func_body_info *fbi, gcall *call)
+ipa_analyze_call_uses (struct ipa_func_body_info *fbi, gcall *call)
{
tree target = gimple_call_fn (call);
cs->indirect_info->vptr_changed
= !context.get_dynamic_type (instance,
OBJ_TYPE_REF_OBJECT (target),
- obj_type_ref_class (target), call);
+ obj_type_ref_class (target), call,
+ &fbi->aa_walk_budget);
cs->indirect_info->context = context;
}
formal parameters are called. */
static void
-ipa_analyze_stmt_uses (struct func_body_info *fbi, gimple stmt)
+ipa_analyze_stmt_uses (struct ipa_func_body_info *fbi, gimple *stmt)
{
if (is_gimple_call (stmt))
ipa_analyze_call_uses (fbi, as_a <gcall *> (stmt));
passed in DATA. */
static bool
-visit_ref_for_mod_analysis (gimple, tree op, tree, void *data)
+visit_ref_for_mod_analysis (gimple *, tree op, tree, void *data)
{
struct ipa_node_params *info = (struct ipa_node_params *) data;
the function being analyzed. */
static void
-ipa_analyze_params_uses_in_bb (struct func_body_info *fbi, basic_block bb)
+ipa_analyze_params_uses_in_bb (struct ipa_func_body_info *fbi, basic_block bb)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
class analysis_dom_walker : public dom_walker
{
public:
- analysis_dom_walker (struct func_body_info *fbi)
+ analysis_dom_walker (struct ipa_func_body_info *fbi)
: dom_walker (CDI_DOMINATORS), m_fbi (fbi) {}
- virtual void before_dom_children (basic_block);
+ virtual edge before_dom_children (basic_block);
private:
- struct func_body_info *m_fbi;
+ struct ipa_func_body_info *m_fbi;
};
-void
+edge
analysis_dom_walker::before_dom_children (basic_block bb)
{
ipa_analyze_params_uses_in_bb (m_fbi, bb);
ipa_compute_jump_functions_for_bb (m_fbi, bb);
+ return NULL;
+}
+
+/* Release body info FBI. */
+
+void
+ipa_release_body_info (struct ipa_func_body_info *fbi)
+{
+ int i;
+ struct ipa_bb_info *bi;
+
+ /* Free the per-basic-block analysis info first, then the vector that
+ held it. */
+ FOR_EACH_VEC_ELT (fbi->bb_infos, i, bi)
+ free_ipa_bb_info (bi);
+ fbi->bb_infos.release ();
+}
/* Initialize the array describing properties of formal parameters
void
ipa_analyze_node (struct cgraph_node *node)
{
- struct func_body_info fbi;
+ struct ipa_func_body_info fbi;
struct ipa_node_params *info;
ipa_check_create_node_params ();
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = ipa_get_param_count (info);
- fbi.aa_walked = 0;
+ fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
{
analysis_dom_walker (&fbi).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- int i;
- struct ipa_bb_info *bi;
- FOR_EACH_VEC_ELT (fbi.bb_infos, i, bi)
- free_ipa_bb_info (bi);
- fbi.bb_infos.release ();
+ ipa_release_body_info (&fbi);
free_dominance_info (CDI_DOMINATORS);
pop_cfun ();
}
ctx.offset_by (dst->value.ancestor.offset);
if (!ctx.useless_p ())
{
- vec_safe_grow_cleared (args->polymorphic_call_contexts,
- count);
- dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
+ if (!dst_ctx)
+ {
+ vec_safe_grow_cleared (args->polymorphic_call_contexts,
+ count);
+ dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
+ }
+
+ dst_ctx->combine_with (ctx);
}
- dst_ctx->combine_with (ctx);
}
if (src->agg.items
dst->value.ancestor.agg_preserved &=
src->value.pass_through.agg_preserved;
}
+ else if (src->type == IPA_JF_PASS_THROUGH
+ && TREE_CODE_CLASS (src->value.pass_through.operation) == tcc_unary)
+ {
+ dst->value.ancestor.formal_id = src->value.pass_through.formal_id;
+ dst->value.ancestor.agg_preserved = false;
+ }
else if (src->type == IPA_JF_ANCESTOR)
{
dst->value.ancestor.formal_id = src->value.ancestor.formal_id;
&& ipa_get_jf_pass_through_agg_preserved (src);
ipa_set_jf_simple_pass_through (dst, formal_id, agg_p);
}
+ else if (TREE_CODE_CLASS (operation) == tcc_unary)
+ ipa_set_jf_unary_pass_through (dst, formal_id, operation);
else
{
tree operand = ipa_get_jf_pass_through_operand (src);
bool speculative)
{
struct cgraph_node *callee;
- struct inline_edge_summary *es = inline_edge_summary (ie);
bool unreachable = false;
if (TREE_CODE (target) == ADDR_EXPR)
{
if (dump_enabled_p ())
{
- location_t loc = gimple_location_safe (ie->call_stmt);
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
- "discovered direct call non-invariant "
- "%s/%i\n",
- ie->caller->name (), ie->caller->order);
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, ie->call_stmt,
+ "discovered direct call non-invariant %s\n",
+ ie->caller->dump_name ());
}
return NULL;
}
if (dump_enabled_p ())
{
- location_t loc = gimple_location_safe (ie->call_stmt);
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
- "discovered direct call to non-function in %s/%i, "
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, ie->call_stmt,
+ "discovered direct call to non-function in %s, "
"making it __builtin_unreachable\n",
- ie->caller->name (), ie->caller->order);
+ ie->caller->dump_name ());
}
target = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
{
if (dump_file)
fprintf (dump_file, "ipa-prop: Discovered call to a known target "
- "(%s/%i -> %s/%i) but can not refer to it. Giving up.\n",
- xstrdup_for_dump (ie->caller->name ()),
- ie->caller->order,
- xstrdup_for_dump (ie->callee->name ()),
- ie->callee->order);
+ "(%s -> %s) but cannot refer to it. Giving up.\n",
+ ie->caller->dump_name (),
+ ie->callee->dump_name ());
return NULL;
}
callee = cgraph_node::get_create (target);
!= callee->ultimate_alias_target ())
{
if (dump_file)
- fprintf (dump_file, "ipa-prop: Discovered call to a speculative target "
- "(%s/%i -> %s/%i) but the call is already speculated to %s/%i. Giving up.\n",
- xstrdup_for_dump (ie->caller->name ()),
- ie->caller->order,
- xstrdup_for_dump (callee->name ()),
- callee->order,
- xstrdup_for_dump (e2->callee->name ()),
- e2->callee->order);
+ fprintf (dump_file, "ipa-prop: Discovered call to a speculative "
+ "target (%s -> %s) but the call is already "
+ "speculated to %s. Giving up.\n",
+ ie->caller->dump_name (), callee->dump_name (),
+ e2->callee->dump_name ());
}
else
{
if (dump_file)
fprintf (dump_file, "ipa-prop: Discovered call to a speculative target "
- "(%s/%i -> %s/%i) this agree with previous speculation.\n",
- xstrdup_for_dump (ie->caller->name ()),
- ie->caller->order,
- xstrdup_for_dump (callee->name ()),
- callee->order);
+ "(%s -> %s) this agree with previous speculation.\n",
+ ie->caller->dump_name (), callee->dump_name ());
}
return NULL;
}
ipa_check_create_node_params ();
- /* We can not make edges to inline clones. It is bug that someone removed
+ /* We cannot make edges to inline clones. It is bug that someone removed
the cgraph node too early. */
gcc_assert (!callee->global.inlined_to);
if (dump_file && !unreachable)
{
fprintf (dump_file, "ipa-prop: Discovered %s call to a %s target "
- "(%s/%i -> %s/%i), for stmt ",
+ "(%s -> %s), for stmt ",
ie->indirect_info->polymorphic ? "a virtual" : "an indirect",
speculative ? "speculative" : "known",
- xstrdup_for_dump (ie->caller->name ()),
- ie->caller->order,
- xstrdup_for_dump (callee->name ()),
- callee->order);
+ ie->caller->dump_name (),
+ callee->dump_name ());
if (ie->call_stmt)
print_gimple_stmt (dump_file, ie->call_stmt, 2, TDF_SLIM);
else
}
if (dump_enabled_p ())
{
- location_t loc = gimple_location_safe (ie->call_stmt);
-
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, ie->call_stmt,
"converting indirect call in %s to direct call to %s\n",
ie->caller->name (), callee->name ());
}
for direct call (adjusted by inline_edge_duplication_hook). */
if (ie == orig)
{
- es = inline_edge_summary (ie);
+ ipa_call_summary *es = ipa_call_summaries->get (ie);
es->call_stmt_size -= (eni_size_weights.indirect_call_cost
- eni_size_weights.call_cost);
es->call_stmt_time -= (eni_time_weights.indirect_call_cost
}
/* make_speculative will update ie's cost to direct call cost. */
ie = ie->make_speculative
- (callee, ie->count * 8 / 10, ie->frequency * 8 / 10);
+ (callee, ie->count.apply_scale (8, 10));
}
return ie;
}
-/* Retrieve value from aggregate jump function AGG for the given OFFSET or
- return NULL if there is not any. BY_REF specifies whether the value has to
- be passed by reference or by value. */
+/* Attempt to locate an interprocedural constant at a given REQ_OFFSET in
+ CONSTRUCTOR and return it. Return NULL if the search fails for some
+ reason. */
+
+static tree
+find_constructor_constant_at_offset (tree constructor, HOST_WIDE_INT req_offset)
+{
+ /* Only array and record constructors can be searched by bit offset. */
+ tree type = TREE_TYPE (constructor);
+ if (TREE_CODE (type) != ARRAY_TYPE
+ && TREE_CODE (type) != RECORD_TYPE)
+ return NULL;
+
+ unsigned ix;
+ tree index, val;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (constructor), ix, index, val)
+ {
+ /* Bit offset of the current element within CONSTRUCTOR. */
+ HOST_WIDE_INT elt_offset;
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ offset_int off;
+ tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
+ gcc_assert (TREE_CODE (unit_size) == INTEGER_CST);
+
+ if (index)
+ {
+ /* For a RANGE_EXPR index only the low bound is used; see
+ the ??? note below. */
+ if (TREE_CODE (index) == RANGE_EXPR)
+ off = wi::to_offset (TREE_OPERAND (index, 0));
+ else
+ off = wi::to_offset (index);
+ /* Rebase the index against the array's lower bound, if any. */
+ if (TYPE_DOMAIN (type) && TYPE_MIN_VALUE (TYPE_DOMAIN (type)))
+ {
+ tree low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
+ gcc_assert (TREE_CODE (unit_size) == INTEGER_CST);
+ off = wi::sext (off - wi::to_offset (low_bound),
+ TYPE_PRECISION (TREE_TYPE (index)));
+ }
+ off *= wi::to_offset (unit_size);
+ /* ??? Handle more than just the first index of a
+ RANGE_EXPR. */
+ }
+ else
+ /* No explicit index: the element's position IX determines it. */
+ off = wi::to_offset (unit_size) * ix;
+
+ /* Convert the byte offset to bits; skip elements whose offset
+ does not fit in a signed HOST_WIDE_INT or is negative. */
+ off = wi::lshift (off, LOG2_BITS_PER_UNIT);
+ if (!wi::fits_shwi_p (off) || wi::neg_p (off))
+ continue;
+ elt_offset = off.to_shwi ();
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ gcc_checking_assert (index && TREE_CODE (index) == FIELD_DECL);
+ /* Bit-fields are not handled; skip them. */
+ if (DECL_BIT_FIELD (index))
+ continue;
+ elt_offset = int_bit_position (index);
+ }
+ else
+ gcc_unreachable ();
+
+ /* NOTE(review): this early return assumes constructor elements are
+ sorted by increasing offset — confirm against CONSTRUCTOR
+ invariants. */
+ if (elt_offset > req_offset)
+ return NULL;
+
+ /* Recurse into nested constructors with the offset rebased to the
+ element's start. */
+ if (TREE_CODE (val) == CONSTRUCTOR)
+ return find_constructor_constant_at_offset (val,
+ req_offset - elt_offset);
+
+ /* A match only counts if it starts exactly at the requested offset
+ and is a scalar interprocedural invariant. */
+ if (elt_offset == req_offset
+ && is_gimple_reg_type (TREE_TYPE (val))
+ && is_gimple_ip_invariant (val))
+ return val;
+ }
+ return NULL;
+}
+
+/* Check whether SCALAR could be used to look up an aggregate interprocedural
+ invariant from a static constructor and if so, return it. Otherwise return
+ NULL. */
+
+static tree
+ipa_find_agg_cst_from_init (tree scalar, HOST_WIDE_INT offset, bool by_ref)
+{
+ /* When passed by reference, SCALAR must be the address of the variable;
+ strip the ADDR_EXPR to get at the variable itself. */
+ if (by_ref)
+ {
+ if (TREE_CODE (scalar) != ADDR_EXPR)
+ return NULL;
+ scalar = TREE_OPERAND (scalar, 0);
+ }
+
+ /* Only read-only global variables with a CONSTRUCTOR initializer can
+ yield a usable interprocedural invariant. */
+ if (!VAR_P (scalar)
+ || !is_global_var (scalar)
+ || !TREE_READONLY (scalar)
+ || !DECL_INITIAL (scalar)
+ || TREE_CODE (DECL_INITIAL (scalar)) != CONSTRUCTOR)
+ return NULL;
+
+ return find_constructor_constant_at_offset (DECL_INITIAL (scalar), offset);
+}
+
+/* Retrieve value from aggregate jump function AGG or static initializer of
+ SCALAR (which can be NULL) for the given OFFSET or return NULL if there is
+ none. BY_REF specifies whether the value has to be passed by reference or
+ by value. If FROM_GLOBAL_CONSTANT is non-NULL, then the boolean it points
+ to is set to true if the value comes from an initializer of a constant. */
tree
-ipa_find_agg_cst_for_param (struct ipa_agg_jump_function *agg,
- HOST_WIDE_INT offset, bool by_ref)
+ipa_find_agg_cst_for_param (struct ipa_agg_jump_function *agg, tree scalar,
+ HOST_WIDE_INT offset, bool by_ref,
+ bool *from_global_constant)
{
struct ipa_agg_jf_item *item;
int i;
- if (by_ref != agg->by_ref)
+ if (scalar)
+ {
+ tree res = ipa_find_agg_cst_from_init (scalar, offset, by_ref);
+ if (res)
+ {
+ if (from_global_constant)
+ *from_global_constant = true;
+ return res;
+ }
+ }
+
+ if (!agg
+ || by_ref != agg->by_ref)
return NULL;
FOR_EACH_VEC_SAFE_ELT (agg->items, i, item)
/* Currently we do not have clobber values, return NULL for them once
we do. */
gcc_checking_assert (is_gimple_ip_invariant (item->value));
+ if (from_global_constant)
+ *from_global_constant = false;
return item->value;
}
return NULL;
to_del->remove_reference ();
if (dump_file)
- fprintf (dump_file, "ipa-prop: Removed a reference from %s/%i to %s.\n",
- xstrdup_for_dump (origin->caller->name ()),
- origin->caller->order, xstrdup_for_dump (symbol->name ()));
+ fprintf (dump_file, "ipa-prop: Removed a reference from %s to %s.\n",
+ origin->caller->dump_name (), xstrdup_for_dump (symbol->name ()));
return true;
}
/* Try to find a destination for indirect edge IE that corresponds to a simple
call or a call of a member function pointer and where the destination is a
- pointer formal parameter described by jump function JFUNC. If it can be
- determined, return the newly direct edge, otherwise return NULL.
+ pointer formal parameter described by jump function JFUNC. TARGET_TYPE is
+ the type of the parameter to which the result of JFUNC is passed. If it can
+ be determined, return the newly direct edge, otherwise return NULL.
NEW_ROOT_INFO is the node info that JFUNC lattices are relative to. */
static struct cgraph_edge *
try_make_edge_direct_simple_call (struct cgraph_edge *ie,
- struct ipa_jump_func *jfunc,
+ struct ipa_jump_func *jfunc, tree target_type,
struct ipa_node_params *new_root_info)
{
struct cgraph_edge *cs;
tree target;
bool agg_contents = ie->indirect_info->agg_contents;
-
- if (ie->indirect_info->agg_contents)
- target = ipa_find_agg_cst_for_param (&jfunc->agg,
- ie->indirect_info->offset,
- ie->indirect_info->by_ref);
+ tree scalar = ipa_value_from_jfunc (new_root_info, jfunc, target_type);
+ if (agg_contents)
+ {
+ bool from_global_constant;
+ target = ipa_find_agg_cst_for_param (&jfunc->agg, scalar,
+ ie->indirect_info->offset,
+ ie->indirect_info->by_ref,
+ &from_global_constant);
+ if (target
+ && !from_global_constant
+ && !ie->indirect_info->guaranteed_unmodified)
+ return NULL;
+ }
else
- target = ipa_value_from_jfunc (new_root_info, jfunc);
+ target = scalar;
if (!target)
return NULL;
cs = ipa_make_edge_direct_to_target (ie, target);
{
if (target)
fprintf (dump_file,
- "Type inconsistent devirtualization: %s/%i->%s\n",
- ie->caller->name (), ie->caller->order,
+ "Type inconsistent devirtualization: %s->%s\n",
+ ie->caller->dump_name (),
IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (target)));
else
fprintf (dump_file,
- "No devirtualization target in %s/%i\n",
- ie->caller->name (), ie->caller->order);
+ "No devirtualization target in %s\n",
+ ie->caller->dump_name ());
}
tree new_target = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
cgraph_node::get_create (new_target);
{
tree vtable;
unsigned HOST_WIDE_INT offset;
- tree t = ipa_find_agg_cst_for_param (&jfunc->agg,
+ tree scalar = (jfunc->type == IPA_JF_CONST) ? ipa_get_jf_constant (jfunc)
+ : NULL;
+ tree t = ipa_find_agg_cst_for_param (&jfunc->agg, scalar,
ie->indirect_info->offset,
true);
if (t && vtable_pointer_value_to_vtable (t, &vtable, &offset))
{
+ bool can_refer;
t = gimple_get_virt_method_for_vtable (ie->indirect_info->otr_token,
- vtable, offset);
- if (t)
+ vtable, offset, &can_refer);
+ if (can_refer)
{
- if ((TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE
- && DECL_FUNCTION_CODE (t) == BUILT_IN_UNREACHABLE)
+ if (!t
+ || (TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE
+ && DECL_FUNCTION_CODE (t) == BUILT_IN_UNREACHABLE)
|| !possible_polymorphic_call_target_p
(ie, cgraph_node::get (t)))
{
/* Do not speculate builtin_unreachable, it is stupid! */
if (!ie->indirect_info->vptr_changed)
target = ipa_impossible_devirt_target (ie, target);
+ else
+ target = NULL;
}
else
{
{
struct ipa_edge_args *top;
struct cgraph_edge *ie, *next_ie, *new_direct_edge;
- struct ipa_node_params *new_root_info;
+ struct ipa_node_params *new_root_info, *inlined_node_info;
bool res = false;
ipa_check_create_edge_args ();
new_root_info = IPA_NODE_REF (cs->caller->global.inlined_to
? cs->caller->global.inlined_to
: cs->caller);
+ inlined_node_info = IPA_NODE_REF (cs->callee->function_symbol ());
for (ie = node->indirect_calls; ie; ie = next_ie)
{
new_direct_edge = try_make_edge_direct_virtual_call (ie, jfunc, ctx);
}
else
- new_direct_edge = try_make_edge_direct_simple_call (ie, jfunc,
- new_root_info);
+ {
+ tree target_type = ipa_get_type (inlined_node_info, param_index);
+ new_direct_edge = try_make_edge_direct_simple_call (ie, jfunc,
+ target_type,
+ new_root_info);
+ }
+
/* If speculation was removed, then we need to do nothing. */
if (new_direct_edge && new_direct_edge != ie
&& new_direct_edge->callee == spec_target)
{
if (dump_file)
fprintf (dump_file, "ipa-prop: Removing cloning-created "
- "reference from %s/%i to %s/%i.\n",
- xstrdup_for_dump (new_root->name ()),
- new_root->order,
- xstrdup_for_dump (n->name ()), n->order);
+ "reference from %s to %s.\n",
+ new_root->dump_name (),
+ n->dump_name ());
ref->remove_reference ();
}
}
if (dump_file)
fprintf (dump_file, "ipa-prop: Removing "
"cloning-created reference "
- "from %s/%i to %s/%i.\n",
- xstrdup_for_dump (clone->name ()),
- clone->order,
- xstrdup_for_dump (n->name ()),
- n->order);
+ "from %s to %s.\n",
+ clone->dump_name (),
+ n->dump_name ());
ref->remove_reference ();
}
clone = clone->callers->caller;
(i.e. during early inlining). */
if (!ipa_node_params_sum)
return false;
- gcc_assert (ipa_edge_args_vector);
+ gcc_assert (ipa_edge_args_sum);
propagate_controlled_uses (cs);
changed = propagate_info_to_inlined_callees (cs, cs->callee, new_edges);
return changed;
}
-/* Frees all dynamically allocated structures that the argument info points
- to. */
+/* Ensure that the array of edge argument infos is big enough to accommodate a
+ structure for all edges, reallocating it if not. Also, allocate
+ associated hash tables if they do not already exist. */
void
-ipa_free_edge_args_substructures (struct ipa_edge_args *args)
+ipa_check_create_edge_args (void)
{
- vec_free (args->jump_functions);
- memset (args, 0, sizeof (*args));
+ if (!ipa_edge_args_sum)
+ ipa_edge_args_sum
+ = (new (ggc_cleared_alloc <ipa_edge_args_sum_t> ())
+ ipa_edge_args_sum_t (symtab, true));
+ if (!ipa_bits_hash_table)
+ ipa_bits_hash_table = hash_table<ipa_bit_ggc_hash_traits>::create_ggc (37);
+ if (!ipa_vr_hash_table)
+ ipa_vr_hash_table = hash_table<ipa_vr_ggc_hash_traits>::create_ggc (37);
}
/* Free all ipa_edge structures. */
void
ipa_free_all_edge_args (void)
{
- int i;
- struct ipa_edge_args *args;
-
- if (!ipa_edge_args_vector)
+ if (!ipa_edge_args_sum)
return;
- FOR_EACH_VEC_ELT (*ipa_edge_args_vector, i, args)
- ipa_free_edge_args_substructures (args);
-
- vec_free (ipa_edge_args_vector);
-}
-
-/* Frees all dynamically allocated structures that the param info points
- to. */
-
-ipa_node_params::~ipa_node_params ()
-{
- descriptors.release ();
- free (lattices);
- /* Lattice values and their sources are deallocated with their alocation
- pool. */
- known_contexts.release ();
-
- lattices = NULL;
- ipcp_orig_node = NULL;
- analysis_done = 0;
- node_enqueued = 0;
- do_clone_for_all_contexts = 0;
- is_all_contexts_clone = 0;
- node_dead = 0;
+ ipa_edge_args_sum->release ();
+ ipa_edge_args_sum = NULL;
}
/* Free all ipa_node_params structures. */
void
ipa_free_all_node_params (void)
{
- delete ipa_node_params_sum;
+ ipa_node_params_sum->release ();
ipa_node_params_sum = NULL;
}
-/* Grow ipcp_transformations if necessary. */
+/* Initialize IPA CP transformation summary and also allocate any necessary hash
+ tables if they do not already exist. */
void
-ipcp_grow_transformations_if_necessary (void)
+ipcp_transformation_initialize (void)
{
- if (vec_safe_length (ipcp_transformations)
- <= (unsigned) symtab->cgraph_max_uid)
- vec_safe_grow_cleared (ipcp_transformations, symtab->cgraph_max_uid + 1);
+ if (!ipa_bits_hash_table)
+ ipa_bits_hash_table = hash_table<ipa_bit_ggc_hash_traits>::create_ggc (37);
+ if (!ipa_vr_hash_table)
+ ipa_vr_hash_table = hash_table<ipa_vr_ggc_hash_traits>::create_ggc (37);
+ if (ipcp_transformation_sum == NULL)
+ ipcp_transformation_sum = ipcp_transformation_t::create_ggc (symtab);
}
/* Set the aggregate replacements of NODE to be AGGVALS. */
ipa_set_node_agg_value_chain (struct cgraph_node *node,
struct ipa_agg_replacement_value *aggvals)
{
- ipcp_grow_transformations_if_necessary ();
- (*ipcp_transformations)[node->uid].agg_values = aggvals;
+ ipcp_transformation_initialize ();
+ ipcp_transformation *s = ipcp_transformation_sum->get_create (node);
+ s->agg_values = aggvals;
}
-/* Hook that is called by cgraph.c when an edge is removed. */
+/* Hook that is called by cgraph.c when an edge is removed. Adjust reference
+ count data structures accordingly. */
-static void
-ipa_edge_removal_hook (struct cgraph_edge *cs, void *data ATTRIBUTE_UNUSED)
+void
+ipa_edge_args_sum_t::remove (cgraph_edge *cs, ipa_edge_args *args)
{
- struct ipa_edge_args *args;
-
- /* During IPA-CP updating we can be called on not-yet analyzed clones. */
- if (vec_safe_length (ipa_edge_args_vector) <= (unsigned)cs->uid)
- return;
-
- args = IPA_EDGE_REF (cs);
if (args->jump_functions)
{
struct ipa_jump_func *jf;
rdesc->cs = NULL;
}
}
-
- ipa_free_edge_args_substructures (IPA_EDGE_REF (cs));
}
-/* Hook that is called by cgraph.c when an edge is duplicated. */
+/* Method invoked when an edge is duplicated. Copy ipa_edge_args and adjust
+ reference count data structures accordingly. */
-static void
-ipa_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
- void *)
+void
+ipa_edge_args_sum_t::duplicate (cgraph_edge *src, cgraph_edge *dst,
+ ipa_edge_args *old_args, ipa_edge_args *new_args)
{
- struct ipa_edge_args *old_args, *new_args;
unsigned int i;
- ipa_check_create_edge_args ();
-
- old_args = IPA_EDGE_REF (src);
- new_args = IPA_EDGE_REF (dst);
-
new_args->jump_functions = vec_safe_copy (old_args->jump_functions);
if (old_args->polymorphic_call_contexts)
new_args->polymorphic_call_contexts
{
ipa_agg_replacement_value *old_av, *new_av;
- new_info->descriptors = old_info->descriptors.copy ();
+ new_info->descriptors = vec_safe_copy (old_info->descriptors);
new_info->lattices = NULL;
new_info->ipcp_orig_node = old_info->ipcp_orig_node;
+ new_info->known_csts = old_info->known_csts.copy ();
+ new_info->known_contexts = old_info->known_contexts.copy ();
new_info->analysis_done = old_info->analysis_done;
new_info->node_enqueued = old_info->node_enqueued;
+ new_info->versionable = old_info->versionable;
old_av = ipa_get_agg_replacements_for_node (src);
if (old_av)
ipa_set_node_agg_value_chain (dst, new_av);
}
- ipcp_transformation_summary *src_trans = ipcp_get_transformation_summary (src);
+ ipcp_transformation *src_trans = ipcp_get_transformation_summary (src);
- if (src_trans && vec_safe_length (src_trans->alignments) > 0)
+ if (src_trans)
{
- ipcp_grow_transformations_if_necessary ();
- src_trans = ipcp_get_transformation_summary (src);
- const vec<ipa_alignment, va_gc> *src_alignments = src_trans->alignments;
- vec<ipa_alignment, va_gc> *&dst_alignments
- = ipcp_get_transformation_summary (dst)->alignments;
- vec_safe_reserve_exact (dst_alignments, src_alignments->length ());
- for (unsigned i = 0; i < src_alignments->length (); ++i)
- dst_alignments->quick_push ((*src_alignments)[i]);
+ ipcp_transformation_initialize ();
+ src_trans = ipcp_transformation_sum->get_create (src);
+ ipcp_transformation *dst_trans
+ = ipcp_transformation_sum->get_create (dst);
+
+ dst_trans->bits = vec_safe_copy (src_trans->bits);
+
+ const vec<ipa_vr, va_gc> *src_vr = src_trans->m_vr;
+ vec<ipa_vr, va_gc> *&dst_vr
+ = ipcp_get_transformation_summary (dst)->m_vr;
+ if (vec_safe_length (src_trans->m_vr) > 0)
+ {
+ vec_safe_reserve_exact (dst_vr, src_vr->length ());
+ for (unsigned i = 0; i < src_vr->length (); ++i)
+ dst_vr->quick_push ((*src_vr)[i]);
+ }
}
}
ipa_register_cgraph_hooks (void)
{
ipa_check_create_node_params ();
+ ipa_check_create_edge_args ();
- if (!edge_removal_hook_holder)
- edge_removal_hook_holder =
- symtab->add_edge_removal_hook (&ipa_edge_removal_hook, NULL);
- if (!edge_duplication_hook_holder)
- edge_duplication_hook_holder =
- symtab->add_edge_duplication_hook (&ipa_edge_duplication_hook, NULL);
function_insertion_hook_holder =
symtab->add_cgraph_insertion_hook (&ipa_add_new_function, NULL);
}
static void
ipa_unregister_cgraph_hooks (void)
{
- symtab->remove_edge_removal_hook (edge_removal_hook_holder);
- edge_removal_hook_holder = NULL;
- symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
- edge_duplication_hook_holder = NULL;
symtab->remove_cgraph_insertion_hook (function_insertion_hook_holder);
function_insertion_hook_holder = NULL;
}
if (!node->definition)
return;
info = IPA_NODE_REF (node);
- fprintf (f, " function %s/%i parameter descriptors:\n",
- node->name (), node->order);
+ fprintf (f, " function %s parameter descriptors:\n", node->dump_name ());
count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
{
ipa_print_node_params (f, node);
}
-/* Return a heap allocated vector containing formal parameters of FNDECL. */
-
-vec<tree>
-ipa_get_vector_of_formal_parms (tree fndecl)
-{
- vec<tree> args;
- int count;
- tree parm;
-
- gcc_assert (!flag_wpa);
- count = count_formal_params (fndecl);
- args.create (count);
- for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
- args.quick_push (parm);
-
- return args;
-}
-
-/* Return a heap allocated vector containing types of formal parameters of
- function type FNTYPE. */
-
-vec<tree>
-ipa_get_vector_of_formal_parm_types (tree fntype)
-{
- vec<tree> types;
- int count = 0;
- tree t;
-
- for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- count++;
-
- types.create (count);
- for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- types.quick_push (TREE_VALUE (t));
-
- return types;
-}
-
-/* Modify the function declaration FNDECL and its type according to the plan in
- ADJUSTMENTS. It also sets base fields of individual adjustments structures
- to reflect the actual parameters being modified which are determined by the
- base_index field. */
-
-void
-ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
-{
- vec<tree> oparms = ipa_get_vector_of_formal_parms (fndecl);
- tree orig_type = TREE_TYPE (fndecl);
- tree old_arg_types = TYPE_ARG_TYPES (orig_type);
-
- /* The following test is an ugly hack, some functions simply don't have any
- arguments in their type. This is probably a bug but well... */
- bool care_for_types = (old_arg_types != NULL_TREE);
- bool last_parm_void;
- vec<tree> otypes;
- if (care_for_types)
- {
- last_parm_void = (TREE_VALUE (tree_last (old_arg_types))
- == void_type_node);
- otypes = ipa_get_vector_of_formal_parm_types (orig_type);
- if (last_parm_void)
- gcc_assert (oparms.length () + 1 == otypes.length ());
- else
- gcc_assert (oparms.length () == otypes.length ());
- }
- else
- {
- last_parm_void = false;
- otypes.create (0);
- }
-
- int len = adjustments.length ();
- tree *link = &DECL_ARGUMENTS (fndecl);
- tree new_arg_types = NULL;
- for (int i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- gcc_assert (link);
-
- adj = &adjustments[i];
- tree parm;
- if (adj->op == IPA_PARM_OP_NEW)
- parm = NULL;
- else
- parm = oparms[adj->base_index];
- adj->base = parm;
-
- if (adj->op == IPA_PARM_OP_COPY)
- {
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, otypes[adj->base_index],
- new_arg_types);
- *link = parm;
- link = &DECL_CHAIN (parm);
- }
- else if (adj->op != IPA_PARM_OP_REMOVE)
- {
- tree new_parm;
- tree ptype;
-
- if (adj->by_ref)
- ptype = build_pointer_type (adj->type);
- else
- {
- ptype = adj->type;
- if (is_gimple_reg_type (ptype))
- {
- unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ptype));
- if (TYPE_ALIGN (ptype) < malign)
- ptype = build_aligned_type (ptype, malign);
- }
- }
-
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
-
- new_parm = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL_TREE,
- ptype);
- const char *prefix = adj->arg_prefix ? adj->arg_prefix : "SYNTH";
- DECL_NAME (new_parm) = create_tmp_var_name (prefix);
- DECL_ARTIFICIAL (new_parm) = 1;
- DECL_ARG_TYPE (new_parm) = ptype;
- DECL_CONTEXT (new_parm) = fndecl;
- TREE_USED (new_parm) = 1;
- DECL_IGNORED_P (new_parm) = 1;
- layout_decl (new_parm, 0);
-
- if (adj->op == IPA_PARM_OP_NEW)
- adj->base = NULL;
- else
- adj->base = parm;
- adj->new_decl = new_parm;
-
- *link = new_parm;
- link = &DECL_CHAIN (new_parm);
- }
- }
-
- *link = NULL_TREE;
-
- tree new_reversed = NULL;
- if (care_for_types)
- {
- new_reversed = nreverse (new_arg_types);
- if (last_parm_void)
- {
- if (new_reversed)
- TREE_CHAIN (new_arg_types) = void_list_node;
- else
- new_reversed = void_list_node;
- }
- }
-
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
- tree new_type = NULL;
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || (adjustments[0].op == IPA_PARM_OP_COPY
- && adjustments[0].base_index == 0))
- {
- new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
- }
- else
- {
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- DECL_VINDEX (fndecl) = NULL_TREE;
- }
-
- /* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (fndecl))
- {
- DECL_BUILT_IN_CLASS (fndecl) = NOT_BUILT_IN;
- DECL_FUNCTION_CODE (fndecl) = (enum built_in_function) 0;
- }
-
- TREE_TYPE (fndecl) = new_type;
- DECL_VIRTUAL_P (fndecl) = 0;
- DECL_LANG_SPECIFIC (fndecl) = NULL;
- otypes.release ();
- oparms.release ();
-}
-
-/* Modify actual arguments of a function call CS as indicated in ADJUSTMENTS.
- If this is a directly recursive call, CS must be NULL. Otherwise it must
- contain the corresponding call graph edge. */
-
-void
-ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
- ipa_parm_adjustment_vec adjustments)
-{
- struct cgraph_node *current_node = cgraph_node::get (current_function_decl);
- vec<tree> vargs;
- vec<tree, va_gc> **debug_args = NULL;
- gcall *new_stmt;
- gimple_stmt_iterator gsi, prev_gsi;
- tree callee_decl;
- int i, len;
-
- len = adjustments.length ();
- vargs.create (len);
- callee_decl = !cs ? gimple_call_fndecl (stmt) : cs->callee->decl;
- current_node->remove_stmt_references (stmt);
-
- gsi = gsi_for_stmt (stmt);
- prev_gsi = gsi;
- gsi_prev (&prev_gsi);
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
-
- adj = &adjustments[i];
-
- if (adj->op == IPA_PARM_OP_COPY)
- {
- tree arg = gimple_call_arg (stmt, adj->base_index);
-
- vargs.quick_push (arg);
- }
- else if (adj->op != IPA_PARM_OP_REMOVE)
- {
- tree expr, base, off;
- location_t loc;
- unsigned int deref_align = 0;
- bool deref_base = false;
-
- /* We create a new parameter out of the value of the old one, we can
- do the following kind of transformations:
-
- - A scalar passed by reference is converted to a scalar passed by
- value. (adj->by_ref is false and the type of the original
- actual argument is a pointer to a scalar).
-
- - A part of an aggregate is passed instead of the whole aggregate.
- The part can be passed either by value or by reference, this is
- determined by value of adj->by_ref. Moreover, the code below
- handles both situations when the original aggregate is passed by
- value (its type is not a pointer) and when it is passed by
- reference (it is a pointer to an aggregate).
-
- When the new argument is passed by reference (adj->by_ref is true)
- it must be a part of an aggregate and therefore we form it by
- simply taking the address of a reference inside the original
- aggregate. */
-
- gcc_checking_assert (adj->offset % BITS_PER_UNIT == 0);
- base = gimple_call_arg (stmt, adj->base_index);
- loc = DECL_P (base) ? DECL_SOURCE_LOCATION (base)
- : EXPR_LOCATION (base);
-
- if (TREE_CODE (base) != ADDR_EXPR
- && POINTER_TYPE_P (TREE_TYPE (base)))
- off = build_int_cst (adj->alias_ptr_type,
- adj->offset / BITS_PER_UNIT);
- else
- {
- HOST_WIDE_INT base_offset;
- tree prev_base;
- bool addrof;
-
- if (TREE_CODE (base) == ADDR_EXPR)
- {
- base = TREE_OPERAND (base, 0);
- addrof = true;
- }
- else
- addrof = false;
- prev_base = base;
- base = get_addr_base_and_unit_offset (base, &base_offset);
- /* Aggregate arguments can have non-invariant addresses. */
- if (!base)
- {
- base = build_fold_addr_expr (prev_base);
- off = build_int_cst (adj->alias_ptr_type,
- adj->offset / BITS_PER_UNIT);
- }
- else if (TREE_CODE (base) == MEM_REF)
- {
- if (!addrof)
- {
- deref_base = true;
- deref_align = TYPE_ALIGN (TREE_TYPE (base));
- }
- off = build_int_cst (adj->alias_ptr_type,
- base_offset
- + adj->offset / BITS_PER_UNIT);
- off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
- off);
- base = TREE_OPERAND (base, 0);
- }
- else
- {
- off = build_int_cst (adj->alias_ptr_type,
- base_offset
- + adj->offset / BITS_PER_UNIT);
- base = build_fold_addr_expr (base);
- }
- }
-
- if (!adj->by_ref)
- {
- tree type = adj->type;
- unsigned int align;
- unsigned HOST_WIDE_INT misalign;
-
- if (deref_base)
- {
- align = deref_align;
- misalign = 0;
- }
- else
- {
- get_pointer_alignment_1 (base, &align, &misalign);
- if (TYPE_ALIGN (type) > align)
- align = TYPE_ALIGN (type);
- }
- misalign += (offset_int::from (off, SIGNED).to_short_addr ()
- * BITS_PER_UNIT);
- misalign = misalign & (align - 1);
- if (misalign != 0)
- align = (misalign & -misalign);
- if (align < TYPE_ALIGN (type))
- type = build_aligned_type (type, align);
- base = force_gimple_operand_gsi (&gsi, base,
- true, NULL, true, GSI_SAME_STMT);
- expr = fold_build2_loc (loc, MEM_REF, type, base, off);
- /* If expr is not a valid gimple call argument emit
- a load into a temporary. */
- if (is_gimple_reg_type (TREE_TYPE (expr)))
- {
- gimple tem = gimple_build_assign (NULL_TREE, expr);
- if (gimple_in_ssa_p (cfun))
- {
- gimple_set_vuse (tem, gimple_vuse (stmt));
- expr = make_ssa_name (TREE_TYPE (expr), tem);
- }
- else
- expr = create_tmp_reg (TREE_TYPE (expr));
- gimple_assign_set_lhs (tem, expr);
- gsi_insert_before (&gsi, tem, GSI_SAME_STMT);
- }
- }
- else
- {
- expr = fold_build2_loc (loc, MEM_REF, adj->type, base, off);
- expr = build_fold_addr_expr (expr);
- expr = force_gimple_operand_gsi (&gsi, expr,
- true, NULL, true, GSI_SAME_STMT);
- }
- vargs.quick_push (expr);
- }
- if (adj->op != IPA_PARM_OP_COPY && MAY_HAVE_DEBUG_STMTS)
- {
- unsigned int ix;
- tree ddecl = NULL_TREE, origin = DECL_ORIGIN (adj->base), arg;
- gimple def_temp;
-
- arg = gimple_call_arg (stmt, adj->base_index);
- if (!useless_type_conversion_p (TREE_TYPE (origin), TREE_TYPE (arg)))
- {
- if (!fold_convertible_p (TREE_TYPE (origin), arg))
- continue;
- arg = fold_convert_loc (gimple_location (stmt),
- TREE_TYPE (origin), arg);
- }
- if (debug_args == NULL)
- debug_args = decl_debug_args_insert (callee_decl);
- for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl); ix += 2)
- if (ddecl == origin)
- {
- ddecl = (**debug_args)[ix + 1];
- break;
- }
- if (ddecl == NULL)
- {
- ddecl = make_node (DEBUG_EXPR_DECL);
- DECL_ARTIFICIAL (ddecl) = 1;
- TREE_TYPE (ddecl) = TREE_TYPE (origin);
- DECL_MODE (ddecl) = DECL_MODE (origin);
-
- vec_safe_push (*debug_args, origin);
- vec_safe_push (*debug_args, ddecl);
- }
- def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg), stmt);
- gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "replacing stmt:");
- print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
- }
-
- new_stmt = gimple_build_call_vec (callee_decl, vargs);
- vargs.release ();
- if (gimple_call_lhs (stmt))
- gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
-
- gimple_set_block (new_stmt, gimple_block (stmt));
- if (gimple_has_location (stmt))
- gimple_set_location (new_stmt, gimple_location (stmt));
- gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
- gimple_call_copy_flags (new_stmt, stmt);
- if (gimple_in_ssa_p (cfun))
- {
- gimple_set_vuse (new_stmt, gimple_vuse (stmt));
- if (gimple_vdef (stmt))
- {
- gimple_set_vdef (new_stmt, gimple_vdef (stmt));
- SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "with stmt:");
- print_gimple_stmt (dump_file, new_stmt, 0, 0);
- fprintf (dump_file, "\n");
- }
- gsi_replace (&gsi, new_stmt, true);
- if (cs)
- cs->set_call_stmt (new_stmt);
- do
- {
- current_node->record_stmt_references (gsi_stmt (gsi));
- gsi_prev (&gsi);
- }
- while (gsi_stmt (gsi) != gsi_stmt (prev_gsi));
-}
-
-/* If the expression *EXPR should be replaced by a reduction of a parameter, do
- so. ADJUSTMENTS is a pointer to a vector of adjustments. CONVERT
- specifies whether the function should care about type incompatibility the
- current and new expressions. If it is false, the function will leave
- incompatibility issues to the caller. Return true iff the expression
- was modified. */
-
-bool
-ipa_modify_expr (tree *expr, bool convert,
- ipa_parm_adjustment_vec adjustments)
-{
- struct ipa_parm_adjustment *cand
- = ipa_get_adjustment_candidate (&expr, &convert, adjustments, false);
- if (!cand)
- return false;
-
- tree src;
- if (cand->by_ref)
- src = build_simple_mem_ref (cand->new_decl);
- else
- src = cand->new_decl;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "About to replace expr ");
- print_generic_expr (dump_file, *expr, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, src, 0);
- fprintf (dump_file, "\n");
- }
-
- if (convert && !useless_type_conversion_p (TREE_TYPE (*expr), cand->type))
- {
- tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr), src);
- *expr = vce;
- }
- else
- *expr = src;
- return true;
-}
-
-/* If T is an SSA_NAME, return NULL if it is not a default def or
- return its base variable if it is. If IGNORE_DEFAULT_DEF is true,
- the base variable is always returned, regardless if it is a default
- def. Return T if it is not an SSA_NAME. */
-
-static tree
-get_ssa_base_param (tree t, bool ignore_default_def)
-{
- if (TREE_CODE (t) == SSA_NAME)
- {
- if (ignore_default_def || SSA_NAME_IS_DEFAULT_DEF (t))
- return SSA_NAME_VAR (t);
- else
- return NULL_TREE;
- }
- return t;
-}
-
-/* Given an expression, return an adjustment entry specifying the
- transformation to be done on EXPR. If no suitable adjustment entry
- was found, returns NULL.
-
- If IGNORE_DEFAULT_DEF is set, consider SSA_NAMEs which are not a
- default def, otherwise bail on them.
-
- If CONVERT is non-NULL, this function will set *CONVERT if the
- expression provided is a component reference. ADJUSTMENTS is the
- adjustments vector. */
-
-ipa_parm_adjustment *
-ipa_get_adjustment_candidate (tree **expr, bool *convert,
- ipa_parm_adjustment_vec adjustments,
- bool ignore_default_def)
-{
- if (TREE_CODE (**expr) == BIT_FIELD_REF
- || TREE_CODE (**expr) == IMAGPART_EXPR
- || TREE_CODE (**expr) == REALPART_EXPR)
- {
- *expr = &TREE_OPERAND (**expr, 0);
- if (convert)
- *convert = true;
- }
-
- HOST_WIDE_INT offset, size, max_size;
- tree base = get_ref_base_and_extent (**expr, &offset, &size, &max_size);
- if (!base || size == -1 || max_size == -1)
- return NULL;
-
- if (TREE_CODE (base) == MEM_REF)
- {
- offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
- base = TREE_OPERAND (base, 0);
- }
-
- base = get_ssa_base_param (base, ignore_default_def);
- if (!base || TREE_CODE (base) != PARM_DECL)
- return NULL;
-
- struct ipa_parm_adjustment *cand = NULL;
- unsigned int len = adjustments.length ();
- for (unsigned i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj = &adjustments[i];
-
- if (adj->base == base
- && (adj->offset == offset || adj->op == IPA_PARM_OP_REMOVE))
- {
- cand = adj;
- break;
- }
- }
-
- if (!cand || cand->op == IPA_PARM_OP_COPY || cand->op == IPA_PARM_OP_REMOVE)
- return NULL;
- return cand;
-}
-
-/* Return true iff BASE_INDEX is in ADJUSTMENTS more than once. */
-
-static bool
-index_in_adjustments_multiple_times_p (int base_index,
- ipa_parm_adjustment_vec adjustments)
-{
- int i, len = adjustments.length ();
- bool one = false;
-
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
-
- if (adj->base_index == base_index)
- {
- if (one)
- return true;
- else
- one = true;
- }
- }
- return false;
-}
-
-
-/* Return adjustments that should have the same effect on function parameters
- and call arguments as if they were first changed according to adjustments in
- INNER and then by adjustments in OUTER. */
-
-ipa_parm_adjustment_vec
-ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
- ipa_parm_adjustment_vec outer)
-{
- int i, outlen = outer.length ();
- int inlen = inner.length ();
- int removals = 0;
- ipa_parm_adjustment_vec adjustments, tmp;
-
- tmp.create (inlen);
- for (i = 0; i < inlen; i++)
- {
- struct ipa_parm_adjustment *n;
- n = &inner[i];
-
- if (n->op == IPA_PARM_OP_REMOVE)
- removals++;
- else
- {
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (n->op != IPA_PARM_OP_NEW);
- tmp.quick_push (*n);
- }
- }
-
- adjustments.create (outlen + removals);
- for (i = 0; i < outlen; i++)
- {
- struct ipa_parm_adjustment r;
- struct ipa_parm_adjustment *out = &outer[i];
- struct ipa_parm_adjustment *in = &tmp[out->base_index];
-
- memset (&r, 0, sizeof (r));
- gcc_assert (in->op != IPA_PARM_OP_REMOVE);
- if (out->op == IPA_PARM_OP_REMOVE)
- {
- if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
- {
- r.op = IPA_PARM_OP_REMOVE;
- adjustments.quick_push (r);
- }
- continue;
- }
- else
- {
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (out->op != IPA_PARM_OP_NEW);
- }
-
- r.base_index = in->base_index;
- r.type = out->type;
-
- /* FIXME: Create nonlocal value too. */
-
- if (in->op == IPA_PARM_OP_COPY && out->op == IPA_PARM_OP_COPY)
- r.op = IPA_PARM_OP_COPY;
- else if (in->op == IPA_PARM_OP_COPY)
- r.offset = out->offset;
- else if (out->op == IPA_PARM_OP_COPY)
- r.offset = in->offset;
- else
- r.offset = in->offset + out->offset;
- adjustments.quick_push (r);
- }
-
- for (i = 0; i < inlen; i++)
- {
- struct ipa_parm_adjustment *n = &inner[i];
-
- if (n->op == IPA_PARM_OP_REMOVE)
- adjustments.quick_push (*n);
- }
-
- tmp.release ();
- return adjustments;
-}
-
-/* Dump the adjustments in the vector ADJUSTMENTS to dump_file in a human
- friendly way, assuming they are meant to be applied to FNDECL. */
-
-void
-ipa_dump_param_adjustments (FILE *file, ipa_parm_adjustment_vec adjustments,
- tree fndecl)
-{
- int i, len = adjustments.length ();
- bool first = true;
- vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
-
- fprintf (file, "IPA param adjustments: ");
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
-
- if (!first)
- fprintf (file, " ");
- else
- first = false;
-
- fprintf (file, "%i. base_index: %i - ", i, adj->base_index);
- print_generic_expr (file, parms[adj->base_index], 0);
- if (adj->base)
- {
- fprintf (file, ", base: ");
- print_generic_expr (file, adj->base, 0);
- }
- if (adj->new_decl)
- {
- fprintf (file, ", new_decl: ");
- print_generic_expr (file, adj->new_decl, 0);
- }
- if (adj->new_ssa_base)
- {
- fprintf (file, ", new_ssa_base: ");
- print_generic_expr (file, adj->new_ssa_base, 0);
- }
-
- if (adj->op == IPA_PARM_OP_COPY)
- fprintf (file, ", copy_param");
- else if (adj->op == IPA_PARM_OP_REMOVE)
- fprintf (file, ", remove_param");
- else
- fprintf (file, ", offset %li", (long) adj->offset);
- if (adj->by_ref)
- fprintf (file, ", by_ref");
- print_node_brief (file, ", type: ", adj->type, 0);
- fprintf (file, "\n");
- }
- parms.release ();
-}
-
/* Dump the AV linked list. */
void
{
fprintf (f, "%s %i[" HOST_WIDE_INT_PRINT_DEC "]=", comma ? "," : "",
av->index, av->offset);
- print_generic_expr (f, av->value, 0);
+ print_generic_expr (f, av->value);
comma = true;
}
fprintf (f, "\n");
struct ipa_agg_jf_item *item;
struct bitpack_d bp;
int i, count;
+ int flag = 0;
- streamer_write_uhwi (ob, jump_func->type);
+ /* ADDR_EXPRs are very common IP invariants; save some streamer data
+ as well as WPA memory by handling them specially. */
+ if (jump_func->type == IPA_JF_CONST
+ && TREE_CODE (jump_func->value.constant.value) == ADDR_EXPR)
+ flag = 1;
+
+ streamer_write_uhwi (ob, jump_func->type * 2 + flag);
switch (jump_func->type)
{
case IPA_JF_UNKNOWN:
case IPA_JF_CONST:
gcc_assert (
EXPR_LOCATION (jump_func->value.constant.value) == UNKNOWN_LOCATION);
- stream_write_tree (ob, jump_func->value.constant.value, true);
+ stream_write_tree (ob,
+ flag
+ ? TREE_OPERAND (jump_func->value.constant.value, 0)
+ : jump_func->value.constant.value, true);
break;
case IPA_JF_PASS_THROUGH:
streamer_write_uhwi (ob, jump_func->value.pass_through.operation);
bp_pack_value (&bp, jump_func->value.pass_through.agg_preserved, 1);
streamer_write_bitpack (&bp);
}
+ else if (TREE_CODE_CLASS (jump_func->value.pass_through.operation)
+ == tcc_unary)
+ streamer_write_uhwi (ob, jump_func->value.pass_through.formal_id);
else
{
stream_write_tree (ob, jump_func->value.pass_through.operand, true);
}
bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, jump_func->alignment.known, 1);
+ bp_pack_value (&bp, !!jump_func->bits, 1);
streamer_write_bitpack (&bp);
- if (jump_func->alignment.known)
+ if (jump_func->bits)
{
- streamer_write_uhwi (ob, jump_func->alignment.align);
- streamer_write_uhwi (ob, jump_func->alignment.misalign);
+ streamer_write_widest_int (ob, jump_func->bits->value);
+ streamer_write_widest_int (ob, jump_func->bits->mask);
+ }
+ bp_pack_value (&bp, !!jump_func->m_vr, 1);
+ streamer_write_bitpack (&bp);
+ if (jump_func->m_vr)
+ {
+ streamer_write_enum (ob->main_stream, value_rang_type,
+ VR_LAST, jump_func->m_vr->kind ());
+ stream_write_tree (ob, jump_func->m_vr->min (), true);
+ stream_write_tree (ob, jump_func->m_vr->max (), true);
}
}
ipa_read_jump_function (struct lto_input_block *ib,
struct ipa_jump_func *jump_func,
struct cgraph_edge *cs,
- struct data_in *data_in)
+ struct data_in *data_in,
+ bool prevails)
{
enum jump_func_type jftype;
enum tree_code operation;
int i, count;
+ int val = streamer_read_uhwi (ib);
+ bool flag = val & 1;
- jftype = (enum jump_func_type) streamer_read_uhwi (ib);
+ jftype = (enum jump_func_type) (val / 2);
switch (jftype)
{
case IPA_JF_UNKNOWN:
ipa_set_jf_unknown (jump_func);
break;
case IPA_JF_CONST:
- ipa_set_jf_constant (jump_func, stream_read_tree (ib, data_in), cs);
+ {
+ tree t = stream_read_tree (ib, data_in);
+ if (flag && prevails)
+ t = build_fold_addr_expr (t);
+ ipa_set_jf_constant (jump_func, t, cs);
+ }
break;
case IPA_JF_PASS_THROUGH:
operation = (enum tree_code) streamer_read_uhwi (ib);
bool agg_preserved = bp_unpack_value (&bp, 1);
ipa_set_jf_simple_pass_through (jump_func, formal_id, agg_preserved);
}
+ else if (TREE_CODE_CLASS (operation) == tcc_unary)
+ {
+ int formal_id = streamer_read_uhwi (ib);
+ ipa_set_jf_unary_pass_through (jump_func, formal_id, operation);
+ }
else
{
tree operand = stream_read_tree (ib, data_in);
ipa_set_ancestor_jf (jump_func, offset, formal_id, agg_preserved);
break;
}
+ default:
+ fatal_error (UNKNOWN_LOCATION, "invalid jump function in LTO stream");
}
count = streamer_read_uhwi (ib);
- vec_alloc (jump_func->agg.items, count);
+ if (prevails)
+ vec_alloc (jump_func->agg.items, count);
if (count)
{
struct bitpack_d bp = streamer_read_bitpack (ib);
struct ipa_agg_jf_item item;
item.offset = streamer_read_uhwi (ib);
item.value = stream_read_tree (ib, data_in);
- jump_func->agg.items->quick_push (item);
+ if (prevails)
+ jump_func->agg.items->quick_push (item);
}
struct bitpack_d bp = streamer_read_bitpack (ib);
- bool alignment_known = bp_unpack_value (&bp, 1);
- if (alignment_known)
+ bool bits_known = bp_unpack_value (&bp, 1);
+ if (bits_known)
{
- jump_func->alignment.known = true;
- jump_func->alignment.align = streamer_read_uhwi (ib);
- jump_func->alignment.misalign = streamer_read_uhwi (ib);
+ widest_int value = streamer_read_widest_int (ib);
+ widest_int mask = streamer_read_widest_int (ib);
+ if (prevails)
+ ipa_set_jfunc_bits (jump_func, value, mask);
}
else
- jump_func->alignment.known = false;
+ jump_func->bits = NULL;
+
+ struct bitpack_d vr_bp = streamer_read_bitpack (ib);
+ bool vr_known = bp_unpack_value (&vr_bp, 1);
+ if (vr_known)
+ {
+ enum value_range_kind type = streamer_read_enum (ib, value_range_kind,
+ VR_LAST);
+ tree min = stream_read_tree (ib, data_in);
+ tree max = stream_read_tree (ib, data_in);
+ if (prevails)
+ ipa_set_jfunc_vr (jump_func, type, min, max);
+ }
+ else
+ jump_func->m_vr = NULL;
}
/* Stream out parts of cgraph_indirect_call_info corresponding to CS that are
bp_pack_value (&bp, ii->agg_contents, 1);
bp_pack_value (&bp, ii->member_ptr, 1);
bp_pack_value (&bp, ii->by_ref, 1);
+ bp_pack_value (&bp, ii->guaranteed_unmodified, 1);
bp_pack_value (&bp, ii->vptr_changed, 1);
streamer_write_bitpack (&bp);
if (ii->agg_contents || ii->polymorphic)
ii->agg_contents = bp_unpack_value (&bp, 1);
ii->member_ptr = bp_unpack_value (&bp, 1);
ii->by_ref = bp_unpack_value (&bp, 1);
+ ii->guaranteed_unmodified = bp_unpack_value (&bp, 1);
ii->vptr_changed = bp_unpack_value (&bp, 1);
if (ii->agg_contents || ii->polymorphic)
ii->offset = (HOST_WIDE_INT) streamer_read_hwi (ib);
bp_pack_value (&bp, ipa_is_param_used (info, j), 1);
streamer_write_bitpack (&bp);
for (j = 0; j < ipa_get_param_count (info); j++)
- streamer_write_hwi (ob, ipa_get_controlled_uses (info, j));
+ {
+ streamer_write_hwi (ob, ipa_get_controlled_uses (info, j));
+ stream_write_tree (ob, ipa_get_type (info, j), true);
+ }
for (e = node->callees; e; e = e->next_callee)
{
struct ipa_edge_args *args = IPA_EDGE_REF (e);
}
}
-/* Stream in NODE info from IB. */
+/* Stream in edge E from IB. */
static void
-ipa_read_node_info (struct lto_input_block *ib, struct cgraph_node *node,
- struct data_in *data_in)
+ipa_read_edge_info (struct lto_input_block *ib,
+ struct data_in *data_in,
+ struct cgraph_edge *e, bool prevails)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
- int k;
- struct cgraph_edge *e;
- struct bitpack_d bp;
-
- ipa_alloc_node_params (node, streamer_read_uhwi (ib));
+ int count = streamer_read_uhwi (ib);
+ bool contexts_computed = count & 1;
- for (k = 0; k < ipa_get_param_count (info); k++)
- info->descriptors[k].move_cost = streamer_read_uhwi (ib);
-
- bp = streamer_read_bitpack (ib);
- if (ipa_get_param_count (info) != 0)
- info->analysis_done = true;
- info->node_enqueued = false;
- for (k = 0; k < ipa_get_param_count (info); k++)
- ipa_set_param_used (info, k, bp_unpack_value (&bp, 1));
- for (k = 0; k < ipa_get_param_count (info); k++)
- ipa_set_controlled_uses (info, k, streamer_read_hwi (ib));
- for (e = node->callees; e; e = e->next_callee)
+ count /= 2;
+ if (!count)
+ return;
+ if (prevails && e->possibly_call_in_translation_unit_p ())
{
struct ipa_edge_args *args = IPA_EDGE_REF (e);
- int count = streamer_read_uhwi (ib);
- bool contexts_computed = count & 1;
- count /= 2;
-
- if (!count)
- continue;
vec_safe_grow_cleared (args->jump_functions, count);
if (contexts_computed)
vec_safe_grow_cleared (args->polymorphic_call_contexts, count);
-
- for (k = 0; k < ipa_get_cs_argument_count (args); k++)
+ for (int k = 0; k < count; k++)
{
ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), e,
- data_in);
+ data_in, prevails);
if (contexts_computed)
- ipa_get_ith_polymorhic_call_context (args, k)->stream_in (ib, data_in);
+ ipa_get_ith_polymorhic_call_context (args, k)->stream_in
+ (ib, data_in);
}
}
- for (e = node->indirect_calls; e; e = e->next_callee)
+ else
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
- int count = streamer_read_uhwi (ib);
- bool contexts_computed = count & 1;
- count /= 2;
-
- if (count)
+ for (int k = 0; k < count; k++)
{
- vec_safe_grow_cleared (args->jump_functions, count);
+ struct ipa_jump_func dummy;
+ ipa_read_jump_function (ib, &dummy, e,
+ data_in, prevails);
if (contexts_computed)
- vec_safe_grow_cleared (args->polymorphic_call_contexts, count);
- for (k = 0; k < ipa_get_cs_argument_count (args); k++)
{
- ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), e,
- data_in);
- if (contexts_computed)
- ipa_get_ith_polymorhic_call_context (args, k)->stream_in (ib, data_in);
+ struct ipa_polymorphic_call_context ctx;
+ ctx.stream_in (ib, data_in);
}
}
+ }
+}
+
+/* Stream in NODE info from IB. */
+
+static void
+ipa_read_node_info (struct lto_input_block *ib, struct cgraph_node *node,
+ struct data_in *data_in)
+{
+ int k;
+ struct cgraph_edge *e;
+ struct bitpack_d bp;
+ bool prevails = node->prevailing_p ();
+ struct ipa_node_params *info = prevails ? IPA_NODE_REF (node) : NULL;
+
+ int param_count = streamer_read_uhwi (ib);
+ if (prevails)
+ {
+ ipa_alloc_node_params (node, param_count);
+ for (k = 0; k < param_count; k++)
+ (*info->descriptors)[k].move_cost = streamer_read_uhwi (ib);
+ if (ipa_get_param_count (info) != 0)
+ info->analysis_done = true;
+ info->node_enqueued = false;
+ }
+ else
+ for (k = 0; k < param_count; k++)
+ streamer_read_uhwi (ib);
+
+ bp = streamer_read_bitpack (ib);
+ for (k = 0; k < param_count; k++)
+ {
+ bool used = bp_unpack_value (&bp, 1);
+
+ if (prevails)
+ ipa_set_param_used (info, k, used);
+ }
+ for (k = 0; k < param_count; k++)
+ {
+ int nuses = streamer_read_hwi (ib);
+ tree type = stream_read_tree (ib, data_in);
+
+ if (prevails)
+ {
+ ipa_set_controlled_uses (info, k, nuses);
+ (*info->descriptors)[k].decl_or_type = type;
+ }
+ }
+ for (e = node->callees; e; e = e->next_callee)
+ ipa_read_edge_info (ib, data_in, e, prevails);
+ for (e = node->indirect_calls; e; e = e->next_callee)
+ {
+ ipa_read_edge_info (ib, data_in, e, prevails);
ipa_read_indirect_edge_info (ib, data_in, e);
}
}
lto_symtab_encoder_iterator lsei;
lto_symtab_encoder_t encoder;
- if (!ipa_node_params_sum)
+ if (!ipa_node_params_sum || !ipa_edge_args_sum)
return;
ob = create_output_block (LTO_section_jump_functions);
}
}
-/* After merging units, we can get mismatch in argument counts.
- Also decl merging might've rendered parameter lists obsolete.
- Also compute called_with_variable_arg info. */
-
-void
-ipa_update_after_lto_read (void)
-{
- ipa_check_create_node_params ();
- ipa_check_create_edge_args ();
-}
-
void
write_ipcp_transformation_info (output_block *ob, cgraph_node *node)
{
streamer_write_bitpack (&bp);
}
- ipcp_transformation_summary *ts = ipcp_get_transformation_summary (node);
- if (ts && vec_safe_length (ts->alignments) > 0)
+ ipcp_transformation *ts = ipcp_get_transformation_summary (node);
+ if (ts && vec_safe_length (ts->m_vr) > 0)
{
- count = ts->alignments->length ();
-
+ count = ts->m_vr->length ();
streamer_write_uhwi (ob, count);
for (unsigned i = 0; i < count; ++i)
{
- ipa_alignment *parm_al = &(*ts->alignments)[i];
-
struct bitpack_d bp;
+ ipa_vr *parm_vr = &(*ts->m_vr)[i];
bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, parm_al->known, 1);
+ bp_pack_value (&bp, parm_vr->known, 1);
streamer_write_bitpack (&bp);
- if (parm_al->known)
+ if (parm_vr->known)
{
- streamer_write_uhwi (ob, parm_al->align);
- streamer_write_hwi_in_range (ob->main_stream, 0, parm_al->align,
- parm_al->misalign);
+	      streamer_write_enum (ob->main_stream, value_range_kind,
+ VR_LAST, parm_vr->type);
+ streamer_write_wide_int (ob, parm_vr->min);
+ streamer_write_wide_int (ob, parm_vr->max);
+ }
+ }
+ }
+ else
+ streamer_write_uhwi (ob, 0);
+
+ if (ts && vec_safe_length (ts->bits) > 0)
+ {
+ count = ts->bits->length ();
+ streamer_write_uhwi (ob, count);
+
+ for (unsigned i = 0; i < count; ++i)
+ {
+ const ipa_bits *bits_jfunc = (*ts->bits)[i];
+ struct bitpack_d bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, !!bits_jfunc, 1);
+ streamer_write_bitpack (&bp);
+ if (bits_jfunc)
+ {
+ streamer_write_widest_int (ob, bits_jfunc->value);
+ streamer_write_widest_int (ob, bits_jfunc->mask);
}
}
}
aggvals = av;
}
ipa_set_node_agg_value_chain (node, aggvals);
-
+
count = streamer_read_uhwi (ib);
if (count > 0)
{
- ipcp_grow_transformations_if_necessary ();
-
- ipcp_transformation_summary *ts = ipcp_get_transformation_summary (node);
- vec_safe_grow_cleared (ts->alignments, count);
-
+ ipcp_transformation_initialize ();
+ ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
+ vec_safe_grow_cleared (ts->m_vr, count);
for (i = 0; i < count; i++)
{
- ipa_alignment *parm_al;
- parm_al = &(*ts->alignments)[i];
+ ipa_vr *parm_vr;
+ parm_vr = &(*ts->m_vr)[i];
struct bitpack_d bp;
bp = streamer_read_bitpack (ib);
- parm_al->known = bp_unpack_value (&bp, 1);
- if (parm_al->known)
+ parm_vr->known = bp_unpack_value (&bp, 1);
+ if (parm_vr->known)
+ {
+ parm_vr->type = streamer_read_enum (ib, value_range_kind,
+ VR_LAST);
+ parm_vr->min = streamer_read_wide_int (ib);
+ parm_vr->max = streamer_read_wide_int (ib);
+ }
+ }
+ }
+ count = streamer_read_uhwi (ib);
+ if (count > 0)
+ {
+ ipcp_transformation_initialize ();
+ ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
+ vec_safe_grow_cleared (ts->bits, count);
+
+ for (i = 0; i < count; i++)
+ {
+ struct bitpack_d bp = streamer_read_bitpack (ib);
+ bool known = bp_unpack_value (&bp, 1);
+ if (known)
{
- parm_al->align = streamer_read_uhwi (ib);
- parm_al->misalign
- = streamer_read_hwi_in_range (ib, "ipa-prop misalign",
- 0, parm_al->align);
+ ipa_bits *bits
+ = ipa_get_ipa_bits_for_value (streamer_read_widest_int (ib),
+ streamer_read_widest_int (ib));
+ (*ts->bits)[i] = bits;
}
}
}
class ipcp_modif_dom_walker : public dom_walker
{
public:
- ipcp_modif_dom_walker (struct func_body_info *fbi,
- vec<ipa_param_descriptor> descs,
+ ipcp_modif_dom_walker (struct ipa_func_body_info *fbi,
+ vec<ipa_param_descriptor, va_gc> *descs,
struct ipa_agg_replacement_value *av,
bool *sc, bool *cc)
: dom_walker (CDI_DOMINATORS), m_fbi (fbi), m_descriptors (descs),
m_aggval (av), m_something_changed (sc), m_cfg_changed (cc) {}
- virtual void before_dom_children (basic_block);
+ virtual edge before_dom_children (basic_block);
private:
- struct func_body_info *m_fbi;
- vec<ipa_param_descriptor> m_descriptors;
+ struct ipa_func_body_info *m_fbi;
+ vec<ipa_param_descriptor, va_gc> *m_descriptors;
struct ipa_agg_replacement_value *m_aggval;
bool *m_something_changed, *m_cfg_changed;
};
-void
+edge
ipcp_modif_dom_walker::before_dom_children (basic_block bb)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
struct ipa_agg_replacement_value *v;
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
tree rhs, val, t;
HOST_WIDE_INT offset, size;
int index;
if (vce)
continue;
- if (!ipa_load_from_parm_agg_1 (m_fbi, m_descriptors, stmt, rhs, &index,
- &offset, &size, &by_ref))
+ if (!ipa_load_from_parm_agg (m_fbi, m_descriptors, stmt, rhs, &index,
+ &offset, &size, &by_ref))
continue;
for (v = m_aggval; v; v = v->next)
if (v->index == index
if (dump_file)
{
fprintf (dump_file, " const ");
- print_generic_expr (dump_file, v->value, 0);
+ print_generic_expr (dump_file, v->value);
fprintf (dump_file, " can't be converted to type of ");
- print_generic_expr (dump_file, rhs, 0);
+ print_generic_expr (dump_file, rhs);
fprintf (dump_file, "\n");
}
continue;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Modifying stmt:\n ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
+ print_gimple_stmt (dump_file, stmt, 0);
}
gimple_assign_set_rhs_from_tree (&gsi, val);
update_stmt (stmt);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "into:\n ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
+ print_gimple_stmt (dump_file, stmt, 0);
fprintf (dump_file, "\n");
}
&& gimple_purge_dead_eh_edges (gimple_bb (stmt)))
*m_cfg_changed = true;
}
-
+ return NULL;
}
-/* Update alignment of formal parameters as described in
- ipcp_transformation_summary. */
+/* Update bits info of formal parameters as described in
+ ipcp_transformation. */
static void
-ipcp_update_alignments (struct cgraph_node *node)
+ipcp_update_bits (struct cgraph_node *node)
{
- tree fndecl = node->decl;
- tree parm = DECL_ARGUMENTS (fndecl);
+ tree parm = DECL_ARGUMENTS (node->decl);
tree next_parm = parm;
- ipcp_transformation_summary *ts = ipcp_get_transformation_summary (node);
- if (!ts || vec_safe_length (ts->alignments) == 0)
+ ipcp_transformation *ts = ipcp_get_transformation_summary (node);
+
+ if (!ts || vec_safe_length (ts->bits) == 0)
return;
- const vec<ipa_alignment, va_gc> &alignments = *ts->alignments;
- unsigned count = alignments.length ();
+
+ vec<ipa_bits *, va_gc> &bits = *ts->bits;
+ unsigned count = bits.length ();
for (unsigned i = 0; i < count; ++i, parm = next_parm)
{
if (node->clone.combined_args_to_skip
&& bitmap_bit_p (node->clone.combined_args_to_skip, i))
continue;
+
gcc_checking_assert (parm);
next_parm = DECL_CHAIN (parm);
- if (!alignments[i].known || !is_gimple_reg (parm))
+ if (!bits[i]
+ || !(INTEGRAL_TYPE_P (TREE_TYPE (parm))
+ || POINTER_TYPE_P (TREE_TYPE (parm)))
+ || !is_gimple_reg (parm))
continue;
+
tree ddef = ssa_default_def (DECL_STRUCT_FUNCTION (node->decl), parm);
if (!ddef)
continue;
if (dump_file)
- fprintf (dump_file, " Adjusting alignment of param %u to %u, "
- "misalignment to %u\n", i, alignments[i].align,
- alignments[i].misalign);
-
- struct ptr_info_def *pi = get_ptr_info (ddef);
- gcc_checking_assert (pi);
- unsigned old_align;
- unsigned old_misalign;
- bool old_known = get_ptr_info_alignment (pi, &old_align, &old_misalign);
-
- if (old_known
- && old_align >= alignments[i].align)
{
- if (dump_file)
- fprintf (dump_file, " But the alignment was already %u.\n",
- old_align);
- continue;
+ fprintf (dump_file, "Adjusting mask for param %u to ", i);
+ print_hex (bits[i]->mask, dump_file);
+ fprintf (dump_file, "\n");
+ }
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (ddef)))
+ {
+ unsigned prec = TYPE_PRECISION (TREE_TYPE (ddef));
+ signop sgn = TYPE_SIGN (TREE_TYPE (ddef));
+
+ wide_int nonzero_bits = wide_int::from (bits[i]->mask, prec, UNSIGNED)
+ | wide_int::from (bits[i]->value, prec, sgn);
+ set_nonzero_bits (ddef, nonzero_bits);
+ }
+ else
+ {
+ unsigned tem = bits[i]->mask.to_uhwi ();
+ unsigned HOST_WIDE_INT bitpos = bits[i]->value.to_uhwi ();
+ unsigned align = tem & -tem;
+ unsigned misalign = bitpos & (align - 1);
+
+ if (align > 1)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Adjusting align: %u, misalign: %u\n", align, misalign);
+
+ unsigned old_align, old_misalign;
+ struct ptr_info_def *pi = get_ptr_info (ddef);
+ bool old_known = get_ptr_info_alignment (pi, &old_align, &old_misalign);
+
+ if (old_known
+ && old_align > align)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "But alignment was already %u.\n", old_align);
+ if ((old_misalign & (align - 1)) != misalign)
+ fprintf (dump_file, "old_misalign (%u) and misalign (%u) mismatch\n",
+ old_misalign, misalign);
+ }
+ continue;
+ }
+
+ if (old_known
+ && ((misalign & (old_align - 1)) != old_misalign)
+ && dump_file)
+ fprintf (dump_file, "old_misalign (%u) and misalign (%u) mismatch\n",
+ old_misalign, misalign);
+
+ set_ptr_info_alignment (pi, align, misalign);
+ }
+ }
+ }
+}
+
+/* Update value range of formal parameters as described in
+ ipcp_transformation. */
+
+static void
+ipcp_update_vr (struct cgraph_node *node)
+{
+ tree fndecl = node->decl;
+ tree parm = DECL_ARGUMENTS (fndecl);
+ tree next_parm = parm;
+ ipcp_transformation *ts = ipcp_get_transformation_summary (node);
+ if (!ts || vec_safe_length (ts->m_vr) == 0)
+ return;
+ const vec<ipa_vr, va_gc> &vr = *ts->m_vr;
+ unsigned count = vr.length ();
+
+ for (unsigned i = 0; i < count; ++i, parm = next_parm)
+ {
+ if (node->clone.combined_args_to_skip
+ && bitmap_bit_p (node->clone.combined_args_to_skip, i))
+ continue;
+ gcc_checking_assert (parm);
+ next_parm = DECL_CHAIN (parm);
+ tree ddef = ssa_default_def (DECL_STRUCT_FUNCTION (node->decl), parm);
+
+ if (!ddef || !is_gimple_reg (parm))
+ continue;
+
+ if (vr[i].known
+ && (vr[i].type == VR_RANGE || vr[i].type == VR_ANTI_RANGE))
+ {
+ tree type = TREE_TYPE (ddef);
+ unsigned prec = TYPE_PRECISION (type);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (ddef)))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "Setting value range of param %u ", i);
+ fprintf (dump_file, "%s[",
+ (vr[i].type == VR_ANTI_RANGE) ? "~" : "");
+ print_decs (vr[i].min, dump_file);
+ fprintf (dump_file, ", ");
+ print_decs (vr[i].max, dump_file);
+ fprintf (dump_file, "]\n");
+ }
+ set_range_info (ddef, vr[i].type,
+ wide_int_storage::from (vr[i].min, prec,
+ TYPE_SIGN (type)),
+ wide_int_storage::from (vr[i].max, prec,
+ TYPE_SIGN (type)));
+ }
+ else if (POINTER_TYPE_P (TREE_TYPE (ddef))
+ && vr[i].type == VR_ANTI_RANGE
+ && wi::eq_p (vr[i].min, 0)
+ && wi::eq_p (vr[i].max, 0))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Setting nonnull for %u\n", i);
+ set_ptr_nonnull (ddef);
+ }
}
- set_ptr_info_alignment (pi, alignments[i].align, alignments[i].misalign);
}
}
unsigned int
ipcp_transform_function (struct cgraph_node *node)
{
- vec<ipa_param_descriptor> descriptors = vNULL;
- struct func_body_info fbi;
+ vec<ipa_param_descriptor, va_gc> *descriptors = NULL;
+ struct ipa_func_body_info fbi;
struct ipa_agg_replacement_value *aggval;
int param_count;
bool cfg_changed = false, something_changed = false;
gcc_checking_assert (current_function_decl);
if (dump_file)
- fprintf (dump_file, "Modification phase of node %s/%i\n",
- node->name (), node->order);
+ fprintf (dump_file, "Modification phase of node %s\n",
+ node->dump_name ());
- ipcp_update_alignments (node);
+ ipcp_update_bits (node);
+ ipcp_update_vr (node);
aggval = ipa_get_agg_replacements_for_node (node);
if (!aggval)
return 0;
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = param_count;
- fbi.aa_walked = 0;
+ fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
- descriptors.safe_grow_cleared (param_count);
- ipa_populate_param_decls (node, descriptors);
+ vec_safe_grow_cleared (descriptors, param_count);
+ ipa_populate_param_decls (node, *descriptors);
calculate_dominance_info (CDI_DOMINATORS);
ipcp_modif_dom_walker (&fbi, descriptors, aggval, &something_changed,
&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
free_ipa_bb_info (bi);
fbi.bb_infos.release ();
free_dominance_info (CDI_DOMINATORS);
- (*ipcp_transformations)[node->uid].agg_values = NULL;
- (*ipcp_transformations)[node->uid].alignments = NULL;
- descriptors.release ();
+
+ ipcp_transformation *s = ipcp_transformation_sum->get (node);
+ s->agg_values = NULL;
+ s->bits = NULL;
+ s->m_vr = NULL;
+
+ vec_free (descriptors);
if (!something_changed)
return 0;
- else if (cfg_changed)
- return TODO_update_ssa_only_virtuals | TODO_cleanup_cfg;
- else
- return TODO_update_ssa_only_virtuals;
+
+ if (cfg_changed)
+ delete_unreachable_blocks_update_callgraph (node, false);
+
+ return TODO_update_ssa_only_virtuals;
}
+
+#include "gt-ipa-prop.h"