/* Alias analysis for GNU C
- Copyright (C) 1997-2017 Free Software Foundation, Inc.
+ Copyright (C) 1997-2020 Free Software Foundation, Inc.
Contributed by John Carr (jfc@mit.edu).
This file is part of GCC.
#include "cfganal.h"
#include "rtl-iter.h"
#include "cgraph.h"
+#include "ipa-utils.h"
/* The aliasing API provided here solves related but different problems:
};
static int rtx_equal_for_memref_p (const_rtx, const_rtx);
-static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
machine_mode);
&& TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
return false;
- /* If this is a reference based on a partitioned decl replace the
- base with a MEM_REF of the pointer representative we
- created during stack slot partitioning. */
- if (VAR_P (base)
- && ! is_global_var (base)
- && cfun->gimple_df->decls_to_pointers != NULL)
- {
- tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
- if (namep)
- ref->base = build_simple_mem_ref (*namep);
- }
-
ref->ref_alias_set = MEM_ALIAS_SET (mem);
/* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR
/* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
drop ref->ref. */
- if (MEM_OFFSET (mem) < 0
- || (ref->max_size != -1
- && ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT
- > ref->max_size)))
+ if (maybe_lt (MEM_OFFSET (mem), 0)
+ || (ref->max_size_known_p ()
+ && maybe_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT,
+ ref->max_size)))
ref->ref = NULL_TREE;
/* Refine size and offset we got from analyzing MEM_EXPR by using
/* The MEM may extend into adjacent fields, so adjust max_size if
necessary. */
- if (ref->max_size != -1
- && ref->size > ref->max_size)
- ref->max_size = ref->size;
+ if (ref->max_size_known_p ())
+ ref->max_size = upper_bound (ref->max_size, ref->size);
- /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
+ /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of
the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
if (MEM_EXPR (mem) != get_spill_slot_decl (false)
- && (ref->offset < 0
+ && (maybe_lt (ref->offset, 0)
|| (DECL_P (ref->base)
&& (DECL_SIZE (ref->base) == NULL_TREE
- || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
- || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
- ref->offset + ref->size)))))
+ || !poly_int_tree_p (DECL_SIZE (ref->base))
+ || maybe_lt (wi::to_poly_offset (DECL_SIZE (ref->base)),
+ ref->offset + ref->size)))))
return false;
return true;
&& MEM_ALIAS_SET (mem) != 0);
}
+/* Return true if the ref EARLIER behaves the same as LATER with respect
+   to TBAA for every memory reference that might follow LATER.  */
+
+bool
+refs_same_for_tbaa_p (tree earlier, tree later)
+{
+  ao_ref earlier_ref, later_ref;
+  ao_ref_init (&earlier_ref, earlier);
+  ao_ref_init (&later_ref, later);
+  /* LATER is interchangeable with EARLIER if its alias set is the same
+     as, or a subset of, EARLIER's.  */
+  alias_set_type earlier_set = ao_ref_alias_set (&earlier_ref);
+  alias_set_type later_set = ao_ref_alias_set (&later_ref);
+  if (!(earlier_set == later_set
+	|| alias_set_subset_of (later_set, earlier_set)))
+    return false;
+  /* The same subset relation must hold for the base alias sets.  */
+  alias_set_type later_base_set = ao_ref_base_alias_set (&later_ref);
+  alias_set_type earlier_base_set = ao_ref_base_alias_set (&earlier_ref);
+  return (earlier_base_set == later_base_set
+	  || alias_set_subset_of (later_base_set, earlier_base_set));
+}
+
/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
such an entry, or NULL otherwise. */
return alias_sets_must_conflict_p (set1, set2);
}
\f
+/* Return true if T is an end of the access path which can be used
+   by type based alias oracle.  */
+
+bool
+ends_tbaa_access_path_p (const_tree t)
+{
+  switch (TREE_CODE (t))
+    {
+    case COMPONENT_REF:
+      /* A non-addressable field ends the TBAA access path.  */
+      if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
+	return true;
+      /* Permit type-punning when accessing a union, provided the access
+	 is directly through the union.  For example, this code does not
+	 permit taking the address of a union member and then storing
+	 through it.  Even the type-punning allowed here is a GCC
+	 extension, albeit a common and useful one; the C standard says
+	 that such accesses have implementation-defined behavior.  */
+      else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
+	return true;
+      break;
+
+    case ARRAY_REF:
+    case ARRAY_RANGE_REF:
+      if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
+	return true;
+      break;
+
+    case REALPART_EXPR:
+    case IMAGPART_EXPR:
+      break;
+
+    case BIT_FIELD_REF:
+    case VIEW_CONVERT_EXPR:
+      /* Bitfields and casts are never addressable.  */
+      return true;
+
+    default:
+      gcc_unreachable ();
+    }
+  return false;
+}
+
/* Return the outermost parent of component present in the chain of
component references handled by get_inner_reference in T with the
following property:
- - the component is non-addressable, or
- - the parent has alias set zero,
+ - the component is non-addressable
or NULL_TREE if no such parent exists. In the former cases, the alias
set of this parent is the alias set that must be used for T itself. */
{
const_tree found = NULL_TREE;
- if (AGGREGATE_TYPE_P (TREE_TYPE (t))
- && TYPE_TYPELESS_STORAGE (TREE_TYPE (t)))
- return const_cast <tree> (t);
-
while (handled_component_p (t))
{
- switch (TREE_CODE (t))
- {
- case COMPONENT_REF:
- if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
- found = t;
- /* Permit type-punning when accessing a union, provided the access
- is directly through the union. For example, this code does not
- permit taking the address of a union member and then storing
- through it. Even the type-punning allowed here is a GCC
- extension, albeit a common and useful one; the C standard says
- that such accesses have implementation-defined behavior. */
- else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
- found = t;
- break;
-
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
- found = t;
- break;
-
- case REALPART_EXPR:
- case IMAGPART_EXPR:
- break;
-
- case BIT_FIELD_REF:
- case VIEW_CONVERT_EXPR:
- /* Bitfields and casts are never addressable. */
- found = t;
- break;
-
- default:
- gcc_unreachable ();
- }
-
- if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
+ if (ends_tbaa_access_path_p (t))
found = t;
t = TREE_OPERAND (t, 0);
|| ref_all_alias_ptr_type_p (t2))
return false;
- return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
- == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
+  /* This function originally abstracted away from simply comparing
+     get_deref_alias_set so that we are sure this still computes
+     the same result after LTO type merging is applied.
+     Once LTO type merging has been done, we can actually perform the
+     comparison directly.  */
+ if (in_lto_p)
+ return get_deref_alias_set (t1) == get_deref_alias_set (t2);
+ else
+ return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
}
/* Create emptry alias set entry. */
{
alias_set_type set;
- /* We can not give up with -fno-strict-aliasing because we need to build
- proper type representation for possible functions which are build with
+ /* We cannot give up with -fno-strict-aliasing because we need to build
+ proper type representations for possible functions which are built with
-fstrict-aliasing. */
/* return 0 if this or its type is an error. */
if (set != -1)
return set;
/* Handle structure type equality for pointer types, arrays and vectors.
- This is easy to do, because the code bellow ignore canonical types on
+ This is easy to do, because the code below ignores canonical types on
these anyway. This is important for LTO, where TYPE_CANONICAL for
- pointers can not be meaningfuly computed by the frotnend. */
+ pointers cannot be meaningfully computed by the frontend. */
if (canonical_type_used_p (t))
{
/* In LTO we set canonical types for all types where it makes
|| TREE_CODE (p) == VECTOR_TYPE;
p = TREE_TYPE (p))
{
- /* Ada supports recusive pointers. Instead of doing recrusion check
- just give up once the preallocated space of 8 elements is up.
- In this case just punt to void * alias set. */
+ /* Ada supports recursive pointers. Instead of doing recursion
+ check, just give up once the preallocated space of 8 elements
+ is up. In this case just punt to void * alias set. */
if (reference.length () == 8)
{
p = ptr_type_node;
}
p = TYPE_MAIN_VARIANT (p);
+ /* In LTO for C++ programs we can turn incomplete types to complete
+ using ODR name lookup. */
+ if (in_lto_p && TYPE_STRUCTURAL_EQUALITY_P (p) && odr_type_p (p))
+ {
+ p = prevailing_odr_type (p);
+ gcc_checking_assert (TYPE_MAIN_VARIANT (p) == p);
+ }
+
/* Make void * compatible with char * and also void **.
Programs are commonly violating TBAA by this.
}
/* Assign the alias set to both p and t.
- We can not call get_alias_set (p) here as that would trigger
+ We cannot call get_alias_set (p) here as that would trigger
infinite recursion when p == t. In other cases it would just
trigger unnecessary legwork of rebuilding the pointer again. */
gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
superset_entry->has_zero_child = 1;
else
{
- subset_entry = get_alias_set_entry (subset);
if (!superset_entry->children)
superset_entry->children
= hash_map<alias_set_hash, int>::create_ggc (64);
+
+ /* Enter the SUBSET itself as a child of the SUPERSET. If it was
+ already there we're done. */
+ if (superset_entry->children->put (subset, 0))
+ return;
+
+ subset_entry = get_alias_set_entry (subset);
/* If there is an entry for the subset, enter all of its children
(if they are not already present) as children of the SUPERSET. */
if (subset_entry)
superset_entry->children->put ((*iter).first, (*iter).second);
}
}
-
- /* Enter the SUBSET itself as a child of the SUPERSET. */
- superset_entry->children->put (subset, 0);
}
}
-/* Record that component types of TYPE, if any, are part of that type for
+/* Record that component types of TYPE, if any, are part of SUPERSET for
aliasing purposes. For record types, we only record component types
for fields that are not marked non-addressable. For array types, we
only record the component type if it is not marked non-aliased. */
void
-record_component_aliases (tree type)
+record_component_aliases (tree type, alias_set_type superset)
{
- alias_set_type superset = get_alias_set (type);
tree field;
if (superset == 0)
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
- for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
- if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
- {
- /* LTO type merging does not make any difference between
- component pointer types. We may have
-
- struct foo {int *a;};
-
- as TYPE_CANONICAL of
-
- struct bar {float *a;};
-
- Because accesses to int * and float * do not alias, we would get
- false negative when accessing the same memory location by
- float ** and bar *. We thus record the canonical type as:
-
- struct {void *a;};
-
- void * is special cased and works as a universal pointer type.
- Accesses to it conflicts with accesses to any other pointer
- type. */
- tree t = TREE_TYPE (field);
- if (in_lto_p)
- {
- /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
- element type and that type has to be normalized to void *,
- too, in the case it is a pointer. */
- while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
- {
- gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
- t = TREE_TYPE (t);
- }
- if (POINTER_TYPE_P (t))
- t = ptr_type_node;
- else if (flag_checking)
- gcc_checking_assert (get_alias_set (t)
- == get_alias_set (TREE_TYPE (field)));
- }
-
- record_alias_subset (superset, get_alias_set (t));
- }
+ {
+ /* LTO non-ODR type merging does not make any difference between
+ component pointer types. We may have
+
+ struct foo {int *a;};
+
+ as TYPE_CANONICAL of
+
+ struct bar {float *a;};
+
+ Because accesses to int * and float * do not alias, we would get
+ false negative when accessing the same memory location by
+ float ** and bar *. We thus record the canonical type as:
+
+ struct {void *a;};
+
+ void * is special cased and works as a universal pointer type.
+ Accesses to it conflicts with accesses to any other pointer
+ type. */
+ bool void_pointers = in_lto_p
+ && (!odr_type_p (type)
+ || !odr_based_tbaa_p (type));
+ for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
+ {
+ tree t = TREE_TYPE (field);
+ if (void_pointers)
+ {
+ /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
+ element type and that type has to be normalized to void *,
+ too, in the case it is a pointer. */
+ while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
+ {
+ gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
+ t = TREE_TYPE (t);
+ }
+ if (POINTER_TYPE_P (t))
+ t = ptr_type_node;
+ else if (flag_checking)
+ gcc_checking_assert (get_alias_set (t)
+ == get_alias_set (TREE_TYPE (field)));
+ }
+
+ alias_set_type set = get_alias_set (t);
+ record_alias_subset (superset, set);
+ /* If the field has alias-set zero make sure to still record
+	 any components of it. This makes sure that for
+ struct A {
+ struct B {
+ int i;
+ char c[4];
+ } b;
+ };
+ in C++ even though 'B' has alias-set zero because
+ TYPE_TYPELESS_STORAGE is set, 'A' has the alias-set of
+ 'int' as subset. */
+ if (set == 0)
+ record_component_aliases (t, superset);
+ }
+ }
break;
case COMPLEX_TYPE:
}
}
+/* Record that component types of TYPE, if any, are part of that type for
+   aliasing purposes.  For record types, we only record component types
+   for fields that are not marked non-addressable.  For array types, we
+   only record the component type if it is not marked non-aliased.  */
+
+void
+record_component_aliases (tree type)
+{
+  /* Delegate to the worker overload, using TYPE's own alias set as
+     the superset.  */
+  record_component_aliases (type, get_alias_set (type));
+}
+
+
/* Allocate an alias set for use in storing and reading from the varargs
spill area. */
return find_base_value (XEXP (src, 1));
case AND:
- /* If the second operand is constant set the base
- address to the first operand. */
- if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
+ /* Look through aligning ANDs. And AND with zero or one with
+ the LSB set isn't one (see for example PR92462). */
+ if (CONST_INT_P (XEXP (src, 1))
+ && INTVAL (XEXP (src, 1)) != 0
+ && (INTVAL (XEXP (src, 1)) & 1) == 0)
return find_base_value (XEXP (src, 0));
return 0;
new_reg_base_value[regno] = 0;
return;
}
+
src = SET_SRC (set);
}
else
return 0;
break;
+ case 'p':
+ if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
+ return 0;
+ break;
+
case 'E':
/* Two vectors must have the same length. */
if (XVECLEN (x, i) != XVECLEN (y, i))
}
static rtx
-find_base_term (rtx x)
+find_base_term (rtx x, vec<std::pair<cselib_val *,
+ struct elt_loc_list *> > &visited_vals)
{
cselib_val *val;
struct elt_loc_list *l, *f;
case POST_DEC:
case PRE_MODIFY:
case POST_MODIFY:
- return find_base_term (XEXP (x, 0));
+ return find_base_term (XEXP (x, 0), visited_vals);
case ZERO_EXTEND:
case SIGN_EXTEND: /* Used for Alpha/NT pointers */
return 0;
{
- rtx temp = find_base_term (XEXP (x, 0));
+ rtx temp = find_base_term (XEXP (x, 0), visited_vals);
if (temp != 0 && CONSTANT_P (temp))
temp = convert_memory_address (Pmode, temp);
if (cselib_sp_based_value_p (val))
return static_reg_base_value[STACK_POINTER_REGNUM];
+ if (visited_vals.length () > (unsigned) param_max_find_base_term_values)
+ return ret;
+
f = val->locs;
- /* Temporarily reset val->locs to avoid infinite recursion. */
+ /* Reset val->locs to avoid infinite recursion. */
+ if (f)
+ visited_vals.safe_push (std::make_pair (val, f));
val->locs = NULL;
for (l = f; l; l = l->next)
&& !CSELIB_VAL_PTR (l->loc)->locs->next
&& CSELIB_VAL_PTR (l->loc)->locs->loc == x)
continue;
- else if ((ret = find_base_term (l->loc)) != 0)
+ else if ((ret = find_base_term (l->loc, visited_vals)) != 0)
break;
- val->locs = f;
return ret;
case LO_SUM:
/* The standard form is (lo_sum reg sym) so look only at the
second operand. */
- return find_base_term (XEXP (x, 1));
+ return find_base_term (XEXP (x, 1), visited_vals);
case CONST:
x = XEXP (x, 0);
other operand is the base register. */
if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
- return find_base_term (tmp2);
+ return find_base_term (tmp2, visited_vals);
/* If either operand is known to be a pointer, then prefer it
to determine the base term. */
term is from a pointer or is a named object or a special address
(like an argument or stack reference), then use it for the
base term. */
- rtx base = find_base_term (tmp1);
+ rtx base = find_base_term (tmp1, visited_vals);
if (base != NULL_RTX
&& ((REG_P (tmp1) && REG_POINTER (tmp1))
|| known_base_value_p (base)))
return base;
- base = find_base_term (tmp2);
+ base = find_base_term (tmp2, visited_vals);
if (base != NULL_RTX
&& ((REG_P (tmp2) && REG_POINTER (tmp2))
|| known_base_value_p (base)))
}
case AND:
- if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
- return find_base_term (XEXP (x, 0));
+ /* Look through aligning ANDs. And AND with zero or one with
+ the LSB set isn't one (see for example PR92462). */
+ if (CONST_INT_P (XEXP (x, 1))
+ && INTVAL (XEXP (x, 1)) != 0
+ && (INTVAL (XEXP (x, 1)) & 1) == 0)
+ return find_base_term (XEXP (x, 0), visited_vals);
return 0;
case SYMBOL_REF:
}
}
+/* Wrapper around the worker above which removes locs from visited VALUEs
+   to avoid visiting them multiple times.  We unwind those changes here.  */
+
+static rtx
+find_base_term (rtx x)
+{
+  auto_vec<std::pair<cselib_val *, struct elt_loc_list *>, 32> visited_vals;
+  rtx res = find_base_term (x, visited_vals);
+  /* The worker cleared val->locs of every VALUE it visited after saving
+     the original list in VISITED_VALS; restore those lists now that the
+     recursion is done.  */
+  for (unsigned i = 0; i < visited_vals.length (); ++i)
+    visited_vals[i].first->locs = visited_vals[i].second;
+  return res;
+}
+
/* Return true if accesses to address X may alias accesses based
on the stack pointer. */
}
/* BASE1 and BASE2 are decls. Return 1 if they refer to same object, 0
- if they refer to different objects and -1 if we can not decide. */
+ if they refer to different objects and -1 if we cannot decide. */
int
compare_base_decls (tree base1, tree base2)
symtab_node *x_node = symtab_node::get_create (x_decl)
->ultimate_alias_target ();
- /* External variable can not be in section anchor. */
+ /* External variable cannot be in section anchor. */
if (!x_node->definition)
return 0;
x_base = XEXP (DECL_RTL (x_node->decl), 0);
rtx op0 = get_addr (XEXP (x, 0));
if (op0 != XEXP (x, 0))
{
+ poly_int64 c;
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
+ && poly_int_rtx_p (XEXP (x, 1), &c))
+ return plus_constant (GET_MODE (x), op0, c);
return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
op0, XEXP (x, 1));
}
is not modified by the memory reference then ADDR is returned. */
static rtx
-addr_side_effect_eval (rtx addr, int size, int n_refs)
+addr_side_effect_eval (rtx addr, poly_int64 size, int n_refs)
{
- int offset = 0;
+ poly_int64 offset = 0;
switch (GET_CODE (addr))
{
return addr;
}
- if (offset)
- addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
- gen_int_mode (offset, GET_MODE (addr)));
- else
- addr = XEXP (addr, 0);
+ addr = plus_constant (GET_MODE (addr), XEXP (addr, 0), offset);
addr = canon_rtx (addr);
return addr;
absolute value of the sizes as the actual sizes. */
static inline bool
-offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
+offset_overlap_p (poly_int64 c, poly_int64 xsize, poly_int64 ysize)
{
-  return (xsize == 0 || ysize == 0
-	  || (c >= 0
-	      ? (abs (xsize) > c)
-	      : (abs (ysize) > -c)));
+  /* A zero-sized access never overlaps anything.  */
+  if (known_eq (xsize, 0) || known_eq (ysize, 0))
+    return true;
+
+  /* Negative sizes stand for "at most abs (size)" (see the callers'
+     comments about alignment adjustments), so compare with the absolute
+     value; the maybe_* poly_int predicates answer conservatively
+     ("may overlap") when the values are not compile-time constants.  */
+  if (maybe_ge (c, 0))
+    return maybe_gt (maybe_lt (xsize, 0) ? -xsize : xsize, c);
+  else
+    return maybe_gt (maybe_lt (ysize, 0) ? -ysize : ysize, -c);
}
/* Return one if X and Y (memory addresses) reference the
If that is fixed the TBAA hack for union type-punning can be removed. */
static int
-memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
+memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y,
+ poly_int64 c)
{
if (GET_CODE (x) == VALUE)
{
else if (GET_CODE (x) == LO_SUM)
x = XEXP (x, 1);
else
- x = addr_side_effect_eval (x, abs (xsize), 0);
+ x = addr_side_effect_eval (x, maybe_lt (xsize, 0) ? -xsize : xsize, 0);
if (GET_CODE (y) == HIGH)
y = XEXP (y, 0);
else if (GET_CODE (y) == LO_SUM)
y = XEXP (y, 1);
else
- y = addr_side_effect_eval (y, abs (ysize), 0);
+ y = addr_side_effect_eval (y, maybe_lt (ysize, 0) ? -ysize : ysize, 0);
if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF)
{
through alignment adjustments (i.e., that have negative
sizes), because we can't know how far they are from each
other. */
- if (xsize < 0 || ysize < 0)
+ if (maybe_lt (xsize, 0) || maybe_lt (ysize, 0))
return -1;
/* If decls are different or we know by offsets that there is no overlap,
we win. */
else if (x1 == y)
return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c);
+ poly_int64 cx1, cy1;
if (GET_CODE (y) == PLUS)
{
/* The fact that Y is canonicalized means that this
return memrefs_conflict_p (xsize, x0, ysize, y0, c);
if (rtx_equal_for_memref_p (x0, y0))
return memrefs_conflict_p (xsize, x1, ysize, y1, c);
- if (CONST_INT_P (x1))
+ if (poly_int_rtx_p (x1, &cx1))
{
- if (CONST_INT_P (y1))
+ if (poly_int_rtx_p (y1, &cy1))
return memrefs_conflict_p (xsize, x0, ysize, y0,
- c - INTVAL (x1) + INTVAL (y1));
+ c - cx1 + cy1);
else
- return memrefs_conflict_p (xsize, x0, ysize, y,
- c - INTVAL (x1));
+ return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
}
- else if (CONST_INT_P (y1))
- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+ else if (poly_int_rtx_p (y1, &cy1))
+ return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
return -1;
}
- else if (CONST_INT_P (x1))
- return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
+ else if (poly_int_rtx_p (x1, &cx1))
+ return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
}
else if (GET_CODE (y) == PLUS)
{
if (x == y1)
return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c);
- if (CONST_INT_P (y1))
- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+ poly_int64 cy1;
+ if (poly_int_rtx_p (y1, &cy1))
+ return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
else
return -1;
}
return offset_overlap_p (c, xsize, ysize);
/* Can't properly adjust our sizes. */
- if (!CONST_INT_P (x1))
+ poly_int64 c1;
+ if (!poly_int_rtx_p (x1, &c1)
+ || !can_div_trunc_p (xsize, c1, &xsize)
+ || !can_div_trunc_p (ysize, c1, &ysize)
+ || !can_div_trunc_p (c, c1, &c))
return -1;
- xsize /= INTVAL (x1);
- ysize /= INTVAL (x1);
- c /= INTVAL (x1);
return memrefs_conflict_p (xsize, x0, ysize, y0, c);
}
unsigned HOST_WIDE_INT uc = sc;
if (sc < 0 && pow2_or_zerop (-uc))
{
- if (xsize > 0)
+ if (maybe_gt (xsize, 0))
xsize = -xsize;
- if (xsize)
+ if (maybe_ne (xsize, 0))
xsize += sc + 1;
c -= sc + 1;
return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
unsigned HOST_WIDE_INT uc = sc;
if (sc < 0 && pow2_or_zerop (-uc))
{
- if (ysize > 0)
+ if (maybe_gt (ysize, 0))
ysize = -ysize;
- if (ysize)
+ if (maybe_ne (ysize, 0))
ysize += sc + 1;
c += sc + 1;
return memrefs_conflict_p (xsize, x,
if (CONSTANT_P (x))
{
- if (CONST_INT_P (x) && CONST_INT_P (y))
+ poly_int64 cx, cy;
+ if (poly_int_rtx_p (x, &cx) && poly_int_rtx_p (y, &cy))
{
- c += (INTVAL (y) - INTVAL (x));
+ c += cy - cx;
return offset_overlap_p (c, xsize, ysize);
}
sizes), because we can't know how far they are from each
other. */
if (CONSTANT_P (y))
- return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
+ return (maybe_lt (xsize, 0)
+ || maybe_lt (ysize, 0)
+ || offset_overlap_p (c, xsize, ysize));
return -1;
}
ways.
If both memory references are volatile, then there must always be a
- dependence between the two references, since their order can not be
+ dependence between the two references, since their order cannot be
changed. A volatile and non-volatile reference can be interchanged
though.
static void
adjust_offset_for_component_ref (tree x, bool *known_p,
- HOST_WIDE_INT *offset)
+ poly_int64 *offset)
{
if (!*known_p)
return;
{
tree xoffset = component_ref_field_offset (x);
tree field = TREE_OPERAND (x, 1);
- if (TREE_CODE (xoffset) != INTEGER_CST)
+ if (!poly_int_tree_p (xoffset))
{
*known_p = false;
return;
}
- offset_int woffset
- = (wi::to_offset (xoffset)
+ poly_offset_int woffset
+ = (wi::to_poly_offset (xoffset)
+ (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
- >> LOG2_BITS_PER_UNIT));
- if (!wi::fits_uhwi_p (woffset))
+ >> LOG2_BITS_PER_UNIT)
+ + *offset);
+ if (!woffset.to_shwi (offset))
{
*known_p = false;
return;
}
- *offset += woffset.to_uhwi ();
x = TREE_OPERAND (x, 0);
}
rtx rtlx, rtly;
rtx basex, basey;
bool moffsetx_known_p, moffsety_known_p;
- HOST_WIDE_INT moffsetx = 0, moffsety = 0;
- HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey;
+ poly_int64 moffsetx = 0, moffsety = 0;
+ poly_int64 offsetx = 0, offsety = 0, sizex, sizey;
/* Unless both have exprs, we can't tell anything. */
if (exprx == 0 || expry == 0)
we can avoid overlap is if we can deduce that they are nonoverlapping
pieces of that decl, which is very rare. */
basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
- if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
- offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
+ basex = strip_offset_and_add (basex, &offsetx);
basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
- if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
- offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
+ basey = strip_offset_and_add (basey, &offsety);
/* If the bases are different, we know they do not overlap if both
are constants or if one is a constant and the other a pointer into the
declarations are necessarily different
(i.e. compare_base_decls (exprx, expry) == -1) */
- sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
+ sizex = (!MEM_P (rtlx) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtlx)))
: MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
: -1);
- sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
+ sizey = (!MEM_P (rtly) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtly)))
: MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
: -1);
if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
sizey = MEM_SIZE (y);
- /* Put the values of the memref with the lower offset in X's values. */
- if (offsetx > offsety)
- {
- std::swap (offsetx, offsety);
- std::swap (sizex, sizey);
- }
-
- /* If we don't know the size of the lower-offset value, we can't tell
- if they conflict. Otherwise, we do the test. */
- return sizex >= 0 && offsety >= offsetx + sizex;
+ return !ranges_maybe_overlap_p (offsetx, sizex, offsety, sizey);
}
/* Helper for true_dependence and canon_true_dependence.
int ret;
gcc_checking_assert (x_canonicalized
- ? (x_addr != NULL_RTX && x_mode != VOIDmode)
+ ? (x_addr != NULL_RTX
+ && (x_mode != VOIDmode || GET_MODE (x) == VOIDmode))
: (x_addr == NULL_RTX && x_mode == VOIDmode));
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
&& targetm.hard_regno_mode_ok (i, Pmode))
static_reg_base_value[i] = arg_base_value;
+ /* RTL code is required to be consistent about whether it uses the
+ stack pointer, the frame pointer or the argument pointer to
+ access a given area of the frame. We can therefore use the
+ base address to distinguish between the different areas. */
static_reg_base_value[STACK_POINTER_REGNUM]
= unique_base_value (UNIQUE_BASE_VALUE_SP);
static_reg_base_value[ARG_POINTER_REGNUM]
= unique_base_value (UNIQUE_BASE_VALUE_ARGP);
static_reg_base_value[FRAME_POINTER_REGNUM]
= unique_base_value (UNIQUE_BASE_VALUE_FP);
+
+ /* The above rules extend post-reload, with eliminations applying
+ consistently to each of the three pointers. Cope with cases in
+ which the frame pointer is eliminated to the hard frame pointer
+ rather than the stack pointer. */
if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
= unique_base_value (UNIQUE_BASE_VALUE_HFP);
if (CALL_P (insn))
return true;
memory_modified = false;
- note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
+ note_stores (as_a<const rtx_insn *> (insn), memory_modified_1,
+ CONST_CAST_RTX(mem));
return memory_modified;
}
-/* Return TRUE if the destination of a set is rtx identical to
- ITEM. */
-static inline bool
-set_dest_equal_p (const_rtx set, const_rtx item)
-{
- rtx dest = SET_DEST (set);
- return rtx_equal_p (dest, item);
-}
-
/* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
array. */
/* Initialize the alias information for this pass. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (static_reg_base_value[i])
+ if (static_reg_base_value[i]
+ /* Don't treat the hard frame pointer as special if we
+ eliminated the frame pointer to the stack pointer instead. */
+ && !(i == HARD_FRAME_POINTER_REGNUM
+ && reload_completed
+ && !frame_pointer_needed
+ && targetm.can_eliminate (FRAME_POINTER_REGNUM,
+ STACK_POINTER_REGNUM)))
{
new_reg_base_value[i] = static_reg_base_value[i];
bitmap_set_bit (reg_seen, i);
&& find_reg_note (insn, REG_NOALIAS, NULL_RTX))
record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
else
- note_stores (PATTERN (insn), record_set, NULL);
+ note_stores (insn, record_set, NULL);
set = single_set (insn);
&& DF_REG_DEF_COUNT (regno) != 1)
note = NULL_RTX;
+ poly_int64 offset;
if (note != NULL_RTX
&& GET_CODE (XEXP (note, 0)) != EXPR_LIST
&& ! rtx_varies_p (XEXP (note, 0), 1)
&& GET_CODE (src) == PLUS
&& REG_P (XEXP (src, 0))
&& (t = get_reg_known_value (REGNO (XEXP (src, 0))))
- && CONST_INT_P (XEXP (src, 1)))
+ && poly_int_rtx_p (XEXP (src, 1), &offset))
{
- t = plus_constant (GET_MODE (src), t,
- INTVAL (XEXP (src, 1)));
+ t = plus_constant (GET_MODE (src), t, offset);
set_reg_known_value (regno, t);
set_reg_known_equiv_p (regno, false);
}