/* Language-independent node constructors for parse phase of GNU compiler.
- Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
- 2011, 2012 Free Software Foundation, Inc.
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
This file is part of GCC.
#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "attribs.h"
+#include "varasm.h"
#include "tm_p.h"
#include "function.h"
#include "obstack.h"
#include "tree-inline.h"
#include "tree-iterator.h"
#include "basic-block.h"
-#include "tree-flow.h"
+#include "bitmap.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-phinodes.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "expr.h"
+#include "tree-dfa.h"
#include "params.h"
#include "pointer-set.h"
#include "tree-pass.h"
#include "diagnostic.h"
#include "tree-diagnostic.h"
#include "tree-pretty-print.h"
-#include "cgraph.h"
#include "except.h"
#include "debug.h"
#include "intl.h"
+#include "wide-int.h"
/* Tree code classes. */
#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
#define END_OF_BASE_TREE_CODES "@dummy",
-const char *const tree_code_name[] = {
+static const char *const tree_code_name[] = {
#include "all-tree.def"
};
static int type_hash_marked_p (const void *);
static unsigned int type_hash_list (const_tree, hashval_t);
static unsigned int attribute_hash_list (const_tree, hashval_t);
+static bool decls_same_for_odr (tree decl1, tree decl2);
tree global_trees[TI_MAX];
tree integer_types[itk_none];
4, /* OMP_CLAUSE_REDUCTION */
1, /* OMP_CLAUSE_COPYIN */
1, /* OMP_CLAUSE_COPYPRIVATE */
+ 2, /* OMP_CLAUSE_LINEAR */
+ 2, /* OMP_CLAUSE_ALIGNED */
+ 1, /* OMP_CLAUSE_DEPEND */
+ 1, /* OMP_CLAUSE_UNIFORM */
+ 2, /* OMP_CLAUSE_FROM */
+ 2, /* OMP_CLAUSE_TO */
+ 2, /* OMP_CLAUSE_MAP */
+ 1, /* OMP_CLAUSE__LOOPTEMP_ */
1, /* OMP_CLAUSE_IF */
1, /* OMP_CLAUSE_NUM_THREADS */
1, /* OMP_CLAUSE_SCHEDULE */
3, /* OMP_CLAUSE_COLLAPSE */
0, /* OMP_CLAUSE_UNTIED */
1, /* OMP_CLAUSE_FINAL */
- 0 /* OMP_CLAUSE_MERGEABLE */
+ 0, /* OMP_CLAUSE_MERGEABLE */
+ 1, /* OMP_CLAUSE_DEVICE */
+ 1, /* OMP_CLAUSE_DIST_SCHEDULE */
+ 0, /* OMP_CLAUSE_INBRANCH */
+ 0, /* OMP_CLAUSE_NOTINBRANCH */
+ 1, /* OMP_CLAUSE_NUM_TEAMS */
+ 1, /* OMP_CLAUSE_THREAD_LIMIT */
+ 0, /* OMP_CLAUSE_PROC_BIND */
+ 1, /* OMP_CLAUSE_SAFELEN */
+ 1, /* OMP_CLAUSE_SIMDLEN */
+ 0, /* OMP_CLAUSE_FOR */
+ 0, /* OMP_CLAUSE_PARALLEL */
+ 0, /* OMP_CLAUSE_SECTIONS */
+ 0, /* OMP_CLAUSE_TASKGROUP */
+ 1, /* OMP_CLAUSE__SIMDUID_ */
};
const char * const omp_clause_code_name[] =
"reduction",
"copyin",
"copyprivate",
+ "linear",
+ "aligned",
+ "depend",
+ "uniform",
+ "from",
+ "to",
+ "map",
+ "_looptemp_",
"if",
"num_threads",
"schedule",
"collapse",
"untied",
"final",
- "mergeable"
+ "mergeable",
+ "device",
+ "dist_schedule",
+ "inbranch",
+ "notinbranch",
+ "num_teams",
+ "thread_limit",
+ "proc_bind",
+ "safelen",
+ "simdlen",
+ "for",
+ "parallel",
+ "sections",
+ "taskgroup",
+ "_simduid_"
};
int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash,
int_cst_hash_eq, NULL);
- int_cst_node = make_node (INTEGER_CST);
+ int_cst_node = make_int_cst (1, 1);
cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash,
cl_option_hash_eq, NULL);
return DECL_WITH_VIS_CHECK (decl)->decl_with_vis.assembler_name;
}
-/* Compare ASMNAME with the DECL_ASSEMBLER_NAME of DECL. */
-
-bool
-decl_assembler_name_equal (tree decl, const_tree asmname)
-{
- tree decl_asmname = DECL_ASSEMBLER_NAME (decl);
- const char *decl_str;
- const char *asmname_str;
- bool test = false;
-
- if (decl_asmname == asmname)
- return true;
-
- decl_str = IDENTIFIER_POINTER (decl_asmname);
- asmname_str = IDENTIFIER_POINTER (asmname);
-
-
- /* If the target assembler name was set by the user, things are trickier.
- We have a leading '*' to begin with. After that, it's arguable what
- is the correct thing to do with -fleading-underscore. Arguably, we've
- historically been doing the wrong thing in assemble_alias by always
- printing the leading underscore. Since we're not changing that, make
- sure user_label_prefix follows the '*' before matching. */
- if (decl_str[0] == '*')
- {
- size_t ulp_len = strlen (user_label_prefix);
-
- decl_str ++;
-
- if (ulp_len == 0)
- test = true;
- else if (strncmp (decl_str, user_label_prefix, ulp_len) == 0)
- decl_str += ulp_len, test=true;
- else
- decl_str --;
- }
- if (asmname_str[0] == '*')
- {
- size_t ulp_len = strlen (user_label_prefix);
-
- asmname_str ++;
-
- if (ulp_len == 0)
- test = true;
- else if (strncmp (asmname_str, user_label_prefix, ulp_len) == 0)
- asmname_str += ulp_len, test=true;
- else
- asmname_str --;
- }
-
- if (!test)
- return false;
- return strcmp (decl_str, asmname_str) == 0;
-}
-
-/* Hash asmnames ignoring the user specified marks. */
-
-hashval_t
-decl_assembler_name_hash (const_tree asmname)
-{
- if (IDENTIFIER_POINTER (asmname)[0] == '*')
- {
- const char *decl_str = IDENTIFIER_POINTER (asmname) + 1;
- size_t ulp_len = strlen (user_label_prefix);
-
- if (ulp_len == 0)
- ;
- else if (strncmp (decl_str, user_label_prefix, ulp_len) == 0)
- decl_str += ulp_len;
-
- return htab_hash_string (decl_str);
- }
-
- return htab_hash_string (IDENTIFIER_POINTER (asmname));
-}
-
/* Compute the number of bytes occupied by a tree with code CODE.
This function cannot be used for nodes that have variable sizes,
- including TREE_VEC, STRING_CST, and CALL_EXPR. */
+ including TREE_VEC, INTEGER_CST, STRING_CST, and CALL_EXPR. */
size_t
tree_code_size (enum tree_code code)
{
case tcc_constant: /* a constant */
switch (code)
{
- case INTEGER_CST: return sizeof (struct tree_int_cst);
+ case INTEGER_CST: gcc_unreachable ();
case REAL_CST: return sizeof (struct tree_real_cst);
case FIXED_CST: return sizeof (struct tree_fixed_cst);
case COMPLEX_CST: return sizeof (struct tree_complex);
const enum tree_code code = TREE_CODE (node);
switch (code)
{
+ case INTEGER_CST:
+ return (sizeof (struct tree_int_cst)
+ + (TREE_INT_CST_EXT_NUNITS (node) - 1) * sizeof (HOST_WIDE_INT));
+
case TREE_BINFO:
return (offsetof (struct tree_binfo, base_binfos)
- + VEC_embedded_size (tree, BINFO_N_BASE_BINFOS (node)));
+ + vec<tree, va_gc>
+ ::embedded_size (BINFO_N_BASE_BINFOS (node)));
case TREE_VEC:
return (sizeof (struct tree_vec)
/* Return a newly allocated node of code CODE. For decl and type
nodes, some other fields are initialized. The rest of the node is
- initialized to zero. This function cannot be used for TREE_VEC or
- OMP_CLAUSE nodes, which is enforced by asserts in tree_code_size.
+ initialized to zero. This function cannot be used for TREE_VEC,
+ INTEGER_CST or OMP_CLAUSE nodes, which is enforced by asserts in
+ tree_code_size.
Achoo! I got a code in the node. */
record_node_allocation_statistics (code, length);
- t = ggc_alloc_zone_cleared_tree_node_stat (
- (code == IDENTIFIER_NODE) ? &tree_id_zone : &tree_zone,
- length PASS_MEM_STAT);
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
TREE_SET_CODE (t, code);
switch (type)
length = tree_size (node);
record_node_allocation_statistics (code, length);
- t = ggc_alloc_zone_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_tree_node_stat (length PASS_MEM_STAT);
memcpy (t, node, length);
if (CODE_CONTAINS_STRUCT (code, TS_COMMON))
SET_DECL_VALUE_EXPR (t, DECL_VALUE_EXPR (node));
DECL_HAS_VALUE_EXPR_P (t) = 1;
}
+ /* DECL_DEBUG_EXPR is copied explicitly by callers. */
+ if (TREE_CODE (node) == VAR_DECL)
+ DECL_HAS_DEBUG_EXPR_P (t) = 0;
if (TREE_CODE (node) == VAR_DECL && DECL_HAS_INIT_PRIORITY_P (node))
{
SET_DECL_INIT_PRIORITY (t, DECL_INIT_PRIORITY (node));
TYPE_SYMTAB_ADDRESS (t) = 0;
/* Do not copy the values cache. */
- if (TYPE_CACHED_VALUES_P(t))
+ if (TYPE_CACHED_VALUES_P (t))
{
TYPE_CACHED_VALUES_P (t) = 0;
TYPE_CACHED_VALUES (t) = NULL_TREE;
}
\f
+/* Return the value that TREE_INT_CST_EXT_NUNITS should have for an
+ INTEGER_CST with value CST and type TYPE. */
+
+static unsigned int
+get_int_cst_ext_nunits (tree type, const wide_int &cst)
+{
+ gcc_checking_assert (cst.get_precision () == TYPE_PRECISION (type));
+ /* We need an extra zero HWI if CST is an unsigned integer with its
+ upper bit set, and if CST occupies a whole number of HWIs. */
+ if (TYPE_UNSIGNED (type)
+ && wi::neg_p (cst)
+ && (cst.get_precision () % HOST_BITS_PER_WIDE_INT) == 0)
+ return cst.get_precision () / HOST_BITS_PER_WIDE_INT + 1;
+ /* Otherwise the canonical wide_int length already suffices. */
+ return cst.get_len ();
+}
+
+/* Return a new INTEGER_CST with value CST and type TYPE. */
+
+static tree
+build_new_int_cst (tree type, const wide_int &cst)
+{
+ unsigned int len = cst.get_len ();
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
+ tree nt = make_int_cst (len, ext_len);
+
+ /* An extra HWI was requested (unsigned value with its top bit set,
+ see get_int_cst_ext_nunits): make the new top element zero and
+ fill any gap below it with -1 elements. */
+ if (len < ext_len)
+ {
+ --ext_len;
+ TREE_INT_CST_ELT (nt, ext_len) = 0;
+ for (unsigned int i = len; i < ext_len; ++i)
+ TREE_INT_CST_ELT (nt, i) = -1;
+ }
+ /* Otherwise, if the precision of an unsigned value is not a whole
+ number of HWIs, zero-extend the stored top element so the upper
+ bits are well defined. */
+ else if (TYPE_UNSIGNED (type)
+ && cst.get_precision () < len * HOST_BITS_PER_WIDE_INT)
+ {
+ len--;
+ TREE_INT_CST_ELT (nt, len)
+ = zext_hwi (cst.elt (len),
+ cst.get_precision () % HOST_BITS_PER_WIDE_INT);
+ }
+
+ /* Copy the remaining elements verbatim from CST. */
+ for (unsigned int i = 0; i < len; i++)
+ TREE_INT_CST_ELT (nt, i) = cst.elt (i);
+ TREE_TYPE (nt) = type;
+ return nt;
+}
+
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
tree
if (!type)
type = integer_type_node;
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
+}
+
+/* Create an INT_CST node with value CST zero extended to TYPE. */
+
+tree
+build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
+{
+ return wide_int_to_tree (type, wi::uhwi (cst, TYPE_PRECISION (type)));
+}
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
build_int_cst_type (tree type, HOST_WIDE_INT low)
{
gcc_assert (type);
-
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
}
/* Constructs tree in type TYPE from with value given by CST. Signedness
tree
double_int_to_tree (tree type, double_int cst)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return build_int_cst_wide (type, cst.low, cst.high);
-}
-
-/* Returns true if CST fits into range of TYPE. Signedness of CST is assumed
- to be the same as the signedness of TYPE. */
-
-bool
-double_int_fits_to_tree_p (const_tree type, double_int cst)
-{
- /* Size types *are* sign extended. */
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- double_int ext
- = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return cst == ext;
+ return wide_int_to_tree (type, widest_int::from (cst, TYPE_SIGN (type)));
}
-/* We force the double_int CST to the range of the type TYPE by sign or
+/* We force the wide_int CST to the range of the type TYPE by sign or
zero extending it. OVERFLOWABLE indicates if we are interested in
overflow of the value, when >0 we are only interested in signed
overflow, for <0 we are interested in any overflow. OVERFLOWED
OVERFLOWED is nonzero,
or OVERFLOWABLE is >0 and signed overflow occurs
or OVERFLOWABLE is <0 and any overflow occurs
- We return a new tree node for the extended double_int. The node
+ We return a new tree node for the extended wide_int. The node
is shared if no overflow flags are set. */
tree
-force_fit_type_double (tree type, double_int cst, int overflowable,
- bool overflowed)
+force_fit_type (tree type, const wide_int_ref &cst,
+ int overflowable, bool overflowed)
{
- bool sign_extended_type;
-
- /* Size types *are* sign extended. */
- sign_extended_type = !TYPE_UNSIGNED (type);
+ signop sign = TYPE_SIGN (type);
/* If we need to set overflow flags, return a new unshared node. */
- if (overflowed || !double_int_fits_to_tree_p(type, cst))
+ if (overflowed || !wi::fits_to_tree_p (cst, type))
{
if (overflowed
|| overflowable < 0
- || (overflowable > 0 && sign_extended_type))
+ || (overflowable > 0 && sign == SIGNED))
{
- tree t = make_node (INTEGER_CST);
- TREE_INT_CST (t) = cst.ext (TYPE_PRECISION (type),
- !sign_extended_type);
- TREE_TYPE (t) = type;
+ wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign);
+ tree t = build_new_int_cst (type, tmp);
TREE_OVERFLOW (t) = 1;
return t;
}
}
/* Else build a shared node. */
- return double_int_to_tree (type, cst);
+ return wide_int_to_tree (type, cst);
}
/* These are the hash table functions for the hash table of INTEGER_CST
int_cst_hash_hash (const void *x)
{
const_tree const t = (const_tree) x;
+ hashval_t code = htab_hash_pointer (TREE_TYPE (t));
+ int i;
- return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t)
- ^ htab_hash_pointer (TREE_TYPE (t)));
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ code ^= TREE_INT_CST_ELT (t, i);
+
+ return code;
}
/* Return nonzero if the value represented by *X (an INTEGER_CST tree node)
const_tree const xt = (const_tree) x;
const_tree const yt = (const_tree) y;
- return (TREE_TYPE (xt) == TREE_TYPE (yt)
- && TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt)
- && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt));
+ if (TREE_TYPE (xt) != TREE_TYPE (yt)
+ || TREE_INT_CST_NUNITS (xt) != TREE_INT_CST_NUNITS (yt)
+ || TREE_INT_CST_EXT_NUNITS (xt) != TREE_INT_CST_EXT_NUNITS (yt))
+ return false;
+
+ for (int i = 0; i < TREE_INT_CST_NUNITS (xt); i++)
+ if (TREE_INT_CST_ELT (xt, i) != TREE_INT_CST_ELT (yt, i))
+ return false;
+
+ return true;
}
-/* Create an INT_CST node of TYPE and value HI:LOW.
+/* Create an INT_CST node of TYPE and value CST.
The returned node is always shared. For small integers we use a
- per-type vector cache, for larger ones we use a single hash table. */
+ per-type vector cache, for larger ones we use a single hash table.
+ The value is extended from its precision according to the sign of
+ the type to be a multiple of HOST_BITS_PER_WIDE_INT. This defines
+ the upper bits and ensures that hashing and value equality based
+ upon the underlying HOST_WIDE_INTs works without masking. */
tree
-build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
+wide_int_to_tree (tree type, const wide_int_ref &pcst)
{
tree t;
int ix = -1;
int limit = 0;
gcc_assert (type);
+ unsigned int prec = TYPE_PRECISION (type);
+ signop sgn = TYPE_SIGN (type);
+
+ /* Verify that everything is canonical. */
+ int l = pcst.get_len ();
+ if (l > 1)
+ {
+ if (pcst.elt (l - 1) == 0)
+ gcc_assert (pcst.elt (l - 2) < 0);
+ if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1)
+ gcc_assert (pcst.elt (l - 2) >= 0);
+ }
+
+ wide_int cst = wide_int::from (pcst, prec, sgn);
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
switch (TREE_CODE (type))
{
case NULLPTR_TYPE:
- gcc_assert (hi == 0 && low == 0);
+ gcc_assert (cst == 0);
/* Fallthru. */
case POINTER_TYPE:
case REFERENCE_TYPE:
- /* Cache NULL pointer. */
- if (!hi && !low)
+ case POINTER_BOUNDS_TYPE:
+ /* Cache NULL pointer and zero bounds. */
+ if (cst == 0)
{
limit = 1;
ix = 0;
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (!hi && low < 2)
- ix = low;
+ if (wi::leu_p (cst, 1))
+ ix = cst.to_uhwi ();
break;
case INTEGER_TYPE:
case OFFSET_TYPE:
- if (TYPE_UNSIGNED (type))
+ if (TYPE_SIGN (type) == UNSIGNED)
{
/* Cache 0..N */
limit = INTEGER_SHARE_LIMIT;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low;
+
+ /* This is a little hokey, but if the prec is smaller than
+ what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ obvious test will not get the correct answer. */
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (cst.to_uhwi () < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ ix = cst.to_uhwi ();
+ }
+ else if (wi::ltu_p (cst, INTEGER_SHARE_LIMIT))
+ ix = cst.to_uhwi ();
}
else
{
/* Cache -1..N */
limit = INTEGER_SHARE_LIMIT + 1;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low + 1;
- else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1)
+
+ if (cst == -1)
ix = 0;
+ else if (!wi::neg_p (cst))
+ {
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (cst.to_shwi () < INTEGER_SHARE_LIMIT)
+ ix = cst.to_shwi () + 1;
+ }
+ else if (wi::lts_p (cst, INTEGER_SHARE_LIMIT))
+ ix = cst.to_shwi () + 1;
+ }
}
break;
gcc_unreachable ();
}
- if (ix >= 0)
+ if (ext_len == 1)
{
- /* Look for it in the type's vector of small shared ints. */
- if (!TYPE_CACHED_VALUES_P (type))
+ /* We just need to store a single HOST_WIDE_INT. */
+ HOST_WIDE_INT hwi;
+ if (TYPE_UNSIGNED (type))
+ hwi = cst.to_uhwi ();
+ else
+ hwi = cst.to_shwi ();
+ if (ix >= 0)
{
- TYPE_CACHED_VALUES_P (type) = 1;
- TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
- }
+ /* Look for it in the type's vector of small shared ints. */
+ if (!TYPE_CACHED_VALUES_P (type))
+ {
+ TYPE_CACHED_VALUES_P (type) = 1;
+ TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
+ }
- t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
- if (t)
- {
- /* Make sure no one is clobbering the shared constant. */
- gcc_assert (TREE_TYPE (t) == type);
- gcc_assert (TREE_INT_CST_LOW (t) == low);
- gcc_assert (TREE_INT_CST_HIGH (t) == hi);
+ t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
+ if (t)
+ /* Make sure no one is clobbering the shared constant. */
+ gcc_assert (TREE_TYPE (t) == type
+ && TREE_INT_CST_NUNITS (t) == 1
+ && TREE_INT_CST_EXT_NUNITS (t) == 1
+ && TREE_INT_CST_ELT (t, 0) == hwi);
+ else
+ {
+ /* Create a new shared int. */
+ t = build_new_int_cst (type, cst);
+ TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ }
}
else
{
- /* Create a new shared int. */
- t = make_node (INTEGER_CST);
+ /* Use the cache of larger shared ints, using int_cst_node as
+ a temporary. */
+ void **slot;
- TREE_INT_CST_LOW (t) = low;
- TREE_INT_CST_HIGH (t) = hi;
- TREE_TYPE (t) = type;
+ TREE_INT_CST_ELT (int_cst_node, 0) = hwi;
+ TREE_TYPE (int_cst_node) = type;
- TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ t = (tree) *slot;
+ if (!t)
+ {
+ /* Insert this one into the hash table. */
+ t = int_cst_node;
+ *slot = t;
+ /* Make a new node for next time round. */
+ int_cst_node = make_int_cst (1, 1);
+ }
}
}
else
{
- /* Use the cache of larger shared ints. */
+ /* The value either hashes properly or we drop it on the floor
+ for the gc to take care of. There will not be enough of them
+ to worry about. */
void **slot;
- TREE_INT_CST_LOW (int_cst_node) = low;
- TREE_INT_CST_HIGH (int_cst_node) = hi;
- TREE_TYPE (int_cst_node) = type;
-
- slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ tree nt = build_new_int_cst (type, cst);
+ slot = htab_find_slot (int_cst_hash_table, nt, INSERT);
t = (tree) *slot;
if (!t)
{
/* Insert this one into the hash table. */
- t = int_cst_node;
+ t = nt;
*slot = t;
- /* Make a new node for next time round. */
- int_cst_node = make_node (INTEGER_CST);
}
}
return t;
}
-/* Builds an integer constant in TYPE such that lowest BITS bits are ones
- and the rest are zeros. */
-
-tree
-build_low_bits_mask (tree type, unsigned bits)
+void
+cache_integer_cst (tree t)
{
- double_int mask;
+ tree type = TREE_TYPE (t);
+ int ix = -1;
+ int limit = 0;
+ int prec = TYPE_PRECISION (type);
- gcc_assert (bits <= TYPE_PRECISION (type));
+ gcc_assert (!TREE_OVERFLOW (t));
+
+ switch (TREE_CODE (type))
+ {
+ case NULLPTR_TYPE:
+ gcc_assert (integer_zerop (t));
+ /* Fallthru. */
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* Cache NULL pointer. */
+ if (integer_zerop (t))
+ {
+ limit = 1;
+ ix = 0;
+ }
+ break;
+
+ case BOOLEAN_TYPE:
+ /* Cache false or true. */
+ limit = 2;
+ if (wi::ltu_p (t, 2))
+ ix = TREE_INT_CST_ELT (t, 0);
+ break;
+
+ case INTEGER_TYPE:
+ case OFFSET_TYPE:
+ if (TYPE_UNSIGNED (type))
+ {
+ /* Cache 0..N */
+ limit = INTEGER_SHARE_LIMIT;
- if (bits == TYPE_PRECISION (type)
- && !TYPE_UNSIGNED (type))
- /* Sign extended all-ones mask. */
- mask = double_int_minus_one;
+ /* This is a little hokey, but if the prec is smaller than
+ what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ obvious test will not get the correct answer. */
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ ix = tree_to_uhwi (t);
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_uhwi (t);
+ }
+ else
+ {
+ /* Cache -1..N */
+ limit = INTEGER_SHARE_LIMIT + 1;
+
+ if (integer_minus_onep (t))
+ ix = 0;
+ else if (!wi::neg_p (t))
+ {
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+ ix = tree_to_shwi (t) + 1;
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_shwi (t) + 1;
+ }
+ }
+ break;
+
+ case ENUMERAL_TYPE:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (ix >= 0)
+ {
+ /* Look for it in the type's vector of small shared ints. */
+ if (!TYPE_CACHED_VALUES_P (type))
+ {
+ TYPE_CACHED_VALUES_P (type) = 1;
+ TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
+ }
+
+ gcc_assert (TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) == NULL_TREE);
+ TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ }
else
- mask = double_int::mask (bits);
+ {
+ /* Use the cache of larger shared ints. */
+ void **slot;
- return build_int_cst_wide (type, mask.low, mask.high);
+ slot = htab_find_slot (int_cst_hash_table, t, INSERT);
+ /* If there is already an entry for the number verify it's the
+ same. */
+ if (*slot)
+ gcc_assert (wi::eq_p (tree (*slot), t));
+ else
+ /* Otherwise insert this one into the hash table. */
+ *slot = t;
+ }
}
-/* Checks that X is integer constant that can be expressed in (unsigned)
- HOST_WIDE_INT without loss of precision. */
-bool
-cst_and_fits_in_hwi (const_tree x)
-{
- if (TREE_CODE (x) != INTEGER_CST)
- return false;
+/* Builds an integer constant in TYPE such that lowest BITS bits are ones
+ and the rest are zeros. */
- if (TYPE_PRECISION (TREE_TYPE (x)) > HOST_BITS_PER_WIDE_INT)
- return false;
+tree
+build_low_bits_mask (tree type, unsigned bits)
+{
+ gcc_assert (bits <= TYPE_PRECISION (type));
- return (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ return wide_int_to_tree (type, wi::mask (bits, false,
+ TYPE_PRECISION (type)));
}
/* Build a newly constructed TREE_VEC node of length LEN. */
record_node_allocation_statistics (VECTOR_CST, length);
- t = ggc_alloc_zone_cleared_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
TREE_SET_CODE (t, VECTOR_CST);
TREE_CONSTANT (t) = 1;
are extracted from V, a vector of CONSTRUCTOR_ELT. */
tree
-build_vector_from_ctor (tree type, VEC(constructor_elt,gc) *v)
+build_vector_from_ctor (tree type, vec<constructor_elt, va_gc> *v)
{
tree *vec = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (type));
unsigned HOST_WIDE_INT idx;
}
else
{
- VEC(constructor_elt, gc) *v = VEC_alloc (constructor_elt, gc, nunits);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, nunits);
for (i = 0; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, sc);
return build_constructor (vectype, v);
}
/* Return a new CONSTRUCTOR node whose type is TYPE and whose values
- are in the VEC pointed to by VALS. */
+ are in the vec pointed to by VALS. */
tree
-build_constructor (tree type, VEC(constructor_elt,gc) *vals)
+build_constructor (tree type, vec<constructor_elt, va_gc> *vals)
{
tree c = make_node (CONSTRUCTOR);
unsigned int i;
TREE_TYPE (c) = type;
CONSTRUCTOR_ELTS (c) = vals;
- FOR_EACH_VEC_ELT (constructor_elt, vals, i, elt)
+ FOR_EACH_VEC_SAFE_ELT (vals, i, elt)
{
/* Mostly ctors will have elts that don't have side-effects, so
the usual case is to scan all the elements. Hence a single
tree
build_constructor_single (tree type, tree index, tree value)
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
constructor_elt elt = {index, value};
- v = VEC_alloc (constructor_elt, gc, 1);
- VEC_quick_push (constructor_elt, v, elt);
+ vec_alloc (v, 1);
+ v->quick_push (elt);
return build_constructor (type, v);
}
build_constructor_from_list (tree type, tree vals)
{
tree t;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (vals)
{
- v = VEC_alloc (constructor_elt, gc, list_length (vals));
+ vec_alloc (v, list_length (vals));
for (t = vals; t; t = TREE_CHAIN (t))
CONSTRUCTOR_APPEND_ELT (v, TREE_PURPOSE (t), TREE_VALUE (t));
}
return build_constructor (type, v);
}
+/* Return a new CONSTRUCTOR node whose type is TYPE. NELTS is the number
+ of elements, provided as index/value pairs. */
+
+tree
+build_constructor_va (tree type, int nelts, ...)
+{
+ vec<constructor_elt, va_gc> *v = NULL;
+ va_list p;
+
+ va_start (p, nelts);
+ vec_alloc (v, nelts);
+ /* Each element is passed as two tree arguments: the index followed
+ by the value. */
+ while (nelts--)
+ {
+ tree index = va_arg (p, tree);
+ tree value = va_arg (p, tree);
+ CONSTRUCTOR_APPEND_ELT (v, index, value);
+ }
+ va_end (p);
+ return build_constructor (type, v);
+}
+
/* Return a new FIXED_CST node whose type is TYPE and value is F. */
tree
memset (&d, 0, sizeof d);
real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode,
- TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i),
- TYPE_UNSIGNED (TREE_TYPE (i)));
+ wide_int (i), TYPE_SIGN (TREE_TYPE (i)));
return d;
}
case FIXED_POINT_TYPE:
/* We can only generate 1 for accum types. */
gcc_assert (ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)));
- return build_fixed (type, FCONST1(TYPE_MODE (type)));
+ return build_fixed (type, FCONST1 (TYPE_MODE (type)));
case VECTOR_TYPE:
{
}
}
+/* Return an integer of type TYPE containing all 1's in as much precision as
+ it contains, or a complex or vector whose subparts are such integers. */
+
+tree
+build_all_ones_cst (tree type)
+{
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ /* Both the real and the imaginary part are all ones. */
+ tree scalar = build_all_ones_cst (TREE_TYPE (type));
+ return build_complex (type, scalar, scalar);
+ }
+ else
+ /* For the remaining types a -1 constant is the all-ones value. */
+ return build_minus_one_cst (type);
+}
+
+/* Return a constant of arithmetic type TYPE which is the
+ opposite of the multiplicative identity of the set TYPE. */
+
+tree
+build_minus_one_cst (tree type)
+{
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
+ case POINTER_TYPE: case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ return build_int_cst (type, -1);
+
+ case REAL_TYPE:
+ return build_real (type, dconstm1);
+
+ case FIXED_POINT_TYPE:
+ /* We can only generate 1 for accum types. */
+ gcc_assert (ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)));
+ return build_fixed (type, fixed_from_double_int (double_int_minus_one,
+ TYPE_MODE (type)));
+
+ case VECTOR_TYPE:
+ {
+ /* Splat the scalar -1 across all elements of the vector. */
+ tree scalar = build_minus_one_cst (TREE_TYPE (type));
+
+ return build_vector_from_val (type, scalar);
+ }
+
+ case COMPLEX_TYPE:
+ /* -1 + 0i. */
+ return build_complex (type,
+ build_minus_one_cst (TREE_TYPE (type)),
+ build_zero_cst (TREE_TYPE (type)));
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Build 0 constant of type TYPE. This is used by constructor folding
and thus the constant should be represented in memory by
zero(es). */
{
tree t;
size_t length = (offsetof (struct tree_binfo, base_binfos)
- + VEC_embedded_size (tree, base_binfos));
+ + vec<tree, va_gc>::embedded_size (base_binfos));
record_node_allocation_statistics (TREE_BINFO, length);
- t = ggc_alloc_zone_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_tree_node_stat (length PASS_MEM_STAT);
memset (t, 0, offsetof (struct tree_binfo, base_binfos));
TREE_SET_CODE (t, TREE_BINFO);
- VEC_embedded_init (tree, BINFO_BASE_BINFOS (t), base_binfos);
+ BINFO_BASE_BINFOS (t)->embedded_init (base_binfos);
return t;
}
return t;
}
+/* Build a newly constructed INTEGER_CST node. LEN and EXT_LEN are the
+ values of TREE_INT_CST_NUNITS and TREE_INT_CST_EXT_NUNITS respectively.
+ The latter determines the length of the HOST_WIDE_INT vector. */
+
+tree
+make_int_cst_stat (int len, int ext_len MEM_STAT_DECL)
+{
+ tree t;
+ /* The trailing vector holds HOST_WIDE_INTs, so size it by
+ sizeof (HOST_WIDE_INT) -- not sizeof (tree) -- to agree with
+ tree_size's INTEGER_CST case. On hosts where pointers are
+ narrower than HOST_WIDE_INT the latter would under-allocate. */
+ int length = ((ext_len - 1) * sizeof (HOST_WIDE_INT)
+ + sizeof (struct tree_int_cst));
+
+ gcc_assert (len);
+ record_node_allocation_statistics (INTEGER_CST, length);
+
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
+
+ TREE_SET_CODE (t, INTEGER_CST);
+ TREE_INT_CST_NUNITS (t) = len;
+ TREE_INT_CST_EXT_NUNITS (t) = ext_len;
+
+ /* An INTEGER_CST is always constant. */
+ TREE_CONSTANT (t) = 1;
+
+ return t;
+}
+
/* Build a newly constructed TREE_VEC node of length LEN. */
tree
record_node_allocation_statistics (TREE_VEC, length);
- t = ggc_alloc_zone_cleared_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
TREE_SET_CODE (t, TREE_VEC);
TREE_VEC_LENGTH (t) = len;
return t;
}
+
+/* Grow a TREE_VEC node to new length LEN. */
+
+tree
+grow_tree_vec_stat (tree v, int len MEM_STAT_DECL)
+{
+ gcc_assert (TREE_CODE (v) == TREE_VEC);
+
+ int oldlen = TREE_VEC_LENGTH (v);
+ /* Only growing is supported; shrinking would strand allocation. */
+ gcc_assert (len > oldlen);
+
+ int oldlength = (oldlen - 1) * sizeof (tree) + sizeof (struct tree_vec);
+ int length = (len - 1) * sizeof (tree) + sizeof (struct tree_vec);
+
+ /* Account only for the additional bytes. */
+ record_node_allocation_statistics (TREE_VEC, length - oldlength);
+
+ /* NOTE(review): ggc_realloc_stat presumably leaves the newly added
+ elements uninitialized; callers are expected to fill them -- confirm. */
+ v = (tree) ggc_realloc_stat (v, length PASS_MEM_STAT);
+
+ TREE_VEC_LENGTH (v) = len;
+
+ return v;
+}
\f
/* Return 1 if EXPR is the integer constant zero or a complex constant
of zero. */
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 0
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (expr, 0);
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 1
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (wi::to_widest (expr), 1);
case COMPLEX_CST:
return (integer_onep (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
}
/* Return 1 if EXPR is an integer containing all 1's in as much precision as
- it contains. Likewise for the corresponding complex constant. */
+ it contains, or a complex or vector whose subparts are such integers. */
int
integer_all_onesp (const_tree expr)
{
- int prec;
- int uns;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
&& integer_all_onesp (TREE_REALPART (expr))
- && integer_zerop (TREE_IMAGPART (expr)))
+ && integer_all_onesp (TREE_IMAGPART (expr)))
return 1;
else if (TREE_CODE (expr) == VECTOR_CST)
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- uns = TYPE_UNSIGNED (TREE_TYPE (expr));
- if (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == -1)
- return 1;
- if (!uns)
- return 0;
-
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- if (prec >= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT high_value;
- int shift_amount;
+ return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
+}
- shift_amount = prec - HOST_BITS_PER_WIDE_INT;
+/* Return 1 if EXPR is the integer constant minus one. */
- /* Can not handle precisions greater than twice the host int size. */
- gcc_assert (shift_amount <= HOST_BITS_PER_WIDE_INT);
- if (shift_amount == HOST_BITS_PER_WIDE_INT)
- /* Shifting by the host word size is undefined according to the ANSI
- standard, so we must handle this as a special case. */
- high_value = -1;
- else
- high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1;
+int
+integer_minus_onep (const_tree expr)
+{
+ STRIP_NOPS (expr);
- return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == high_value);
- }
+ if (TREE_CODE (expr) == COMPLEX_CST)
+ return (integer_all_onesp (TREE_REALPART (expr))
+ && integer_zerop (TREE_IMAGPART (expr)));
else
- return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
+ return integer_all_onesp (expr);
}
/* Return 1 if EXPR is an integer constant that is a power of 2 (i.e., has only
int
integer_pow2p (const_tree expr)
{
- int prec;
- unsigned HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
- }
-
- if (high == 0 && low == 0)
- return 0;
-
- return ((high == 0 && (low & (low - 1)) == 0)
- || (low == 0 && (high & (high - 1)) == 0));
+ return wi::popcount (expr) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
STRIP_NOPS (expr);
return ((TREE_CODE (expr) == INTEGER_CST
- && (TREE_INT_CST_LOW (expr) != 0
- || TREE_INT_CST_HIGH (expr) != 0))
+ && !wi::eq_p (expr, 0))
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
int
tree_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
- }
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high)
- : exact_log2 (low));
+ return wi::exact_log2 (expr);
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
int
tree_floor_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
+ return wi::floor_log2 (expr);
+}
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. Ignore if type's precision hasn't been set
- since what we are doing is setting it. */
+/* Return number of known trailing zero bits in EXPR, or, if the value of
+ EXPR is known to be zero, the precision of its type. */
- if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
- else
+unsigned int
+tree_ctz (const_tree expr)
+{
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
+ && !POINTER_TYPE_P (TREE_TYPE (expr)))
+ return 0;
+
+ unsigned int ret1, ret2, prec = TYPE_PRECISION (TREE_TYPE (expr));
+ switch (TREE_CODE (expr))
{
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~((HOST_WIDE_INT) (-1) << prec);
+ case INTEGER_CST:
+ ret1 = wi::ctz (expr);
+ return MIN (ret1, prec);
+ case SSA_NAME:
+ ret1 = wi::ctz (get_nonzero_bits (expr));
+ return MIN (ret1, prec);
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (ret1 == 0)
+ return ret1;
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MIN (ret1, ret2);
+ case POINTER_PLUS_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ /* Second operand is sizetype, which could be in theory
+ wider than pointer's precision. Make sure we never
+ return more than prec. */
+ ret2 = MIN (ret2, prec);
+ return MIN (ret1, ret2);
+ case BIT_AND_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MAX (ret1, ret2);
+ case MULT_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MIN (ret1 + ret2, prec);
+ case LSHIFT_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (tree_fits_uhwi_p (TREE_OPERAND (expr, 1))
+ && (tree_to_uhwi (TREE_OPERAND (expr, 1)) < prec))
+ {
+ ret2 = tree_to_uhwi (TREE_OPERAND (expr, 1));
+ return MIN (ret1 + ret2, prec);
+ }
+ return ret1;
+ case RSHIFT_EXPR:
+ if (tree_fits_uhwi_p (TREE_OPERAND (expr, 1))
+ && (tree_to_uhwi (TREE_OPERAND (expr, 1)) < prec))
+ {
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_to_uhwi (TREE_OPERAND (expr, 1));
+ if (ret1 > ret2)
+ return ret1 - ret2;
+ }
+ return 0;
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) == 1)
+ {
+ int l = tree_log2 (TREE_OPERAND (expr, 1));
+ if (l >= 0)
+ {
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = l;
+ if (ret1 > ret2)
+ return ret1 - ret2;
+ }
+ }
+ return 0;
+ CASE_CONVERT:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (ret1 && ret1 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (expr, 0))))
+ ret1 = prec;
+ return MIN (ret1, prec);
+ case SAVE_EXPR:
+ return tree_ctz (TREE_OPERAND (expr, 0));
+ case COND_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 1));
+ if (ret1 == 0)
+ return 0;
+ ret2 = tree_ctz (TREE_OPERAND (expr, 2));
+ return MIN (ret1, ret2);
+ case COMPOUND_EXPR:
+ return tree_ctz (TREE_OPERAND (expr, 1));
+ case ADDR_EXPR:
+ ret1 = get_pointer_alignment (CONST_CAST_TREE (expr));
+ if (ret1 > BITS_PER_UNIT)
+ {
+ ret1 = ctz_hwi (ret1 / BITS_PER_UNIT);
+ return MIN (ret1, prec);
+ }
+ return 0;
+ default:
+ return 0;
}
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high)
- : floor_log2 (low));
}
/* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for
{
STRIP_NOPS (expr);
- return ((TREE_CODE (expr) == REAL_CST
- && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0)
- && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr)))))
- || (TREE_CODE (expr) == COMPLEX_CST
- && real_zerop (TREE_REALPART (expr))
- && real_zerop (TREE_IMAGPART (expr))));
+ switch (TREE_CODE (expr))
+ {
+ case REAL_CST:
+ return REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0)
+ && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr))));
+ case COMPLEX_CST:
+ return real_zerop (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr));
+ case VECTOR_CST:
+ {
+ unsigned i;
+ for (i = 0; i < VECTOR_CST_NELTS (expr); ++i)
+ if (!real_zerop (VECTOR_CST_ELT (expr, i)))
+ return false;
+ return true;
+ }
+ default:
+ return false;
+ }
}
/* Return 1 if EXPR is the real constant one in real or complex form.
{
STRIP_NOPS (expr);
- return ((TREE_CODE (expr) == REAL_CST
- && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1)
- && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr)))))
- || (TREE_CODE (expr) == COMPLEX_CST
- && real_onep (TREE_REALPART (expr))
- && real_zerop (TREE_IMAGPART (expr))));
-}
-
-/* Return 1 if EXPR is the real constant two. Trailing zeroes matter
- for decimal float constants, so don't return 1 for them. */
-
-int
-real_twop (const_tree expr)
-{
- STRIP_NOPS (expr);
-
- return ((TREE_CODE (expr) == REAL_CST
- && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2)
- && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr)))))
- || (TREE_CODE (expr) == COMPLEX_CST
- && real_twop (TREE_REALPART (expr))
- && real_zerop (TREE_IMAGPART (expr))));
+ switch (TREE_CODE (expr))
+ {
+ case REAL_CST:
+ return REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1)
+ && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr))));
+ case COMPLEX_CST:
+ return real_onep (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr));
+ case VECTOR_CST:
+ {
+ unsigned i;
+ for (i = 0; i < VECTOR_CST_NELTS (expr); ++i)
+ if (!real_onep (VECTOR_CST_ELT (expr, i)))
+ return false;
+ return true;
+ }
+ default:
+ return false;
+ }
}
/* Return 1 if EXPR is the real constant minus one. Trailing zeroes
{
STRIP_NOPS (expr);
- return ((TREE_CODE (expr) == REAL_CST
- && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconstm1)
- && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr)))))
- || (TREE_CODE (expr) == COMPLEX_CST
- && real_minus_onep (TREE_REALPART (expr))
- && real_zerop (TREE_IMAGPART (expr))));
+ switch (TREE_CODE (expr))
+ {
+ case REAL_CST:
+ return REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconstm1)
+ && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr))));
+ case COMPLEX_CST:
+ return real_minus_onep (TREE_REALPART (expr))
+ && real_zerop (TREE_IMAGPART (expr));
+ case VECTOR_CST:
+ {
+ unsigned i;
+ for (i = 0; i < VECTOR_CST_NELTS (expr); ++i)
+ if (!real_minus_onep (VECTOR_CST_ELT (expr, i)))
+ return false;
+ return true;
+ }
+ default:
+ return false;
+ }
}
/* Nonzero if EXP is a constant or a cast of a constant. */
/* Return true if ELEM is in V. */
bool
-vec_member (const_tree elem, VEC(tree,gc) *v)
+vec_member (const_tree elem, vec<tree, va_gc> *v)
{
unsigned ix;
tree t;
- FOR_EACH_VEC_ELT (tree, v, ix, t)
+ FOR_EACH_VEC_SAFE_ELT (v, ix, t)
if (elem == t)
return true;
return false;
return len;
}
-/* Returns the number of FIELD_DECLs in TYPE. */
-
-int
-fields_length (const_tree type)
-{
- tree t = TYPE_FIELDS (type);
- int count = 0;
-
- for (; t; t = DECL_CHAIN (t))
- if (TREE_CODE (t) == FIELD_DECL)
- ++count;
-
- return count;
-}
-
/* Returns the first FIELD_DECL in the TYPE_FIELDS of the RECORD_TYPE or
UNION_TYPE TYPE, or NULL_TREE if none. */
/* Build a chain of TREE_LIST nodes from a vector. */
tree
-build_tree_list_vec_stat (const VEC(tree,gc) *vec MEM_STAT_DECL)
+build_tree_list_vec_stat (const vec<tree, va_gc> *vec MEM_STAT_DECL)
{
tree ret = NULL_TREE;
tree *pp = &ret;
unsigned int i;
tree t;
- FOR_EACH_VEC_ELT (tree, vec, i, t)
+ FOR_EACH_VEC_SAFE_ELT (vec, i, t)
{
*pp = build_tree_list_stat (NULL, t PASS_MEM_STAT);
pp = &TREE_CHAIN (*pp);
{
tree node;
- node = ggc_alloc_zone_tree_node_stat (&tree_zone, sizeof (struct tree_list)
- PASS_MEM_STAT);
+ node = ggc_alloc_tree_node_stat (sizeof (struct tree_list) PASS_MEM_STAT);
memset (node, 0, sizeof (struct tree_common));
record_node_allocation_statistics (TREE_LIST, sizeof (struct tree_list));
/* Return the values of the elements of a CONSTRUCTOR as a vector of
trees. */
-VEC(tree,gc) *
+vec<tree, va_gc> *
ctor_to_vec (tree ctor)
{
- VEC(tree, gc) *vec = VEC_alloc (tree, gc, CONSTRUCTOR_NELTS (ctor));
+ vec<tree, va_gc> *vec;
+ vec_alloc (vec, CONSTRUCTOR_NELTS (ctor));
unsigned int ix;
tree val;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (ctor), ix, val)
- VEC_quick_push (tree, vec, val);
+ vec->quick_push (val);
return vec;
}
type = TYPE_MAIN_VARIANT (type);
t = TYPE_SIZE_UNIT (type);
- if (t == 0
- || TREE_CODE (t) != INTEGER_CST
- || TREE_INT_CST_HIGH (t) != 0
- /* If the result would appear negative, it's too big to represent. */
- || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0)
- return -1;
- return TREE_INT_CST_LOW (t);
+ if (t && cst_fits_uhwi_p (t))
+ return TREE_INT_CST_LOW (t);
+ else
+ return -1;
}
/* Return the maximum size of TYPE (in bytes) as a wide integer
{
size_tree = TYPE_ARRAY_MAX_SIZE (type);
- if (size_tree && host_integerp (size_tree, 1))
- size = tree_low_cst (size_tree, 1);
+ if (size_tree && tree_fits_uhwi_p (size_tree))
+ size = tree_to_uhwi (size_tree);
}
/* If we still haven't been able to get a size, see if the language
{
size_tree = lang_hooks.types.max_size (type);
- if (size_tree && host_integerp (size_tree, 1))
- size = tree_low_cst (size_tree, 1);
+ if (size_tree && tree_fits_uhwi_p (size_tree))
+ size = tree_to_uhwi (size_tree);
}
return size;
}
-
-/* Returns a tree for the size of EXP in bytes. */
-
-tree
-tree_expr_size (const_tree exp)
-{
- if (DECL_P (exp)
- && DECL_SIZE_UNIT (exp) != 0)
- return DECL_SIZE_UNIT (exp);
- else
- return size_in_bytes (TREE_TYPE (exp));
-}
\f
/* Return the bit position of FIELD, in bits from the start of the record.
This is a tree of type bitsizetype. */
HOST_WIDE_INT
int_bit_position (const_tree field)
{
- return tree_low_cst (bit_position (field), 0);
+ return tree_to_shwi (bit_position (field));
}
\f
/* Return the byte position of FIELD, in bytes from the start of the record.
HOST_WIDE_INT
int_byte_position (const_tree field)
{
- return tree_low_cst (byte_position (field), 0);
+ return tree_to_shwi (byte_position (field));
}
\f
/* Return the strictest alignment, in bits, that T is known to have. */
return t;
}
-/* Look inside EXPR and into any simple arithmetic operations. Return
- the innermost non-arithmetic node. */
+/* Look inside EXPR into any simple arithmetic operations. Return the
+ outermost non-arithmetic or non-invariant node. */
tree
skip_simple_arithmetic (tree expr)
{
- tree inner;
-
/* We don't care about whether this can be used as an lvalue in this
context. */
while (TREE_CODE (expr) == NON_LVALUE_EXPR)
a constant, it will be more efficient to not make another SAVE_EXPR since
it will allow better simplification and GCSE will be able to merge the
computations if they actually occur. */
- inner = expr;
- while (1)
+ while (true)
{
- if (UNARY_CLASS_P (inner))
- inner = TREE_OPERAND (inner, 0);
- else if (BINARY_CLASS_P (inner))
+ if (UNARY_CLASS_P (expr))
+ expr = TREE_OPERAND (expr, 0);
+ else if (BINARY_CLASS_P (expr))
{
- if (tree_invariant_p (TREE_OPERAND (inner, 1)))
- inner = TREE_OPERAND (inner, 0);
- else if (tree_invariant_p (TREE_OPERAND (inner, 0)))
- inner = TREE_OPERAND (inner, 1);
+ if (tree_invariant_p (TREE_OPERAND (expr, 1)))
+ expr = TREE_OPERAND (expr, 0);
+ else if (tree_invariant_p (TREE_OPERAND (expr, 0)))
+ expr = TREE_OPERAND (expr, 1);
else
break;
}
break;
}
- return inner;
+ return expr;
}
+/* Look inside EXPR into simple arithmetic operations involving constants.
+ Return the outermost non-arithmetic or non-constant node. */
+
+tree
+skip_simple_constant_arithmetic (tree expr)
+{
+ while (TREE_CODE (expr) == NON_LVALUE_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+
+ while (true)
+ {
+ if (UNARY_CLASS_P (expr))
+ expr = TREE_OPERAND (expr, 0);
+ else if (BINARY_CLASS_P (expr))
+ {
+ if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
+ expr = TREE_OPERAND (expr, 0);
+ else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
+ expr = TREE_OPERAND (expr, 1);
+ else
+ break;
+ }
+ else
+ break;
+ }
+
+ return expr;
+}
/* Return which tree structure is used by T. */
switch (TREE_CODE (type))
{
case VOID_TYPE:
+ case POINTER_BOUNDS_TYPE:
case COMPLEX_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
/* Push tree EXP onto vector QUEUE if it is not already present. */
static void
-push_without_duplicates (tree exp, VEC (tree, heap) **queue)
+push_without_duplicates (tree exp, vec<tree> *queue)
{
unsigned int i;
tree iter;
- FOR_EACH_VEC_ELT (tree, *queue, i, iter)
+ FOR_EACH_VEC_ELT (*queue, i, iter)
if (simple_cst_equal (iter, exp) == 1)
break;
if (!iter)
- VEC_safe_push (tree, heap, *queue, exp);
+ queue->safe_push (exp);
}
/* Given a tree EXP, find all occurrences of references to fields
argument list. */
void
-find_placeholder_in_expr (tree exp, VEC (tree, heap) **refs)
+find_placeholder_in_expr (tree exp, vec<tree> *refs)
{
enum tree_code code = TREE_CODE (exp);
tree inner;
return new_tree;
}
\f
+
+/* Subroutine of stabilize_reference; this is called for subtrees of
+ references. Any expression with side-effects must be put in a SAVE_EXPR
+ to ensure that it is only evaluated once.
+
+ We don't put SAVE_EXPR nodes around everything, because assigning very
+ simple expressions to temporaries causes us to miss good opportunities
+ for optimizations. Among other things, the opportunity to fold in the
+ addition of a constant into an addressing mode often gets lost, e.g.
+ "y[i+1] += x;". In general, we take the approach that we should not make
+ an assignment unless we are forced into it - i.e., that any non-side effect
+ operator should be allowed, and that cse should take care of coalescing
+ multiple utterances of the same expression should that prove fruitful. */
+
+static tree
+stabilize_reference_1 (tree e)
+{
+ tree result;
+ enum tree_code code = TREE_CODE (e);
+
+ /* We cannot ignore const expressions because it might be a reference
+ to a const array but whose index contains side-effects. But we can
+ ignore things that are actual constant or that already have been
+ handled by this function. */
+
+ if (tree_invariant_p (e))
+ return e;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_exceptional:
+ case tcc_type:
+ case tcc_declaration:
+ case tcc_comparison:
+ case tcc_statement:
+ case tcc_expression:
+ case tcc_reference:
+ case tcc_vl_exp:
+ /* If the expression has side-effects, then encase it in a SAVE_EXPR
+ so that it will only be evaluated once. */
+ /* The reference (r) and comparison (<) classes could be handled as
+ below, but it is generally faster to only evaluate them once. */
+ if (TREE_SIDE_EFFECTS (e))
+ return save_expr (e);
+ return e;
+
+ case tcc_constant:
+ /* Constants need no processing. In fact, we should never reach
+ here. */
+ return e;
+
+ case tcc_binary:
+ /* Division is slow and tends to be compiled with jumps,
+ especially the division by powers of 2 that is often
+ found inside of an array reference. So do it just once. */
+ if (code == TRUNC_DIV_EXPR || code == TRUNC_MOD_EXPR
+ || code == FLOOR_DIV_EXPR || code == FLOOR_MOD_EXPR
+ || code == CEIL_DIV_EXPR || code == CEIL_MOD_EXPR
+ || code == ROUND_DIV_EXPR || code == ROUND_MOD_EXPR)
+ return save_expr (e);
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)),
+ stabilize_reference_1 (TREE_OPERAND (e, 1)));
+ break;
+
+ case tcc_unary:
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (e);
+ TREE_READONLY (result) = TREE_READONLY (e);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
+
+ return result;
+}
+
/* Stabilize a reference so that we can use it any number of times
without causing its operands to be evaluated more than once.
Returns the stabilized reference. This works by means of save_expr,
return result;
}
-
-/* Subroutine of stabilize_reference; this is called for subtrees of
- references. Any expression with side-effects must be put in a SAVE_EXPR
- to ensure that it is only evaluated once.
-
- We don't put SAVE_EXPR nodes around everything, because assigning very
- simple expressions to temporaries causes us to miss good opportunities
- for optimizations. Among other things, the opportunity to fold in the
- addition of a constant into an addressing mode often gets lost, e.g.
- "y[i+1] += x;". In general, we take the approach that we should not make
- an assignment unless we are forced into it - i.e., that any non-side effect
- operator should be allowed, and that cse should take care of coalescing
- multiple utterances of the same expression should that prove fruitful. */
-
-tree
-stabilize_reference_1 (tree e)
-{
- tree result;
- enum tree_code code = TREE_CODE (e);
-
- /* We cannot ignore const expressions because it might be a reference
- to a const array but whose index contains side-effects. But we can
- ignore things that are actual constant or that already have been
- handled by this function. */
-
- if (tree_invariant_p (e))
- return e;
-
- switch (TREE_CODE_CLASS (code))
- {
- case tcc_exceptional:
- case tcc_type:
- case tcc_declaration:
- case tcc_comparison:
- case tcc_statement:
- case tcc_expression:
- case tcc_reference:
- case tcc_vl_exp:
- /* If the expression has side-effects, then encase it in a SAVE_EXPR
- so that it will only be evaluated once. */
- /* The reference (r) and comparison (<) classes could be handled as
- below, but it is generally faster to only evaluate them once. */
- if (TREE_SIDE_EFFECTS (e))
- return save_expr (e);
- return e;
-
- case tcc_constant:
- /* Constants need no processing. In fact, we should never reach
- here. */
- return e;
-
- case tcc_binary:
- /* Division is slow and tends to be compiled with jumps,
- especially the division by powers of 2 that is often
- found inside of an array reference. So do it just once. */
- if (code == TRUNC_DIV_EXPR || code == TRUNC_MOD_EXPR
- || code == FLOOR_DIV_EXPR || code == FLOOR_MOD_EXPR
- || code == CEIL_DIV_EXPR || code == CEIL_MOD_EXPR
- || code == ROUND_DIV_EXPR || code == ROUND_MOD_EXPR)
- return save_expr (e);
- /* Recursively stabilize each operand. */
- result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)),
- stabilize_reference_1 (TREE_OPERAND (e, 1)));
- break;
-
- case tcc_unary:
- /* Recursively stabilize each operand. */
- result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)));
- break;
-
- default:
- gcc_unreachable ();
- }
-
- TREE_TYPE (result) = TREE_TYPE (e);
- TREE_READONLY (result) = TREE_READONLY (e);
- TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e);
- TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
-
- return result;
-}
\f
/* Low-level constructors for expressions. */
gcc_assert (TREE_CODE_LENGTH (code) == 1);
- t = ggc_alloc_zone_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_tree_node_stat (length PASS_MEM_STAT);
memset (t, 0, sizeof (struct tree_common));
read_only = 1;
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
TREE_READONLY (t) = read_only;
TREE_CONSTANT (t) = constant;
else
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
if (code == COND_EXPR)
TREE_READONLY (t) = read_only;
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
- PROCESS_ARG(3);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
+ PROCESS_ARG (3);
TREE_SIDE_EFFECTS (t) = side_effects;
TREE_THIS_VOLATILE (t)
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
- PROCESS_ARG(3);
- PROCESS_ARG(4);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
+ PROCESS_ARG (3);
+ PROCESS_ARG (4);
TREE_SIDE_EFFECTS (t) = side_effects;
TREE_THIS_VOLATILE (t)
/* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T. */
-double_int
+offset_int
mem_ref_offset (const_tree t)
{
- tree toff = TREE_OPERAND (t, 1);
- return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff)));
-}
-
-/* Return the pointer-type relevant for TBAA purposes from the
- gimple memory reference tree T. This is the type to be used for
- the offset operand of MEM_REF or TARGET_MEM_REF replacements of T. */
-
-tree
-reference_alias_ptr_type (const_tree t)
-{
- const_tree base = t;
- while (handled_component_p (base))
- base = TREE_OPERAND (base, 0);
- if (TREE_CODE (base) == MEM_REF)
- return TREE_TYPE (TREE_OPERAND (base, 1));
- else if (TREE_CODE (base) == TARGET_MEM_REF)
- return TREE_TYPE (TMR_OFFSET (base));
- else
- return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (base)));
+ return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
}
/* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
}
/* Similar to build_nt, but for creating a CALL_EXPR object with a
- tree VEC. */
+ tree vec. */
tree
-build_nt_call_vec (tree fn, VEC(tree,gc) *args)
+build_nt_call_vec (tree fn, vec<tree, va_gc> *args)
{
tree ret, t;
unsigned int ix;
- ret = build_vl_exp (CALL_EXPR, VEC_length (tree, args) + 3);
+ ret = build_vl_exp (CALL_EXPR, vec_safe_length (args) + 3);
CALL_EXPR_FN (ret) = fn;
CALL_EXPR_STATIC_CHAIN (ret) = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, args, ix, t)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, t)
CALL_EXPR_ARG (ret, ix) = t;
return ret;
}
return decl;
}
-VEC(tree,gc) *all_translation_units;
+vec<tree, va_gc> *all_translation_units;
/* Builds a new translation-unit decl with name NAME, queues it in the
global list of translation-unit decls and returns it. */
tree tu = build_decl (UNKNOWN_LOCATION, TRANSLATION_UNIT_DECL,
name, NULL_TREE);
TRANSLATION_UNIT_LANGUAGE (tu) = lang_hooks.name;
- VEC_safe_push (tree, gc, all_translation_units, tu);
+ vec_safe_push (all_translation_units, tu);
return tu;
}
{
hashval_t hashcode = 0;
tree ntype;
+ int i;
+ tree t;
enum tree_code code = TREE_CODE (ttype);
/* Building a distinct copy of a tagged type is inappropriate; it
hashcode);
break;
case INTEGER_TYPE:
- hashcode = iterative_hash_object
- (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode);
- hashcode = iterative_hash_object
- (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode);
+ t = TYPE_MAX_VALUE (ntype);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ hashcode = iterative_hash_object (TREE_INT_CST_ELT (t, i), hashcode);
break;
case REAL_TYPE:
case FIXED_POINT_TYPE:
break;
}
- ntype = type_hash_canon (hashcode, ntype);
+ ntype = type_hash_canon (hashcode, ntype);
+
+ /* If the target-dependent attributes make NTYPE different from
+ its canonical type, we will need to use structural equality
+ checks for this type. */
+ if (TYPE_STRUCTURAL_EQUALITY_P (ttype)
+ || !comp_type_attributes (ntype, ttype))
+ SET_TYPE_STRUCTURAL_EQUALITY (ntype);
+ else if (TYPE_CANONICAL (ntype) == ntype)
+ TYPE_CANONICAL (ntype) = TYPE_CANONICAL (ttype);
+
+ ttype = build_qualified_type (ntype, quals);
+ }
+ else if (TYPE_QUALS (ttype) != quals)
+ ttype = build_qualified_type (ttype, quals);
+
+ return ttype;
+}
+
+/* Check if "omp declare simd" attribute arguments, CLAUSES1 and CLAUSES2, are
+ the same. */
+
+static bool
+omp_declare_simd_clauses_equal (tree clauses1, tree clauses2)
+{
+ tree cl1, cl2;
+ for (cl1 = clauses1, cl2 = clauses2;
+ cl1 && cl2;
+ cl1 = OMP_CLAUSE_CHAIN (cl1), cl2 = OMP_CLAUSE_CHAIN (cl2))
+ {
+ if (OMP_CLAUSE_CODE (cl1) != OMP_CLAUSE_CODE (cl2))
+ return false;
+ if (OMP_CLAUSE_CODE (cl1) != OMP_CLAUSE_SIMDLEN)
+ {
+ if (simple_cst_equal (OMP_CLAUSE_DECL (cl1),
+ OMP_CLAUSE_DECL (cl2)) != 1)
+ return false;
+ }
+ switch (OMP_CLAUSE_CODE (cl1))
+ {
+ case OMP_CLAUSE_ALIGNED:
+ if (simple_cst_equal (OMP_CLAUSE_ALIGNED_ALIGNMENT (cl1),
+ OMP_CLAUSE_ALIGNED_ALIGNMENT (cl2)) != 1)
+ return false;
+ break;
+ case OMP_CLAUSE_LINEAR:
+ if (simple_cst_equal (OMP_CLAUSE_LINEAR_STEP (cl1),
+ OMP_CLAUSE_LINEAR_STEP (cl2)) != 1)
+ return false;
+ break;
+ case OMP_CLAUSE_SIMDLEN:
+ if (simple_cst_equal (OMP_CLAUSE_SIMDLEN_EXPR (cl1),
+ OMP_CLAUSE_SIMDLEN_EXPR (cl2)) != 1)
+ return false;
+ default:
+ break;
+ }
+ }
+ return true;
+}
+
+/* Compare two constructor-element-type constants. Return 1 if the lists
+ are known to be equal; otherwise return 0. */
- /* If the target-dependent attributes make NTYPE different from
- its canonical type, we will need to use structural equality
- checks for this type. */
- if (TYPE_STRUCTURAL_EQUALITY_P (ttype)
- || !comp_type_attributes (ntype, ttype))
- SET_TYPE_STRUCTURAL_EQUALITY (ntype);
- else if (TYPE_CANONICAL (ntype) == ntype)
- TYPE_CANONICAL (ntype) = TYPE_CANONICAL (ttype);
+static bool
+simple_cst_list_equal (const_tree l1, const_tree l2)
+{
+ while (l1 != NULL_TREE && l2 != NULL_TREE)
+ {
+ if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1)
+ return false;
- ttype = build_qualified_type (ntype, quals);
+ l1 = TREE_CHAIN (l1);
+ l2 = TREE_CHAIN (l2);
}
- else if (TYPE_QUALS (ttype) != quals)
- ttype = build_qualified_type (ttype, quals);
- return ttype;
+ return l1 == l2;
}
/* Compare two attributes for their value identity. Return true if the
return (simple_cst_list_equal (TREE_VALUE (attr1),
TREE_VALUE (attr2)) == 1);
+ if ((flag_openmp || flag_openmp_simd)
+ && TREE_VALUE (attr1) && TREE_VALUE (attr2)
+ && TREE_CODE (TREE_VALUE (attr1)) == OMP_CLAUSE
+ && TREE_CODE (TREE_VALUE (attr2)) == OMP_CLAUSE)
+ return omp_declare_simd_clauses_equal (TREE_VALUE (attr1),
+ TREE_VALUE (attr2));
+
return (simple_cst_equal (TREE_VALUE (attr1), TREE_VALUE (attr2)) == 1);
}
BINFO_INHERITANCE_CHAIN (binfo) = NULL_TREE;
BINFO_SUBVTT_INDEX (binfo) = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, BINFO_BASE_BINFOS (binfo), i, t)
+ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (binfo), i, t)
free_lang_data_in_binfo (t);
}
if (TREE_CODE (decl) == FUNCTION_DECL)
{
+ struct cgraph_node *node;
+ if (!(node = cgraph_get_node (decl))
+ || (!node->definition && !node->clones))
+ {
+ if (node)
+ cgraph_release_function_body (node);
+ else
+ {
+ release_function_body (decl);
+ DECL_ARGUMENTS (decl) = NULL;
+ DECL_RESULT (decl) = NULL;
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+ }
if (gimple_has_body_p (decl))
{
tree t;
DECL_VINDEX referring to itself into a vtable slot number as it
should. Happens with functions that are copied and then forgotten
about. Just clear it, it won't matter anymore. */
- if (DECL_VINDEX (decl) && !host_integerp (DECL_VINDEX (decl), 0))
+ if (DECL_VINDEX (decl) && !tree_fits_shwi_p (DECL_VINDEX (decl)))
DECL_VINDEX (decl) = NULL_TREE;
}
else if (TREE_CODE (decl) == VAR_DECL)
struct free_lang_data_d
{
/* Worklist to avoid excessive recursion. */
- VEC(tree,heap) *worklist;
+ vec<tree> worklist;
/* Set of traversed objects. Used to avoid duplicate visits. */
struct pointer_set_t *pset;
/* Array of symbols to process with free_lang_data_in_decl. */
- VEC(tree,heap) *decls;
+ vec<tree> decls;
/* Array of types to process with free_lang_data_in_type. */
- VEC(tree,heap) *types;
+ vec<tree> types;
};
{
if (DECL_P (t))
{
- VEC_safe_push (tree, heap, fld->decls, t);
+ fld->decls.safe_push (t);
if (debug_info_level > DINFO_LEVEL_TERSE)
save_debug_info_for_decl (t);
}
else if (TYPE_P (t))
{
- VEC_safe_push (tree, heap, fld->types, t);
+ fld->types.safe_push (t);
if (debug_info_level > DINFO_LEVEL_TERSE)
save_debug_info_for_type (t);
}
fld_worklist_push (tree t, struct free_lang_data_d *fld)
{
if (t && !is_lang_specific (t) && !pointer_set_contains (fld->pset, t))
- VEC_safe_push (tree, heap, fld->worklist, (t));
+ fld->worklist.safe_push ((t));
}
{
unsigned i;
tree tem;
- for (i = 0; VEC_iterate (tree, BINFO_BASE_BINFOS (TYPE_BINFO (t)),
- i, tem); ++i)
+ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (TYPE_BINFO (t)), i, tem)
fld_worklist_push (TREE_TYPE (tem), fld);
tem = BINFO_VIRTUALS (TYPE_BINFO (t));
if (tem
{
if (!pointer_set_contains (fld->pset, t))
walk_tree (&t, find_decls_types_r, fld, fld->pset);
- if (VEC_empty (tree, fld->worklist))
+ if (fld->worklist.is_empty ())
break;
- t = VEC_pop (tree, fld->worklist);
+ t = fld->worklist.pop ();
}
}
unsigned ix;
tree t;
- find_decls_types (n->symbol.decl, fld);
+ find_decls_types (n->decl, fld);
- if (!gimple_has_body_p (n->symbol.decl))
+ if (!gimple_has_body_p (n->decl))
return;
gcc_assert (current_function_decl == NULL_TREE && cfun == NULL);
- fn = DECL_STRUCT_FUNCTION (n->symbol.decl);
+ fn = DECL_STRUCT_FUNCTION (n->decl);
/* Traverse locals. */
FOR_EACH_LOCAL_DECL (fn, ix, t)
static void
find_decls_types_in_var (struct varpool_node *v, struct free_lang_data_d *fld)
{
- find_decls_types (v->symbol.decl, fld);
+ find_decls_types (v->decl, fld);
}
/* If T needs an assembler name, have one created for it. */
/* Initialize sets and arrays to store referenced decls and types. */
fld.pset = pointer_set_create ();
- fld.worklist = NULL;
- fld.decls = VEC_alloc (tree, heap, 100);
- fld.types = VEC_alloc (tree, heap, 100);
+ fld.worklist.create (0);
+ fld.decls.create (100);
+ fld.types.create (100);
/* Find decls and types in the body of every function in the callgraph. */
FOR_EACH_FUNCTION (n)
find_decls_types_in_node (n, &fld);
- FOR_EACH_VEC_ELT (alias_pair, alias_pairs, i, p)
+ FOR_EACH_VEC_SAFE_ELT (alias_pairs, i, p)
find_decls_types (p->decl, &fld);
/* Find decls and types in every varpool symbol. */
/* Set the assembler name on every decl found. We need to do this
now because free_lang_data_in_decl will invalidate data needed
for mangling. This breaks mangling on interdependent decls. */
- FOR_EACH_VEC_ELT (tree, fld.decls, i, t)
+ FOR_EACH_VEC_ELT (fld.decls, i, t)
assign_assembler_name_if_neeeded (t);
/* Traverse every decl found freeing its language data. */
- FOR_EACH_VEC_ELT (tree, fld.decls, i, t)
+ FOR_EACH_VEC_ELT (fld.decls, i, t)
free_lang_data_in_decl (t);
/* Traverse every type found freeing its language data. */
- FOR_EACH_VEC_ELT (tree, fld.types, i, t)
+ FOR_EACH_VEC_ELT (fld.types, i, t)
free_lang_data_in_type (t);
pointer_set_destroy (fld.pset);
- VEC_free (tree, heap, fld.worklist);
- VEC_free (tree, heap, fld.decls);
- VEC_free (tree, heap, fld.types);
+ fld.worklist.release ();
+ fld.decls.release ();
+ fld.types.release ();
}
}
-struct simple_ipa_opt_pass pass_ipa_free_lang_data =
-{
- {
- SIMPLE_IPA_PASS,
- "*free_lang_data", /* name */
- NULL, /* gate */
- free_lang_data, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_IPA_FREE_LANG_DATA, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_ggc_collect /* todo_flags_finish */
- }
+/* Pass descriptor and wrapper class exposing free_lang_data through the
+ C++ pass API.
+ NOTE(review): the old pass set TODO_ggc_collect in todo_flags_finish;
+ the new pass_data sets 0 -- confirm dropping it is intentional. */
+
+namespace {
+
+const pass_data pass_data_ipa_free_lang_data =
+{
+ SIMPLE_IPA_PASS, /* type */
+ "*free_lang_data", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ false, /* has_gate */
+ true, /* has_execute */
+ TV_IPA_FREE_LANG_DATA, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+class pass_ipa_free_lang_data : public simple_ipa_opt_pass
+{
+public:
+ pass_ipa_free_lang_data (gcc::context *ctxt)
+ : simple_ipa_opt_pass (pass_data_ipa_free_lang_data, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ unsigned int execute () { return free_lang_data (); }
+
+}; // class pass_ipa_free_lang_data
+
+} // anon namespace
+
+/* Factory function used to instantiate the pass. */
+
+simple_ipa_opt_pass *
+make_pass_ipa_free_lang_data (gcc::context *ctxt)
+{
+ return new pass_ipa_free_lang_data (ctxt);
+}
+
+
/* The backbone of is_attribute_p(). ATTR_LEN is the string length of
ATTR_NAME. Also used internally by remove_attribute(). */
bool
TYPE_READONLY (type) = (type_quals & TYPE_QUAL_CONST) != 0;
TYPE_VOLATILE (type) = (type_quals & TYPE_QUAL_VOLATILE) != 0;
TYPE_RESTRICT (type) = (type_quals & TYPE_QUAL_RESTRICT) != 0;
+ TYPE_ATOMIC (type) = (type_quals & TYPE_QUAL_ATOMIC) != 0;
TYPE_ADDR_SPACE (type) = DECODE_QUAL_ADDR_SPACE (type_quals);
}
TYPE_ATTRIBUTES (base)));
}
+/* This function checks to see if TYPE matches the size of one of the
+ built-in atomic types, and returns that core atomic type. Returns
+ NULL_TREE when TYPE is incomplete or matches no built-in atomic type. */
+
+static tree
+find_atomic_core_type (tree type)
+{
+ tree base_atomic_type;
+
+ /* Only handle complete types. */
+ if (TYPE_SIZE (type) == NULL_TREE)
+ return NULL_TREE;
+
+ /* tree_to_uhwi asserts tree_fits_uhwi_p; variable-sized or very large
+ types cannot match any built-in atomic type anyway. */
+ if (!tree_fits_uhwi_p (TYPE_SIZE (type)))
+ return NULL_TREE;
+
+ unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+ switch (type_size)
+ {
+ case 8:
+ base_atomic_type = atomicQI_type_node;
+ break;
+
+ case 16:
+ base_atomic_type = atomicHI_type_node;
+ break;
+
+ case 32:
+ base_atomic_type = atomicSI_type_node;
+ break;
+
+ case 64:
+ base_atomic_type = atomicDI_type_node;
+ break;
+
+ case 128:
+ base_atomic_type = atomicTI_type_node;
+ break;
+
+ default:
+ base_atomic_type = NULL_TREE;
+ }
+
+ return base_atomic_type;
+}
+
/* Return a version of the TYPE, qualified as indicated by the
TYPE_QUALS, if one exists. If no qualified version exists yet,
return NULL_TREE. */
t = build_variant_type_copy (type);
set_type_quals (t, type_quals);
+ if (((type_quals & TYPE_QUAL_ATOMIC) == TYPE_QUAL_ATOMIC))
+ {
+ /* See if this object can map to a basic atomic type. */
+ tree atomic_type = find_atomic_core_type (type);
+ if (atomic_type)
+ {
+ /* Ensure the alignment of this type is compatible with
+ the required alignment of the atomic type. */
+ if (TYPE_ALIGN (atomic_type) > TYPE_ALIGN (t))
+ TYPE_ALIGN (t) = TYPE_ALIGN (atomic_type);
+ }
+ }
+
if (TYPE_STRUCTURAL_EQUALITY_P (type))
/* Propagate structural equality. */
SET_TYPE_STRUCTURAL_EQUALITY (t);
/* Lookup a vector of debug arguments for FROM, and return it if we
find one. */
-VEC(tree, gc) **
+vec<tree, va_gc> **
decl_debug_args_lookup (tree from)
{
struct tree_vec_map *h, in;
/* Insert a mapping FROM->empty vector of debug arguments in the value
expression hashtable. */
-VEC(tree, gc) **
+vec<tree, va_gc> **
decl_debug_args_insert (tree from)
{
struct tree_vec_map *h;
case INTEGER_TYPE:
case REAL_TYPE:
case BOOLEAN_TYPE:
+ if (TYPE_PRECISION (a->type) != TYPE_PRECISION (b->type))
+ return false;
return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type)
|| tree_int_cst_equal (TYPE_MAX_VALUE (a->type),
TYPE_MAX_VALUE (b->type)))
/* Look in the type hash table for a type isomorphic to TYPE.
If one is found, return it. Otherwise return 0. */
-tree
+static tree
type_hash_lookup (hashval_t hashcode, tree type)
{
struct type_hash *h, in;
/* Add an entry to the type-hash-table
for a type TYPE whose hash code is HASHCODE. */
-void
+static void
type_hash_add (hashval_t hashcode, tree type)
{
struct type_hash *h;
/* This CONST_CAST is okay because lookup_attribute does not
modify its argument and the return value is assigned to a
const_tree. */
- for (attr = lookup_ident_attribute (get_attribute_name (t2), CONST_CAST_TREE(l1));
+ for (attr = lookup_ident_attribute (get_attribute_name (t2),
+ CONST_CAST_TREE (l1));
attr != NULL_TREE && !attribute_value_equal (t2, attr);
- attr = lookup_ident_attribute (get_attribute_name (t2), TREE_CHAIN (attr)))
+ attr = lookup_ident_attribute (get_attribute_name (t2),
+ TREE_CHAIN (attr)))
;
if (attr == NULL_TREE)
if (TREE_CODE (t1) == INTEGER_CST
&& TREE_CODE (t2) == INTEGER_CST
- && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2))
+ && wi::to_widest (t1) == wi::to_widest (t2))
return 1;
return 0;
int
tree_int_cst_lt (const_tree t1, const_tree t2)
{
- if (t1 == t2)
- return 0;
-
- if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2)))
- {
- int t1_sgn = tree_int_cst_sgn (t1);
- int t2_sgn = tree_int_cst_sgn (t2);
-
- if (t1_sgn < t2_sgn)
- return 1;
- else if (t1_sgn > t2_sgn)
- return 0;
- /* Otherwise, both are non-negative, so we compare them as
- unsigned just in case one of them would overflow a signed
- type. */
- }
- else if (!TYPE_UNSIGNED (TREE_TYPE (t1)))
- return INT_CST_LT (t1, t2);
-
- return INT_CST_LT_UNSIGNED (t1, t2);
+ return INT_CST_LT (t1, t2);
}
/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */
int
tree_int_cst_compare (const_tree t1, const_tree t2)
{
- if (tree_int_cst_lt (t1, t2))
- return -1;
- else if (tree_int_cst_lt (t2, t1))
- return 1;
- else
- return 0;
+ return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
}
-/* Return 1 if T is an INTEGER_CST that can be manipulated efficiently on
- the host. If POS is zero, the value can be represented in a single
- HOST_WIDE_INT. If POS is nonzero, the value must be non-negative and can
- be represented in a single unsigned HOST_WIDE_INT. */
+/* Return true if T is an INTEGER_CST whose numerical value (extended
+ according to TYPE_UNSIGNED) fits in a signed HOST_WIDE_INT. */
-int
-host_integerp (const_tree t, int pos)
+bool
+tree_fits_shwi_p (const_tree t)
{
- if (t == NULL_TREE)
- return 0;
+ return (t != NULL_TREE
+ && TREE_CODE (t) == INTEGER_CST
+ && wi::fits_shwi_p (wi::to_widest (t)));
+}
- return (TREE_CODE (t) == INTEGER_CST
- && ((TREE_INT_CST_HIGH (t) == 0
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0)
- || (! pos && TREE_INT_CST_HIGH (t) == -1
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0
- && !TYPE_UNSIGNED (TREE_TYPE (t)))
- || (pos && TREE_INT_CST_HIGH (t) == 0)));
+/* Return true if T is an INTEGER_CST whose numerical value (extended
+ according to TYPE_UNSIGNED) fits in an unsigned HOST_WIDE_INT. */
+
+bool
+tree_fits_uhwi_p (const_tree t)
+{
+ return (t != NULL_TREE
+ && TREE_CODE (t) == INTEGER_CST
+ && wi::fits_uhwi_p (wi::to_widest (t)));
}
-/* Return the HOST_WIDE_INT least significant bits of T if it is an
- INTEGER_CST and there is no overflow. POS is nonzero if the result must
- be non-negative. We must be able to satisfy the above conditions. */
+/* T is an INTEGER_CST whose numerical value (extended according to
+ TYPE_UNSIGNED) fits in a signed HOST_WIDE_INT. Return that
+ HOST_WIDE_INT. */
HOST_WIDE_INT
-tree_low_cst (const_tree t, int pos)
+tree_to_shwi (const_tree t)
{
- gcc_assert (host_integerp (t, pos));
- return TREE_INT_CST_LOW (t);
+ gcc_assert (tree_fits_shwi_p (t));
+ return TREE_INT_CST_ELT (t, 0);
}
-/* Return the HOST_WIDE_INT least significant bits of T, a sizetype
- kind INTEGER_CST. This makes sure to properly sign-extend the
- constant. */
+/* T is an INTEGER_CST whose numerical value (extended according to
+ TYPE_UNSIGNED) fits in an unsigned HOST_WIDE_INT. Return that
+ unsigned HOST_WIDE_INT. */
-HOST_WIDE_INT
-size_low_cst (const_tree t)
+unsigned HOST_WIDE_INT
+tree_to_uhwi (const_tree t)
{
- double_int d = tree_to_double_int (t);
- return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low;
+ gcc_assert (tree_fits_uhwi_p (t));
+ return TREE_INT_CST_ELT (t, 0);
}
/* Return the most significant (sign) bit of T. */
tree_int_cst_sign_bit (const_tree t)
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- unsigned HOST_WIDE_INT w;
- if (bitno < HOST_BITS_PER_WIDE_INT)
- w = TREE_INT_CST_LOW (t);
- else
- {
- w = TREE_INT_CST_HIGH (t);
- bitno -= HOST_BITS_PER_WIDE_INT;
- }
-
- return (w >> bitno) & 1;
+ return wi::extract_uhwi (t, bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
int
tree_int_cst_sgn (const_tree t)
{
- if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0)
+ if (wi::eq_p (t, 0))
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (TREE_INT_CST_HIGH (t) < 0)
+ else if (wi::neg_p (t))
return -1;
else
return 1;
signed or unsigned type, UNSIGNEDP says which. */
unsigned int
-tree_int_cst_min_precision (tree value, bool unsignedp)
+tree_int_cst_min_precision (tree value, signop sgn)
{
- int log;
-
/* If the value is negative, compute its negative minus 1. The latter
adjustment is because the absolute value of the largest negative value
is one larger than the largest positive value. This is equivalent to
value = fold_build1 (BIT_NOT_EXPR, TREE_TYPE (value), value);
/* Return the number of bits needed, taking into account the fact
- that we need one more bit for a signed than unsigned type. */
+ that we need one more bit for a signed than unsigned type.
+ If value is 0 or -1, the minimum precision is 1 no matter
+ whether SGN is SIGNED or UNSIGNED. */
if (integer_zerop (value))
- log = 0;
+ return 1;
else
- log = tree_floor_log2 (value);
-
- return log + 1 + !unsignedp;
-}
-
-/* Compare two constructor-element-type constants. Return 1 if the lists
- are known to be equal; otherwise return 0. */
-
-int
-simple_cst_list_equal (const_tree l1, const_tree l2)
-{
- while (l1 != NULL_TREE && l2 != NULL_TREE)
- {
- if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1)
- return 0;
-
- l1 = TREE_CHAIN (l1);
- l2 = TREE_CHAIN (l2);
- }
-
- return l1 == l2;
+ return tree_floor_log2 (value) + 1 + (sgn == SIGNED ? 1 : 0) ;
}
/* Return truthvalue of whether T1 is the same tree structure as T2.
switch (code1)
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2));
+ return wi::to_widest (t1) == wi::to_widest (t2);
case REAL_CST:
return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT idx;
- VEC(constructor_elt, gc) *v1 = CONSTRUCTOR_ELTS (t1);
- VEC(constructor_elt, gc) *v2 = CONSTRUCTOR_ELTS (t2);
+ vec<constructor_elt, va_gc> *v1 = CONSTRUCTOR_ELTS (t1);
+ vec<constructor_elt, va_gc> *v2 = CONSTRUCTOR_ELTS (t2);
- if (VEC_length (constructor_elt, v1) != VEC_length (constructor_elt, v2))
+ if (vec_safe_length (v1) != vec_safe_length (v2))
return false;
- for (idx = 0; idx < VEC_length (constructor_elt, v1); ++idx)
+ for (idx = 0; idx < vec_safe_length (v1); ++idx)
/* ??? Should we handle also fields here? */
- if (!simple_cst_equal (VEC_index (constructor_elt, v1, idx).value,
- VEC_index (constructor_elt, v2, idx).value))
+ if (!simple_cst_equal ((*v1)[idx].value, (*v2)[idx].value))
return false;
return true;
}
{
if (tree_int_cst_sgn (t) < 0)
return -1;
- else if (TREE_INT_CST_HIGH (t) != 0)
+ else if (!cst_fits_uhwi_p (t))
return 1;
else if (TREE_INT_CST_LOW (t) == u)
return 0;
bool
valid_constant_size_p (const_tree size)
{
- if (! host_integerp (size, 1)
+ if (! tree_fits_uhwi_p (size)
|| TREE_OVERFLOW (size)
|| tree_int_cst_sign_bit (size) != 0)
return false;
return true;
}
+/* Return the precision of the type, or for a complex or vector type the
+ precision of the type of its elements. */
+
+unsigned int
+element_precision (const_tree type)
+{
+ enum tree_code code = TREE_CODE (type);
+ /* Strip one level: the element type carries the precision we want. */
+ if (code == COMPLEX_TYPE || code == VECTOR_TYPE)
+ type = TREE_TYPE (type);
+
+ return TYPE_PRECISION (type);
+}
+
/* Return true if CODE represents an associative tree code. Otherwise
return false. */
bool
/* Alas, constants aren't shared, so we can't rely on pointer
identity. */
case INTEGER_CST:
- val = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), val);
- return iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), val);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ val = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), val);
+ return val;
case REAL_CST:
{
unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t));
}
return val;
}
- case MEM_REF:
- {
- /* The type of the second operand is relevant, except for
- its top-level qualifiers. */
- tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (t, 1)));
-
- val = iterative_hash_object (TYPE_HASH (type), val);
-
- /* We could use the standard hash computation from this point
- on. */
- val = iterative_hash_object (code, val);
- val = iterative_hash_expr (TREE_OPERAND (t, 1), val);
- val = iterative_hash_expr (TREE_OPERAND (t, 0), val);
- return val;
- }
case FUNCTION_DECL:
/* When referring to a built-in FUNCTION_DECL, use the __builtin__ form.
Otherwise nodes that compare equal according to operand_equal_p might
}
}
-/* Generate a hash value for a pair of expressions. This can be used
- iteratively by passing a previous result as the VAL argument.
-
- The same hash value is always returned for a given pair of expressions,
- regardless of the order in which they are presented. This is useful in
- hashing the operands of commutative functions. */
-
-hashval_t
-iterative_hash_exprs_commutative (const_tree t1,
- const_tree t2, hashval_t val)
-{
- hashval_t one = iterative_hash_expr (t1, 0);
- hashval_t two = iterative_hash_expr (t2, 0);
- hashval_t t;
-
- if (one > two)
- t = one, one = two, two = t;
- val = iterative_hash_hashval_t (one, val);
- val = iterative_hash_hashval_t (two, val);
-
- return val;
-}
-\f
/* Constructors for pointer, array and function types.
(RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE nodes are
constructed by language-dependent code, not here.) */
return build_reference_type_for_mode (to_type, pointer_mode, false);
}
-/* Build a type that is compatible with t but has no cv quals anywhere
- in its type, thus
-
- const char *const *const * -> char ***. */
-
-tree
-build_type_no_quals (tree t)
-{
- switch (TREE_CODE (t))
- {
- case POINTER_TYPE:
- return build_pointer_type_for_mode (build_type_no_quals (TREE_TYPE (t)),
- TYPE_MODE (t),
- TYPE_REF_CAN_ALIAS_ALL (t));
- case REFERENCE_TYPE:
- return
- build_reference_type_for_mode (build_type_no_quals (TREE_TYPE (t)),
- TYPE_MODE (t),
- TYPE_REF_CAN_ALIAS_ALL (t));
- default:
- return TYPE_MAIN_VARIANT (t);
- }
-}
-
#define MAX_INT_CACHED_PREC \
(HOST_BITS_PER_WIDE_INT > 64 ? HOST_BITS_PER_WIDE_INT : 64)
static GTY(()) tree nonstandard_integer_type_cache[2 * MAX_INT_CACHED_PREC + 2];
fixup_signed_type (itype);
ret = itype;
- if (host_integerp (TYPE_MAX_VALUE (itype), 1))
- ret = type_hash_canon (tree_low_cst (TYPE_MAX_VALUE (itype), 1), itype);
+ if (tree_fits_uhwi_p (TYPE_MAX_VALUE (itype)))
+ ret = type_hash_canon (tree_to_uhwi (TYPE_MAX_VALUE (itype)), itype);
if (precision <= MAX_INT_CACHED_PREC)
nonstandard_integer_type_cache[precision + unsignedp] = ret;
true) or would not differ from ARGTYPES. */
static tree
-maybe_canonicalize_argtypes(tree argtypes,
- bool *any_structural_p,
- bool *any_noncanonical_p)
+maybe_canonicalize_argtypes (tree argtypes,
+ bool *any_structural_p,
+ bool *any_noncanonical_p)
{
tree arg;
bool any_noncanonical_argtypes_p = false;
return t;
}
-/* Build variant of function type ORIG_TYPE skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true. */
-
-static tree
-build_function_type_skip_args (tree orig_type, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_type = NULL;
- tree args, new_args = NULL, t;
- tree new_reversed;
- int i = 0;
-
- for (args = TYPE_ARG_TYPES (orig_type); args && args != void_list_node;
- args = TREE_CHAIN (args), i++)
- if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
- new_args = tree_cons (NULL_TREE, TREE_VALUE (args), new_args);
-
- new_reversed = nreverse (new_args);
- if (args)
- {
- if (new_reversed)
- TREE_CHAIN (new_args) = void_list_node;
- else
- new_reversed = void_list_node;
- }
-
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || !args_to_skip
- || !bitmap_bit_p (args_to_skip, 0))
- {
- new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
- }
- else
- {
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- }
-
- if (skip_return)
- TREE_TYPE (new_type) = void_type_node;
-
- /* This is a new type, not a copy of an old type. Need to reassociate
- variants. We can handle everything except the main variant lazily. */
- t = TYPE_MAIN_VARIANT (orig_type);
- if (t != orig_type)
- {
- t = build_function_type_skip_args (t, args_to_skip, skip_return);
- TYPE_MAIN_VARIANT (new_type) = t;
- TYPE_NEXT_VARIANT (new_type) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = new_type;
- }
- else
- {
- TYPE_MAIN_VARIANT (new_type) = new_type;
- TYPE_NEXT_VARIANT (new_type) = NULL;
- }
-
- return new_type;
-}
-
-/* Build variant of function decl ORIG_DECL skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true.
-
- Arguments from DECL_ARGUMENTS list can't be removed now, since they are
- linked by TREE_CHAIN directly. The caller is responsible for eliminating
- them when they are being duplicated (i.e. copy_arguments_for_versioning). */
-
-tree
-build_function_decl_skip_args (tree orig_decl, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_decl = copy_node (orig_decl);
- tree new_type;
-
- new_type = TREE_TYPE (orig_decl);
- if (prototype_p (new_type)
- || (skip_return && !VOID_TYPE_P (TREE_TYPE (new_type))))
- new_type
- = build_function_type_skip_args (new_type, args_to_skip, skip_return);
- TREE_TYPE (new_decl) = new_type;
-
- /* For declarations setting DECL_VINDEX (i.e. methods)
- we expect first argument to be THIS pointer. */
- if (args_to_skip && bitmap_bit_p (args_to_skip, 0))
- DECL_VINDEX (new_decl) = NULL_TREE;
-
- /* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (new_decl)
- && args_to_skip
- && !bitmap_empty_p (args_to_skip))
- {
- DECL_BUILT_IN_CLASS (new_decl) = NOT_BUILT_IN;
- DECL_FUNCTION_CODE (new_decl) = (enum built_in_function) 0;
- }
- return new_decl;
-}
-
/* Build a function type. The RETURN_TYPE is the type returned by the
function. If VAARGS is set, no void_type_node is appended to the
the list. ARGP must be always be terminated be a NULL_TREE. */
&& TREE_CODE (TREE_TYPE (op)) != FIXED_POINT_TYPE
/* Ensure field is laid out already. */
&& DECL_SIZE (TREE_OPERAND (op, 1)) != 0
- && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1))
+ && tree_fits_uhwi_p (DECL_SIZE (TREE_OPERAND (op, 1))))
{
unsigned HOST_WIDE_INT innerprec
- = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1);
+ = tree_to_uhwi (DECL_SIZE (TREE_OPERAND (op, 1)));
int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1))
|| TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1))));
tree type = lang_hooks.types.type_for_size (innerprec, unsignedp);
int_fits_type_p (const_tree c, const_tree type)
{
tree type_low_bound, type_high_bound;
- bool ok_for_low_bound, ok_for_high_bound, unsc;
- double_int dc, dd;
-
- dc = tree_to_double_int (c);
- unsc = TYPE_UNSIGNED (TREE_TYPE (c));
+ bool ok_for_low_bound, ok_for_high_bound;
+ signop sgn_c = TYPE_SIGN (TREE_TYPE (c));
retry:
type_low_bound = TYPE_MIN_VALUE (type);
/* If at least one bound of the type is a constant integer, we can check
ourselves and maybe make a decision. If no such decision is possible, but
this type is a subtype, try checking against that. Otherwise, use
- double_int_fits_to_tree_p, which checks against the precision.
+ fits_to_tree_p, which checks against the precision.
Compute the status for each possibly constant bound, and return if we see
one does not match. Use ok_for_xxx_bound for this purpose, assigning -1
/* Check if c >= type_low_bound. */
if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_low_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (c_neg && !t_neg)
- return false;
- if ((c_neg || !t_neg) && dc.ult (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) < 0)
+ if (INT_CST_LT (c, type_low_bound))
return false;
ok_for_low_bound = true;
}
/* Check if c <= type_high_bound. */
if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_high_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (t_neg && !c_neg)
- return false;
- if ((t_neg || !c_neg) && dc.ugt (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) > 0)
+ if (INT_CST_LT (type_high_bound, c))
return false;
ok_for_high_bound = true;
}
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types, */
- if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ())
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
return false;
/* Second, narrower types always fit in wider ones. */
return true;
/* Third, unsigned integers with top bit set never fit signed types. */
- if (! TYPE_UNSIGNED (type) && unsc)
+ if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED)
{
- int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (c))) - 1;
- if (prec < HOST_BITS_PER_WIDE_INT)
+ int prec = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (c))) - 1;
+ if (prec < TYPE_PRECISION (TREE_TYPE (c)))
{
- if (((((unsigned HOST_WIDE_INT) 1) << prec) & dc.low) != 0)
+ /* When a tree_cst is converted to a wide-int, the precision
+ is taken from the type. However, if the precision of the
+ mode underneath the type is smaller than that, it is
+ possible that the value will not fit. The test below
+ fails if any bit is set between the sign bit of the
+ underlying mode and the top bit of the type. */
+ if (wi::ne_p (wi::zext (c, prec - 1), c))
return false;
- }
- else if (((((unsigned HOST_WIDE_INT) 1)
- << (prec - HOST_BITS_PER_WIDE_INT)) & dc.high) != 0)
+ }
+ else if (wi::neg_p (c))
return false;
}
goto retry;
}
- /* Or to double_int_fits_to_tree_p, if nothing else. */
- return double_int_fits_to_tree_p (type, dc);
+ /* Or to fits_to_tree_p, if nothing else. */
+ return wi::fits_to_tree_p (c, type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (min, tree_to_double_int (TYPE_MIN_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
mpz_set_ui (min, 0);
else
{
- double_int mn;
- mn = double_int::mask (TYPE_PRECISION (type) - 1);
- mn = (mn + double_int_one).sext (TYPE_PRECISION (type));
- mpz_set_double_int (min, mn, false);
+ wide_int mn = wi::min_value (TYPE_PRECISION (type), SIGNED);
+ wi::to_mpz (mn, min, SIGNED);
}
}
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
else
{
- if (TYPE_UNSIGNED (type))
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)),
- true);
- else
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1),
- true);
+ wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ wi::to_mpz (mn, max, TYPE_SIGN (type));
}
}
tree t;
/* Test if T is either variable (if FN is zero) or an expression containing
- a variable in FN. */
+ a variable in FN. If TYPE isn't gimplified, return true also if
+ gimplify_one_sizepos would gimplify the expression into a local
+ variable. */
#define RETURN_TRUE_IF_VAR(T) \
do { tree _t = (T); \
if (_t != NULL_TREE \
&& _t != error_mark_node \
&& TREE_CODE (_t) != INTEGER_CST \
&& TREE_CODE (_t) != PLACEHOLDER_EXPR \
- && (!fn || walk_tree (&_t, find_var_from_fn, fn, NULL))) \
+ && (!fn \
+ || (!TYPE_SIZES_GIMPLIFIED (type) \
+ && !is_gimple_sizepos (_t)) \
+ || walk_tree (&_t, find_var_from_fn, fn, NULL))) \
return true; } while (0)
if (type == error_mark_node)
fprintf (stderr, "Code Nodes\n");
fprintf (stderr, "----------------------------\n");
for (i = 0; i < (int) MAX_TREE_CODES; i++)
- fprintf (stderr, "%-20s %7d\n", tree_code_name[i], tree_code_counts[i]);
+ fprintf (stderr, "%-20s %7d\n", get_tree_code_name ((enum tree_code) i),
+ tree_code_counts[i]);
fprintf (stderr, "----------------------------\n");
ssanames_print_statistics ();
phinodes_print_statistics ();
va_list args;
const char *buffer;
unsigned length = 0;
- int code;
+ enum tree_code code;
va_start (args, function);
- while ((code = va_arg (args, int)))
- length += 4 + strlen (tree_code_name[code]);
+ while ((code = (enum tree_code) va_arg (args, int)))
+ length += 4 + strlen (get_tree_code_name (code));
va_end (args);
if (length)
{
length += strlen ("expected ");
buffer = tmp = (char *) alloca (length);
length = 0;
- while ((code = va_arg (args, int)))
+ while ((code = (enum tree_code) va_arg (args, int)))
{
const char *prefix = length ? " or " : "expected ";
strcpy (tmp + length, prefix);
length += strlen (prefix);
- strcpy (tmp + length, tree_code_name[code]);
- length += strlen (tree_code_name[code]);
+ strcpy (tmp + length, get_tree_code_name (code));
+ length += strlen (get_tree_code_name (code));
}
va_end (args);
}
buffer = "unexpected node";
internal_error ("tree check: %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
va_list args;
char *buffer;
unsigned length = 0;
- int code;
+ enum tree_code code;
va_start (args, function);
- while ((code = va_arg (args, int)))
- length += 4 + strlen (tree_code_name[code]);
+ while ((code = (enum tree_code) va_arg (args, int)))
+ length += 4 + strlen (get_tree_code_name (code));
va_end (args);
va_start (args, function);
buffer = (char *) alloca (length);
length = 0;
- while ((code = va_arg (args, int)))
+ while ((code = (enum tree_code) va_arg (args, int)))
{
if (length)
{
strcpy (buffer + length, " or ");
length += 4;
}
- strcpy (buffer + length, tree_code_name[code]);
- length += strlen (tree_code_name[code]);
+ strcpy (buffer + length, get_tree_code_name (code));
+ length += strlen (get_tree_code_name (code));
}
va_end (args);
internal_error ("tree check: expected none of %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
("tree check: expected class %qs, have %qs (%s) in %s, at %s:%d",
TREE_CODE_CLASS_STRING (cl),
TREE_CODE_CLASS_STRING (TREE_CODE_CLASS (TREE_CODE (node))),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
/* Similar to tree_check_failed, except that instead of specifying a
unsigned int c;
for (c = c1; c <= c2; ++c)
- length += 4 + strlen (tree_code_name[c]);
+ length += 4 + strlen (get_tree_code_name ((enum tree_code) c));
length += strlen ("expected ");
buffer = (char *) alloca (length);
strcpy (buffer + length, prefix);
length += strlen (prefix);
- strcpy (buffer + length, tree_code_name[c]);
- length += strlen (tree_code_name[c]);
+ strcpy (buffer + length, get_tree_code_name ((enum tree_code) c));
+ length += strlen (get_tree_code_name ((enum tree_code) c));
}
internal_error ("tree check: %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
("tree check: did not expect class %qs, have %qs (%s) in %s, at %s:%d",
TREE_CODE_CLASS_STRING (cl),
TREE_CODE_CLASS_STRING (TREE_CODE_CLASS (TREE_CODE (node))),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
const char *function, enum omp_clause_code code)
{
internal_error ("tree check: expected omp_clause %s, have %s in %s, at %s:%d",
- omp_clause_code_name[code], tree_code_name[TREE_CODE (node)],
+ omp_clause_code_name[code], get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
{
internal_error
("tree check: expected tree that contains %qs structure, have %qs in %s, at %s:%d",
- TS_ENUM_NAME(en),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ TS_ENUM_NAME (en),
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
+/* Similar to above, except that the check is for the bounds of a
+ tree_int_cst's (dynamically sized) element array. */
+
+void
+tree_int_cst_elt_check_failed (int idx, int len, const char *file, int line,
+ const char *function)
+{
+ internal_error
+ ("tree check: accessed elt %d of tree_int_cst with %d elts in %s, at %s:%d",
+ idx + 1, len, function, trim_filename (file), line);
+}
+
/* Similar to above, except that the check is for the bounds of a TREE_VEC's
(dynamically sized) vector. */
tree_operand_check_failed (int idx, const_tree exp, const char *file,
int line, const char *function)
{
- int code = TREE_CODE (exp);
+ enum tree_code code = TREE_CODE (exp);
internal_error
("tree check: accessed operand %d of %s with %d operands in %s, at %s:%d",
- idx + 1, tree_code_name[code], TREE_OPERAND_LENGTH (exp),
+ idx + 1, get_tree_code_name (code), TREE_OPERAND_LENGTH (exp),
function, trim_filename (file), line);
}
return make_accum_type (size, unsignedp, satp);
}
+
+/* Create an atomic variant node for TYPE. This routine is called
+ during initialization of data types to create the 5 basic atomic
+ types. The generic build_variant_type function requires these to
+ already be set up in order to function properly, so cannot be
+ called from there. Returns the (possibly pre-existing) atomic
+ variant. */
+
+static tree
+build_atomic_base (tree type)
+{
+ tree t;
+
+ /* Make sure it's not already registered; if it is, reuse it. */
+ if ((t = get_qualified_type (type, TYPE_QUAL_ATOMIC)))
+ return t;
+
+ t = build_variant_type_copy (type);
+ set_type_quals (t, TYPE_QUAL_ATOMIC);
+
+ return t;
+}
+
/* Create nodes for all integer types (and error_mark_node) using the sizes
of C datatypes. SIGNED_CHAR specifies whether char is signed,
SHORT_DOUBLE specifies whether double should be of the same precision
#endif
/* Define a boolean type. This type only represents boolean values but
- may be larger than char depending on the value of BOOL_TYPE_SIZE.
- Front ends which want to override this size (i.e. Java) can redefine
- boolean_type_node before calling build_common_tree_nodes_2. */
+ may be larger than char depending on the value of BOOL_TYPE_SIZE. */
boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE);
TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE);
- TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
TYPE_PRECISION (boolean_type_node) = 1;
+ TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
/* Define what type to use for size_t. */
if (strcmp (SIZE_TYPE, "unsigned int") == 0)
unsigned_intDI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (DImode), 1);
unsigned_intTI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (TImode), 1);
+ /* Don't call build_qualified type for atomics. That routine does
+ special processing for atomics, and until they are initialized
+ it's better not to make that call. */
+
+ atomicQI_type_node = build_atomic_base (unsigned_intQI_type_node);
+ atomicHI_type_node = build_atomic_base (unsigned_intHI_type_node);
+ atomicSI_type_node = build_atomic_base (unsigned_intSI_type_node);
+ atomicDI_type_node = build_atomic_base (unsigned_intDI_type_node);
+ atomicTI_type_node = build_atomic_base (unsigned_intTI_type_node);
+
access_public_node = get_identifier ("public");
access_protected_node = get_identifier ("protected");
access_private_node = get_identifier ("private");
void_type_node = make_node (VOID_TYPE);
layout_type (void_type_node);
+ pointer_bounds_type_node = targetm.chkp_bound_type ();
+
/* We are not going to have real types in C with less than byte alignment,
so we might as well not have any types that claim to have it. */
TYPE_ALIGN (void_type_node) = BITS_PER_UNIT;
= build_pointer_type (build_type_variant (void_type_node, 1, 0));
fileptr_type_node = ptr_type_node;
+ pointer_sized_int_node = build_nonstandard_integer_type (POINTER_SIZE, 1);
+
float_type_node = make_node (REAL_TYPE);
TYPE_PRECISION (float_type_node) = FLOAT_TYPE_SIZE;
layout_type (float_type_node);
}
}
-/* A subroutine of build_common_builtin_nodes. Define a builtin function. */
+/* Set properties on DECL corresponding to the ECF_* bits in FLAGS.
+ Note that the TM_PURE attribute (ECF_TM_PURE) is represented on
+ types rather than decls, so honoring it will modify DECL's type. */
-static void
-local_define_builtin (const char *name, tree type, enum built_in_function code,
- const char *library_name, int ecf_flags)
+void
+set_call_expr_flags (tree decl, int flags)
{
- tree decl;
-
- decl = add_builtin_function (name, type, code, BUILT_IN_NORMAL,
- library_name, NULL_TREE);
- if (ecf_flags & ECF_CONST)
+ if (flags & ECF_NOTHROW)
+ TREE_NOTHROW (decl) = 1;
+ if (flags & ECF_CONST)
TREE_READONLY (decl) = 1;
- if (ecf_flags & ECF_PURE)
+ if (flags & ECF_PURE)
DECL_PURE_P (decl) = 1;
- if (ecf_flags & ECF_LOOPING_CONST_OR_PURE)
+ if (flags & ECF_LOOPING_CONST_OR_PURE)
DECL_LOOPING_CONST_OR_PURE_P (decl) = 1;
- if (ecf_flags & ECF_NORETURN)
+ if (flags & ECF_NOVOPS)
+ DECL_IS_NOVOPS (decl) = 1;
+ if (flags & ECF_NORETURN)
TREE_THIS_VOLATILE (decl) = 1;
- if (ecf_flags & ECF_NOTHROW)
- TREE_NOTHROW (decl) = 1;
- if (ecf_flags & ECF_MALLOC)
+ if (flags & ECF_MALLOC)
DECL_IS_MALLOC (decl) = 1;
- if (ecf_flags & ECF_LEAF)
+ if (flags & ECF_RETURNS_TWICE)
+ DECL_IS_RETURNS_TWICE (decl) = 1;
+ if (flags & ECF_LEAF)
DECL_ATTRIBUTES (decl) = tree_cons (get_identifier ("leaf"),
NULL, DECL_ATTRIBUTES (decl));
- if ((ecf_flags & ECF_TM_PURE) && flag_tm)
+ if ((flags & ECF_TM_PURE) && flag_tm)
apply_tm_attr (decl, get_identifier ("transaction_pure"));
+ /* Looping const or pure is implied by noreturn.
+ There is currently no way to declare looping const or looping pure alone. */
+ gcc_assert (!(flags & ECF_LOOPING_CONST_OR_PURE)
+ || ((flags & ECF_NORETURN) && (flags & (ECF_CONST | ECF_PURE))));
+}
+
+
+/* A subroutine of build_common_builtin_nodes. Define a builtin function
+ NAME of type TYPE with implicit built-in code CODE, emitted as calls
+ to LIBRARY_NAME, and apply the ECF_FLAGS properties to the new decl. */
+
+static void
+local_define_builtin (const char *name, tree type, enum built_in_function code,
+ const char *library_name, int ecf_flags)
+{
+ tree decl;
+
+ decl = add_builtin_function (name, type, code, BUILT_IN_NORMAL,
+ library_name, NULL_TREE);
+ set_call_expr_flags (decl, ecf_flags);
set_builtin_decl (code, decl, true);
}
}
}
+/* Check whether vector VEC consists of all equal elements and that
+ the number of elements matches VEC's vector type. Returns the
+ first element of the vector, or NULL_TREE if the vector is not
+ uniform. */
+tree
+uniform_vector_p (const_tree vec)
+{
+ tree first, t;
+ unsigned i;
+
+ if (vec == NULL_TREE)
+ return NULL_TREE;
+
+ gcc_assert (VECTOR_TYPE_P (TREE_TYPE (vec)));
+
+ if (TREE_CODE (vec) == VECTOR_CST)
+ {
+ first = VECTOR_CST_ELT (vec, 0);
+ for (i = 1; i < VECTOR_CST_NELTS (vec); ++i)
+ if (!operand_equal_p (first, VECTOR_CST_ELT (vec, i), 0))
+ return NULL_TREE;
+
+ return first;
+ }
+
+ else if (TREE_CODE (vec) == CONSTRUCTOR)
+ {
+ first = error_mark_node;
+
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (vec), i, t)
+ {
+ if (i == 0)
+ {
+ first = t;
+ continue;
+ }
+ if (!operand_equal_p (first, t, 0))
+ return NULL_TREE;
+ }
+ /* A partially filled CONSTRUCTOR is not uniform. */
+ if (i != TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)))
+ return NULL_TREE;
+
+ return first;
+ }
+
+ return NULL_TREE;
+}
+
/* Build an empty statement at location LOC. */
tree
record_node_allocation_statistics (code, length);
- t = ggc_alloc_zone_cleared_tree_node_stat (&tree_zone, length PASS_MEM_STAT);
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
TREE_SET_CODE (t, code);
return t;
}
-/* Like build_call_array, but takes a VEC. */
+/* Like build_call_array, but takes a GC'd vec<tree> of arguments
+ rather than a C array; ARGS may be NULL (treated as empty). */
tree
-build_call_vec (tree return_type, tree fn, VEC(tree,gc) *args)
+build_call_vec (tree return_type, tree fn, vec<tree, va_gc> *args)
{
tree ret, t;
unsigned int ix;
- ret = build_call_1 (return_type, fn, VEC_length (tree, args));
- FOR_EACH_VEC_ELT (tree, args, ix, t)
+ ret = build_call_1 (return_type, fn, vec_safe_length (args));
+ FOR_EACH_VEC_SAFE_ELT (args, ix, t)
CALL_EXPR_ARG (ret, ix) = t;
process_call_operands (ret);
return ret;
}
-
-/* Returns true if it is possible to prove that the index of
- an array access REF (an ARRAY_REF expression) falls into the
- array bounds. */
-
-bool
-in_array_bounds_p (tree ref)
-{
- tree idx = TREE_OPERAND (ref, 1);
- tree min, max;
-
- if (TREE_CODE (idx) != INTEGER_CST)
- return false;
-
- min = array_ref_low_bound (ref);
- max = array_ref_up_bound (ref);
- if (!min
- || !max
- || TREE_CODE (min) != INTEGER_CST
- || TREE_CODE (max) != INTEGER_CST)
- return false;
-
- if (tree_int_cst_lt (idx, min)
- || tree_int_cst_lt (max, idx))
- return false;
-
- return true;
-}
-
-/* Returns true if it is possible to prove that the range of
- an array access REF (an ARRAY_RANGE_REF expression) falls
- into the array bounds. */
-
-bool
-range_in_array_bounds_p (tree ref)
-{
- tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
- tree range_min, range_max, min, max;
-
- range_min = TYPE_MIN_VALUE (domain_type);
- range_max = TYPE_MAX_VALUE (domain_type);
- if (!range_min
- || !range_max
- || TREE_CODE (range_min) != INTEGER_CST
- || TREE_CODE (range_max) != INTEGER_CST)
- return false;
-
- min = array_ref_low_bound (ref);
- max = array_ref_up_bound (ref);
- if (!min
- || !max
- || TREE_CODE (min) != INTEGER_CST
- || TREE_CODE (max) != INTEGER_CST)
- return false;
-
- if (tree_int_cst_lt (range_min, min)
- || tree_int_cst_lt (max, range_max))
- return false;
-
- return true;
-}
-
/* Return true if T (assumed to be a DECL) must be assigned a memory
location. */
unsigned HOST_WIDE_INT val = TREE_INT_CST_LOW (x);
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (cst_fits_shwi_p (x));
if (bits < HOST_BITS_PER_WIDE_INT)
{
#if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT
gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT);
- val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x))
- << HOST_BITS_PER_WIDE_INT);
+ gcc_assert (TREE_INT_CST_NUNITS (x) == 2);
+
+ if (TREE_INT_CST_NUNITS (x) == 1)
+ val = HOST_WIDE_INT (val);
+ else
+ val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_ELT (x, 1))
+ << HOST_BITS_PER_WIDE_INT);
#else
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (TREE_INT_CST_NUNITS (x) == 1);
#endif
if (bits < HOST_BITS_PER_WIDEST_INT)
if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type) == unsignedp)
return type;
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ tree inner = TREE_TYPE (type);
+ tree inner2 = signed_or_unsigned_type_for (unsignedp, inner);
+ if (!inner2)
+ return NULL_TREE;
+ if (inner == inner2)
+ return type;
+ return build_vector_type (inner2, TYPE_VECTOR_SUBPARTS (type));
+ }
+
if (!INTEGRAL_TYPE_P (type)
&& !POINTER_TYPE_P (type))
return NULL_TREE;
tree
upper_bound_in_type (tree outer, tree inner)
{
- double_int high;
unsigned int det = 0;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
gcc_unreachable ();
}
- /* Compute 2^^prec - 1. */
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- high.high = 0;
- high.low = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_WIDE_INT - prec));
- }
- else
- {
- high.high = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_DOUBLE_INT - prec));
- high.low = ~(unsigned HOST_WIDE_INT) 0;
- }
-
- return double_int_to_tree (outer, high);
+ return wide_int_to_tree (outer,
+ wi::mask (prec, false, TYPE_PRECISION (outer)));
}
/* Returns the smallest value obtainable by casting something in INNER type to
tree
lower_bound_in_type (tree outer, tree inner)
{
- double_int low;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
contains all values of INNER type. In particular, both INNER
and OUTER types have zero in common. */
|| (oprec > iprec && TYPE_UNSIGNED (inner)))
- low.low = low.high = 0;
+ return build_int_cst (outer, 0);
else
{
/* If we are widening a signed type to another signed type, we
precision or narrowing to a signed type, we want to obtain
-2^(oprec-1). */
unsigned prec = oprec > iprec ? iprec : oprec;
-
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- low.high = ~(unsigned HOST_WIDE_INT) 0;
- low.low = (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
- }
- else
- {
- low.high = ((~(unsigned HOST_WIDE_INT) 0)
- << (prec - HOST_BITS_PER_WIDE_INT - 1));
- low.low = 0;
- }
+ return wide_int_to_tree (outer,
+ wi::mask (prec - 1, true,
+ TYPE_PRECISION (outer)));
}
-
- return double_int_to_tree (outer, low);
}
/* Return nonzero if two operands that are suitable for PHI nodes are
return operand_equal_p (arg0, arg1, 0);
}
-/* Returns number of zeros at the end of binary representation of X.
-
- ??? Use ffs if available? */
+/* Returns the number of trailing zero bits in the binary representation
+ of X, as an INTEGER_CST of X's type. */
tree
num_ending_zeros (const_tree x)
{
- unsigned HOST_WIDE_INT fr, nfr;
- unsigned num, abits;
- tree type = TREE_TYPE (x);
-
- if (TREE_INT_CST_LOW (x) == 0)
- {
- num = HOST_BITS_PER_WIDE_INT;
- fr = TREE_INT_CST_HIGH (x);
- }
- else
- {
- num = 0;
- fr = TREE_INT_CST_LOW (x);
- }
-
- for (abits = HOST_BITS_PER_WIDE_INT / 2; abits; abits /= 2)
- {
- nfr = fr >> abits;
- if (nfr << abits == fr)
- {
- num += abits;
- fr = nfr;
- }
- }
-
- if (num > TYPE_PRECISION (type))
- num = TYPE_PRECISION (type);
-
- return build_int_cst_type (type, num);
+ return build_int_cst (TREE_TYPE (x), wi::ctz (x));
}
unsigned HOST_WIDE_INT idx;
constructor_elt *ce;
- for (idx = 0;
- VEC_iterate(constructor_elt, CONSTRUCTOR_ELTS (*tp), idx, ce);
+ for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (*tp), idx, &ce);
idx++)
WALK_SUBTREE (ce->value);
}
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_UNIFORM:
+ case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
+ case OMP_CLAUSE_SAFELEN:
+ case OMP_CLAUSE_SIMDLEN:
+ case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__SIMDUID_:
WALK_SUBTREE (OMP_CLAUSE_OPERAND (*tp, 0));
/* FALLTHRU */
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
+ case OMP_CLAUSE_INBRANCH:
+ case OMP_CLAUSE_NOTINBRANCH:
+ case OMP_CLAUSE_FOR:
+ case OMP_CLAUSE_PARALLEL:
+ case OMP_CLAUSE_SECTIONS:
+ case OMP_CLAUSE_TASKGROUP:
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
case OMP_CLAUSE_LASTPRIVATE:
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
}
+ case OMP_CLAUSE_ALIGNED:
+ case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE_FROM:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_MAP:
+ WALK_SUBTREE (OMP_CLAUSE_DECL (*tp));
+ WALK_SUBTREE (OMP_CLAUSE_OPERAND (*tp, 1));
+ WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
+
case OMP_CLAUSE_REDUCTION:
{
int i;
if (!fntype)
return false;
- FOREACH_FUNCTION_ARGS(fntype, t, args_iter)
+ FOREACH_FUNCTION_ARGS (fntype, t, args_iter)
{
n = t;
}
return (memcmp (xp, yp, len) == 0);
}
-/* Build an OPTIMIZATION_NODE based on the current options. */
+/* Build an OPTIMIZATION_NODE based on the options in OPTS. */
tree
-build_optimization_node (void)
+build_optimization_node (struct gcc_options *opts)
{
tree t;
void **slot;
/* Use the cache of optimization nodes. */
cl_optimization_save (TREE_OPTIMIZATION (cl_optimization_node),
- &global_options);
+ opts);
slot = htab_find_slot (cl_option_hash_table, cl_optimization_node, INSERT);
t = (tree) *slot;
return t;
}
-/* Build a TARGET_OPTION_NODE based on the current options. */
+/* Build a TARGET_OPTION_NODE based on the options in OPTS. */
tree
-build_target_option_node (void)
+build_target_option_node (struct gcc_options *opts)
{
tree t;
void **slot;
/* Use the cache of optimization nodes. */
cl_target_option_save (TREE_TARGET_OPTION (cl_target_option_node),
- &global_options);
+ opts);
slot = htab_find_slot (cl_option_hash_table, cl_target_option_node, INSERT);
t = (tree) *slot;
}
}
-/* Return true if T1 and T2 are equivalent lists. */
-
-bool
-list_equal_p (const_tree t1, const_tree t2)
-{
- for (; t1 && t2; t1 = TREE_CHAIN (t1) , t2 = TREE_CHAIN (t2))
- if (TREE_VALUE (t1) != TREE_VALUE (t2))
- return false;
- return !t1 && !t2;
-}
-
/* Return true iff conversion in EXP generates no instruction. Mark
it inline so that we fully inline into the stripping functions even
though we have two uses of this function. */
return gcc_eh_personality_decl;
}
+/* For languages with One Definition Rule, work out if
+ trees are actually the same even if the tree representation
+ differs. This handles only decls appearing in TYPE_NAME
+ and TYPE_CONTEXT. That is NAMESPACE_DECL, TYPE_DECL,
+ RECORD_TYPE and IDENTIFIER_NODE. */
+
+static bool
+same_for_odr (tree t1, tree t2)
+{
+ if (t1 == t2)
+ return true;
+ if (!t1 || !t2)
+ return false;
+ /* C and C++ FEs differ by using IDENTIFIER_NODE and TYPE_DECL. */
+ /* NOTE(review): on this path T1 is an IDENTIFIER_NODE, yet
+ DECL_FILE_SCOPE_P is applied to T1 (and symmetrically to T2 in
+ the mirror case below) -- verify these shouldn't instead test
+ the TYPE_DECL operand. */
+ if (TREE_CODE (t1) == IDENTIFIER_NODE
+ && TREE_CODE (t2) == TYPE_DECL
+ && DECL_FILE_SCOPE_P (t1))
+ {
+ t2 = DECL_NAME (t2);
+ gcc_assert (TREE_CODE (t2) == IDENTIFIER_NODE);
+ }
+ if (TREE_CODE (t2) == IDENTIFIER_NODE
+ && TREE_CODE (t1) == TYPE_DECL
+ && DECL_FILE_SCOPE_P (t2))
+ {
+ t1 = DECL_NAME (t1);
+ gcc_assert (TREE_CODE (t1) == IDENTIFIER_NODE);
+ }
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return false;
+ if (TYPE_P (t1))
+ return types_same_for_odr (t1, t2);
+ if (DECL_P (t1))
+ return decls_same_for_odr (t1, t2);
+ return false;
+}
+
+/* For languages with One Definition Rule, work out if
+ decls are actually the same even if the tree representation
+ differs. This handles only decls appearing in TYPE_NAME
+ and TYPE_CONTEXT. That is NAMESPACE_DECL, TYPE_DECL,
+ RECORD_TYPE and IDENTIFIER_NODE. */
+
+static bool
+decls_same_for_odr (tree decl1, tree decl2)
+{
+ /* Look through typedefs to the underlying type. */
+ if (decl1 && TREE_CODE (decl1) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (decl1))
+ decl1 = DECL_ORIGINAL_TYPE (decl1);
+ if (decl2 && TREE_CODE (decl2) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (decl2))
+ decl2 = DECL_ORIGINAL_TYPE (decl2);
+ /* NOTE(review): after the stripping above DECL1/DECL2 may be *type*
+ nodes (DECL_ORIGINAL_TYPE yields a type), which would trip the
+ DECL_P checking assert below -- confirm this is intended. */
+ if (decl1 == decl2)
+ return true;
+ if (!decl1 || !decl2)
+ return false;
+ gcc_checking_assert (DECL_P (decl1) && DECL_P (decl2));
+ if (TREE_CODE (decl1) != TREE_CODE (decl2))
+ return false;
+ if (TREE_CODE (decl1) == TRANSLATION_UNIT_DECL)
+ return true;
+ if (TREE_CODE (decl1) != NAMESPACE_DECL
+ && TREE_CODE (decl1) != TYPE_DECL)
+ return false;
+ if (!DECL_NAME (decl1))
+ return false;
+ gcc_checking_assert (TREE_CODE (DECL_NAME (decl1)) == IDENTIFIER_NODE);
+ gcc_checking_assert (!DECL_NAME (decl2)
+ || TREE_CODE (DECL_NAME (decl2)) == IDENTIFIER_NODE);
+ if (DECL_NAME (decl1) != DECL_NAME (decl2))
+ return false;
+ /* Names match; the decls are the same iff their contexts are. */
+ return same_for_odr (DECL_CONTEXT (decl1),
+ DECL_CONTEXT (decl2));
+}
+
+/* For languages with One Definition Rule, work out if
+ types are same even if the tree representation differs.
+ This is non-trivial for LTO where minor differences in
+ the type representation may have prevented type merging
+ from merging two copies of an otherwise equivalent type. */
+
+bool
+types_same_for_odr (tree type1, tree type2)
+{
+ gcc_checking_assert (TYPE_P (type1) && TYPE_P (type2));
+ type1 = TYPE_MAIN_VARIANT (type1);
+ type2 = TYPE_MAIN_VARIANT (type2);
+ if (type1 == type2)
+ return true;
+
+#ifndef ENABLE_CHECKING
+ if (!in_lto_p)
+ return false;
+#endif
+
+ /* Check for anonymous namespaces. Those have !TREE_PUBLIC
+ on the corresponding TYPE_STUB_DECL. */
+ if (type_in_anonymous_namespace_p (type1)
+ || type_in_anonymous_namespace_p (type2))
+ return false;
+ /* When assembler name of virtual table is available, it is
+ easy to compare types for equivalence. */
+ if (TYPE_BINFO (type1) && TYPE_BINFO (type2)
+ && BINFO_VTABLE (TYPE_BINFO (type1))
+ && BINFO_VTABLE (TYPE_BINFO (type2)))
+ {
+ tree v1 = BINFO_VTABLE (TYPE_BINFO (type1));
+ tree v2 = BINFO_VTABLE (TYPE_BINFO (type2));
+
+ if (TREE_CODE (v1) == POINTER_PLUS_EXPR)
+ {
+ if (TREE_CODE (v2) != POINTER_PLUS_EXPR
+ || !operand_equal_p (TREE_OPERAND (v1, 1),
+ TREE_OPERAND (v2, 1), 0))
+ return false;
+ v1 = TREE_OPERAND (TREE_OPERAND (v1, 0), 0);
+ v2 = TREE_OPERAND (TREE_OPERAND (v2, 0), 0);
+ }
+ v1 = DECL_ASSEMBLER_NAME (v1);
+ v2 = DECL_ASSEMBLER_NAME (v2);
+ return (v1 == v2);
+ }
+
+ /* FIXME: the code comparing type names considers all instantiations of
+ the same template to have the same name. This is because we have no
+ access to template parameters. For types with no virtual method tables
+ we thus can return false positives. At the moment we do not need
+ to compare types in other scenarios than devirtualization. */
+
+ /* If types are not structurally the same, do not bother to continue.
+ A match in the remainder of code would mean an ODR violation. */
+ if (!types_compatible_p (type1, type2))
+ return false;
+ if (!TYPE_NAME (type1))
+ return false;
+ if (!decls_same_for_odr (TYPE_NAME (type1), TYPE_NAME (type2)))
+ return false;
+ if (!same_for_odr (TYPE_CONTEXT (type1), TYPE_CONTEXT (type2)))
+ return false;
+ /* When not in LTO the MAIN_VARIANT check should be the same. */
+ gcc_assert (in_lto_p);
+
+ return true;
+}
+
+/* TARGET is a call target of GIMPLE call statement
+ (obtained by gimple_call_fn). Return true if it is
+ an OBJ_TYPE_REF representing a virtual call of a C++ method.
+ (As opposed to an OBJ_TYPE_REF representing ObjC calls
+ through a cast where middle-end devirtualization machinery
+ can't apply.) */
+
+bool
+virtual_method_call_p (tree target)
+{
+ if (TREE_CODE (target) != OBJ_TYPE_REF)
+ return false;
+ target = TREE_TYPE (target);
+ gcc_checking_assert (TREE_CODE (target) == POINTER_TYPE);
+ target = TREE_TYPE (target);
+ /* A plain FUNCTION_TYPE (rather than METHOD_TYPE) means ObjC. */
+ if (TREE_CODE (target) == FUNCTION_TYPE)
+ return false;
+ gcc_checking_assert (TREE_CODE (target) == METHOD_TYPE);
+ return true;
+}
+
+/* REF is an OBJ_TYPE_REF; return the class the ref corresponds to. */
+
+tree
+obj_type_ref_class (tree ref)
+{
+ gcc_checking_assert (TREE_CODE (ref) == OBJ_TYPE_REF);
+ ref = TREE_TYPE (ref);
+ gcc_checking_assert (TREE_CODE (ref) == POINTER_TYPE);
+ ref = TREE_TYPE (ref);
+ /* We look for the type THIS points to. ObjC also builds
+ OBJ_TYPE_REF with non-method calls; their first parameter
+ ID however also corresponds to the class type. */
+ gcc_checking_assert (TREE_CODE (ref) == METHOD_TYPE
+ || TREE_CODE (ref) == FUNCTION_TYPE)
+ ref = TREE_VALUE (TYPE_ARG_TYPES (ref));
+ gcc_checking_assert (TREE_CODE (ref) == POINTER_TYPE);
+ return TREE_TYPE (ref);
+}
+
+/* Return true if type T is in an anonymous namespace, detected via
+ !TREE_PUBLIC on its TYPE_STUB_DECL. */
+
+bool
+type_in_anonymous_namespace_p (tree t)
+{
+ return (TYPE_STUB_DECL (t) && !TREE_PUBLIC (TYPE_STUB_DECL (t)));
+}
+
/* Try to find a base info of BINFO that would have its field decl at offset
OFFSET within the BINFO type and which is of EXPECTED_TYPE. If it can be
found, return, otherwise return NULL_TREE. */
tree fld;
int i;
- if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (expected_type))
+ if (types_same_for_odr (type, expected_type))
return binfo;
if (offset < 0)
return NULL_TREE;
continue;
pos = int_bit_position (fld);
- size = tree_low_cst (DECL_SIZE (fld), 1);
+ size = tree_to_uhwi (DECL_SIZE (fld));
if (pos <= offset && (pos + size) > offset)
break;
}
{
tree base_binfo, found_binfo = NULL_TREE;
for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
- if (TREE_TYPE (base_binfo) == TREE_TYPE (fld))
+ if (types_same_for_odr (TREE_TYPE (base_binfo), TREE_TYPE (fld)))
{
found_binfo = base_binfo;
break;
expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (node));
if (msg)
warning (OPT_Wdeprecated_declarations,
- "%qD is deprecated (declared at %s:%d): %s",
- node, xloc.file, xloc.line, msg);
+ "%qD is deprecated (declared at %r%s:%d%R): %s",
+ node, "locus", xloc.file, xloc.line, msg);
else
warning (OPT_Wdeprecated_declarations,
- "%qD is deprecated (declared at %s:%d)",
- node, xloc.file, xloc.line);
+ "%qD is deprecated (declared at %r%s:%d%R)",
+ node, "locus", xloc.file, xloc.line);
}
else if (TYPE_P (node))
{
{
if (msg)
warning (OPT_Wdeprecated_declarations,
- "%qE is deprecated (declared at %s:%d): %s",
- what, xloc.file, xloc.line, msg);
+ "%qE is deprecated (declared at %r%s:%d%R): %s",
+ what, "locus", xloc.file, xloc.line, msg);
else
warning (OPT_Wdeprecated_declarations,
- "%qE is deprecated (declared at %s:%d)", what,
- xloc.file, xloc.line);
+ "%qE is deprecated (declared at %r%s:%d%R)",
+ what, "locus", xloc.file, xloc.line);
}
else
{
if (msg)
warning (OPT_Wdeprecated_declarations,
- "type is deprecated (declared at %s:%d): %s",
- xloc.file, xloc.line, msg);
+ "type is deprecated (declared at %r%s:%d%R): %s",
+ "locus", xloc.file, xloc.line, msg);
else
warning (OPT_Wdeprecated_declarations,
- "type is deprecated (declared at %s:%d)",
- xloc.file, xloc.line);
+ "type is deprecated (declared at %r%s:%d%R)",
+ "locus", xloc.file, xloc.line);
}
}
else
}
}
+/* Return true if REF has a COMPONENT_REF with a bit-field field declaration
+ somewhere in it. */
+
+bool
+contains_bitfld_component_ref_p (const_tree ref)
+{
+ /* Walk the handled-component chain toward the base object. */
+ while (handled_component_p (ref))
+ {
+ if (TREE_CODE (ref) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (ref, 1)))
+ return true;
+ ref = TREE_OPERAND (ref, 0);
+ }
+
+ return false;
+}
+
+/* Try to determine whether a TRY_CATCH expression can fall through.
+ This is a subroutine of block_may_fallthru. */
+
+static bool
+try_catch_may_fallthru (const_tree stmt)
+{
+ tree_stmt_iterator i;
+
+ /* If the TRY block can fall through, the whole TRY_CATCH can
+ fall through. */
+ if (block_may_fallthru (TREE_OPERAND (stmt, 0)))
+ return true;
+
+ /* Otherwise inspect the handler sequence (operand 1). */
+ i = tsi_start (TREE_OPERAND (stmt, 1));
+ switch (TREE_CODE (tsi_stmt (i)))
+ {
+ case CATCH_EXPR:
+ /* We expect to see a sequence of CATCH_EXPR trees, each with a
+ catch expression and a body. The whole TRY_CATCH may fall
+ through iff any of the catch bodies falls through. */
+ for (; !tsi_end_p (i); tsi_next (&i))
+ {
+ if (block_may_fallthru (CATCH_BODY (tsi_stmt (i))))
+ return true;
+ }
+ return false;
+
+ case EH_FILTER_EXPR:
+ /* The exception filter expression only matters if there is an
+ exception. If the exception does not match EH_FILTER_TYPES,
+ we will execute EH_FILTER_FAILURE, and we will fall through
+ if that falls through. If the exception does match
+ EH_FILTER_TYPES, the stack unwinder will continue up the
+ stack, so we will not fall through. We don't know whether we
+ will throw an exception which matches EH_FILTER_TYPES or not,
+ so we just ignore EH_FILTER_TYPES and assume that we might
+ throw an exception which doesn't match. */
+ return block_may_fallthru (EH_FILTER_FAILURE (tsi_stmt (i)));
+
+ default:
+ /* This case represents statements to be executed when an
+ exception occurs. Those statements are implicitly followed
+ by a RESX statement to resume execution after the exception.
+ So in this case the TRY_CATCH never falls through. */
+ return false;
+ }
+}
+
+/* Try to determine if we can fall out of the bottom of BLOCK. This guess
+ need not be 100% accurate; simply be conservative and return true if we
+ don't know. This is used only to avoid stupidly generating extra code.
+ If we're wrong, we'll just delete the extra code later. */
+
+bool
+block_may_fallthru (const_tree block)
+{
+ /* This CONST_CAST is okay because expr_last returns its argument
+ unmodified and we assign it to a const_tree. */
+ const_tree stmt = expr_last (CONST_CAST_TREE (block));
+
+ /* An empty block (NULL last statement) is treated like ERROR_MARK:
+ conservatively assume it may fall through. */
+ switch (stmt ? TREE_CODE (stmt) : ERROR_MARK)
+ {
+ case GOTO_EXPR:
+ case RETURN_EXPR:
+ /* Easy cases. If the last statement of the block implies
+ control transfer, then we can't fall through. */
+ return false;
+
+ case SWITCH_EXPR:
+ /* If SWITCH_LABELS is set, this is lowered, and represents a
+ branch to a selected label and hence can not fall through.
+ Otherwise SWITCH_BODY is set, and the switch can fall
+ through. */
+ return SWITCH_LABELS (stmt) == NULL_TREE;
+
+ case COND_EXPR:
+ if (block_may_fallthru (COND_EXPR_THEN (stmt)))
+ return true;
+ return block_may_fallthru (COND_EXPR_ELSE (stmt));
+
+ case BIND_EXPR:
+ return block_may_fallthru (BIND_EXPR_BODY (stmt));
+
+ case TRY_CATCH_EXPR:
+ return try_catch_may_fallthru (stmt);
+
+ case TRY_FINALLY_EXPR:
+ /* The finally clause is always executed after the try clause,
+ so if it does not fall through, then the try-finally will not
+ fall through. Otherwise, if the try clause does not fall
+ through, then when the finally clause falls through it will
+ resume execution wherever the try clause was going. So the
+ whole try-finally will only fall through if both the try
+ clause and the finally clause fall through. */
+ return (block_may_fallthru (TREE_OPERAND (stmt, 0))
+ && block_may_fallthru (TREE_OPERAND (stmt, 1)));
+
+ case MODIFY_EXPR:
+ /* An assignment falls through unless its RHS is a noreturn call. */
+ if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR)
+ stmt = TREE_OPERAND (stmt, 1);
+ else
+ return true;
+ /* FALLTHRU */
+
+ case CALL_EXPR:
+ /* Functions that do not return do not fall through. */
+ return (call_expr_flags (stmt) & ECF_NORETURN) == 0;
+
+ case CLEANUP_POINT_EXPR:
+ return block_may_fallthru (TREE_OPERAND (stmt, 0));
+
+ case TARGET_EXPR:
+ /* A TARGET_EXPR falls through iff its initializer does. */
+ return block_may_fallthru (TREE_OPERAND (stmt, 1));
+
+ case ERROR_MARK:
+ return true;
+
+ default:
+ return lang_hooks.block_may_fallthru (stmt);
+ }
+}
+
+/* True if we are using EH to handle cleanups. */
+static bool using_eh_for_cleanups_flag = false;
+
+/* This routine is called from front ends to indicate EH should be used for
+ cleanups. Once set, the flag is never cleared. */
+void
+using_eh_for_cleanups (void)
+{
+ using_eh_for_cleanups_flag = true;
+}
+
+/* Query whether EH is used for cleanups. */
+bool
+using_eh_for_cleanups_p (void)
+{
+ return using_eh_for_cleanups_flag;
+}
+
+/* Wrapper for tree_code_name that ensures CODE is valid; returns a
+ placeholder string for out-of-range codes instead of reading past
+ the end of the name table. */
+const char *
+get_tree_code_name (enum tree_code code)
+{
+ const char *invalid = "<invalid tree code>";
+
+ if (code >= MAX_TREE_CODES)
+ return invalid;
+
+ return tree_code_name[code];
+}
+
+/* Drops the TREE_OVERFLOW flag from T and returns the resulting node;
+ T itself may be shared and is left unmodified. */
+
+tree
+drop_tree_overflow (tree t)
+{
+ gcc_checking_assert (TREE_OVERFLOW (t));
+
+ /* For tree codes with a sharing machinery re-build the result. */
+ if (TREE_CODE (t) == INTEGER_CST)
+ return wide_int_to_tree (TREE_TYPE (t), t);
+
+ /* Otherwise, as all tcc_constants are possibly shared, copy the node
+ and drop the flag. */
+ t = copy_node (t);
+ TREE_OVERFLOW (t) = 0;
+ return t;
+}
+
+/* Given a memory reference expression T, return its base address.
+ The base address of a memory reference expression is the main
+ object being referenced. For instance, the base address for
+ 'array[i].fld[j]' is 'array'. You can think of this as stripping
+ away the offset part from a memory address.
+
+ This function calls handled_component_p to strip away all the inner
+ parts of the memory reference until it reaches the base object. */
+
+tree
+get_base_address (tree t)
+{
+ while (handled_component_p (t))
+ t = TREE_OPERAND (t, 0);
+
+ /* A (TARGET_)MEM_REF of an ADDR_EXPR really references the object
+ behind that address; strip to it. */
+ if ((TREE_CODE (t) == MEM_REF
+ || TREE_CODE (t) == TARGET_MEM_REF)
+ && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR)
+ t = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+
+ /* ??? Either the alias oracle or all callers need to properly deal
+ with WITH_SIZE_EXPRs before we can look through those. */
+ if (TREE_CODE (t) == WITH_SIZE_EXPR)
+ return NULL_TREE;
+
+ return t;
+}
+
#include "gt-tree.h"