#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "attribs.h"
+#include "varasm.h"
#include "tm_p.h"
#include "function.h"
#include "obstack.h"
#include "tree-inline.h"
#include "tree-iterator.h"
#include "basic-block.h"
-#include "tree-ssa.h"
+#include "bitmap.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify.h"
+#include "gimple-ssa.h"
+#include "cgraph.h"
+#include "tree-phinodes.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "expr.h"
+#include "tree-dfa.h"
#include "params.h"
#include "pointer-set.h"
#include "tree-pass.h"
#include "diagnostic.h"
#include "tree-diagnostic.h"
#include "tree-pretty-print.h"
-#include "cgraph.h"
#include "except.h"
#include "debug.h"
#include "intl.h"
+#include "wide-int.h"
/* Tree code classes. */
#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
#define END_OF_BASE_TREE_CODES "@dummy",
-const char *const tree_code_name[] = {
+static const char *const tree_code_name[] = {
#include "all-tree.def"
};
1, /* OMP_CLAUSE_COPYIN */
1, /* OMP_CLAUSE_COPYPRIVATE */
2, /* OMP_CLAUSE_LINEAR */
+ 2, /* OMP_CLAUSE_ALIGNED */
+ 1, /* OMP_CLAUSE_DEPEND */
1, /* OMP_CLAUSE_UNIFORM */
+ 2, /* OMP_CLAUSE_FROM */
+ 2, /* OMP_CLAUSE_TO */
+ 2, /* OMP_CLAUSE_MAP */
+ 1, /* OMP_CLAUSE__LOOPTEMP_ */
1, /* OMP_CLAUSE_IF */
1, /* OMP_CLAUSE_NUM_THREADS */
1, /* OMP_CLAUSE_SCHEDULE */
0, /* OMP_CLAUSE_UNTIED */
1, /* OMP_CLAUSE_FINAL */
0, /* OMP_CLAUSE_MERGEABLE */
+ 1, /* OMP_CLAUSE_DEVICE */
+ 1, /* OMP_CLAUSE_DIST_SCHEDULE */
+ 0, /* OMP_CLAUSE_INBRANCH */
+ 0, /* OMP_CLAUSE_NOTINBRANCH */
+ 1, /* OMP_CLAUSE_NUM_TEAMS */
+ 1, /* OMP_CLAUSE_THREAD_LIMIT */
+ 0, /* OMP_CLAUSE_PROC_BIND */
1, /* OMP_CLAUSE_SAFELEN */
+ 1, /* OMP_CLAUSE_SIMDLEN */
+ 0, /* OMP_CLAUSE_FOR */
+ 0, /* OMP_CLAUSE_PARALLEL */
+ 0, /* OMP_CLAUSE_SECTIONS */
+ 0, /* OMP_CLAUSE_TASKGROUP */
1, /* OMP_CLAUSE__SIMDUID_ */
};
"copyin",
"copyprivate",
"linear",
+ "aligned",
+ "depend",
"uniform",
+ "from",
+ "to",
+ "map",
+ "_looptemp_",
"if",
"num_threads",
"schedule",
"untied",
"final",
"mergeable",
+ "device",
+ "dist_schedule",
+ "inbranch",
+ "notinbranch",
+ "num_teams",
+ "thread_limit",
+ "proc_bind",
"safelen",
+ "simdlen",
+ "for",
+ "parallel",
+ "sections",
+ "taskgroup",
"_simduid_"
};
int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash,
int_cst_hash_eq, NULL);
- int_cst_node = make_node (INTEGER_CST);
+ int_cst_node = make_int_cst (1, 1);
cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash,
cl_option_hash_eq, NULL);
return DECL_WITH_VIS_CHECK (decl)->decl_with_vis.assembler_name;
}
-/* Compare ASMNAME with the DECL_ASSEMBLER_NAME of DECL. */
-
-bool
-decl_assembler_name_equal (tree decl, const_tree asmname)
-{
- tree decl_asmname = DECL_ASSEMBLER_NAME (decl);
- const char *decl_str;
- const char *asmname_str;
- bool test = false;
-
- if (decl_asmname == asmname)
- return true;
-
- decl_str = IDENTIFIER_POINTER (decl_asmname);
- asmname_str = IDENTIFIER_POINTER (asmname);
-
-
- /* If the target assembler name was set by the user, things are trickier.
- We have a leading '*' to begin with. After that, it's arguable what
- is the correct thing to do with -fleading-underscore. Arguably, we've
- historically been doing the wrong thing in assemble_alias by always
- printing the leading underscore. Since we're not changing that, make
- sure user_label_prefix follows the '*' before matching. */
- if (decl_str[0] == '*')
- {
- size_t ulp_len = strlen (user_label_prefix);
-
- decl_str ++;
-
- if (ulp_len == 0)
- test = true;
- else if (strncmp (decl_str, user_label_prefix, ulp_len) == 0)
- decl_str += ulp_len, test=true;
- else
- decl_str --;
- }
- if (asmname_str[0] == '*')
- {
- size_t ulp_len = strlen (user_label_prefix);
-
- asmname_str ++;
-
- if (ulp_len == 0)
- test = true;
- else if (strncmp (asmname_str, user_label_prefix, ulp_len) == 0)
- asmname_str += ulp_len, test=true;
- else
- asmname_str --;
- }
-
- if (!test)
- return false;
- return strcmp (decl_str, asmname_str) == 0;
-}
-
-/* Hash asmnames ignoring the user specified marks. */
-
-hashval_t
-decl_assembler_name_hash (const_tree asmname)
-{
- if (IDENTIFIER_POINTER (asmname)[0] == '*')
- {
- const char *decl_str = IDENTIFIER_POINTER (asmname) + 1;
- size_t ulp_len = strlen (user_label_prefix);
-
- if (ulp_len == 0)
- ;
- else if (strncmp (decl_str, user_label_prefix, ulp_len) == 0)
- decl_str += ulp_len;
-
- return htab_hash_string (decl_str);
- }
-
- return htab_hash_string (IDENTIFIER_POINTER (asmname));
-}
-
/* Compute the number of bytes occupied by a tree with code CODE.
This function cannot be used for nodes that have variable sizes,
- including TREE_VEC, STRING_CST, and CALL_EXPR. */
+ including TREE_VEC, INTEGER_CST, STRING_CST, and CALL_EXPR. */
size_t
tree_code_size (enum tree_code code)
{
case tcc_constant: /* a constant */
switch (code)
{
- case INTEGER_CST: return sizeof (struct tree_int_cst);
+ case INTEGER_CST: gcc_unreachable ();
case REAL_CST: return sizeof (struct tree_real_cst);
case FIXED_CST: return sizeof (struct tree_fixed_cst);
case COMPLEX_CST: return sizeof (struct tree_complex);
const enum tree_code code = TREE_CODE (node);
switch (code)
{
+ case INTEGER_CST:
+ return (sizeof (struct tree_int_cst)
+ + (TREE_INT_CST_EXT_NUNITS (node) - 1) * sizeof (HOST_WIDE_INT));
+
case TREE_BINFO:
return (offsetof (struct tree_binfo, base_binfos)
+ vec<tree, va_gc>
/* Return a newly allocated node of code CODE. For decl and type
nodes, some other fields are initialized. The rest of the node is
- initialized to zero. This function cannot be used for TREE_VEC or
- OMP_CLAUSE nodes, which is enforced by asserts in tree_code_size.
+ initialized to zero. This function cannot be used for TREE_VEC,
+ INTEGER_CST or OMP_CLAUSE nodes, which is enforced by asserts in
+ tree_code_size.
Achoo! I got a code in the node. */
TYPE_SYMTAB_ADDRESS (t) = 0;
/* Do not copy the values cache. */
- if (TYPE_CACHED_VALUES_P(t))
+ if (TYPE_CACHED_VALUES_P (t))
{
TYPE_CACHED_VALUES_P (t) = 0;
TYPE_CACHED_VALUES (t) = NULL_TREE;
}
\f
+/* Return the value that TREE_INT_CST_EXT_NUNITS should have for an
+ INTEGER_CST with value CST and type TYPE. */
+
+static unsigned int
+get_int_cst_ext_nunits (tree type, const wide_int &cst)
+{
+ gcc_checking_assert (cst.get_precision () == TYPE_PRECISION (type));
+ /* We need an extra zero HWI if CST is an unsigned integer with its
+ upper bit set, and if CST occupies a whole number of HWIs. */
+ if (TYPE_UNSIGNED (type)
+ && wi::neg_p (cst)
+ && (cst.get_precision () % HOST_BITS_PER_WIDE_INT) == 0)
+ return cst.get_precision () / HOST_BITS_PER_WIDE_INT + 1;
+ return cst.get_len ();
+}
+
+/* Return a new INTEGER_CST with value CST and type TYPE. */
+
+static tree
+build_new_int_cst (tree type, const wide_int &cst)
+{
+ unsigned int len = cst.get_len ();
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
+ tree nt = make_int_cst (len, ext_len);
+
+ if (len < ext_len)
+ {
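+ /* The extra block beyond LEN is the leading zero HWI needed for an
+ unsigned value with its upper bit set; any blocks in between are
+ implicit sign extensions of the top element, i.e. all ones. */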
+ --ext_len;
+ TREE_INT_CST_ELT (nt, ext_len) = 0;
+ for (unsigned int i = len; i < ext_len; ++i)
+ TREE_INT_CST_ELT (nt, i) = -1;
+ }
+ else if (TYPE_UNSIGNED (type)
+ && cst.get_precision () < len * HOST_BITS_PER_WIDE_INT)
+ {
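+ /* An unsigned value whose precision is not a whole number of HWIs
+ is stored zero extended, so clear the bits above the precision in
+ the most significant element. */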
+ len--;
+ TREE_INT_CST_ELT (nt, len)
+ = zext_hwi (cst.elt (len),
+ cst.get_precision () % HOST_BITS_PER_WIDE_INT);
+ }
+
+ for (unsigned int i = 0; i < len; i++)
+ TREE_INT_CST_ELT (nt, i) = cst.elt (i);
+ TREE_TYPE (nt) = type;
+ return nt;
+}
+
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
tree
if (!type)
type = integer_type_node;
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
+}
+
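+/* Create an INT_CST node with value CST, zero extended to TYPE. */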
+tree
+build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
+{
+ return wide_int_to_tree (type, wi::uhwi (cst, TYPE_PRECISION (type)));
}
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
build_int_cst_type (tree type, HOST_WIDE_INT low)
{
gcc_assert (type);
-
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
}
/* Constructs a tree in type TYPE with the value given by CST. Signedness
tree
double_int_to_tree (tree type, double_int cst)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return build_int_cst_wide (type, cst.low, cst.high);
-}
-
-/* Returns true if CST fits into range of TYPE. Signedness of CST is assumed
- to be the same as the signedness of TYPE. */
-
-bool
-double_int_fits_to_tree_p (const_tree type, double_int cst)
-{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- double_int ext
- = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return cst == ext;
+ return wide_int_to_tree (type, widest_int::from (cst, TYPE_SIGN (type)));
}
-/* We force the double_int CST to the range of the type TYPE by sign or
+/* We force the wide_int CST to the range of the type TYPE by sign or
zero extending it. OVERFLOWABLE indicates if we are interested in
overflow of the value, when >0 we are only interested in signed
overflow, for <0 we are interested in any overflow. OVERFLOWED
OVERFLOWED is nonzero,
or OVERFLOWABLE is >0 and signed overflow occurs
or OVERFLOWABLE is <0 and any overflow occurs
- We return a new tree node for the extended double_int. The node
+ We return a new tree node for the extended wide_int. The node
is shared if no overflow flags are set. */
tree
-force_fit_type_double (tree type, double_int cst, int overflowable,
- bool overflowed)
+force_fit_type (tree type, const wide_int_ref &cst,
+ int overflowable, bool overflowed)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
+ signop sign = TYPE_SIGN (type);
/* If we need to set overflow flags, return a new unshared node. */
- if (overflowed || !double_int_fits_to_tree_p(type, cst))
+ if (overflowed || !wi::fits_to_tree_p (cst, type))
{
if (overflowed
|| overflowable < 0
- || (overflowable > 0 && sign_extended_type))
+ || (overflowable > 0 && sign == SIGNED))
{
- tree t = make_node (INTEGER_CST);
- TREE_INT_CST (t)
- = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
- TREE_TYPE (t) = type;
+ wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign);
+ tree t = build_new_int_cst (type, tmp);
TREE_OVERFLOW (t) = 1;
return t;
}
}
/* Else build a shared node. */
- return double_int_to_tree (type, cst);
+ return wide_int_to_tree (type, cst);
}
/* These are the hash table functions for the hash table of INTEGER_CST
int_cst_hash_hash (const void *x)
{
const_tree const t = (const_tree) x;
+ hashval_t code = htab_hash_pointer (TREE_TYPE (t));
+ int i;
+
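+ /* Mix each significant HOST_WIDE_INT element into the type's hash. */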
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ code ^= TREE_INT_CST_ELT (t, i);
- return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t)
- ^ htab_hash_pointer (TREE_TYPE (t)));
+ return code;
}
/* Return nonzero if the value represented by *X (an INTEGER_CST tree node)
const_tree const xt = (const_tree) x;
const_tree const yt = (const_tree) y;
- return (TREE_TYPE (xt) == TREE_TYPE (yt)
- && TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt)
- && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt));
+ if (TREE_TYPE (xt) != TREE_TYPE (yt)
+ || TREE_INT_CST_NUNITS (xt) != TREE_INT_CST_NUNITS (yt)
+ || TREE_INT_CST_EXT_NUNITS (xt) != TREE_INT_CST_EXT_NUNITS (yt))
+ return false;
+
+ for (int i = 0; i < TREE_INT_CST_NUNITS (xt); i++)
+ if (TREE_INT_CST_ELT (xt, i) != TREE_INT_CST_ELT (yt, i))
+ return false;
+
+ return true;
}
-/* Create an INT_CST node of TYPE and value HI:LOW.
+/* Create an INT_CST node of TYPE and value CST.
The returned node is always shared. For small integers we use a
- per-type vector cache, for larger ones we use a single hash table. */
+ per-type vector cache, for larger ones we use a single hash table.
+ The value is extended from its precision according to the sign of
+ the type to be a multiple of HOST_BITS_PER_WIDE_INT. This defines
+ the upper bits and ensures that hashing and value equality based
+ upon the underlying HOST_WIDE_INTs works without masking. */
tree
-build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
+wide_int_to_tree (tree type, const wide_int_ref &pcst)
{
tree t;
int ix = -1;
int limit = 0;
gcc_assert (type);
+ unsigned int prec = TYPE_PRECISION (type);
+ signop sgn = TYPE_SIGN (type);
+
+ /* Verify that everything is canonical. */
+ int l = pcst.get_len ();
+ if (l > 1)
+ {
+ if (pcst.elt (l - 1) == 0)
+ gcc_assert (pcst.elt (l - 2) < 0);
+ if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1)
+ gcc_assert (pcst.elt (l - 2) >= 0);
+ }
+
+ wide_int cst = wide_int::from (pcst, prec, sgn);
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
switch (TREE_CODE (type))
{
case NULLPTR_TYPE:
- gcc_assert (hi == 0 && low == 0);
+ gcc_assert (cst == 0);
/* Fallthru. */
case POINTER_TYPE:
case REFERENCE_TYPE:
- /* Cache NULL pointer. */
- if (!hi && !low)
+ case POINTER_BOUNDS_TYPE:
+ /* Cache NULL pointer and zero bounds. */
+ if (cst == 0)
{
limit = 1;
ix = 0;
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (!hi && low < 2)
- ix = low;
+ if (wi::leu_p (cst, 1))
+ ix = cst.to_uhwi ();
break;
case INTEGER_TYPE:
case OFFSET_TYPE:
- if (TYPE_UNSIGNED (type))
+ if (TYPE_SIGN (type) == UNSIGNED)
{
/* Cache 0..N */
limit = INTEGER_SHARE_LIMIT;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low;
+
+ /* This is a little hokey, but if the prec is smaller than
+ what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ obvious test will not get the correct answer. */
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (cst.to_uhwi () < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ ix = cst.to_uhwi ();
+ }
+ else if (wi::ltu_p (cst, INTEGER_SHARE_LIMIT))
+ ix = cst.to_uhwi ();
}
else
{
/* Cache -1..N */
limit = INTEGER_SHARE_LIMIT + 1;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low + 1;
- else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1)
+
+ if (cst == -1)
ix = 0;
+ else if (!wi::neg_p (cst))
+ {
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (cst.to_shwi () < INTEGER_SHARE_LIMIT)
+ ix = cst.to_shwi () + 1;
+ }
+ else if (wi::lts_p (cst, INTEGER_SHARE_LIMIT))
+ ix = cst.to_shwi () + 1;
+ }
}
break;
gcc_unreachable ();
}
- if (ix >= 0)
+ if (ext_len == 1)
{
- /* Look for it in the type's vector of small shared ints. */
- if (!TYPE_CACHED_VALUES_P (type))
+ /* We just need to store a single HOST_WIDE_INT. */
+ HOST_WIDE_INT hwi;
+ if (TYPE_UNSIGNED (type))
+ hwi = cst.to_uhwi ();
+ else
+ hwi = cst.to_shwi ();
+ if (ix >= 0)
{
- TYPE_CACHED_VALUES_P (type) = 1;
- TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
- }
+ /* Look for it in the type's vector of small shared ints. */
+ if (!TYPE_CACHED_VALUES_P (type))
+ {
+ TYPE_CACHED_VALUES_P (type) = 1;
+ TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
+ }
- t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
- if (t)
- {
- /* Make sure no one is clobbering the shared constant. */
- gcc_assert (TREE_TYPE (t) == type);
- gcc_assert (TREE_INT_CST_LOW (t) == low);
- gcc_assert (TREE_INT_CST_HIGH (t) == hi);
+ t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
+ if (t)
+ /* Make sure no one is clobbering the shared constant. */
+ gcc_assert (TREE_TYPE (t) == type
+ && TREE_INT_CST_NUNITS (t) == 1
+ && TREE_INT_CST_EXT_NUNITS (t) == 1
+ && TREE_INT_CST_ELT (t, 0) == hwi);
+ else
+ {
+ /* Create a new shared int. */
+ t = build_new_int_cst (type, cst);
+ TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ }
}
else
{
- /* Create a new shared int. */
- t = make_node (INTEGER_CST);
+ /* Use the cache of larger shared ints, using int_cst_node as
+ a temporary. */
+ void **slot;
- TREE_INT_CST_LOW (t) = low;
- TREE_INT_CST_HIGH (t) = hi;
- TREE_TYPE (t) = type;
+ TREE_INT_CST_ELT (int_cst_node, 0) = hwi;
+ TREE_TYPE (int_cst_node) = type;
- TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ t = (tree) *slot;
+ if (!t)
+ {
+ /* Insert this one into the hash table. */
+ t = int_cst_node;
+ *slot = t;
+ /* Make a new node for next time round. */
+ int_cst_node = make_int_cst (1, 1);
+ }
}
}
else
{
- /* Use the cache of larger shared ints. */
+ /* The value either hashes properly or we drop it on the floor
+ for the gc to take care of. There will not be enough of them
+ to worry about. */
void **slot;
- TREE_INT_CST_LOW (int_cst_node) = low;
- TREE_INT_CST_HIGH (int_cst_node) = hi;
- TREE_TYPE (int_cst_node) = type;
-
- slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ tree nt = build_new_int_cst (type, cst);
+ slot = htab_find_slot (int_cst_hash_table, nt, INSERT);
t = (tree) *slot;
if (!t)
{
/* Insert this one into the hash table. */
- t = int_cst_node;
+ t = nt;
*slot = t;
- /* Make a new node for next time round. */
- int_cst_node = make_node (INTEGER_CST);
}
}
cache_integer_cst (tree t)
{
tree type = TREE_TYPE (t);
- HOST_WIDE_INT hi = TREE_INT_CST_HIGH (t);
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (t);
int ix = -1;
int limit = 0;
+ int prec = TYPE_PRECISION (type);
gcc_assert (!TREE_OVERFLOW (t));
switch (TREE_CODE (type))
{
case NULLPTR_TYPE:
- gcc_assert (hi == 0 && low == 0);
+ gcc_assert (integer_zerop (t));
/* Fallthru. */
case POINTER_TYPE:
case REFERENCE_TYPE:
/* Cache NULL pointer. */
- if (!hi && !low)
+ if (integer_zerop (t))
{
limit = 1;
ix = 0;
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (!hi && low < 2)
- ix = low;
+ if (wi::ltu_p (t, 2))
+ ix = TREE_INT_CST_ELT (t, 0);
break;
case INTEGER_TYPE:
{
/* Cache 0..N */
limit = INTEGER_SHARE_LIMIT;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low;
+
+ /* This is a little hokey, but if the prec is smaller than
+ what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ obvious test will not get the correct answer. */
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ ix = tree_to_uhwi (t);
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_uhwi (t);
}
else
{
/* Cache -1..N */
limit = INTEGER_SHARE_LIMIT + 1;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low + 1;
- else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1)
+
+ if (integer_minus_onep (t))
ix = 0;
+ else if (!wi::neg_p (t))
+ {
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+ ix = tree_to_shwi (t) + 1;
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_shwi (t) + 1;
+ }
}
break;
/* If there is already an entry for the number verify it's the
same. */
if (*slot)
- {
- gcc_assert (TREE_INT_CST_LOW ((tree)*slot) == low
- && TREE_INT_CST_HIGH ((tree)*slot) == hi);
- return;
- }
- /* Otherwise insert this one into the hash table. */
- *slot = t;
+ gcc_assert (wi::eq_p (tree (*slot), t));
+ else
+ /* Otherwise insert this one into the hash table. */
+ *slot = t;
}
}
tree
build_low_bits_mask (tree type, unsigned bits)
{
- double_int mask;
-
gcc_assert (bits <= TYPE_PRECISION (type));
- if (bits == TYPE_PRECISION (type)
- && !TYPE_UNSIGNED (type))
- /* Sign extended all-ones mask. */
- mask = double_int_minus_one;
- else
- mask = double_int::mask (bits);
-
- return build_int_cst_wide (type, mask.low, mask.high);
-}
-
-/* Checks that X is integer constant that can be expressed in (unsigned)
- HOST_WIDE_INT without loss of precision. */
-
-bool
-cst_and_fits_in_hwi (const_tree x)
-{
- if (TREE_CODE (x) != INTEGER_CST)
- return false;
-
- if (TYPE_PRECISION (TREE_TYPE (x)) > HOST_BITS_PER_WIDE_INT)
- return false;
-
- return (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ return wide_int_to_tree (type, wi::mask (bits, false,
+ TYPE_PRECISION (type)));
}
/* Build a newly constructed TREE_VEC node of length LEN. */
memset (&d, 0, sizeof d);
real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode,
- TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i),
- TYPE_UNSIGNED (TREE_TYPE (i)));
+ wide_int (i), TYPE_SIGN (TREE_TYPE (i)));
return d;
}
case FIXED_POINT_TYPE:
/* We can only generate 1 for accum types. */
gcc_assert (ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)));
- return build_fixed (type, FCONST1(TYPE_MODE (type)));
+ return build_fixed (type, FCONST1 (TYPE_MODE (type)));
case VECTOR_TYPE:
{
return t;
}
+/* Build a newly constructed INTEGER_CST node. LEN and EXT_LEN are the
+ values of TREE_INT_CST_NUNITS and TREE_INT_CST_EXT_NUNITS respectively.
+ The latter determines the length of the HOST_WIDE_INT vector. */
+
+tree
+make_int_cst_stat (int len, int ext_len MEM_STAT_DECL)
+{
+ tree t;
+ int length = ((ext_len - 1) * sizeof (HOST_WIDE_INT)
+ + sizeof (struct tree_int_cst));
+
+ gcc_assert (len);
+ record_node_allocation_statistics (INTEGER_CST, length);
+
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
+
+ TREE_SET_CODE (t, INTEGER_CST);
+ TREE_INT_CST_NUNITS (t) = len;
+ TREE_INT_CST_EXT_NUNITS (t) = ext_len;
+
+ TREE_CONSTANT (t) = 1;
+
+ return t;
+}
+
/* Build a newly constructed TREE_VEC node of length LEN. */
tree
return t;
}
+
+/* Grow a TREE_VEC node to new length LEN. */
+
+tree
+grow_tree_vec_stat (tree v, int len MEM_STAT_DECL)
+{
+ gcc_assert (TREE_CODE (v) == TREE_VEC);
+
+ int oldlen = TREE_VEC_LENGTH (v);
+ gcc_assert (len > oldlen);
+
+ int oldlength = (oldlen - 1) * sizeof (tree) + sizeof (struct tree_vec);
+ int length = (len - 1) * sizeof (tree) + sizeof (struct tree_vec);
+
+ record_node_allocation_statistics (TREE_VEC, length - oldlength);
+
+ v = (tree) ggc_realloc_stat (v, length PASS_MEM_STAT);
+
+ TREE_VEC_LENGTH (v) = len;
+
+ return v;
+}
\f
/* Return 1 if EXPR is the integer constant zero or a complex constant
of zero. */
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 0
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (expr, 0);
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 1
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (wi::to_widest (expr), 1);
case COMPLEX_CST:
return (integer_onep (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
int
integer_all_onesp (const_tree expr)
{
- int prec;
- int uns;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- uns = TYPE_UNSIGNED (TREE_TYPE (expr));
- if (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == -1)
- return 1;
- if (!uns)
- return 0;
-
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- if (prec >= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT high_value;
- int shift_amount;
-
- shift_amount = prec - HOST_BITS_PER_WIDE_INT;
-
- /* Can not handle precisions greater than twice the host int size. */
- gcc_assert (shift_amount <= HOST_BITS_PER_WIDE_INT);
- if (shift_amount == HOST_BITS_PER_WIDE_INT)
- /* Shifting by the host word size is undefined according to the ANSI
- standard, so we must handle this as a special case. */
- high_value = -1;
- else
- high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1;
-
- return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == high_value);
- }
- else
- return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
+ return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
}
/* Return 1 if EXPR is the integer constant minus one. */
int
integer_pow2p (const_tree expr)
{
- int prec;
- unsigned HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
- }
-
- if (high == 0 && low == 0)
- return 0;
-
- return ((high == 0 && (low & (low - 1)) == 0)
- || (low == 0 && (high & (high - 1)) == 0));
+ return wi::popcount (expr) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
STRIP_NOPS (expr);
return ((TREE_CODE (expr) == INTEGER_CST
- && (TREE_INT_CST_LOW (expr) != 0
- || TREE_INT_CST_HIGH (expr) != 0))
+ && !wi::eq_p (expr, 0))
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
int
tree_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
- }
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high)
- : exact_log2 (low));
+ return wi::exact_log2 (expr);
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
int
tree_floor_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
+ return wi::floor_log2 (expr);
+}
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. Ignore if type's precision hasn't been set
- since what we are doing is setting it. */
+/* Return the number of known trailing zero bits in EXPR, or, if the value of
+ EXPR is known to be zero, the precision of its type. */
- if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
+unsigned int
+tree_ctz (const_tree expr)
+{
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
+ && !POINTER_TYPE_P (TREE_TYPE (expr)))
+ return 0;
+
+ unsigned int ret1, ret2, prec = TYPE_PRECISION (TREE_TYPE (expr));
+ switch (TREE_CODE (expr))
{
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
+ case INTEGER_CST:
+ ret1 = wi::ctz (expr);
+ return MIN (ret1, prec);
+ case SSA_NAME:
+ ret1 = wi::ctz (get_nonzero_bits (expr));
+ return MIN (ret1, prec);
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
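+ /* These operations preserve trailing zero bits that are common to
+ both operands, so the smaller of the two counts is known. */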
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (ret1 == 0)
+ return ret1;
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MIN (ret1, ret2);
+ case POINTER_PLUS_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ /* Second operand is sizetype, which could be in theory
+ wider than pointer's precision. Make sure we never
+ return more than prec. */
+ ret2 = MIN (ret2, prec);
+ return MIN (ret1, ret2);
+ case BIT_AND_EXPR:
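+ /* A zero bit in either operand forces a zero bit in the result, so
+ the larger of the two counts is known. */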
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MAX (ret1, ret2);
+ case MULT_EXPR:
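+ /* Trailing zero counts of the factors add up, capped at the precision. */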
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_ctz (TREE_OPERAND (expr, 1));
+ return MIN (ret1 + ret2, prec);
+ case LSHIFT_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (tree_fits_uhwi_p (TREE_OPERAND (expr, 1))
+ && (tree_to_uhwi (TREE_OPERAND (expr, 1)) < prec))
+ {
+ ret2 = tree_to_uhwi (TREE_OPERAND (expr, 1));
+ return MIN (ret1 + ret2, prec);
+ }
+ return ret1;
+ case RSHIFT_EXPR:
+ if (tree_fits_uhwi_p (TREE_OPERAND (expr, 1))
+ && (tree_to_uhwi (TREE_OPERAND (expr, 1)) < prec))
+ {
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = tree_to_uhwi (TREE_OPERAND (expr, 1));
+ if (ret1 > ret2)
+ return ret1 - ret2;
+ }
+ return 0;
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) == 1)
+ {
+ int l = tree_log2 (TREE_OPERAND (expr, 1));
+ if (l >= 0)
+ {
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ ret2 = l;
+ if (ret1 > ret2)
+ return ret1 - ret2;
+ }
+ }
+ return 0;
+ CASE_CONVERT:
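+ /* If the operand is known to be zero (its count equals its whole
+ precision), the converted value is zero as well; otherwise the
+ count carries over, capped at the new precision. */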
+ ret1 = tree_ctz (TREE_OPERAND (expr, 0));
+ if (ret1 && ret1 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (expr, 0))))
+ ret1 = prec;
+ return MIN (ret1, prec);
+ case SAVE_EXPR:
+ return tree_ctz (TREE_OPERAND (expr, 0));
+ case COND_EXPR:
+ ret1 = tree_ctz (TREE_OPERAND (expr, 1));
+ if (ret1 == 0)
+ return 0;
+ ret2 = tree_ctz (TREE_OPERAND (expr, 2));
+ return MIN (ret1, ret2);
+ case COMPOUND_EXPR:
+ return tree_ctz (TREE_OPERAND (expr, 1));
+ case ADDR_EXPR:
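+ /* A known alignment of more than one byte means the low bits of the
+ byte address are zero. */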
+ ret1 = get_pointer_alignment (CONST_CAST_TREE (expr));
+ if (ret1 > BITS_PER_UNIT)
+ {
+ ret1 = ctz_hwi (ret1 / BITS_PER_UNIT);
+ return MIN (ret1, prec);
+ }
+ return 0;
+ default:
+ return 0;
}
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high)
- : floor_log2 (low));
}
/* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for
}
}
-/* Return 1 if EXPR is the real constant two. Trailing zeroes matter
- for decimal float constants, so don't return 1 for them. */
-
-int
-real_twop (const_tree expr)
-{
- STRIP_NOPS (expr);
-
- switch (TREE_CODE (expr))
- {
- case REAL_CST:
- return REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2)
- && !(DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (expr))));
- case COMPLEX_CST:
- return real_twop (TREE_REALPART (expr))
- && real_zerop (TREE_IMAGPART (expr));
- case VECTOR_CST:
- {
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (expr); ++i)
- if (!real_twop (VECTOR_CST_ELT (expr, i)))
- return false;
- return true;
- }
- default:
- return false;
- }
-}
-
/* Return 1 if EXPR is the real constant minus one. Trailing zeroes
matter for decimal float constants, so don't return 1 for them. */
return len;
}
-/* Returns the number of FIELD_DECLs in TYPE. */
-
-int
-fields_length (const_tree type)
-{
- tree t = TYPE_FIELDS (type);
- int count = 0;
-
- for (; t; t = DECL_CHAIN (t))
- if (TREE_CODE (t) == FIELD_DECL)
- ++count;
-
- return count;
-}
-
/* Returns the first FIELD_DECL in the TYPE_FIELDS of the RECORD_TYPE or
UNION_TYPE TYPE, or NULL_TREE if none. */
type = TYPE_MAIN_VARIANT (type);
t = TYPE_SIZE_UNIT (type);
- if (t == 0
- || TREE_CODE (t) != INTEGER_CST
- || TREE_INT_CST_HIGH (t) != 0
- /* If the result would appear negative, it's too big to represent. */
- || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0)
- return -1;
- return TREE_INT_CST_LOW (t);
+ if (t && cst_fits_uhwi_p (t))
+ return TREE_INT_CST_LOW (t);
+ else
+ return -1;
}
/* Return the maximum size of TYPE (in bytes) as a wide integer
{
size_tree = TYPE_ARRAY_MAX_SIZE (type);
- if (size_tree && host_integerp (size_tree, 1))
- size = tree_low_cst (size_tree, 1);
+ if (size_tree && tree_fits_uhwi_p (size_tree))
+ size = tree_to_uhwi (size_tree);
}
/* If we still haven't been able to get a size, see if the language
{
size_tree = lang_hooks.types.max_size (type);
- if (size_tree && host_integerp (size_tree, 1))
- size = tree_low_cst (size_tree, 1);
+ if (size_tree && tree_fits_uhwi_p (size_tree))
+ size = tree_to_uhwi (size_tree);
}
return size;
}
-
-/* Returns a tree for the size of EXP in bytes. */
-
-tree
-tree_expr_size (const_tree exp)
-{
- if (DECL_P (exp)
- && DECL_SIZE_UNIT (exp) != 0)
- return DECL_SIZE_UNIT (exp);
- else
- return size_in_bytes (TREE_TYPE (exp));
-}
\f
/* Return the bit position of FIELD, in bits from the start of the record.
This is a tree of type bitsizetype. */
HOST_WIDE_INT
int_bit_position (const_tree field)
{
- return tree_low_cst (bit_position (field), 0);
+ return tree_to_shwi (bit_position (field));
}
\f
/* Return the byte position of FIELD, in bytes from the start of the record.
HOST_WIDE_INT
int_byte_position (const_tree field)
{
- return tree_low_cst (byte_position (field), 0);
+ return tree_to_shwi (byte_position (field));
}
\f
/* Return the strictest alignment, in bits, that T is known to have. */
switch (TREE_CODE (type))
{
case VOID_TYPE:
+ case POINTER_BOUNDS_TYPE:
case COMPLEX_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
{
int i;
- new_tree = NULL_TREE;
-
- for (i = 1; i < TREE_OPERAND_LENGTH (exp); i++)
- {
- tree op = TREE_OPERAND (exp, i);
- tree new_op = SUBSTITUTE_PLACEHOLDER_IN_EXPR (op, obj);
- if (new_op != op)
- {
- if (!new_tree)
- new_tree = copy_node (exp);
- TREE_OPERAND (new_tree, i) = new_op;
- }
- }
-
- if (new_tree)
- {
- new_tree = fold (new_tree);
- if (TREE_CODE (new_tree) == CALL_EXPR)
- process_call_operands (new_tree);
- }
- else
- return exp;
- }
- break;
-
- default:
- gcc_unreachable ();
- }
-
- TREE_READONLY (new_tree) |= TREE_READONLY (exp);
-
- if (code == INDIRECT_REF || code == ARRAY_REF || code == ARRAY_RANGE_REF)
- TREE_THIS_NOTRAP (new_tree) |= TREE_THIS_NOTRAP (exp);
-
- return new_tree;
-}
-\f
-/* Stabilize a reference so that we can use it any number of times
- without causing its operands to be evaluated more than once.
- Returns the stabilized reference. This works by means of save_expr,
- so see the caveats in the comments about save_expr.
-
- Also allows conversion expressions whose operands are references.
- Any other kind of expression is returned unchanged. */
-
-tree
-stabilize_reference (tree ref)
-{
- tree result;
- enum tree_code code = TREE_CODE (ref);
-
- switch (code)
- {
- case VAR_DECL:
- case PARM_DECL:
- case RESULT_DECL:
- /* No action is needed in this case. */
- return ref;
-
- CASE_CONVERT:
- case FLOAT_EXPR:
- case FIX_TRUNC_EXPR:
- result = build_nt (code, stabilize_reference (TREE_OPERAND (ref, 0)));
- break;
-
- case INDIRECT_REF:
- result = build_nt (INDIRECT_REF,
- stabilize_reference_1 (TREE_OPERAND (ref, 0)));
- break;
-
- case COMPONENT_REF:
- result = build_nt (COMPONENT_REF,
- stabilize_reference (TREE_OPERAND (ref, 0)),
- TREE_OPERAND (ref, 1), NULL_TREE);
- break;
-
- case BIT_FIELD_REF:
- result = build_nt (BIT_FIELD_REF,
- stabilize_reference (TREE_OPERAND (ref, 0)),
- TREE_OPERAND (ref, 1), TREE_OPERAND (ref, 2));
- break;
-
- case ARRAY_REF:
- result = build_nt (ARRAY_REF,
- stabilize_reference (TREE_OPERAND (ref, 0)),
- stabilize_reference_1 (TREE_OPERAND (ref, 1)),
- TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3));
- break;
-
- case ARRAY_RANGE_REF:
- result = build_nt (ARRAY_RANGE_REF,
- stabilize_reference (TREE_OPERAND (ref, 0)),
- stabilize_reference_1 (TREE_OPERAND (ref, 1)),
- TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3));
- break;
+ new_tree = NULL_TREE;
- case COMPOUND_EXPR:
- /* We cannot wrap the first expression in a SAVE_EXPR, as then
- it wouldn't be ignored. This matters when dealing with
- volatiles. */
- return stabilize_reference_1 (ref);
+ for (i = 1; i < TREE_OPERAND_LENGTH (exp); i++)
+ {
+ tree op = TREE_OPERAND (exp, i);
+ tree new_op = SUBSTITUTE_PLACEHOLDER_IN_EXPR (op, obj);
+ if (new_op != op)
+ {
+ if (!new_tree)
+ new_tree = copy_node (exp);
+ TREE_OPERAND (new_tree, i) = new_op;
+ }
+ }
- /* If arg isn't a kind of lvalue we recognize, make no change.
- Caller should recognize the error for an invalid lvalue. */
- default:
- return ref;
+ if (new_tree)
+ {
+ new_tree = fold (new_tree);
+ if (TREE_CODE (new_tree) == CALL_EXPR)
+ process_call_operands (new_tree);
+ }
+ else
+ return exp;
+ }
+ break;
- case ERROR_MARK:
- return error_mark_node;
- }
+ default:
+ gcc_unreachable ();
+ }
- TREE_TYPE (result) = TREE_TYPE (ref);
- TREE_READONLY (result) = TREE_READONLY (ref);
- TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (ref);
- TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);
+ TREE_READONLY (new_tree) |= TREE_READONLY (exp);
- return result;
+ if (code == INDIRECT_REF || code == ARRAY_REF || code == ARRAY_RANGE_REF)
+ TREE_THIS_NOTRAP (new_tree) |= TREE_THIS_NOTRAP (exp);
+
+ return new_tree;
}
+\f
/* Subroutine of stabilize_reference; this is called for subtrees of
references. Any expression with side-effects must be put in a SAVE_EXPR
operator should be allowed, and that cse should take care of coalescing
multiple utterances of the same expression should that prove fruitful. */
-tree
+static tree
stabilize_reference_1 (tree e)
{
tree result;
return result;
}
+
+/* Stabilize a reference so that we can use it any number of times
+ without causing its operands to be evaluated more than once.
+ Returns the stabilized reference. This works by means of save_expr,
+ so see the caveats in the comments about save_expr.
+
+ Also allows conversion expressions whose operands are references.
+ Any other kind of expression is returned unchanged. */
+
+tree
+stabilize_reference (tree ref)
+{
+ tree result;
+ enum tree_code code = TREE_CODE (ref);
+
+ switch (code)
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ /* No action is needed in this case. */
+ return ref;
+
+ CASE_CONVERT:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ result = build_nt (code, stabilize_reference (TREE_OPERAND (ref, 0)));
+ break;
+
+ case INDIRECT_REF:
+ result = build_nt (INDIRECT_REF,
+ stabilize_reference_1 (TREE_OPERAND (ref, 0)));
+ break;
+
+ case COMPONENT_REF:
+ result = build_nt (COMPONENT_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ TREE_OPERAND (ref, 1), NULL_TREE);
+ break;
+
+ case BIT_FIELD_REF:
+ result = build_nt (BIT_FIELD_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ TREE_OPERAND (ref, 1), TREE_OPERAND (ref, 2));
+ break;
+
+ case ARRAY_REF:
+ result = build_nt (ARRAY_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)),
+ TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3));
+ break;
+
+ case ARRAY_RANGE_REF:
+ result = build_nt (ARRAY_RANGE_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)),
+ TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3));
+ break;
+
+ case COMPOUND_EXPR:
+ /* We cannot wrap the first expression in a SAVE_EXPR, as then
+ it wouldn't be ignored. This matters when dealing with
+ volatiles. */
+ return stabilize_reference_1 (ref);
+
+ /* If arg isn't a kind of lvalue we recognize, make no change.
+ Caller should recognize the error for an invalid lvalue. */
+ default:
+ return ref;
+
+ case ERROR_MARK:
+ return error_mark_node;
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (ref);
+ TREE_READONLY (result) = TREE_READONLY (ref);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (ref);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);
+
+ return result;
+}
\f
/* Low-level constructors for expressions. */
read_only = 1;
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
TREE_READONLY (t) = read_only;
TREE_CONSTANT (t) = constant;
else
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
if (code == COND_EXPR)
TREE_READONLY (t) = read_only;
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
- PROCESS_ARG(3);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
+ PROCESS_ARG (3);
TREE_SIDE_EFFECTS (t) = side_effects;
TREE_THIS_VOLATILE (t)
side_effects = TREE_SIDE_EFFECTS (t);
- PROCESS_ARG(0);
- PROCESS_ARG(1);
- PROCESS_ARG(2);
- PROCESS_ARG(3);
- PROCESS_ARG(4);
+ PROCESS_ARG (0);
+ PROCESS_ARG (1);
+ PROCESS_ARG (2);
+ PROCESS_ARG (3);
+ PROCESS_ARG (4);
TREE_SIDE_EFFECTS (t) = side_effects;
TREE_THIS_VOLATILE (t)
/* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T. */
-double_int
+offset_int
mem_ref_offset (const_tree t)
{
- tree toff = TREE_OPERAND (t, 1);
- return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff)));
+ return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
}
/* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
{
hashval_t hashcode = 0;
tree ntype;
+ int i;
+ tree t;
enum tree_code code = TREE_CODE (ttype);
/* Building a distinct copy of a tagged type is inappropriate; it
hashcode);
break;
case INTEGER_TYPE:
- hashcode = iterative_hash_object
- (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode);
- hashcode = iterative_hash_object
- (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode);
+ t = TYPE_MAX_VALUE (ntype);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ hashcode = iterative_hash_object (TREE_INT_CST_ELT (t, i), hashcode);
break;
case REAL_TYPE:
case FIXED_POINT_TYPE:
return ttype;
}
+/* Check if "omp declare simd" attribute arguments, CLAUSES1 and CLAUSES2, are
+ the same. */
+
+static bool
+omp_declare_simd_clauses_equal (tree clauses1, tree clauses2)
+{
+ tree cl1, cl2;
+ for (cl1 = clauses1, cl2 = clauses2;
+ cl1 && cl2;
+ cl1 = OMP_CLAUSE_CHAIN (cl1), cl2 = OMP_CLAUSE_CHAIN (cl2))
+ {
+ if (OMP_CLAUSE_CODE (cl1) != OMP_CLAUSE_CODE (cl2))
+ return false;
+ if (OMP_CLAUSE_CODE (cl1) != OMP_CLAUSE_SIMDLEN)
+ {
+ if (simple_cst_equal (OMP_CLAUSE_DECL (cl1),
+ OMP_CLAUSE_DECL (cl2)) != 1)
+ return false;
+ }
+ switch (OMP_CLAUSE_CODE (cl1))
+ {
+ case OMP_CLAUSE_ALIGNED:
+ if (simple_cst_equal (OMP_CLAUSE_ALIGNED_ALIGNMENT (cl1),
+ OMP_CLAUSE_ALIGNED_ALIGNMENT (cl2)) != 1)
+ return false;
+ break;
+ case OMP_CLAUSE_LINEAR:
+ if (simple_cst_equal (OMP_CLAUSE_LINEAR_STEP (cl1),
+ OMP_CLAUSE_LINEAR_STEP (cl2)) != 1)
+ return false;
+ break;
+ case OMP_CLAUSE_SIMDLEN:
+ if (simple_cst_equal (OMP_CLAUSE_SIMDLEN_EXPR (cl1),
+ OMP_CLAUSE_SIMDLEN_EXPR (cl2)) != 1)
+ return false;
+ break;
+ default:
+ break;
+ }
+ }
+ return true;
+}
+
+/* Compare two constructor-element-type constants. Return true if the lists
+ are known to be equal; otherwise return false. */
+
+static bool
+simple_cst_list_equal (const_tree l1, const_tree l2)
+{
+ while (l1 != NULL_TREE && l2 != NULL_TREE)
+ {
+ if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1)
+ return false;
+
+ l1 = TREE_CHAIN (l1);
+ l2 = TREE_CHAIN (l2);
+ }
+
+ return l1 == l2;
+}
+
/* Compare two attributes for their value identity. Return true if the
attribute values are known to be equal; otherwise return false.
*/
return (simple_cst_list_equal (TREE_VALUE (attr1),
TREE_VALUE (attr2)) == 1);
+ if ((flag_openmp || flag_openmp_simd)
+ && TREE_VALUE (attr1) && TREE_VALUE (attr2)
+ && TREE_CODE (TREE_VALUE (attr1)) == OMP_CLAUSE
+ && TREE_CODE (TREE_VALUE (attr2)) == OMP_CLAUSE)
+ return omp_declare_simd_clauses_equal (TREE_VALUE (attr1),
+ TREE_VALUE (attr2));
+
return (simple_cst_equal (TREE_VALUE (attr1), TREE_VALUE (attr2)) == 1);
}
{
struct cgraph_node *node;
if (!(node = cgraph_get_node (decl))
- || (!node->symbol.definition && !node->clones))
+ || (!node->definition && !node->clones))
{
if (node)
cgraph_release_function_body (node);
DECL_VINDEX referring to itself into a vtable slot number as it
should. Happens with functions that are copied and then forgotten
about. Just clear it, it won't matter anymore. */
- if (DECL_VINDEX (decl) && !host_integerp (DECL_VINDEX (decl), 0))
+ if (DECL_VINDEX (decl) && !tree_fits_shwi_p (DECL_VINDEX (decl)))
DECL_VINDEX (decl) = NULL_TREE;
}
else if (TREE_CODE (decl) == VAR_DECL)
unsigned ix;
tree t;
- find_decls_types (n->symbol.decl, fld);
+ find_decls_types (n->decl, fld);
- if (!gimple_has_body_p (n->symbol.decl))
+ if (!gimple_has_body_p (n->decl))
return;
gcc_assert (current_function_decl == NULL_TREE && cfun == NULL);
- fn = DECL_STRUCT_FUNCTION (n->symbol.decl);
+ fn = DECL_STRUCT_FUNCTION (n->decl);
/* Traverse locals. */
FOR_EACH_LOCAL_DECL (fn, ix, t)
static void
find_decls_types_in_var (struct varpool_node *v, struct free_lang_data_d *fld)
{
- find_decls_types (v->symbol.decl, fld);
+ find_decls_types (v->decl, fld);
}
/* If T needs an assembler name, have one created for it. */
class pass_ipa_free_lang_data : public simple_ipa_opt_pass
{
public:
- pass_ipa_free_lang_data(gcc::context *ctxt)
- : simple_ipa_opt_pass(pass_data_ipa_free_lang_data, ctxt)
+ pass_ipa_free_lang_data (gcc::context *ctxt)
+ : simple_ipa_opt_pass (pass_data_ipa_free_lang_data, ctxt)
{}
/* opt_pass methods: */
TYPE_READONLY (type) = (type_quals & TYPE_QUAL_CONST) != 0;
TYPE_VOLATILE (type) = (type_quals & TYPE_QUAL_VOLATILE) != 0;
TYPE_RESTRICT (type) = (type_quals & TYPE_QUAL_RESTRICT) != 0;
+ TYPE_ATOMIC (type) = (type_quals & TYPE_QUAL_ATOMIC) != 0;
TYPE_ADDR_SPACE (type) = DECODE_QUAL_ADDR_SPACE (type_quals);
}
TYPE_ATTRIBUTES (base)));
}
+/* This function checks to see if TYPE matches the size of one of the built-in
+ atomic types, and returns that core atomic type. */
+
+static tree
+find_atomic_core_type (tree type)
+{
+ tree base_atomic_type;
+
+ /* Only handle complete types. */
+ if (TYPE_SIZE (type) == NULL_TREE)
+ return NULL_TREE;
+
+ HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
+ switch (type_size)
+ {
+ case 8:
+ base_atomic_type = atomicQI_type_node;
+ break;
+
+ case 16:
+ base_atomic_type = atomicHI_type_node;
+ break;
+
+ case 32:
+ base_atomic_type = atomicSI_type_node;
+ break;
+
+ case 64:
+ base_atomic_type = atomicDI_type_node;
+ break;
+
+ case 128:
+ base_atomic_type = atomicTI_type_node;
+ break;
+
+ default:
+ base_atomic_type = NULL_TREE;
+ }
+
+ return base_atomic_type;
+}
+
/* Return a version of the TYPE, qualified as indicated by the
TYPE_QUALS, if one exists. If no qualified version exists yet,
return NULL_TREE. */
t = build_variant_type_copy (type);
set_type_quals (t, type_quals);
+ if ((type_quals & TYPE_QUAL_ATOMIC) == TYPE_QUAL_ATOMIC)
+ {
+ /* See if this object can map to a basic atomic type. */
+ tree atomic_type = find_atomic_core_type (type);
+ if (atomic_type)
+ {
+ /* Ensure the alignment of this type is compatible with
+ the required alignment of the atomic type. */
+ if (TYPE_ALIGN (atomic_type) > TYPE_ALIGN (t))
+ TYPE_ALIGN (t) = TYPE_ALIGN (atomic_type);
+ }
+ }
+
if (TYPE_STRUCTURAL_EQUALITY_P (type))
/* Propagate structural equality. */
SET_TYPE_STRUCTURAL_EQUALITY (t);
case INTEGER_TYPE:
case REAL_TYPE:
case BOOLEAN_TYPE:
+ if (TYPE_PRECISION (a->type) != TYPE_PRECISION (b->type))
+ return false;
return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type)
|| tree_int_cst_equal (TYPE_MAX_VALUE (a->type),
TYPE_MAX_VALUE (b->type)))
/* This CONST_CAST is okay because lookup_attribute does not
modify its argument and the return value is assigned to a
const_tree. */
- for (attr = lookup_ident_attribute (get_attribute_name (t2), CONST_CAST_TREE(l1));
+ for (attr = lookup_ident_attribute (get_attribute_name (t2),
+ CONST_CAST_TREE (l1));
attr != NULL_TREE && !attribute_value_equal (t2, attr);
- attr = lookup_ident_attribute (get_attribute_name (t2), TREE_CHAIN (attr)))
+ attr = lookup_ident_attribute (get_attribute_name (t2),
+ TREE_CHAIN (attr)))
;
if (attr == NULL_TREE)
if (TREE_CODE (t1) == INTEGER_CST
&& TREE_CODE (t2) == INTEGER_CST
- && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2))
+ && wi::to_widest (t1) == wi::to_widest (t2))
return 1;
return 0;
int
tree_int_cst_lt (const_tree t1, const_tree t2)
{
- if (t1 == t2)
- return 0;
-
- if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2)))
- {
- int t1_sgn = tree_int_cst_sgn (t1);
- int t2_sgn = tree_int_cst_sgn (t2);
-
- if (t1_sgn < t2_sgn)
- return 1;
- else if (t1_sgn > t2_sgn)
- return 0;
- /* Otherwise, both are non-negative, so we compare them as
- unsigned just in case one of them would overflow a signed
- type. */
- }
- else if (!TYPE_UNSIGNED (TREE_TYPE (t1)))
- return INT_CST_LT (t1, t2);
-
- return INT_CST_LT_UNSIGNED (t1, t2);
+ return INT_CST_LT (t1, t2);
}
/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */
int
tree_int_cst_compare (const_tree t1, const_tree t2)
{
- if (tree_int_cst_lt (t1, t2))
- return -1;
- else if (tree_int_cst_lt (t2, t1))
- return 1;
- else
- return 0;
+ return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
}
-/* Return 1 if T is an INTEGER_CST that can be manipulated efficiently on
- the host. If POS is zero, the value can be represented in a single
- HOST_WIDE_INT. If POS is nonzero, the value must be non-negative and can
- be represented in a single unsigned HOST_WIDE_INT. */
+/* Return true if T is an INTEGER_CST whose numerical value (extended
+ according to TYPE_UNSIGNED) fits in a signed HOST_WIDE_INT. */
-int
-host_integerp (const_tree t, int pos)
+bool
+tree_fits_shwi_p (const_tree t)
{
- if (t == NULL_TREE)
- return 0;
+ return (t != NULL_TREE
+ && TREE_CODE (t) == INTEGER_CST
+ && wi::fits_shwi_p (wi::to_widest (t)));
+}
- return (TREE_CODE (t) == INTEGER_CST
- && ((TREE_INT_CST_HIGH (t) == 0
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0)
- || (! pos && TREE_INT_CST_HIGH (t) == -1
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0
- && !TYPE_UNSIGNED (TREE_TYPE (t)))
- || (pos && TREE_INT_CST_HIGH (t) == 0)));
+/* Return true if T is an INTEGER_CST whose numerical value (extended
+ according to TYPE_UNSIGNED) fits in an unsigned HOST_WIDE_INT. */
+
+bool
+tree_fits_uhwi_p (const_tree t)
+{
+ return (t != NULL_TREE
+ && TREE_CODE (t) == INTEGER_CST
+ && wi::fits_uhwi_p (wi::to_widest (t)));
}
-/* Return the HOST_WIDE_INT least significant bits of T if it is an
- INTEGER_CST and there is no overflow. POS is nonzero if the result must
- be non-negative. We must be able to satisfy the above conditions. */
+/* T is an INTEGER_CST whose numerical value (extended according to
+ TYPE_UNSIGNED) fits in a signed HOST_WIDE_INT. Return that
+ HOST_WIDE_INT. */
HOST_WIDE_INT
-tree_low_cst (const_tree t, int pos)
+tree_to_shwi (const_tree t)
{
- gcc_assert (host_integerp (t, pos));
- return TREE_INT_CST_LOW (t);
+ gcc_assert (tree_fits_shwi_p (t));
+ return TREE_INT_CST_ELT (t, 0);
}
-/* Return the HOST_WIDE_INT least significant bits of T, a sizetype
- kind INTEGER_CST. This makes sure to properly sign-extend the
- constant. */
+/* T is an INTEGER_CST whose numerical value (extended according to
+ TYPE_UNSIGNED) fits in an unsigned HOST_WIDE_INT. Return that
+ HOST_WIDE_INT. */
-HOST_WIDE_INT
-size_low_cst (const_tree t)
+unsigned HOST_WIDE_INT
+tree_to_uhwi (const_tree t)
{
- double_int d = tree_to_double_int (t);
- return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low;
+ gcc_assert (tree_fits_uhwi_p (t));
+ return TREE_INT_CST_ELT (t, 0);
}
/* Return the most significant (sign) bit of T. */
tree_int_cst_sign_bit (const_tree t)
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- unsigned HOST_WIDE_INT w;
-
- if (bitno < HOST_BITS_PER_WIDE_INT)
- w = TREE_INT_CST_LOW (t);
- else
- {
- w = TREE_INT_CST_HIGH (t);
- bitno -= HOST_BITS_PER_WIDE_INT;
- }
- return (w >> bitno) & 1;
+ return wi::extract_uhwi (t, bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
int
tree_int_cst_sgn (const_tree t)
{
- if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0)
+ if (wi::eq_p (t, 0))
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (TREE_INT_CST_HIGH (t) < 0)
+ else if (wi::neg_p (t))
return -1;
else
return 1;
signed or unsigned type, UNSIGNEDP says which. */
unsigned int
-tree_int_cst_min_precision (tree value, bool unsignedp)
+tree_int_cst_min_precision (tree value, signop sgn)
{
/* If the value is negative, compute its negative minus 1. The latter
adjustment is because the absolute value of the largest negative value
a bit-wise negation, so use that operation instead. */
if (tree_int_cst_sgn (value) < 0)
- value = fold_build1 (BIT_NOT_EXPR, TREE_TYPE (value), value);
-
- /* Return the number of bits needed, taking into account the fact
- that we need one more bit for a signed than unsigned type.
- If value is 0 or -1, the minimum precision is 1 no matter
- whether unsignedp is true or false. */
-
- if (integer_zerop (value))
- return 1;
- else
- return tree_floor_log2 (value) + 1 + !unsignedp;
-}
-
-/* Compare two constructor-element-type constants. Return 1 if the lists
- are known to be equal; otherwise return 0. */
-
-int
-simple_cst_list_equal (const_tree l1, const_tree l2)
-{
- while (l1 != NULL_TREE && l2 != NULL_TREE)
- {
- if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1)
- return 0;
+ value = fold_build1 (BIT_NOT_EXPR, TREE_TYPE (value), value);
- l1 = TREE_CHAIN (l1);
- l2 = TREE_CHAIN (l2);
- }
+ /* Return the number of bits needed, taking into account the fact
+ that we need one more bit for a signed than unsigned type.
+ If value is 0 or -1, the minimum precision is 1 no matter
+ whether SGN is SIGNED or UNSIGNED. */
- return l1 == l2;
+ if (integer_zerop (value))
+ return 1;
+ else
+ return tree_floor_log2 (value) + 1 + (sgn == SIGNED ? 1 : 0);
}
/* Return truthvalue of whether T1 is the same tree structure as T2.
switch (code1)
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2));
+ return wi::to_widest (t1) == wi::to_widest (t2);
case REAL_CST:
return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
{
if (tree_int_cst_sgn (t) < 0)
return -1;
- else if (TREE_INT_CST_HIGH (t) != 0)
+ else if (!cst_fits_uhwi_p (t))
return 1;
else if (TREE_INT_CST_LOW (t) == u)
return 0;
bool
valid_constant_size_p (const_tree size)
{
- if (! host_integerp (size, 1)
+ if (! tree_fits_uhwi_p (size)
|| TREE_OVERFLOW (size)
|| tree_int_cst_sign_bit (size) != 0)
return false;
/* Alas, constants aren't shared, so we can't rely on pointer
identity. */
case INTEGER_CST:
- val = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), val);
- return iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), val);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ val = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), val);
+ return val;
case REAL_CST:
{
unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t));
}
}
-/* Generate a hash value for a pair of expressions. This can be used
- iteratively by passing a previous result as the VAL argument.
-
- The same hash value is always returned for a given pair of expressions,
- regardless of the order in which they are presented. This is useful in
- hashing the operands of commutative functions. */
-
-hashval_t
-iterative_hash_exprs_commutative (const_tree t1,
- const_tree t2, hashval_t val)
-{
- hashval_t one = iterative_hash_expr (t1, 0);
- hashval_t two = iterative_hash_expr (t2, 0);
- hashval_t t;
-
- if (one > two)
- t = one, one = two, two = t;
- val = iterative_hash_hashval_t (one, val);
- val = iterative_hash_hashval_t (two, val);
-
- return val;
-}
-\f
/* Constructors for pointer, array and function types.
(RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE nodes are
constructed by language-dependent code, not here.) */
return build_reference_type_for_mode (to_type, pointer_mode, false);
}
-/* Build a type that is compatible with t but has no cv quals anywhere
- in its type, thus
-
- const char *const *const * -> char ***. */
-
-tree
-build_type_no_quals (tree t)
-{
- switch (TREE_CODE (t))
- {
- case POINTER_TYPE:
- return build_pointer_type_for_mode (build_type_no_quals (TREE_TYPE (t)),
- TYPE_MODE (t),
- TYPE_REF_CAN_ALIAS_ALL (t));
- case REFERENCE_TYPE:
- return
- build_reference_type_for_mode (build_type_no_quals (TREE_TYPE (t)),
- TYPE_MODE (t),
- TYPE_REF_CAN_ALIAS_ALL (t));
- default:
- return TYPE_MAIN_VARIANT (t);
- }
-}
-
#define MAX_INT_CACHED_PREC \
(HOST_BITS_PER_WIDE_INT > 64 ? HOST_BITS_PER_WIDE_INT : 64)
static GTY(()) tree nonstandard_integer_type_cache[2 * MAX_INT_CACHED_PREC + 2];
fixup_signed_type (itype);
ret = itype;
- if (host_integerp (TYPE_MAX_VALUE (itype), 1))
- ret = type_hash_canon (tree_low_cst (TYPE_MAX_VALUE (itype), 1), itype);
+ if (tree_fits_uhwi_p (TYPE_MAX_VALUE (itype)))
+ ret = type_hash_canon (tree_to_uhwi (TYPE_MAX_VALUE (itype)), itype);
if (precision <= MAX_INT_CACHED_PREC)
nonstandard_integer_type_cache[precision + unsignedp] = ret;
true) or would not differ from ARGTYPES. */
static tree
-maybe_canonicalize_argtypes(tree argtypes,
- bool *any_structural_p,
- bool *any_noncanonical_p)
+maybe_canonicalize_argtypes (tree argtypes,
+ bool *any_structural_p,
+ bool *any_noncanonical_p)
{
tree arg;
bool any_noncanonical_argtypes_p = false;
return t;
}
-/* Build variant of function type ORIG_TYPE skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true. */
-
-static tree
-build_function_type_skip_args (tree orig_type, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_type = NULL;
- tree args, new_args = NULL, t;
- tree new_reversed;
- int i = 0;
-
- for (args = TYPE_ARG_TYPES (orig_type); args && args != void_list_node;
- args = TREE_CHAIN (args), i++)
- if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
- new_args = tree_cons (NULL_TREE, TREE_VALUE (args), new_args);
-
- new_reversed = nreverse (new_args);
- if (args)
- {
- if (new_reversed)
- TREE_CHAIN (new_args) = void_list_node;
- else
- new_reversed = void_list_node;
- }
-
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || !args_to_skip
- || !bitmap_bit_p (args_to_skip, 0))
- {
- new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
- }
- else
- {
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- }
-
- if (skip_return)
- TREE_TYPE (new_type) = void_type_node;
-
- /* This is a new type, not a copy of an old type. Need to reassociate
- variants. We can handle everything except the main variant lazily. */
- t = TYPE_MAIN_VARIANT (orig_type);
- if (t != orig_type)
- {
- t = build_function_type_skip_args (t, args_to_skip, skip_return);
- TYPE_MAIN_VARIANT (new_type) = t;
- TYPE_NEXT_VARIANT (new_type) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = new_type;
- }
- else
- {
- TYPE_MAIN_VARIANT (new_type) = new_type;
- TYPE_NEXT_VARIANT (new_type) = NULL;
- }
-
- return new_type;
-}
-
-/* Build variant of function decl ORIG_DECL skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true.
-
- Arguments from DECL_ARGUMENTS list can't be removed now, since they are
- linked by TREE_CHAIN directly. The caller is responsible for eliminating
- them when they are being duplicated (i.e. copy_arguments_for_versioning). */
-
-tree
-build_function_decl_skip_args (tree orig_decl, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_decl = copy_node (orig_decl);
- tree new_type;
-
- new_type = TREE_TYPE (orig_decl);
- if (prototype_p (new_type)
- || (skip_return && !VOID_TYPE_P (TREE_TYPE (new_type))))
- new_type
- = build_function_type_skip_args (new_type, args_to_skip, skip_return);
- TREE_TYPE (new_decl) = new_type;
-
- /* For declarations setting DECL_VINDEX (i.e. methods)
- we expect first argument to be THIS pointer. */
- if (args_to_skip && bitmap_bit_p (args_to_skip, 0))
- DECL_VINDEX (new_decl) = NULL_TREE;
-
- /* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (new_decl)
- && args_to_skip
- && !bitmap_empty_p (args_to_skip))
- {
- DECL_BUILT_IN_CLASS (new_decl) = NOT_BUILT_IN;
- DECL_FUNCTION_CODE (new_decl) = (enum built_in_function) 0;
- }
- return new_decl;
-}
-
/* Build a function type. The RETURN_TYPE is the type returned by the
function. If VAARGS is set, no void_type_node is appended to the
list. ARGP must always be terminated by a NULL_TREE. */
&& TREE_CODE (TREE_TYPE (op)) != FIXED_POINT_TYPE
/* Ensure field is laid out already. */
&& DECL_SIZE (TREE_OPERAND (op, 1)) != 0
- && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1))
+ && tree_fits_uhwi_p (DECL_SIZE (TREE_OPERAND (op, 1))))
{
unsigned HOST_WIDE_INT innerprec
- = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1);
+ = tree_to_uhwi (DECL_SIZE (TREE_OPERAND (op, 1)));
int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1))
|| TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1))));
tree type = lang_hooks.types.type_for_size (innerprec, unsignedp);
int_fits_type_p (const_tree c, const_tree type)
{
tree type_low_bound, type_high_bound;
- bool ok_for_low_bound, ok_for_high_bound, unsc;
- double_int dc, dd;
-
- dc = tree_to_double_int (c);
- unsc = TYPE_UNSIGNED (TREE_TYPE (c));
+ bool ok_for_low_bound, ok_for_high_bound;
+ signop sgn_c = TYPE_SIGN (TREE_TYPE (c));
retry:
type_low_bound = TYPE_MIN_VALUE (type);
/* If at least one bound of the type is a constant integer, we can check
ourselves and maybe make a decision. If no such decision is possible, but
this type is a subtype, try checking against that. Otherwise, use
- double_int_fits_to_tree_p, which checks against the precision.
+ fits_to_tree_p, which checks against the precision.
Compute the status for each possibly constant bound, and return if we see
one does not match. Use ok_for_xxx_bound for this purpose, assigning -1
/* Check if c >= type_low_bound. */
if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_low_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (c_neg && !t_neg)
- return false;
- if ((c_neg || !t_neg) && dc.ult (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) < 0)
+ if (INT_CST_LT (c, type_low_bound))
return false;
ok_for_low_bound = true;
}
/* Check if c <= type_high_bound. */
if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_high_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (t_neg && !c_neg)
- return false;
- if ((t_neg || !c_neg) && dc.ugt (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) > 0)
+ if (INT_CST_LT (type_high_bound, c))
return false;
ok_for_high_bound = true;
}
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types, */
- if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ())
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
return false;
/* Second, narrower types always fit in wider ones. */
return true;
/* Third, unsigned integers with top bit set never fit signed types. */
- if (! TYPE_UNSIGNED (type) && unsc)
+ if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED)
{
- int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (c))) - 1;
- if (prec < HOST_BITS_PER_WIDE_INT)
+ int prec = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (c))) - 1;
+ if (prec < TYPE_PRECISION (TREE_TYPE (c)))
{
- if (((((unsigned HOST_WIDE_INT) 1) << prec) & dc.low) != 0)
+ /* When a tree_cst is converted to a wide-int, the precision
+ is taken from the type. However, if the precision of the
+ mode underneath the type is smaller than that, it is
+ possible that the value will not fit. The test below
+ fails if any bit is set between the sign bit of the
+ underlying mode and the top bit of the type. */
+ if (wi::ne_p (wi::zext (c, prec - 1), c))
return false;
- }
- else if (((((unsigned HOST_WIDE_INT) 1)
- << (prec - HOST_BITS_PER_WIDE_INT)) & dc.high) != 0)
+ }
+ else if (wi::neg_p (c))
return false;
}
goto retry;
}
- /* Or to double_int_fits_to_tree_p, if nothing else. */
- return double_int_fits_to_tree_p (type, dc);
+ /* Or to fits_to_tree_p, if nothing else. */
+ return wi::fits_to_tree_p (c, type);
}
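/* A minimal usage sketch, assuming a front-end or plugin context; the
   helper name maybe_narrow_cst is hypothetical. int_fits_type_p is the
   usual guard before a value-preserving narrowing of a constant. */
static tree
maybe_narrow_cst (tree cst, tree narrow_type)
{
  /* Only narrow when the constant provably fits the target type. */
  if (TREE_CODE (cst) == INTEGER_CST && int_fits_type_p (cst, narrow_type))
    return fold_convert (narrow_type, cst);
  return cst;
}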
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (min, tree_to_double_int (TYPE_MIN_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
mpz_set_ui (min, 0);
else
{
- double_int mn;
- mn = double_int::mask (TYPE_PRECISION (type) - 1);
- mn = (mn + double_int_one).sext (TYPE_PRECISION (type));
- mpz_set_double_int (min, mn, false);
+ wide_int mn = wi::min_value (TYPE_PRECISION (type), SIGNED);
+ wi::to_mpz (mn, min, SIGNED);
}
}
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
else
{
- if (TYPE_UNSIGNED (type))
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)),
- true);
- else
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1),
- true);
+ wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ wi::to_mpz (mn, max, TYPE_SIGN (type));
}
}
fprintf (stderr, "Code Nodes\n");
fprintf (stderr, "----------------------------\n");
for (i = 0; i < (int) MAX_TREE_CODES; i++)
- fprintf (stderr, "%-20s %7d\n", tree_code_name[i], tree_code_counts[i]);
+ fprintf (stderr, "%-20s %7d\n", get_tree_code_name ((enum tree_code) i),
+ tree_code_counts[i]);
fprintf (stderr, "----------------------------\n");
ssanames_print_statistics ();
phinodes_print_statistics ();
va_list args;
const char *buffer;
unsigned length = 0;
- int code;
+ enum tree_code code;
va_start (args, function);
- while ((code = va_arg (args, int)))
- length += 4 + strlen (tree_code_name[code]);
+ while ((code = (enum tree_code) va_arg (args, int)))
+ length += 4 + strlen (get_tree_code_name (code));
va_end (args);
if (length)
{
length += strlen ("expected ");
buffer = tmp = (char *) alloca (length);
length = 0;
- while ((code = va_arg (args, int)))
+ while ((code = (enum tree_code) va_arg (args, int)))
{
const char *prefix = length ? " or " : "expected ";
strcpy (tmp + length, prefix);
length += strlen (prefix);
- strcpy (tmp + length, tree_code_name[code]);
- length += strlen (tree_code_name[code]);
+ strcpy (tmp + length, get_tree_code_name (code));
+ length += strlen (get_tree_code_name (code));
}
va_end (args);
}
buffer = "unexpected node";
internal_error ("tree check: %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
va_list args;
char *buffer;
unsigned length = 0;
- int code;
+ enum tree_code code;
va_start (args, function);
- while ((code = va_arg (args, int)))
- length += 4 + strlen (tree_code_name[code]);
+ while ((code = (enum tree_code) va_arg (args, int)))
+ length += 4 + strlen (get_tree_code_name (code));
va_end (args);
va_start (args, function);
buffer = (char *) alloca (length);
length = 0;
- while ((code = va_arg (args, int)))
+ while ((code = (enum tree_code) va_arg (args, int)))
{
if (length)
{
strcpy (buffer + length, " or ");
length += 4;
}
- strcpy (buffer + length, tree_code_name[code]);
- length += strlen (tree_code_name[code]);
+ strcpy (buffer + length, get_tree_code_name (code));
+ length += strlen (get_tree_code_name (code));
}
va_end (args);
internal_error ("tree check: expected none of %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
("tree check: expected class %qs, have %qs (%s) in %s, at %s:%d",
TREE_CODE_CLASS_STRING (cl),
TREE_CODE_CLASS_STRING (TREE_CODE_CLASS (TREE_CODE (node))),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
/* Similar to tree_check_failed, except that instead of specifying a
unsigned int c;
for (c = c1; c <= c2; ++c)
- length += 4 + strlen (tree_code_name[c]);
+ length += 4 + strlen (get_tree_code_name ((enum tree_code) c));
length += strlen ("expected ");
buffer = (char *) alloca (length);
strcpy (buffer + length, prefix);
length += strlen (prefix);
- strcpy (buffer + length, tree_code_name[c]);
- length += strlen (tree_code_name[c]);
+ strcpy (buffer + length, get_tree_code_name ((enum tree_code) c));
+ length += strlen (get_tree_code_name ((enum tree_code) c));
}
internal_error ("tree check: %s, have %s in %s, at %s:%d",
- buffer, tree_code_name[TREE_CODE (node)],
+ buffer, get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
("tree check: did not expect class %qs, have %qs (%s) in %s, at %s:%d",
TREE_CODE_CLASS_STRING (cl),
TREE_CODE_CLASS_STRING (TREE_CODE_CLASS (TREE_CODE (node))),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
const char *function, enum omp_clause_code code)
{
internal_error ("tree check: expected omp_clause %s, have %s in %s, at %s:%d",
- omp_clause_code_name[code], tree_code_name[TREE_CODE (node)],
+ omp_clause_code_name[code], get_tree_code_name (TREE_CODE (node)),
function, trim_filename (file), line);
}
{
internal_error
("tree check: expected tree that contains %qs structure, have %qs in %s, at %s:%d",
- TS_ENUM_NAME(en),
- tree_code_name[TREE_CODE (node)], function, trim_filename (file), line);
+ TS_ENUM_NAME (en),
+ get_tree_code_name (TREE_CODE (node)), function, trim_filename (file), line);
}
+/* Similar to above, except that the check is for the bounds of an
+ INTEGER_CST's dynamically sized vector of constant elements. */
+
+void
+tree_int_cst_elt_check_failed (int idx, int len, const char *file, int line,
+ const char *function)
+{
+ internal_error
+ ("tree check: accessed elt %d of tree_int_cst with %d elts in %s, at %s:%d",
+ idx + 1, len, function, trim_filename (file), line);
+}
+
/* Similar to above, except that the check is for the bounds of a TREE_VEC's
(dynamically sized) vector. */
tree_operand_check_failed (int idx, const_tree exp, const char *file,
int line, const char *function)
{
- int code = TREE_CODE (exp);
+ enum tree_code code = TREE_CODE (exp);
internal_error
("tree check: accessed operand %d of %s with %d operands in %s, at %s:%d",
- idx + 1, tree_code_name[code], TREE_OPERAND_LENGTH (exp),
+ idx + 1, get_tree_code_name (code), TREE_OPERAND_LENGTH (exp),
function, trim_filename (file), line);
}
return make_accum_type (size, unsignedp, satp);
}
+
+/* Create an atomic variant node for TYPE. This routine is called
+ during initialization of data types to create the 5 basic atomic
+ types. The generic build_qualified_type function requires these to
+ already be set up in order to function properly, so cannot be
+ called from there. */
+
+static tree
+build_atomic_base (tree type)
+{
+ tree t;
+
+ /* Make sure it's not already registered. */
+ if ((t = get_qualified_type (type, TYPE_QUAL_ATOMIC)))
+ return t;
+
+ t = build_variant_type_copy (type);
+ set_type_quals (t, TYPE_QUAL_ATOMIC);
+
+ return t;
+}
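/* Illustrative note, assuming the usual qualifier API: once the five base
   atomic nodes exist, later code obtains atomic variants through
   build_qualified_type rather than through this routine, e.g.

     tree atomic_variant = build_qualified_type (some_type, TYPE_QUAL_ATOMIC);

   build_atomic_base bypasses that path only because it runs before the
   qualifier machinery can rely on these nodes being present. */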
+
/* Create nodes for all integer types (and error_mark_node) using the sizes
of C datatypes. SIGNED_CHAR specifies whether char is signed,
SHORT_DOUBLE specifies whether double should be of the same precision
#endif
/* Define a boolean type. This type only represents boolean values but
- may be larger than char depending on the value of BOOL_TYPE_SIZE.
- Front ends which want to override this size (i.e. Java) can redefine
- boolean_type_node before calling build_common_tree_nodes_2. */
+ may be larger than char depending on the value of BOOL_TYPE_SIZE. */
boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE);
TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE);
- TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
TYPE_PRECISION (boolean_type_node) = 1;
+ TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
/* Define what type to use for size_t. */
if (strcmp (SIZE_TYPE, "unsigned int") == 0)
unsigned_intDI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (DImode), 1);
unsigned_intTI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (TImode), 1);
+ /* Don't call build_qualified_type for atomics. That routine does
+ special processing for atomics, and until they are initialized
+ it's better not to make that call. */
+
+ atomicQI_type_node = build_atomic_base (unsigned_intQI_type_node);
+ atomicHI_type_node = build_atomic_base (unsigned_intHI_type_node);
+ atomicSI_type_node = build_atomic_base (unsigned_intSI_type_node);
+ atomicDI_type_node = build_atomic_base (unsigned_intDI_type_node);
+ atomicTI_type_node = build_atomic_base (unsigned_intTI_type_node);
+
access_public_node = get_identifier ("public");
access_protected_node = get_identifier ("protected");
access_private_node = get_identifier ("private");
void_type_node = make_node (VOID_TYPE);
layout_type (void_type_node);
+ pointer_bounds_type_node = targetm.chkp_bound_type ();
+
/* We are not going to have real types in C with less than byte alignment,
so we might as well not have any types that claim to have it. */
TYPE_ALIGN (void_type_node) = BITS_PER_UNIT;
return ret;
}
-
-/* Returns true if it is possible to prove that the index of
- an array access REF (an ARRAY_REF expression) falls into the
- array bounds. */
-
-bool
-in_array_bounds_p (tree ref)
-{
- tree idx = TREE_OPERAND (ref, 1);
- tree min, max;
-
- if (TREE_CODE (idx) != INTEGER_CST)
- return false;
-
- min = array_ref_low_bound (ref);
- max = array_ref_up_bound (ref);
- if (!min
- || !max
- || TREE_CODE (min) != INTEGER_CST
- || TREE_CODE (max) != INTEGER_CST)
- return false;
-
- if (tree_int_cst_lt (idx, min)
- || tree_int_cst_lt (max, idx))
- return false;
-
- return true;
-}
-
-/* Returns true if it is possible to prove that the range of
- an array access REF (an ARRAY_RANGE_REF expression) falls
- into the array bounds. */
-
-bool
-range_in_array_bounds_p (tree ref)
-{
- tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
- tree range_min, range_max, min, max;
-
- range_min = TYPE_MIN_VALUE (domain_type);
- range_max = TYPE_MAX_VALUE (domain_type);
- if (!range_min
- || !range_max
- || TREE_CODE (range_min) != INTEGER_CST
- || TREE_CODE (range_max) != INTEGER_CST)
- return false;
-
- min = array_ref_low_bound (ref);
- max = array_ref_up_bound (ref);
- if (!min
- || !max
- || TREE_CODE (min) != INTEGER_CST
- || TREE_CODE (max) != INTEGER_CST)
- return false;
-
- if (tree_int_cst_lt (range_min, min)
- || tree_int_cst_lt (max, range_max))
- return false;
-
- return true;
-}
-
/* Return true if T (assumed to be a DECL) must be assigned a memory
location. */
unsigned HOST_WIDE_INT val = TREE_INT_CST_LOW (x);
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (cst_fits_shwi_p (x));
if (bits < HOST_BITS_PER_WIDE_INT)
{
#if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT
gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT);
- val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x))
- << HOST_BITS_PER_WIDE_INT);
+ gcc_assert (TREE_INT_CST_NUNITS (x) <= 2);
+
+ if (TREE_INT_CST_NUNITS (x) == 1)
+ val = HOST_WIDE_INT (val);
+ else
+ val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_ELT (x, 1))
+ << HOST_BITS_PER_WIDE_INT);
#else
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (TREE_INT_CST_NUNITS (x) == 1);
#endif
if (bits < HOST_BITS_PER_WIDEST_INT)
tree
upper_bound_in_type (tree outer, tree inner)
{
- double_int high;
unsigned int det = 0;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
gcc_unreachable ();
}
- /* Compute 2^^prec - 1. */
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- high.high = 0;
- high.low = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_WIDE_INT - prec));
- }
- else
- {
- high.high = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_DOUBLE_INT - prec));
- high.low = ~(unsigned HOST_WIDE_INT) 0;
- }
-
- return double_int_to_tree (outer, high);
+ return wide_int_to_tree (outer,
+ wi::mask (prec, false, TYPE_PRECISION (outer)));
}
/* Returns the smallest value obtainable by casting something in INNER type to
tree
lower_bound_in_type (tree outer, tree inner)
{
- double_int low;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
contains all values of INNER type. In particular, both INNER
and OUTER types have zero in common. */
|| (oprec > iprec && TYPE_UNSIGNED (inner)))
- low.low = low.high = 0;
+ return build_int_cst (outer, 0);
else
{
/* If we are widening a signed type to another signed type, we
precision or narrowing to a signed type, we want to obtain
-2^(oprec-1). */
unsigned prec = oprec > iprec ? iprec : oprec;
-
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- low.high = ~(unsigned HOST_WIDE_INT) 0;
- low.low = (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
- }
- else
- {
- low.high = ((~(unsigned HOST_WIDE_INT) 0)
- << (prec - HOST_BITS_PER_WIDE_INT - 1));
- low.low = 0;
- }
+ return wide_int_to_tree (outer,
+ wi::mask (prec - 1, true,
+ TYPE_PRECISION (outer)));
}
-
- return double_int_to_tree (outer, low);
}
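/* Worked examples, assuming a typical target with 8-bit char and 16-bit
   short: upper_bound_in_type (long_long_integer_type_node,
   unsigned_char_type_node) yields 255, i.e. wi::mask over the low 8 bits,
   while lower_bound_in_type (integer_type_node, short_integer_type_node)
   yields -32768, i.e. -2^(16-1) sign-extended to the outer precision. */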
/* Return nonzero if two operands that are suitable for PHI nodes are
return operand_equal_p (arg0, arg1, 0);
}
-/* Returns number of zeros at the end of binary representation of X.
-
- ??? Use ffs if available? */
+/* Returns number of zeros at the end of binary representation of X. */
tree
num_ending_zeros (const_tree x)
{
- unsigned HOST_WIDE_INT fr, nfr;
- unsigned num, abits;
- tree type = TREE_TYPE (x);
-
- if (TREE_INT_CST_LOW (x) == 0)
- {
- num = HOST_BITS_PER_WIDE_INT;
- fr = TREE_INT_CST_HIGH (x);
- }
- else
- {
- num = 0;
- fr = TREE_INT_CST_LOW (x);
- }
-
- for (abits = HOST_BITS_PER_WIDE_INT / 2; abits; abits /= 2)
- {
- nfr = fr >> abits;
- if (nfr << abits == fr)
- {
- num += abits;
- fr = nfr;
- }
- }
-
- if (num > TYPE_PRECISION (type))
- num = TYPE_PRECISION (type);
-
- return build_int_cst_type (type, num);
+ return build_int_cst (TREE_TYPE (x), wi::ctz (x));
}
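/* Illustrative example: for an INTEGER_CST x equal to 40 (binary 101000)
   the result is build_int_cst (TREE_TYPE (x), 3), since wi::ctz counts
   the trailing zero bits of the constant. */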
unsigned HOST_WIDE_INT idx;
constructor_elt *ce;
- for (idx = 0; vec_safe_iterate(CONSTRUCTOR_ELTS (*tp), idx, &ce); idx++)
+ for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (*tp), idx, &ce);
+ idx++)
WALK_SUBTREE (ce->value);
}
break;
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_UNIFORM:
+ case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_SAFELEN:
+ case OMP_CLAUSE_SIMDLEN:
+ case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__SIMDUID_:
WALK_SUBTREE (OMP_CLAUSE_OPERAND (*tp, 0));
/* FALLTHRU */
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
+ case OMP_CLAUSE_INBRANCH:
+ case OMP_CLAUSE_NOTINBRANCH:
+ case OMP_CLAUSE_FOR:
+ case OMP_CLAUSE_PARALLEL:
+ case OMP_CLAUSE_SECTIONS:
+ case OMP_CLAUSE_TASKGROUP:
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
case OMP_CLAUSE_LASTPRIVATE:
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
}
+ case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE_FROM:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_MAP:
WALK_SUBTREE (OMP_CLAUSE_DECL (*tp));
WALK_SUBTREE (OMP_CLAUSE_OPERAND (*tp, 1));
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
if (!fntype)
return false;
- FOREACH_FUNCTION_ARGS(fntype, t, args_iter)
+ FOREACH_FUNCTION_ARGS (fntype, t, args_iter)
{
n = t;
}
return (memcmp (xp, yp, len) == 0);
}
-/* Build an OPTIMIZATION_NODE based on the current options. */
+/* Build an OPTIMIZATION_NODE based on the options in OPTS. */
tree
-build_optimization_node (void)
+build_optimization_node (struct gcc_options *opts)
{
tree t;
void **slot;
/* Use the cache of optimization nodes. */
cl_optimization_save (TREE_OPTIMIZATION (cl_optimization_node),
- &global_options);
+ opts);
slot = htab_find_slot (cl_option_hash_table, cl_optimization_node, INSERT);
t = (tree) *slot;
return t;
}
-/* Build a TARGET_OPTION_NODE based on the current options. */
+/* Build a TARGET_OPTION_NODE based on the options in OPTS. */
tree
-build_target_option_node (void)
+build_target_option_node (struct gcc_options *opts)
{
tree t;
void **slot;
/* Use the cache of optimization nodes. */
cl_target_option_save (TREE_TARGET_OPTION (cl_target_option_node),
- &global_options);
+ opts);
slot = htab_find_slot (cl_option_hash_table, cl_target_option_node, INSERT);
t = (tree) *slot;
}
}
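/* Usage sketch, assuming callers keep their previous behaviour: code that
   relied on the implicit global state now passes it explicitly, e.g.

     tree opt = build_optimization_node (&global_options);
     tree tgt = build_target_option_node (&global_options); */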
-/* Return true if T1 and T2 are equivalent lists. */
-
-bool
-list_equal_p (const_tree t1, const_tree t2)
-{
- for (; t1 && t2; t1 = TREE_CHAIN (t1) , t2 = TREE_CHAIN (t2))
- if (TREE_VALUE (t1) != TREE_VALUE (t2))
- return false;
- return !t1 && !t2;
-}
-
/* Return true iff conversion in EXP generates no instruction. Mark
it inline so that we fully inline into the stripping functions even
though we have two uses of this function. */
continue;
pos = int_bit_position (fld);
- size = tree_low_cst (DECL_SIZE (fld), 1);
+ size = tree_to_uhwi (DECL_SIZE (fld));
if (pos <= offset && (pos + size) > offset)
break;
}
return false;
}
+/* Try to determine whether a TRY_CATCH expression can fall through.
+ This is a subroutine of block_may_fallthru. */
+
+static bool
+try_catch_may_fallthru (const_tree stmt)
+{
+ tree_stmt_iterator i;
+
+ /* If the TRY block can fall through, the whole TRY_CATCH can
+ fall through. */
+ if (block_may_fallthru (TREE_OPERAND (stmt, 0)))
+ return true;
+
+ i = tsi_start (TREE_OPERAND (stmt, 1));
+ switch (TREE_CODE (tsi_stmt (i)))
+ {
+ case CATCH_EXPR:
+ /* We expect to see a sequence of CATCH_EXPR trees, each with a
+ catch expression and a body. The whole TRY_CATCH may fall
+ through iff any of the catch bodies falls through. */
+ for (; !tsi_end_p (i); tsi_next (&i))
+ {
+ if (block_may_fallthru (CATCH_BODY (tsi_stmt (i))))
+ return true;
+ }
+ return false;
+
+ case EH_FILTER_EXPR:
+ /* The exception filter expression only matters if there is an
+ exception. If the exception does not match EH_FILTER_TYPES,
+ we will execute EH_FILTER_FAILURE, and we will fall through
+ if that falls through. If the exception does match
+ EH_FILTER_TYPES, the stack unwinder will continue up the
+ stack, so we will not fall through. We don't know whether we
+ will throw an exception which matches EH_FILTER_TYPES or not,
+ so we just ignore EH_FILTER_TYPES and assume that we might
+ throw an exception which doesn't match. */
+ return block_may_fallthru (EH_FILTER_FAILURE (tsi_stmt (i)));
+
+ default:
+ /* This case represents statements to be executed when an
+ exception occurs. Those statements are implicitly followed
+ by a RESX statement to resume execution after the exception.
+ So in this case the TRY_CATCH never falls through. */
+ return false;
+ }
+}
+
+/* Try to determine if we can fall out of the bottom of BLOCK. This guess
+ need not be 100% accurate; simply be conservative and return true if we
+ don't know. This is used only to avoid stupidly generating extra code.
+ If we're wrong, we'll just delete the extra code later. */
+
+bool
+block_may_fallthru (const_tree block)
+{
+ /* This CONST_CAST is okay because expr_last returns its argument
+ unmodified and we assign it to a const_tree. */
+ const_tree stmt = expr_last (CONST_CAST_TREE (block));
+
+ switch (stmt ? TREE_CODE (stmt) : ERROR_MARK)
+ {
+ case GOTO_EXPR:
+ case RETURN_EXPR:
+ /* Easy cases. If the last statement of the block implies
+ control transfer, then we can't fall through. */
+ return false;
+
+ case SWITCH_EXPR:
+ /* If SWITCH_LABELS is set, this is lowered, and represents a
+ branch to a selected label and hence can not fall through.
+ Otherwise SWITCH_BODY is set, and the switch can fall
+ through. */
+ return SWITCH_LABELS (stmt) == NULL_TREE;
+
+ case COND_EXPR:
+ if (block_may_fallthru (COND_EXPR_THEN (stmt)))
+ return true;
+ return block_may_fallthru (COND_EXPR_ELSE (stmt));
+
+ case BIND_EXPR:
+ return block_may_fallthru (BIND_EXPR_BODY (stmt));
+
+ case TRY_CATCH_EXPR:
+ return try_catch_may_fallthru (stmt);
+
+ case TRY_FINALLY_EXPR:
+ /* The finally clause is always executed after the try clause,
+ so if it does not fall through, then the try-finally will not
+ fall through. Otherwise, if the try clause does not fall
+ through, then when the finally clause falls through it will
+ resume execution wherever the try clause was going. So the
+ whole try-finally will only fall through if both the try
+ clause and the finally clause fall through. */
+ return (block_may_fallthru (TREE_OPERAND (stmt, 0))
+ && block_may_fallthru (TREE_OPERAND (stmt, 1)));
+
+ case MODIFY_EXPR:
+ if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR)
+ stmt = TREE_OPERAND (stmt, 1);
+ else
+ return true;
+ /* FALLTHRU */
+
+ case CALL_EXPR:
+ /* Functions that do not return do not fall through. */
+ return (call_expr_flags (stmt) & ECF_NORETURN) == 0;
+
+ case CLEANUP_POINT_EXPR:
+ return block_may_fallthru (TREE_OPERAND (stmt, 0));
+
+ case TARGET_EXPR:
+ return block_may_fallthru (TREE_OPERAND (stmt, 1));
+
+ case ERROR_MARK:
+ return true;
+
+ default:
+ return lang_hooks.block_may_fallthru (stmt);
+ }
+}
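/* A minimal usage sketch, assuming a front-end context; the helper name
   maybe_add_implicit_return and the statement list BODY are hypothetical.
   A front end can use block_may_fallthru to decide whether control can
   reach the end of a function body, so that an implicit return is needed. */
static void
maybe_add_implicit_return (tree *body)
{
  if (block_may_fallthru (*body))
    append_to_statement_list (build1 (RETURN_EXPR, void_type_node, NULL_TREE),
                              body);
}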
+
+/* True if we are using EH to handle cleanups. */
+static bool using_eh_for_cleanups_flag = false;
+
+/* This routine is called from front ends to indicate that EH should be
+ used for cleanups. */
+void
+using_eh_for_cleanups (void)
+{
+ using_eh_for_cleanups_flag = true;
+}
+
+/* Query whether EH is used for cleanups. */
+bool
+using_eh_for_cleanups_p (void)
+{
+ return using_eh_for_cleanups_flag;
+}
+
+/* Wrapper for tree_code_name to ensure that the tree code is valid. */
+const char *
+get_tree_code_name (enum tree_code code)
+{
+ const char *invalid = "<invalid tree code>";
+
+ if (code >= MAX_TREE_CODES)
+ return invalid;
+
+ return tree_code_name[code];
+}
+
+/* Drops the TREE_OVERFLOW flag from T. */
+
+tree
+drop_tree_overflow (tree t)
+{
+ gcc_checking_assert (TREE_OVERFLOW (t));
+
+ /* For tree codes with a sharing machinery re-build the result. */
+ if (TREE_CODE (t) == INTEGER_CST)
+ return wide_int_to_tree (TREE_TYPE (t), t);
+
+ /* Otherwise, as all tcc_constants are possibly shared, copy the node
+ and drop the flag. */
+ t = copy_node (t);
+ TREE_OVERFLOW (t) = 0;
+ return t;
+}
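/* Usage sketch: a pass that only cares about the numeric value of a
   folded constant can strip the sticky overflow flag first, e.g.

     if (TREE_OVERFLOW_P (cst))
       cst = drop_tree_overflow (cst); */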
+
+/* Given a memory reference expression T, return its base address.
+ The base address of a memory reference expression is the main
+ object being referenced. For instance, the base address for
+ 'array[i].fld[j]' is 'array'. You can think of this as stripping
+ away the offset part from a memory address.
+
+ This function calls handled_component_p to strip away all the inner
+ parts of the memory reference until it reaches the base object. */
+
+tree
+get_base_address (tree t)
+{
+ while (handled_component_p (t))
+ t = TREE_OPERAND (t, 0);
+
+ if ((TREE_CODE (t) == MEM_REF
+ || TREE_CODE (t) == TARGET_MEM_REF)
+ && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR)
+ t = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+
+ /* ??? Either the alias oracle or all callers need to properly deal
+ with WITH_SIZE_EXPRs before we can look through those. */
+ if (TREE_CODE (t) == WITH_SIZE_EXPR)
+ return NULL_TREE;
+
+ return t;
+}
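/* A minimal usage sketch; the helper name ref_based_on_decl_p is
   hypothetical. It checks whether a memory reference is ultimately
   rooted in a declaration rather than in a pointer dereference. */
static bool
ref_based_on_decl_p (tree ref)
{
  tree base = get_base_address (ref);
  return base != NULL_TREE && DECL_P (base);
}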
+
#include "gt-tree.h"