m_base[1] = max;
m_num_ranges = 1;
m_kind = VR_RANGE;
- m_nonzero_mask = wi::shwi (-1, TYPE_PRECISION (TREE_TYPE (min)));
+ m_nonzero_mask = NULL;
normalize_kind ();
if (flag_checking)
}
m_kind = VR_RANGE;
- m_nonzero_mask = wi::shwi (-1, TYPE_PRECISION (TREE_TYPE (min)));
+ m_nonzero_mask = NULL;
normalize_kind ();
if (flag_checking)
m_base[0] = min;
m_base[1] = max;
m_num_ranges = 1;
- m_nonzero_mask = wi::shwi (-1, TYPE_PRECISION (TREE_TYPE (min)));
+ m_nonzero_mask = NULL;
return;
}
m_base[0] = min;
m_base[1] = max;
m_num_ranges = 1;
- m_nonzero_mask = wi::shwi (-1, TYPE_PRECISION (TREE_TYPE (min)));
+ m_nonzero_mask = NULL;
normalize_kind ();
if (flag_checking)
verify_range ();
}
if (m_kind == VR_VARYING)
{
- gcc_checking_assert (m_nonzero_mask == -1);
+ gcc_checking_assert (!m_nonzero_mask
+ || wi::to_wide (m_nonzero_mask) == -1);
gcc_checking_assert (m_num_ranges == 1);
gcc_checking_assert (varying_compatible_p ());
return;
gcc_checking_assert (TREE_CODE (cst) == INTEGER_CST);
// See if we can exclude CST based on the nonzero bits.
- if (m_nonzero_mask != -1)
+ if (m_nonzero_mask)
{
wide_int cstw = wi::to_wide (cst);
- if (cstw != 0 && wi::bit_and (m_nonzero_mask, cstw) == 0)
+ if (cstw != 0 && wi::bit_and (wi::to_wide (m_nonzero_mask), cstw) == 0)
return false;
}
signop sign = TYPE_SIGN (ttype);
wide_int type_min = wi::min_value (prec, sign);
wide_int type_max = wi::max_value (prec, sign);
- m_nonzero_mask = wi::shwi (-1, prec);
+ m_nonzero_mask = NULL;
if (m_num_ranges == m_max_ranges
&& lower_bound () != type_min
&& upper_bound () != type_max)
irange::set_range_from_nonzero_bits ()
{
gcc_checking_assert (!undefined_p ());
- unsigned popcount = wi::popcount (m_nonzero_mask);
+ if (!m_nonzero_mask)
+ return false;
+ unsigned popcount = wi::popcount (wi::to_wide (m_nonzero_mask));
// If we have only one bit set in the mask, we can figure out the
// range immediately.
if (popcount == 1)
{
// Make sure we don't pessimize the range.
- if (!contains_p (wide_int_to_tree (type (), m_nonzero_mask)))
+ if (!contains_p (m_nonzero_mask))
return false;
bool has_zero = contains_p (build_zero_cst (type ()));
- wide_int bits = m_nonzero_mask;
- set (type (), bits, bits);
- m_nonzero_mask = bits;
+ tree nz = m_nonzero_mask;
+ set (nz, nz);
+ m_nonzero_mask = nz;
if (has_zero)
{
int_range<2> zero;
gcc_checking_assert (!undefined_p ());
unsigned prec = TYPE_PRECISION (type ());
+ if (bits == -1)
+ {
+ m_nonzero_mask = NULL;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return;
+ }
+
// Drop VARYINGs with a nonzero mask to a plain range.
if (m_kind == VR_VARYING && bits != -1)
m_kind = VR_RANGE;
- m_nonzero_mask = wide_int::from (bits, prec, TYPE_SIGN (type ()));
+ wide_int nz = wide_int::from (bits, prec, TYPE_SIGN (type ()));
+ m_nonzero_mask = wide_int_to_tree (type (), nz);
if (set_range_from_nonzero_bits ())
return;
// the mask precisely up to date at all times. Instead, we default
// to -1 and set it when explicitly requested. However, this
// function will always return the correct mask.
- return m_nonzero_mask & get_nonzero_bits_from_range ();
+ if (m_nonzero_mask)
+ return wi::to_wide (m_nonzero_mask) & get_nonzero_bits_from_range ();
+ else
+ return get_nonzero_bits_from_range ();
+}
+
+// Convert a tree nonzero mask to a wide_int.  A NULL mask is shorthand
+// for an all-ones mask, so return -1 in TYPE's precision for it.
+
+inline wide_int
+mask_to_wi (tree mask, tree type)
+{
+ if (mask)
+ return wi::to_wide (mask);
+ else
+ return wi::shwi (-1, TYPE_PRECISION (type));
}
// Intersect the nonzero bits in R into THIS and normalize the range.
{
gcc_checking_assert (!undefined_p () && !r.undefined_p ());
+ if (!m_nonzero_mask && !r.m_nonzero_mask)
+ {
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return false;
+ }
+
bool changed = false;
- if (m_nonzero_mask != r.m_nonzero_mask)
+ tree t = type ();
+ if (mask_to_wi (m_nonzero_mask, t) != mask_to_wi (r.m_nonzero_mask, t))
{
- m_nonzero_mask = get_nonzero_bits () & r.get_nonzero_bits ();
+ wide_int nz = get_nonzero_bits () & r.get_nonzero_bits ();
+ m_nonzero_mask = wide_int_to_tree (t, nz);
if (set_range_from_nonzero_bits ())
return true;
changed = true;
{
gcc_checking_assert (!undefined_p () && !r.undefined_p ());
+ if (!m_nonzero_mask && !r.m_nonzero_mask)
+ {
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ return false;
+ }
+
bool changed = false;
- if (m_nonzero_mask != r.m_nonzero_mask)
+ tree t = type ();
+ if (mask_to_wi (m_nonzero_mask, t) != mask_to_wi (r.m_nonzero_mask, t))
{
- m_nonzero_mask = get_nonzero_bits () | r.get_nonzero_bits ();
+ wide_int nz = get_nonzero_bits () | r.get_nonzero_bits ();
+ m_nonzero_mask = wide_int_to_tree (t, nz);
// No need to call set_range_from_nonzero_bits, because we'll
// never narrow the range. Besides, it would cause endless
// recursion because of the union_ in
bool intersect (const wide_int& lb, const wide_int& ub);
unsigned char m_num_ranges;
unsigned char m_max_ranges;
- wide_int m_nonzero_mask;
+ tree m_nonzero_mask;
tree *m_base;
};
if (INTEGRAL_TYPE_P (t))
return (wi::to_wide (l) == wi::min_value (prec, sign)
&& wi::to_wide (u) == wi::max_value (prec, sign)
- && m_nonzero_mask == -1);
+ && (!m_nonzero_mask || wi::to_wide (m_nonzero_mask) == -1));
if (POINTER_TYPE_P (t))
return (wi::to_wide (l) == 0
&& wi::to_wide (u) == wi::max_value (prec, sign)
- && m_nonzero_mask == -1);
+ && (!m_nonzero_mask || wi::to_wide (m_nonzero_mask) == -1));
return true;
}
gt_ggc_mx (x->m_base[i * 2]);
gt_ggc_mx (x->m_base[i * 2 + 1]);
}
+ if (x->m_nonzero_mask)
+ gt_ggc_mx (x->m_nonzero_mask);
}
inline void
gt_pch_nx (x->m_base[i * 2]);
gt_pch_nx (x->m_base[i * 2 + 1]);
}
+ if (x->m_nonzero_mask)
+ gt_pch_nx (x->m_nonzero_mask);
}
inline void
op (&x->m_base[i * 2], NULL, cookie);
op (&x->m_base[i * 2 + 1], NULL, cookie);
}
+ if (x->m_nonzero_mask)
+ op (&x->m_nonzero_mask, NULL, cookie);
}
template<unsigned N>
{
m_kind = VR_UNDEFINED;
m_num_ranges = 0;
+ m_nonzero_mask = NULL;
}
inline void
{
m_kind = VR_VARYING;
m_num_ranges = 1;
-
- if (type == error_mark_node)
- m_nonzero_mask = wi::shwi (-1, 1);
- else
- m_nonzero_mask = wi::shwi (-1, TYPE_PRECISION (type));
+ m_nonzero_mask = NULL;
if (INTEGRAL_TYPE_P (type))
{