maxval = convert (*restype_ptr, maxval);
}
- if (unsignedp && unsignedp0)
- {
- min_gt = INT_CST_LT_UNSIGNED (primop1, minval);
- max_gt = INT_CST_LT_UNSIGNED (primop1, maxval);
- min_lt = INT_CST_LT_UNSIGNED (minval, primop1);
- max_lt = INT_CST_LT_UNSIGNED (maxval, primop1);
- }
- else
- {
- min_gt = INT_CST_LT (primop1, minval);
- max_gt = INT_CST_LT (primop1, maxval);
- min_lt = INT_CST_LT (minval, primop1);
- max_lt = INT_CST_LT (maxval, primop1);
- }
+ min_gt = INT_CST_LT (primop1, minval);
+ max_gt = INT_CST_LT (primop1, maxval);
+ min_lt = INT_CST_LT (minval, primop1);
+ max_lt = INT_CST_LT (maxval, primop1);
val = 0;
/* This used to be a switch, but Genix compiler can't handle that. */
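The hunk above can drop the signedness split because INT_CST_LT now lifts both constants to widest_int, where each operand is extended according to its own sign; after that, one signed comparison is correct for any mix of signedness. A standalone model of the rule (plain C++, not GCC's wide-int API; the 32- and 64-bit widths stand in for the constants' precision and for widest_int):

/* Zero-extend unsigned operands and sign-extend signed ones into a
   strictly wider signed type; one signed comparison then handles
   every combination of signedness.  */
#include <cassert>
#include <cstdint>

static bool
model_int_cst_lt (bool a_uns, int32_t a, bool b_uns, int32_t b)
{
  int64_t wa = a_uns ? (int64_t) (uint32_t) a : (int64_t) a;
  int64_t wb = b_uns ? (int64_t) (uint32_t) b : (int64_t) b;
  return wa < wb;
}

int
main ()
{
  /* Unsigned all-ones is 4294967295, not -1, so it is not below 0...  */
  assert (!model_int_cst_lt (true, -1, false, 0));
  /* ...while signed -1 is below unsigned all-ones.  */
  assert (model_int_cst_lt (false, -1, true, -1));
  return 0;
}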
enum overflow_type *);
static tree interpret_fixed (const cpp_token *, unsigned int);
static enum integer_type_kind narrowest_unsigned_type
- (const wide_int &, unsigned int);
+ (const widest_int &, unsigned int);
static enum integer_type_kind narrowest_signed_type
- (const wide_int &, unsigned int);
+ (const widest_int &, unsigned int);
static enum cpp_ttype lex_string (const cpp_token *, tree *, bool, bool);
static tree lex_charconst (const cpp_token *);
static void update_header_times (const char *);
there isn't one. */
static enum integer_type_kind
-narrowest_unsigned_type (const wide_int &val, unsigned int flags)
+narrowest_unsigned_type (const widest_int &val, unsigned int flags)
{
int itk;
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if (wi::geu_p (upper, val))
+ if (wi::geu_p (wi::to_widest (upper), val))
return (enum integer_type_kind) itk;
}
/* Ditto, but narrowest signed type. */
static enum integer_type_kind
-narrowest_signed_type (const wide_int &val, unsigned int flags)
+narrowest_signed_type (const widest_int &val, unsigned int flags)
{
int itk;
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if (wi::geu_p (upper, val))
+ if (wi::geu_p (wi::to_widest (upper), val))
return (enum integer_type_kind) itk;
}
tree value, type;
enum integer_type_kind itk;
cpp_num integer;
- cpp_options *options = cpp_get_options (parse_in);
- HOST_WIDE_INT ival[2];
- wide_int wval;
+ HOST_WIDE_INT ival[3];
*overflow = OT_NONE;
integer = cpp_interpret_integer (parse_in, token, flags);
- integer = cpp_num_sign_extend (integer, options->precision);
if (integer.overflow)
*overflow = OT_OVERFLOW;
ival[0] = integer.low;
ival[1] = integer.high;
- wval = wide_int::from_array (ival, 2, HOST_BITS_PER_WIDE_INT * 2);
+ ival[2] = 0;
+ widest_int wval = widest_int::from_array (ival, 3);
/* The type of a constant with a U suffix is straightforward. */
if (flags & CPP_N_UNSIGNED)
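The interpret_integer change above relies on the wide-int representation sign-extending from the most significant block supplied: storing an explicit zero third element before widest_int::from_array pins the two-block magnitude from cpp_interpret_integer as non-negative, which is presumably why the separate cpp_num_sign_extend call is no longer needed here. A small model of that top-block rule (plain C++ with 32-bit blocks and GCC's __int128 extension as the wider accumulator; purely illustrative):

/* from_array-style decoding that sign-extends from the most
   significant block supplied: an explicit zero top block forces a
   non-negative reading of an all-ones two-block magnitude.  */
#include <cassert>
#include <cstdint>

static bool
reads_negative (const int32_t *blocks, int n)
{
  unsigned __int128 v = (unsigned __int128) (__int128) blocks[n - 1];
  for (int i = n - 2; i >= 0; i--)
    v = (v << 32) | (uint32_t) blocks[i];
  return (__int128) v < 0;
}

int
main ()
{
  int32_t ival[3] = { -1, -1, 0 };     /* magnitude 0xffffffffffffffff */
  assert (reads_negative (ival, 2));   /* top bit set: reads negative */
  assert (!reads_negative (ival, 3));  /* zero top block: non-negative */
  return 0;
}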
else if (targetm.calls.promote_prototypes (type)
&& INTEGRAL_TYPE_P (type)
&& COMPLETE_TYPE_P (type)
- && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
- TYPE_SIZE (integer_type_node)))
+ && INT_CST_LT (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
type = integer_type_node;
return type;
else if (targetm.calls.promote_prototypes (type)
&& INTEGRAL_TYPE_P (type)
&& COMPLETE_TYPE_P (type)
- && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
- TYPE_SIZE (integer_type_node)))
+ && INT_CST_LT (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
val = cp_perform_integral_promotions (val, complain);
if ((complain & tf_warning)
&& warn_suggest_attribute_format)
continue;
offset = end_of_base (base_binfo);
- if (INT_CST_LT_UNSIGNED (result, offset))
+ if (INT_CST_LT (result, offset))
result = offset;
}
vec_safe_iterate (vbases, i, &base_binfo); i++)
{
offset = end_of_base (base_binfo);
- if (INT_CST_LT_UNSIGNED (result, offset))
+ if (INT_CST_LT (result, offset))
result = offset;
}
CLASSTYPE_AS_BASE (rli->t) != NULL_TREE);
rli_size = rli_size_unit_so_far (rli);
if (TREE_CODE (rli_size) == INTEGER_CST
- && INT_CST_LT_UNSIGNED (rli_size, eoc))
+ && INT_CST_LT (rli_size, eoc))
{
if (!abi_version_at_least (2))
/* In version 1 of the ABI, the size of a class that ends with
gcc_assert (TREE_CODE (size) == INTEGER_CST);
cookie_size = targetm.cxx.get_cookie_size (elt_type);
gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST);
- gcc_checking_assert (wi::ltu_p (cookie_size, max_size));
+ gcc_checking_assert (wi::ltu_p (wi::to_offset (cookie_size), max_size));
/* Unconditionally subtract the cookie size. This decreases the
maximum object size and is safe even if we choose not to use
a cookie after all. */
{
if (code == EQ_EXPR)
result = tree_int_cst_equal (op0, op1);
- else if (TYPE_UNSIGNED (TREE_TYPE (op0)))
- result = INT_CST_LT_UNSIGNED (op0, op1);
else
result = INT_CST_LT (op0, op1);
}
be larger than size of array element. */
if (!TYPE_SIZE_UNIT (type)
|| TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
- || wi::lts_p (elt_size, TYPE_SIZE_UNIT (type)))
+ || wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type))))
return NULL_TREE;
/* Compute the array index we look for. */
tree field_offset = byte_position (field_decl);
if (! page_size)
page_size = size_int (4096);
- check = ! INT_CST_LT_UNSIGNED (field_offset, page_size);
+ check = ! INT_CST_LT (field_offset, page_size);
}
if (base_type != TREE_TYPE (self_value))
Note that the maximum value loaded is iterations_max - 1. */
if (get_max_loop_iterations (loop, &iterations)
&& wi::leu_p (iterations,
- wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
- GET_MODE_PRECISION (mode))))
+ wi::set_bit_in_zero <widest_int>
+ (GET_MODE_PRECISION (mode) - 1)))
nonneg = 1;
break;
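In the loop-iv hunk above, the bound must now be built as a widest_int to match the precision of the iterations value, and that also removes a wart: built in the mode's own precision, 1 << (GET_MODE_PRECISION (mode) - 1) sets the sign bit, whereas as a widest_int the same bit is just the positive value 2^(prec-1), which is what the unsigned wi::leu_p test wants. A quick standalone check of the arithmetic (an 8-bit "mode" stands in for illustration; the narrowing conversion relies on the usual two's-complement behavior):

#include <cassert>
#include <cstdint>

int
main ()
{
  const int prec = 8;                    /* illustrative mode precision */
  /* In the mode's own width, bit prec-1 is the sign bit...  */
  int8_t narrow = (int8_t) (1u << (prec - 1));
  assert (narrow < 0);                   /* reads as -128 */
  /* ...but in a wider type the same bit is simply 2^(prec-1).  */
  int64_t wide = (int64_t) 1 << (prec - 1);
  assert (wide == 128);
  /* The maximum value loaded is iterations - 1, so it stays
     non-negative as a signed prec-bit value iff
     iterations <= 2^(prec-1).  */
  uint64_t iterations = 128;             /* boundary case */
  assert (iterations <= (uint64_t) wide && (int8_t) (iterations - 1) >= 0);
  return 0;
}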
sizetype_size = size_in_bytes (sizetype);
type_align = size_int (TYPE_ALIGN_UNIT (type));
- if (INT_CST_LT_UNSIGNED (type_align, sizetype_size))
+ if (INT_CST_LT (type_align, sizetype_size))
cookie_size = sizetype_size;
else
cookie_size = type_align;
|| !integer_zerop (niter.may_be_zero)
|| !niter.niter
|| TREE_CODE (niter.niter) != INTEGER_CST
- || !wi::ltu_p (loop->nb_iterations_upper_bound, niter.niter))
+ || !wi::ltu_p (loop->nb_iterations_upper_bound,
+ wi::to_widest (niter.niter)))
continue;
if (dump_file && (dump_flags & TDF_DETAILS))
|| loop->warned_aggressive_loop_optimizations
/* Only warn if undefined behavior gives us lower estimate than the
known constant bound. */
- || wi::cmpu (i_bound, loop->nb_iterations) >= 0
+ || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
/* And undefined behavior happens unconditionally. */
|| !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
return;
if (cmpc == EQ_EXPR)
result = tree_int_cst_equal (val, boundary);
else if (cmpc == LT_EXPR)
- result = INT_CST_LT_UNSIGNED (val, boundary);
+ result = INT_CST_LT (val, boundary);
else
{
gcc_assert (cmpc == LE_EXPR);
- result = (tree_int_cst_equal (val, boundary)
- || INT_CST_LT_UNSIGNED (val, boundary));
+ result = INT_CST_LE (val, boundary);
}
}
else
&& useless_type_conversion_p (TREE_TYPE (base),
TREE_TYPE (TREE_TYPE (decl)))
&& wi::fits_uhwi_p (mem_ref_offset (base))
- && wi::gtu_p (TYPE_SIZE_UNIT (TREE_TYPE (decl)),
+ && wi::gtu_p (wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
mem_ref_offset (base))
&& multiple_of_p (sizetype, TREE_OPERAND (base, 1),
TYPE_SIZE_UNIT (TREE_TYPE (base))))
{
/* LT is folded faster than GE and others. Inline the common case. */
if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
- {
- if (TYPE_UNSIGNED (TREE_TYPE (val)))
- return INT_CST_LT_UNSIGNED (val, val2);
- else
- {
- if (INT_CST_LT (val, val2))
- return 1;
- }
- }
+ return INT_CST_LT (val, val2);
else
{
tree tcmp;
int
tree_int_cst_lt (const_tree t1, const_tree t2)
{
- if (t1 == t2)
- return 0;
-
- if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2)))
- {
- int t1_sgn = tree_int_cst_sgn (t1);
- int t2_sgn = tree_int_cst_sgn (t2);
-
- if (t1_sgn < t2_sgn)
- return 1;
- else if (t1_sgn > t2_sgn)
- return 0;
- /* Otherwise, both are non-negative, so we compare them as
- unsigned just in case one of them would overflow a signed
- type. */
- }
- else if (!TYPE_UNSIGNED (TREE_TYPE (t1)))
- return INT_CST_LT (t1, t2);
-
- return INT_CST_LT_UNSIGNED (t1, t2);
+ return INT_CST_LT (t1, t2);
}
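The deleted sign analysis in tree_int_cst_lt existed only because fixed-precision comparisons could not mix signedness directly; lifting both operands to widest_int makes the single INT_CST_LT equivalent. An exhaustive standalone check of that equivalence on 8-bit operands (again a model of the two schemes, not the wide-int API):

#include <cassert>
#include <cstdint>

/* New scheme: widen by each operand's own sign, compare once.  */
static bool
lt_widest (bool a_uns, int8_t a, bool b_uns, int8_t b)
{
  int32_t wa = a_uns ? (int32_t) (uint8_t) a : (int32_t) a;
  int32_t wb = b_uns ? (int32_t) (uint8_t) b : (int32_t) b;
  return wa < wb;
}

/* Old scheme: compare signs first, then fall back to an unsigned
   comparison of the bit patterns.  */
static bool
lt_old (bool a_uns, int8_t a, bool b_uns, int8_t b)
{
  if (a_uns != b_uns)
    {
      int sa = a_uns ? ((uint8_t) a != 0) : (a < 0 ? -1 : a > 0);
      int sb = b_uns ? ((uint8_t) b != 0) : (b < 0 ? -1 : b > 0);
      if (sa != sb)
        return sa < sb;
      /* Both non-negative: compare as unsigned below.  */
    }
  else if (!a_uns)
    return a < b;
  return (uint8_t) a < (uint8_t) b;
}

int
main ()
{
  for (int s = 0; s < 4; s++)
    for (int a = -128; a < 128; a++)
      for (int b = -128; b < 128; b++)
        assert (lt_old (s & 1, a, s >> 1, b)
                == lt_widest (s & 1, a, s >> 1, b));
  return 0;
}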
/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */
int
tree_int_cst_compare (const_tree t1, const_tree t2)
{
- if (tree_int_cst_lt (t1, t2))
- return -1;
- else if (tree_int_cst_lt (t2, t1))
- return 1;
- else
- return 0;
+ return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
}
/* Return the HOST_WIDE_INT least significant bits of T, a sizetype
/* Check if c >= type_low_bound. */
if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
{
- wd = type_low_bound;
- if (sgn_c != TYPE_SIGN (TREE_TYPE (type_low_bound)))
- {
- int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
- int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
-
- if (c_neg && !t_neg)
- return false;
- if ((c_neg || !t_neg) && wi::ltu_p (wc, wd))
- return false;
- }
- else if (wi::lt_p (wc, wd, sgn_c))
+ if (INT_CST_LT (c, type_low_bound))
return false;
ok_for_low_bound = true;
}
/* Check if c <= type_high_bound. */
if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
{
- wd = type_high_bound;
- if (sgn_c != TYPE_SIGN (TREE_TYPE (type_high_bound)))
- {
- int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
- int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
-
- if (t_neg && !c_neg)
- return false;
- if ((t_neg || !c_neg) && wi::gtu_p (wc, wd))
- return false;
- }
- else if (wi::gt_p (wc, wd, sgn_c))
+ if (INT_CST_LT (type_high_bound, c))
return false;
ok_for_high_bound = true;
}
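int_fits_type_p reduces to the same idea: once the constant and the type's bounds are lifted according to their own signs, fitting is just low <= c && c <= high, and the deleted negativity case analysis falls out. A standalone model (plain C++, 32-bit values widened to 64 bits for illustration):

/* C fits a type iff it lies within the type's bounds after all three
   values are widened according to their own signedness.  */
#include <cassert>
#include <cstdint>

static int64_t
widen (bool uns, int32_t v)
{
  return uns ? (int64_t) (uint32_t) v : (int64_t) v;
}

static bool
fits_p (bool c_uns, int32_t c, bool b_uns, int32_t lo, int32_t hi)
{
  int64_t wc = widen (c_uns, c);
  return widen (b_uns, lo) <= wc && wc <= widen (b_uns, hi);
}

int
main ()
{
  /* Unsigned 0xffffffff exceeds int32's upper bound...  */
  assert (!fits_p (true, -1, false, INT32_MIN, INT32_MAX));
  /* ...but unsigned 123 fits.  */
  assert (fits_p (true, 123, false, INT32_MIN, INT32_MAX));
  return 0;
}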
\f
/* Define additional fields and accessors for nodes representing constants. */
-#define INT_CST_LT(A, B) \
- (wi::lts_p (A, B))
-
-#define INT_CST_LT_UNSIGNED(A, B) \
- (wi::ltu_p (A, B))
-
#define TREE_INT_CST_NUNITS(NODE) \
(INTEGER_CST_CHECK (NODE)->base.u.int_length.unextended)
#define TREE_INT_CST_EXT_NUNITS(NODE) \
(INTEGER_CST_CHECK (NODE)->base.u.int_length.extended)
#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
+#define INT_CST_LT(A, B) (wi::lts_p (wi::to_widest (A), wi::to_widest (B)))
+#define INT_CST_LE(A, B) (wi::les_p (wi::to_widest (A), wi::to_widest (B)))
+
#define TREE_REAL_CST_PTR(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst_ptr)
#define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE))
/* Return true if OP0 < OP1 using signed comparisons. */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int p1)
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
{
HOST_WIDE_INT s0, s1;
unsigned HOST_WIDE_INT u0, u1;
- unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
- unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
- unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
- unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
int l = MAX (op0len - 1, op1len - 1);
/* Only the top block is compared as signed. The rest are unsigned
comparisons. */
- s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
- s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
+ s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
if (s0 < s1)
return true;
if (s0 > s1)
l--;
while (l >= 0)
{
- u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
- u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
+ u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
if (u0 < u1)
return true;
signed compares. */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int p1)
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
{
HOST_WIDE_INT s0, s1;
unsigned HOST_WIDE_INT u0, u1;
- unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
- unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
- unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
- unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
int l = MAX (op0len - 1, op1len - 1);
/* Only the top block is compared as signed. The rest are unsigned
comparisons. */
- s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
- s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
+ s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
if (s0 < s1)
return -1;
if (s0 > s1)
l--;
while (l >= 0)
{
- u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
- u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
+ u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
if (u0 < u1)
return -1;
/* Return true if OP0 < OP1 using unsigned comparisons. */
bool
-wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
+wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
{
unsigned HOST_WIDE_INT x0;
unsigned HOST_WIDE_INT x1;
- unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
- unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
- unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
- unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
int l = MAX (op0len - 1, op1len - 1);
while (l >= 0)
{
- x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
- x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
+ x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
+ x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
if (x0 < x1)
return true;
if (x0 > x1)
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
unsigned compares. */
int
-wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
+wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
{
unsigned HOST_WIDE_INT x0;
unsigned HOST_WIDE_INT x1;
- unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
- unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
- unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
- unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
int l = MAX (op0len - 1, op1len - 1);
while (l >= 0)
{
- x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
- x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
+ x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
+ x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
if (x0 < x1)
return -1;
if (x0 > x1)
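lts_p_large, ltu_p_large, cmps_large and cmpu_large can each share one blocks_needed/small_prec pair because both operands are now presented at a single common precision; selt supplies blocks past an operand's length by extending its top block. A loose standalone model of that implicit extension (not the real selt, which also masks a partial top block):

/* Blocks past an operand's length read as the extension of its top
   block, so same-precision operands compare block by block from the
   top down.  */
#include <cassert>
#include <cstdint>

static uint64_t
selt_model (const int64_t *v, int len, int i, bool sign_extend)
{
  if (i < len)
    return (uint64_t) v[i];
  return (sign_extend && v[len - 1] < 0) ? ~(uint64_t) 0 : 0;
}

int
main ()
{
  int64_t a[1] = { -2 };               /* one block, negative */
  int64_t b[2] = { 5, -1 };            /* two blocks */
  /* At a common two-block width, block 1 of A reads as all-ones.  */
  assert (selt_model (a, 1, 1, true) == selt_model (b, 2, 1, true));
  assert ((int64_t) selt_model (a, 1, 0, true) == -2);
  return 0;
}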
bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int);
bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
+ const HOST_WIDE_INT *, unsigned int);
bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
+ const HOST_WIDE_INT *, unsigned int);
int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
+ const HOST_WIDE_INT *, unsigned int);
int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
+ const HOST_WIDE_INT *, unsigned int);
unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int,
unsigned int, unsigned int);
inline bool
wi::lts_p (const T1 &x, const T2 &y)
{
- WIDE_INT_REF_FOR (T1) xi (x);
- WIDE_INT_REF_FOR (T2) yi (y);
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
// We optimize x < y, where y is 64 or fewer bits.
- if (yi.precision <= HOST_BITS_PER_WIDE_INT)
+ if (wi::fits_shwi_p (yi))
{
// If x fits directly into a shwi, we can compare directly.
if (wi::fits_shwi_p (xi))
// and hence greater than y.
return false;
}
- return lts_p_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
- yi.precision);
+ return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
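With one shared binary precision, the old fast-path test "yi.precision <= HOST_BITS_PER_WIDE_INT" would constrain x as well, so the fast path above is keyed on the value instead: y must fit a signed HWI. Then either x also fits and the HWIs compare directly, or x needs more blocks and its sign alone decides. A standalone model of that case split (GCC's __int128 extension stands in for a multi-block x):

/* If x fits a signed HWI, compare directly; otherwise x lies outside
   the HWI range and its sign determines the answer.  */
#include <cassert>
#include <cstdint>

static bool
model_lts_fast (__int128 x, int64_t y)
{
  if (x >= INT64_MIN && x <= INT64_MAX)
    return (int64_t) x < y;
  return x < 0;   /* negative: below any shwi y; otherwise above */
}

int
main ()
{
  __int128 big = (__int128) 1 << 100;
  assert (!model_lts_fast (big, INT64_MAX));
  assert (model_lts_fast (-big, INT64_MIN));
  assert (model_lts_fast (5, 7));
  return 0;
}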
/* Return true if X < Y when both are treated as unsigned values. */
inline bool
wi::ltu_p (const T1 &x, const T2 &y)
{
- WIDE_INT_REF_FOR (T1) xi (x);
- WIDE_INT_REF_FOR (T2) yi (y);
- if (xi.precision <= HOST_BITS_PER_WIDE_INT
- && yi.precision <= HOST_BITS_PER_WIDE_INT)
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
return xl < yl;
}
- else
- return ltu_p_large (xi.val, xi.len, xi.precision,
- yi.val, yi.len, yi.precision);
+ return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
inline int
wi::cmps (const T1 &x, const T2 &y)
{
- WIDE_INT_REF_FOR (T1) xi (x);
- WIDE_INT_REF_FOR (T2) yi (y);
- if (xi.precision <= HOST_BITS_PER_WIDE_INT
- && yi.precision <= HOST_BITS_PER_WIDE_INT)
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
HOST_WIDE_INT xl = xi.to_shwi ();
HOST_WIDE_INT yl = yi.to_shwi ();
else
return 0;
}
- return cmps_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
- yi.precision);
+ return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
}
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
inline int
wi::cmpu (const T1 &x, const T2 &y)
{
- WIDE_INT_REF_FOR (T1) xi (x);
- WIDE_INT_REF_FOR (T2) yi (y);
- if (xi.precision <= HOST_BITS_PER_WIDE_INT
- && yi.precision <= HOST_BITS_PER_WIDE_INT)
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
else
return 1;
}
- return cmpu_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
- yi.precision);
+ return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
}
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of