From ddb1be658b6726ca8727a5765216944fd33f54ad Mon Sep 17 00:00:00 2001
From: rsandifo
Date: Thu, 21 Nov 2013 10:18:18 +0000
Subject: [PATCH] Remove trailing whitespace. Add missing dbxout.c hunk.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@205205 138bc75d-0d04-0410-961f-82ee72b054a4
---
 gcc/builtins.c                      |  4 +--
 gcc/c-family/c-ada-spec.c           |  2 +-
 gcc/c-family/c-common.c             |  2 +-
 gcc/c/c-decl.c                      |  2 +-
 gcc/c/c-typeck.c                    |  4 +--
 gcc/combine.c                       |  2 +-
 gcc/dbxout.c                        |  2 +-
 gcc/doc/rtl.texi                    |  2 +-
 gcc/dwarf2out.c                     |  4 +--
 gcc/emit-rtl.c                      |  4 +--
 gcc/expmed.c                        |  4 +--
 gcc/expr.c                          |  6 ++--
 gcc/final.c                         |  2 +-
 gcc/fixed-value.c                   |  4 +--
 gcc/fold-const.c                    | 24 +++++++-------
 gcc/fortran/trans-types.c           |  4 +--
 gcc/gimple-fold.c                   |  6 ++--
 gcc/gimple-ssa-strength-reduction.c | 10 +++---
 gcc/lto-streamer-in.c               |  6 ++--
 gcc/lto-streamer-out.c              |  2 +-
 gcc/optabs.c                        |  2 +-
 gcc/real.c                          | 12 +++----
 gcc/recog.c                         |  6 ++--
 gcc/rtl.h                           |  8 ++---
 gcc/rtlanal.c                       |  2 +-
 gcc/simplify-rtx.c                  | 28 ++++++++--------
 gcc/tree-ssa-ccp.c                  |  8 ++---
 gcc/tree-streamer-out.c             |  2 +-
 gcc/tree-switch-conversion.c        |  4 +--
 gcc/tree-vect-generic.c             |  4 +--
 gcc/tree-vrp.c                      | 16 ++++-----
 gcc/tree.c                          |  6 ++--
 gcc/value-prof.c                    |  4 +--
 gcc/varasm.c                        |  2 +-
 gcc/wide-int-print.cc               | 18 +++++------
 gcc/wide-int.cc                     |  2 +-
 gcc/wide-int.h                      | 50 ++++++++++++++---------------
 37 files changed, 135 insertions(+), 135 deletions(-)

diff --git a/gcc/builtins.c b/gcc/builtins.c
index 8439fd95e05c..5f840e96d80e 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -702,7 +702,7 @@ c_readstr (const char *str, enum machine_mode mode)
       ch = (unsigned char) str[i];
       tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
     }
- 
+
   wide_int c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
   return immed_wide_int_const (c, mode);
 }
@@ -8179,7 +8179,7 @@ fold_builtin_bswap (tree fndecl, tree arg)
       case BUILT_IN_BSWAP64:
 	{
 	  signop sgn = TYPE_SIGN (type);
-	  tree result = 
+	  tree result =
 	    wide_int_to_tree (type,
 			      wide_int::from (arg, TYPE_PRECISION (type),
 					      sgn).bswap ());
diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c
index bdba8f98ea14..e194d28a1564 100644
--- a/gcc/c-family/c-ada-spec.c
+++ b/gcc/c-family/c-ada-spec.c
@@ -2205,7 +2205,7 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type, int spc,
 	    val = -val;
 	  }
 	sprintf (pp_buffer (buffer)->digit_buffer,
-		 "16#%" HOST_LONG_FORMAT "x", val.elt (val.get_len () - 1)); 
+		 "16#%" HOST_LONG_FORMAT "x", val.elt (val.get_len () - 1));
 	for (i = val.get_len () - 2; i <= 0; i--)
 	  sprintf (pp_buffer (buffer)->digit_buffer,
 		   HOST_WIDE_INT_PRINT_PADDED_HEX, val.elt (i));
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index f9534a0a6e2e..e5e93894a849 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -9211,7 +9211,7 @@ check_function_arguments_recurse (void (*callback)
 	  /* Extract the argument number, which was previously
 	     checked to be valid.  */
 	  format_num_expr = TREE_VALUE (TREE_VALUE (attrs));
- 
+
 	  format_num = tree_to_uhwi (format_num_expr);

 	  for (inner_arg = first_call_expr_arg (param, &iter), i = 1;
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 3773191dec21..d2d54b0079d6 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -7565,7 +7565,7 @@ finish_enum (tree enumtype, tree values, tree attributes)
 {
   tree pair, tem;
   tree minnode = 0, maxnode = 0;
-  int precision; 
+  int precision;
   signop sign;
   bool toplevel = (file_scope == current_scope);
   struct lang_type *lt;
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 26cc73195c80..7a0d664cc869 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -8053,8 +8053,8 @@ set_nonincremental_init_from_string (tree str,
 		  << (bitpos - HOST_BITS_PER_WIDE_INT);
 	}
-      value = wide_int_to_tree (type, 
-				wide_int::from_array (val, 2, 
+      value = wide_int_to_tree (type,
+				wide_int::from_array (val, 2,
 						      HOST_BITS_PER_WIDE_INT * 2));
       add_pending_init (purpose, value, NULL_TREE, true,
 			braced_init_obstack);
diff --git a/gcc/combine.c b/gcc/combine.c
index 52382a75712b..1c6dc85e0064 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2690,7 +2690,7 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
 	  /* Replace the source in I2 with the new constant and make the
 	     resulting insn the new pattern for I3.  Then skip to where we
 	     validate the pattern.  Everything was set up above.  */
-	  SUBST (SET_SRC (temp), 
+	  SUBST (SET_SRC (temp),
 		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp))));

 	  newpat = PATTERN (i2);
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 5fe79cf69c3d..5da1e0d0e8a7 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -2252,7 +2252,7 @@ dbxout_type (tree type, int full)
 	    if (TREE_CODE (value) == CONST_DECL)
 	      value = DECL_INITIAL (value);

-	    if (cst_fits_shwi_p (value))
+	    if (cst_and_fits_in_hwi (value))
 	      stabstr_D (TREE_INT_CST_LOW (value));
 	    else
 	      stabstr_O (value);
diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi
index db65b212c0db..25659954ad3f 100644
--- a/gcc/doc/rtl.texi
+++ b/gcc/doc/rtl.texi
@@ -1583,7 +1583,7 @@ they were before.

 The values are stored in a compressed format.  The higher-order 0s or
 -1s are not represented if they are just the logical sign
-extension of the number that is represented. 
+extension of the number that is represented.

 @findex CONST_WIDE_INT_VEC
 @item CONST_WIDE_INT_VEC (@var{code})
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index f611a6909dc2..d31cab990122 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -11950,7 +11950,7 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode,
     msb = GEN_INT ((unsigned HOST_WIDE_INT) 1
 		   << (GET_MODE_BITSIZE (mode) - 1));
   else
-    msb = immed_wide_int_const 
+    msb = immed_wide_int_const
       (wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
 			    GET_MODE_PRECISION (mode)), mode);
   if (GET_CODE (msb) == CONST_INT && INTVAL (msb) < 0)
@@ -15174,7 +15174,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
       return true;

     case CONST_WIDE_INT:
-      add_AT_wide (die, DW_AT_const_value, 
+      add_AT_wide (die, DW_AT_const_value,
 		   std::make_pair (rtl, GET_MODE (rtl)));
       return true;
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 7b7633a55076..c3d4f21a43b3 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -229,7 +229,7 @@ const_wide_int_htab_eq (const void *x, const void *y)
   for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
     if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i))
       return 0;
- 
+
   return 1;
 }
 #endif
@@ -587,7 +587,7 @@ immed_wide_int_const (const wide_int &v, enum machine_mode mode)
     {
       unsigned int i;
       rtx value;
-      unsigned int blocks_needed 
+      unsigned int blocks_needed
 	= (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;

       if (len > blocks_needed)
diff --git a/gcc/expmed.c b/gcc/expmed.c
index cca7a0df9fbf..8c13687ff417 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -68,10 +68,10 @@ static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
    The mask is truncated if necessary to the width of mode MODE.  The
    mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

-static inline rtx 
+static inline rtx
 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
 {
-  return immed_wide_int_const 
+  return immed_wide_int_const
     (wi::shifted_mask (bitpos, bitsize, complement,
 		       GET_MODE_PRECISION (mode)), mode);
 }
diff --git a/gcc/expr.c b/gcc/expr.c
index cdc1baff5afe..95d24647e75f 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -722,7 +722,7 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns
   if (mode == oldmode)
     return x;

-  if (CONST_SCALAR_INT_P (x) 
+  if (CONST_SCALAR_INT_P (x)
       && GET_MODE_CLASS (mode) == MODE_INT)
     {
       /* If the caller did not tell us the old mode, then there is
@@ -10543,7 +10543,7 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
   else if (TYPE_UNSIGNED (type))
     {
       enum machine_mode mode = GET_MODE (exp);
-      rtx mask = immed_wide_int_const 
+      rtx mask = immed_wide_int_const
 	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
       return expand_and (mode, exp, mask, target);
     }
@@ -11119,7 +11119,7 @@ const_vector_from_tree (tree exp)
 	RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
 							 inner);
       else
-	RTVEC_ELT (v, i) 
+	RTVEC_ELT (v, i)
 	  = immed_wide_int_const (elt, TYPE_MODE (TREE_TYPE (elt)));
     }
diff --git a/gcc/final.c b/gcc/final.c
index e8d14ab3264a..1128b5b68ea9 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -3902,7 +3902,7 @@ output_addr_const (FILE *file, rtx x)
     {
       wide_int w = wide_int::from_array (&CONST_WIDE_INT_ELT (x, 0),
 					 CONST_WIDE_INT_NUNITS (x),
-					 CONST_WIDE_INT_NUNITS (x) 
+					 CONST_WIDE_INT_NUNITS (x)
 					 * HOST_BITS_PER_WIDE_INT, false);
       print_decs (w, file);
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index 5341029f8aa2..03fd599cdea0 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -159,7 +159,7 @@ fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig,
   signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED;
   real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), f_orig->mode);
-  real_from_integer (&real_value, VOIDmode, 
+  real_from_integer (&real_value, VOIDmode,
 		     wide_int::from (f_orig->data,
 				     GET_MODE_PRECISION (f_orig->mode), sgn),
 		     sgn);
@@ -1105,7 +1105,7 @@ real_convert_from_fixed (REAL_VALUE_TYPE *r, enum machine_mode mode,
   signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED;
   real_2expN (&base_value, GET_MODE_FBIT (f->mode), f->mode);
-  real_from_integer (&fixed_value, VOIDmode, 
+  real_from_integer (&fixed_value, VOIDmode,
 		     wide_int::from (f->data, GET_MODE_PRECISION (f->mode),
 				     sgn), sgn);
   real_arithmetic (&real_value, RDIV_EXPR, &fixed_value, &base_value);
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 9977c3b6205f..22857337845f 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -985,7 +985,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
 	    else
 	      code = RSHIFT_EXPR;
 	  }
- 
+
 	if (code == RSHIFT_EXPR)
 	  /* It's unclear from the C standard whether shifts can overflow.
 	     The following code ignores overflow; perhaps a C standard
@@ -994,7 +994,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
 	else
 	  res = wi::lshift (arg1, arg2);
 	break;
- 
+
       case RROTATE_EXPR:
       case LROTATE_EXPR:
 	if (wi::neg_p (arg2))
@@ -1005,7 +1005,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
 	    else
 	      code = RROTATE_EXPR;
 	  }
- 
+
 	if (code == RROTATE_EXPR)
 	  res = wi::rrotate (arg1, arg2);
 	else
@@ -1019,7 +1019,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
       case MINUS_EXPR:
 	res = wi::sub (arg1, arg2, sign, &overflow);
 	break;
- 
+
       case MULT_EXPR:
 	res = wi::mul (arg1, arg2, sign, &overflow);
 	break;
@@ -1686,7 +1686,7 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1)
 			 && (TYPE_UNSIGNED (type)
 			     < TYPE_UNSIGNED (TREE_TYPE (arg1))))
 		     | TREE_OVERFLOW (arg1));
- 
+
   return t;
 }
@@ -1774,8 +1774,8 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1)
   di.low = TREE_INT_CST_ELT (arg1, 0);
   if (TREE_INT_CST_NUNITS (arg1) == 1)
-    di.high = (HOST_WIDE_INT)di.low < 0 ? (HOST_WIDE_INT)-1 : 0; 
-  else 
+    di.high = (HOST_WIDE_INT)di.low < 0 ? (HOST_WIDE_INT)-1 : 0;
+  else
     di.high = TREE_INT_CST_ELT (arg1, 1);

   overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type), di,
@@ -4357,7 +4357,7 @@ range_predecessor (tree val)
       && operand_equal_p (val, TYPE_MIN_VALUE (type), 0))
     return 0;
   else
-    return range_binop (MINUS_EXPR, NULL_TREE, val, 0, 
+    return range_binop (MINUS_EXPR, NULL_TREE, val, 0,
 			build_int_cst (TREE_TYPE (val), 1), 0);
 }
@@ -4372,7 +4372,7 @@ range_successor (tree val)
       && operand_equal_p (val, TYPE_MAX_VALUE (type), 0))
     return 0;
   else
-    return range_binop (PLUS_EXPR, NULL_TREE, val, 0, 
+    return range_binop (PLUS_EXPR, NULL_TREE, val, 0,
 			build_int_cst (TREE_TYPE (val), 1), 0);
 }
@@ -13636,14 +13636,14 @@ fold_binary_loc (location_t loc,
 	  switch (code)
 	    {
 	    case GE_EXPR:
-	      arg1 = const_binop (MINUS_EXPR, arg1, 
+	      arg1 = const_binop (MINUS_EXPR, arg1,
 				  build_int_cst (TREE_TYPE (arg1), 1));
 	      return fold_build2_loc (loc, NE_EXPR, type,
 				      fold_convert_loc (loc, TREE_TYPE (arg1),
 							arg0), arg1);

 	    case LT_EXPR:
-	      arg1 = const_binop (MINUS_EXPR, arg1, 
+	      arg1 = const_binop (MINUS_EXPR, arg1,
 				  build_int_cst (TREE_TYPE (arg1), 1));
 	      return fold_build2_loc (loc, EQ_EXPR, type,
 				      fold_convert_loc (loc, TREE_TYPE (arg1),
@@ -14193,7 +14193,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
 	    outer_width = TYPE_PRECISION (type);

 	  wide_int mask = wi::shifted_mask
-	    (inner_width, outer_width - inner_width, false, 
+	    (inner_width, outer_width - inner_width, false,
 	     TYPE_PRECISION (TREE_TYPE (arg1)));

 	  wide_int common = mask & arg1;
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index 58b0c252336b..f4471558a22c 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -954,8 +954,8 @@ gfc_init_types (void)
      descriptor.  */
   n = TYPE_PRECISION (gfc_array_index_type) - GFC_DTYPE_SIZE_SHIFT;
-  gfc_max_array_element_size 
-    = wide_int_to_tree (long_unsigned_type_node, 
+  gfc_max_array_element_size
+    = wide_int_to_tree (long_unsigned_type_node,
 			wi::mask (n, UNSIGNED,
 				  TYPE_PRECISION (long_unsigned_type_node)));
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 5d904029769c..18c0a5315ad1 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -3052,7 +3052,7 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
 	  && (idx = (*valueize) (TREE_OPERAND (t, 1)))
 	  && TREE_CODE (idx) == INTEGER_CST)
 	{
-	  tree low_bound = array_ref_low_bound (t); 
+	  tree low_bound = array_ref_low_bound (t);
 	  tree unit_size = array_ref_element_size (t);

 	  /* If the resulting bit-offset is constant, track it.  */
@@ -3062,7 +3062,7 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
 	  offset_int woffset
 	    = wi::sext (wi::to_offset (idx) - wi::to_offset (low_bound),
 			TYPE_PRECISION (TREE_TYPE (idx)));
- 
+
 	  if (wi::fits_shwi_p (woffset))
 	    {
 	      offset = woffset.to_shwi ();
@@ -3070,7 +3070,7 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
 		 to see if it fits.  */
 	      offset *= tree_to_uhwi (unit_size);
 	      offset *= BITS_PER_UNIT;
- 
+
 	      base = TREE_OPERAND (t, 0);
 	      ctor = get_base_constructor (base, &offset, valueize);
 	      /* Empty constructor.  Always fold to 0.  */
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index 268fda26fe0d..b9fc9362c83a 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -755,7 +755,7 @@ slsr_process_phi (gimple phi, bool speed)
      CAND_PHI.  */
   base_type = TREE_TYPE (arg0_base);

-  c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, 
+  c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base,
 				 0, integer_one_node, base_type, savings);

   /* Add the candidate to the statement-candidate mapping.  */
@@ -1516,9 +1516,9 @@ slsr_process_cast (gimple gs, tree rhs1, bool speed)
 	 The first of these is somewhat arbitrary, but the choice of
 	 1 for the stride simplifies the logic for propagating casts
 	 into their uses.  */
-      c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, 
+      c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
 				     0, integer_one_node, ctype, 0);
-      c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, 
+      c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
 				      0, integer_one_node, ctype, 0);
       c->next_interp = c2->cand_num;
     }
@@ -1573,9 +1573,9 @@ slsr_process_copy (gimple gs, tree rhs1, bool speed)
 	 The first of these is somewhat arbitrary, but the choice of
 	 1 for the stride simplifies the logic for propagating casts
 	 into their uses.  */
-      c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, 
+      c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
 				     0, integer_one_node, TREE_TYPE (rhs1), 0);
-      c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, 
+      c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
 				      0, integer_one_node, TREE_TYPE (rhs1), 0);
       c->next_interp = c2->cand_num;
     }
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index c5b73ffb8d90..f2a3c0fd869a 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -708,7 +708,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn,
       int len = streamer_read_uhwi (ib);
       for (i = 0; i < len; i++)
 	a[i] = streamer_read_hwi (ib);
- 
+
       loop->nb_iterations_upper_bound = widest_int::from_array (a, len);
     }
   loop->any_estimate = streamer_read_hwi (ib);
@@ -720,7 +720,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn,
       int len = streamer_read_uhwi (ib);
       for (i = 0; i < len; i++)
 	a[i] = streamer_read_hwi (ib);
- 
+
       loop->nb_iterations_estimate = widest_int::from_array (a, len);
     }
@@ -1276,7 +1276,7 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
       tree type = stream_read_tree (ib, data_in);
       unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
       unsigned HOST_WIDE_INT i;
-      HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; 
+      HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];

       for (i = 0; i < len; i++)
 	a[i] = streamer_read_hwi (ib);
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 733119f69532..a06f38639eb4 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -714,7 +714,7 @@ hash_tree (struct streamer_tree_cache_d *cache, tree t)
   if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
     {
-      int i; 
+      int i;
       v = iterative_hash_host_wide_int (TREE_INT_CST_NUNITS (t), v);
       v = iterative_hash_host_wide_int (TREE_INT_CST_EXT_NUNITS (t), v);
       for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 7400b493640e..d81ac89e87e5 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -870,7 +870,7 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
 				   outof_input, const1_rtx, 0, unsignedp, methods);
       if (shift_mask == BITS_PER_WORD - 1)
 	{
-	  tmp = immed_wide_int_const 
+	  tmp = immed_wide_int_const
 	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
 	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
 				       0, true, methods);
diff --git a/gcc/real.c b/gcc/real.c
index 11bb482b751e..536aa977eaf3 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -1400,7 +1400,7 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
     case rvc_nan:
     overflow:
       *fail = true;
- 
+
       if (r->sign)
 	return wi::set_bit_in_zero (precision - 1, precision);
       else
@@ -1429,7 +1429,7 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
   for (int i = 0; i < words; i++)
     {
       int j = SIGSZ - words + i;
-      val[i] = (j < 0) ? 0 : r->sig[j]; 
+      val[i] = (j < 0) ? 0 : r->sig[j];
     }
 #else
   gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG);
@@ -1438,15 +1438,15 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
       int j = SIGSZ - (words * 2) + (i + 2) + 1;
       if (j < 0)
 	val[i] = 0;
-      else 
+      else
 	{
 	  val[i] = r->sig[j];
 	  val[i] <<= HOST_BITS_PER_LONG;
 	  val[i] |= r->sig[j - 1];
 	}
     }
-#endif 
-  w = SIGSZ * HOST_BITS_PER_LONG + words * HOST_BITS_PER_WIDE_INT; 
+#endif
+  w = SIGSZ * HOST_BITS_PER_LONG + words * HOST_BITS_PER_WIDE_INT;
   result = wide_int::from_array
     (val, (w + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT, w);
   result = wi::lrshift (result, (words * HOST_BITS_PER_WIDE_INT) - exp);
@@ -2172,7 +2172,7 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
       if (r->sign)
 	val = -val;
- 
+
       /* Ensure a multiple of HOST_BITS_PER_WIDE_INT, ceiling, as elt
 	 won't work with precisions that are not a multiple of
 	 HOST_BITS_PER_WIDE_INT.  */
diff --git a/gcc/recog.c b/gcc/recog.c
index ecb7c06794d5..b8073afb3a62 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -1181,16 +1181,16 @@ const_scalar_int_operand (rtx op, enum machine_mode mode)
     {
       int prec = GET_MODE_PRECISION (mode);
       int bitsize = GET_MODE_BITSIZE (mode);
- 
+
       if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
 	return 0;
- 
+
       if (prec == bitsize)
 	return 1;
       else
 	{
 	  /* Multiword partial int.  */
-	  HOST_WIDE_INT x 
+	  HOST_WIDE_INT x
 	    = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1);
 	  return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
 	}
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 5b6351bceceb..7449f53c2c71 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -428,7 +428,7 @@ struct GTY((variable_size)) rtvec_def {
   case CONST_INT: \
   case CONST_WIDE_INT

-/* Match CONST_*s for which pointer equality corresponds to value 
+/* Match CONST_*s for which pointer equality corresponds to value
    equality.  */
 #define CASE_CONST_UNIQUE \
   case CONST_INT: \
@@ -1441,16 +1441,16 @@ wi::int_traits ::decompose (HOST_WIDE_INT *,
 		  || (x.second == BImode && INTVAL (x.first) == 1));
       return wi::storage_ref (&INTVAL (x.first), 1, precision);
- 
+
     case CONST_WIDE_INT:
       return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
 			      CONST_WIDE_INT_NUNITS (x.first), precision);
- 
+
 #if TARGET_SUPPORTS_WIDE_INT == 0
     case CONST_DOUBLE:
       return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
 #endif
- 
+
     default:
       gcc_unreachable ();
     }
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index a0a31a6c2f66..4885bd4aa10f 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -5330,7 +5330,7 @@ get_address_mode (rtx mem)
 /* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single
    words, storing in *FIRST the word that comes first in memory in the target
-   and in *SECOND the other. 
+   and in *SECOND the other.

    TODO: This function needs to be rewritten to work on any size
    integer.  */
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 44b500a38bbc..a594ef64202b 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1659,7 +1659,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
      simplify something and so you if you added this to the test
      above the code would die later anyway.  If this assert happens,
      you just need to make the port support wide int.  */
-  gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); 
+  gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
 #endif

   switch (code)
@@ -3705,7 +3705,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
       wide_int result;
       bool overflow;
       rtx_mode_t pop0 = std::make_pair (op0, mode);
-      rtx_mode_t pop1 = std::make_pair (op1, mode); 
+      rtx_mode_t pop1 = std::make_pair (op1, mode);

 #if TARGET_SUPPORTS_WIDE_INT == 0
       /* This assert keeps the simplification from producing a result
@@ -3735,7 +3735,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
 	  if (overflow)
 	    return NULL_RTX;
 	  break;
- 
+
 	case MOD:
 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
 	  if (overflow)
@@ -3797,15 +3797,15 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
 	    case LSHIFTRT:
 	      result = wi::lrshift (pop0, wop1);
 	      break;
- 
+
 	    case ASHIFTRT:
 	      result = wi::arshift (pop0, wop1);
 	      break;
- 
+
 	    case ASHIFT:
 	      result = wi::lshift (pop0, wop1);
 	      break;
- 
+
 	    default:
 	      gcc_unreachable ();
 	    }
@@ -3822,7 +3822,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
 	case ROTATE:
 	  result = wi::lrotate (pop0, pop1);
 	  break;
- 
+
 	case ROTATERT:
 	  result = wi::rrotate (pop0, pop1);
 	  break;
@@ -5155,7 +5155,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
     return NULL_RTX;

   /* We support any size mode.  */
-  max_bitsize = MAX (GET_MODE_BITSIZE (outermode), 
+  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
 		     GET_MODE_BITSIZE (innermode));

   /* Unpack the value.  */
@@ -5212,7 +5212,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
 	    rtx_mode_t val = std::make_pair (el, innermode);
 	    unsigned char extend = wi::sign_mask (val);

-	    for (i = 0; i < elem_bitsize; i += value_bit) 
+	    for (i = 0; i < elem_bitsize; i += value_bit)
 	      *vp++ = wi::extract_uhwi (val, i, value_bit);
 	    for (; i < elem_bitsize; i += value_bit)
 	      *vp++ = extend;
@@ -5366,17 +5366,17 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
 	  {
 	    int u;
 	    int base = 0;
-	    int units 
-	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1) 
+	    int units
+	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
 		/ HOST_BITS_PER_WIDE_INT;
 	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
 	    wide_int r;

-	    for (u = 0; u < units; u++) 
+	    for (u = 0; u < units; u++)
 	      {
 		unsigned HOST_WIDE_INT buf = 0;
-		for (i = 0; 
-		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize; 
+		for (i = 0;
+		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
 		     i += value_bit)
 		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 6154549dc52e..c836f53defe8 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1235,7 +1235,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
 	      *mask = r1mask;
 	      *val = r1val;
 	    }
-	  else 
+	  else
 	    {
 	      if (wi::neg_p (shift))
 		{
@@ -1272,7 +1272,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
 	      *mask = r1mask;
 	      *val = r1val;
 	    }
-	  else 
+	  else
 	    {
 	      if (wi::neg_p (shift))
 		{
@@ -1375,14 +1375,14 @@ bit_value_binop_1 (enum tree_code code, tree type,
 	widest_int o1val, o2val, o1mask, o2mask;
 	int minmax, maxmin;

-	if ((code == GE_EXPR) || (code == GT_EXPR)) 
+	if ((code == GE_EXPR) || (code == GT_EXPR))
 	  {
 	    o1val = r2val;
 	    o1mask = r2mask;
 	    o2val = r1val;
 	    o2mask = r1mask;
 	    code = swap_tree_comparison (code);
-	  } 
+	  }
 	else
 	  {
 	    o1val = r1val;
diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c
index 7d3620d18a9c..a90877ae2acc 100644
--- a/gcc/tree-streamer-out.c
+++ b/gcc/tree-streamer-out.c
@@ -120,7 +120,7 @@ pack_ts_base_value_fields (struct bitpack_d *bp, tree expr)
    expression EXPR into bitpack BP.  */

 static void
-pack_ts_int_cst_value_fields (struct bitpack_d *bp, 
+pack_ts_int_cst_value_fields (struct bitpack_d *bp,
 			      tree expr ATTRIBUTE_UNUSED)
 {
   int i;
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 4d71efec08db..fc0b5749407f 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -449,8 +449,8 @@ emit_case_bit_tests (gimple swtch, tree index_expr,
       a[0] = test[k].lo;
       a[1] = test[k].hi;
-      tmp = wide_int_to_tree (word_type_node, 
-			      wide_int::from_array (a, 2, 
+      tmp = wide_int_to_tree (word_type_node,
+			      wide_int::from_array (a, 2,
 						    TYPE_PRECISION (word_type_node)));
       tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
       tmp = force_gimple_operand_gsi (&gsi, tmp,
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 777687cbf2e0..50ee7d78a11a 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -70,7 +70,7 @@ build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
   for (i = 0; i < n; i++)
     a[i] = low;

-  return wide_int_to_tree 
+  return wide_int_to_tree
     (type, wide_int::from_array (a, n, TYPE_PRECISION (type)));
 }
@@ -405,7 +405,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
   unsigned HOST_WIDE_INT *mulc = XALLOCAVEC (unsigned HOST_WIDE_INT, nunits);
   int prec = TYPE_PRECISION (TREE_TYPE (type));
   int dummy_int;
-  unsigned int i; 
+  unsigned int i;
   signop sign_p = TYPE_SIGN (TREE_TYPE (type));
   unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type)));
   tree *vec;
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 2da3b2b3f757..f3e0ffef3e4f 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -2622,7 +2622,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
 	  /* Extend the values using the sign of the result to PREC2.
 	     From here on out, everthing is just signed math no matter
-	     what the input types were.  */ 
+	     what the input types were.  */
 	  wide_int min0 = wide_int::from (vr0.min, prec2, sign);
 	  wide_int max0 = wide_int::from (vr0.max, prec2, sign);
 	  wide_int min1 = wide_int::from (vr1.min, prec2, sign);
@@ -3808,7 +3808,7 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
       value_range_t maxvr = VR_INITIALIZER;
       signop sgn = TYPE_SIGN (TREE_TYPE (step));
       bool overflow;
- 
+
       wide_int wtmp = wi::mul (wi::to_widest (step), nit, sgn, &overflow);
       /* If the multiplication overflowed we can't do a meaningful
	 adjustment.  Likewise if the result doesn't fit in the type
@@ -6111,7 +6111,7 @@ check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
     }

   low_bound = array_ref_low_bound (ref);
-  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, 
+  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
 				 build_int_cst (TREE_TYPE (up_bound), 1));

   if (TREE_CODE (low_sub) == SSA_NAME)
@@ -7553,10 +7553,10 @@ union_ranges (enum value_range_type *vr0type,
 	  && vrp_val_is_max (vr1max))
 	{
 	  tree min = int_const_binop (PLUS_EXPR,
-				      *vr0max, 
+				      *vr0max,
 				      build_int_cst (TREE_TYPE (*vr0max), 1));
 	  tree max = int_const_binop (MINUS_EXPR,
-				      vr1min, 
+				      vr1min,
 				      build_int_cst (TREE_TYPE (vr1min), 1));
 	  if (!operand_less_p (max, min))
 	    {
@@ -7619,7 +7619,7 @@ union_ranges (enum value_range_type *vr0type,
 	{
 	  /* Arbitrarily choose the right or left gap.  */
 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
-	    *vr0max = int_const_binop (MINUS_EXPR, vr1min, 
+	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
 				       build_int_cst (TREE_TYPE (vr1min), 1));
 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
@@ -7689,7 +7689,7 @@ union_ranges (enum value_range_type *vr0type,
 	   && vr1type == VR_RANGE)
     {
       if (TREE_CODE (vr1min) == INTEGER_CST)
-	*vr0max = int_const_binop (MINUS_EXPR, vr1min, 
+	*vr0max = int_const_binop (MINUS_EXPR, vr1min,
 				   build_int_cst (TREE_TYPE (vr1min), 1));
       else
 	goto give_up;
@@ -8423,7 +8423,7 @@ simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
   if (rhs_code == EQ_EXPR)
     {
       if (TREE_CODE (op1) == INTEGER_CST)
-	op1 = int_const_binop (BIT_XOR_EXPR, op1, 
+	op1 = int_const_binop (BIT_XOR_EXPR, op1,
 			       build_int_cst (TREE_TYPE (op1), 1));
       else
 	return false;
diff --git a/gcc/tree.c b/gcc/tree.c
index 979ea246b9cb..31d830f1adcb 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -10513,7 +10513,7 @@ widest_int_cst_value (const_tree x)
 #if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT
   gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT);
   gcc_assert (TREE_INT_CST_NUNITS (x) == 2);
- 
+
   if (TREE_INT_CST_NUNITS (x) == 1)
     val = HOST_WIDE_INT (val);
   else
@@ -10649,7 +10649,7 @@ upper_bound_in_type (tree outer, tree inner)
       gcc_unreachable ();
     }

-  return wide_int_to_tree (outer, 
+  return wide_int_to_tree (outer,
 			   wi::mask (prec, false, TYPE_PRECISION (outer)));
 }
@@ -10677,7 +10677,7 @@ lower_bound_in_type (tree outer, tree inner)
 	 precision or narrowing to a signed type, we want to obtain
 	 -2^(oprec-1).  */
       unsigned prec = oprec > iprec ? iprec : oprec;
-      return wide_int_to_tree (outer, 
+      return wide_int_to_tree (outer,
 			       wi::mask (prec - 1, true,
 					 TYPE_PRECISION (outer)));
 }
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 80854daa2ab9..cfa752fc6df3 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -838,7 +838,7 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si)
       HOST_WIDE_INT a[2];
       a[0] = (unsigned HOST_WIDE_INT) val;
       a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1;
- 
+
       tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2,
	TYPE_PRECISION (get_gcov_type ()), false));
     }
@@ -1757,7 +1757,7 @@ gimple_stringops_transform (gimple_stmt_iterator *gsi)
       HOST_WIDE_INT a[2];
       a[0] = (unsigned HOST_WIDE_INT) val;
       a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1;
- 
+
       tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2,
	TYPE_PRECISION (get_gcov_type ()), false));
     }
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 70df7467347f..f627fe9b771a 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -3526,7 +3526,7 @@ const_rtx_hash_1 (rtx *xp, void *data)
       {
 	int shift = sizeof (hashval_t) * CHAR_BIT;
 	const int n = sizeof (HOST_WIDE_INT) / sizeof (hashval_t);
- 
+
 	h ^= (hashval_t) hwi;
 	for (i = 1; i < n; ++i)
 	  {
diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc
index c83d9e44a0d1..61d732ba3cb9 100644
--- a/gcc/wide-int-print.cc
+++ b/gcc/wide-int-print.cc
@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3.  If not see
 #define BLOCKS_NEEDED(PREC) \
   (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)

-void 
+void
 print_dec (const wide_int &wi, char *buf, signop sgn)
 {
   if (sgn == SIGNED)
@@ -42,7 +42,7 @@ print_dec (const wide_int &wi, char *buf, signop sgn)
     print_decu (wi, buf);
 }

-void 
+void
 print_dec (const wide_int &wi, FILE *file, signop sgn)
 {
   if (sgn == SIGNED)
@@ -55,7 +55,7 @@ print_dec (const wide_int &wi, FILE *file, signop sgn)
 /* Try to print the signed self in decimal to BUF if the number fits
    in a HWI.  Other print in hex.  */

-void 
+void
 print_decs (const wide_int &wi, char *buf)
 {
   if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
@@ -73,7 +73,7 @@ print_decs (const wide_int &wi, char *buf)
 /* Try to print the signed self in decimal to FILE if the number fits
    in a HWI.  Other print in hex.  */

-void 
+void
 print_decs (const wide_int &wi, FILE *file)
 {
   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
@@ -84,7 +84,7 @@ print_decs (const wide_int &wi, FILE *file)
 /* Try to print the unsigned self in decimal to BUF if the number fits
    in a HWI.  Other print in hex.  */

-void 
+void
 print_decu (const wide_int &wi, char *buf)
 {
   if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
@@ -97,7 +97,7 @@ print_decu (const wide_int &wi, char *buf)
 /* Try to print the signed self in decimal to FILE if the number fits
    in a HWI.  Other print in hex.  */

-void 
+void
 print_decu (const wide_int &wi, FILE *file)
 {
   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
@@ -105,7 +105,7 @@ print_decu (const wide_int &wi, FILE *file)
   fputs (buf, file);
 }

-void 
+void
 print_hex (const wide_int &wi, char *buf)
 {
   int i = wi.get_len ();
@@ -123,7 +123,7 @@ print_hex (const wide_int &wi, char *buf)
       buf += sprintf (buf, "0x");
       for (j = BLOCKS_NEEDED (wi.get_precision ()); j > i; j--)
 	buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, (HOST_WIDE_INT) -1);
- 
+
     }
   else
     buf += sprintf (buf, "0x"HOST_WIDE_INT_PRINT_HEX_PURE, wi.elt (--i));
@@ -135,7 +135,7 @@ print_hex (const wide_int &wi, char *buf)
 /* Print one big hex number to FILE.  Note that some assemblers may not
    accept this for large modes.  */

-void 
+void
 print_hex (const wide_int &wi, FILE *file)
 {
   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index f22b348797bd..742f0accfcaf 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -190,7 +190,7 @@ wi::to_mpz (wide_int x, mpz_t result, signop sgn)
   if (sgn == UNSIGNED && small_prec)
     {
       HOST_WIDE_INT t[WIDE_INT_MAX_ELTS];
- 
+
       for (int i = 0; i < len - 1; i++)
 	t[i] = v[i];
       t[len-1] = zext_hwi (v[len-1], small_prec);
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 5e02b03b3162..a60389f6620d 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -1364,7 +1364,7 @@ namespace wi
 		 const HOST_WIDE_INT *, unsigned int);
   int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
 		  const HOST_WIDE_INT *, unsigned int);
-  unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, 
+  unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
 			   unsigned int, unsigned int, unsigned int);
   unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
@@ -1973,7 +1973,7 @@ wi::bit_or (const T1 &x, const T2 &y)
       result.set_len (1, is_sign_extended);
     }
   else
-    result.set_len (or_large (val, xi.val, xi.len, 
+    result.set_len (or_large (val, xi.val, xi.len,
 			      yi.val, yi.len, precision), is_sign_extended);
   return result;
 }
@@ -2015,7 +2015,7 @@ wi::bit_xor (const T1 &x, const T2 &y)
       result.set_len (1, is_sign_extended);
     }
   else
-    result.set_len (xor_large (val, xi.val, xi.len, 
+    result.set_len (xor_large (val, xi.val, xi.len,
 			       yi.val, yi.len, precision), is_sign_extended);
   return result;
 }
@@ -2035,7 +2035,7 @@ wi::add (const T1 &x, const T2 &y)
       result.set_len (1);
     }
   else
-    result.set_len (add_large (val, xi.val, xi.len, 
+    result.set_len (add_large (val, xi.val, xi.len,
 			       yi.val, yi.len, precision,
 			       UNSIGNED, 0));
   return result;
@@ -2068,7 +2068,7 @@ wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
       result.set_len (1);
     }
   else
-    result.set_len (add_large (val, xi.val, xi.len, 
+    result.set_len (add_large (val, xi.val, xi.len,
 			       yi.val, yi.len, precision,
 			       sgn, overflow));
   return result;
@@ -2089,7 +2089,7 @@ wi::sub (const T1 &x, const T2 &y)
       result.set_len (1);
     }
   else
-    result.set_len (sub_large (val, xi.val, xi.len, 
+    result.set_len (sub_large (val, xi.val, xi.len,
 			       yi.val, yi.len, precision,
 			       UNSIGNED, 0));
   return result;
@@ -2121,7 +2121,7 @@ wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
       result.set_len (1);
     }
   else
-    result.set_len (sub_large (val, xi.val, xi.len, 
+    result.set_len (sub_large (val, xi.val, xi.len,
 			       yi.val, yi.len, precision,
 			       sgn, overflow));
   return result;
@@ -2157,7 +2157,7 @@ wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   unsigned int precision = get_precision (result);
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
-  result.set_len (mul_internal (val, xi.val, xi.len, 
+  result.set_len (mul_internal (val, xi.val, xi.len,
 				yi.val, yi.len, precision, sgn,
 				overflow, false, false));
   return result;
@@ -2191,7 +2191,7 @@ wi::mul_high (const T1 &x, const T2 &y, signop sgn)
   unsigned int precision = get_precision (result);
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
-  result.set_len (mul_internal (val, xi.val, xi.len, 
+  result.set_len (mul_internal (val, xi.val, xi.len,
 				yi.val, yi.len, precision, sgn,
 				0, true, false));
   return result;
@@ -2246,8 +2246,8 @@ wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
-				     &remainder_len, remainder_val, 
+  quotient.set_len (divmod_internal (quotient_val,
+				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
 				     overflow));
@@ -2288,7 +2288,7 @@ wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
+  quotient.set_len (divmod_internal (quotient_val,
 				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
@@ -2313,8 +2313,8 @@ wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
-				     &remainder_len, remainder_val, 
+  quotient.set_len (divmod_internal (quotient_val,
+				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
 				     overflow));
@@ -2356,8 +2356,8 @@ wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
-				     &remainder_len, remainder_val, 
+  quotient.set_len (divmod_internal (quotient_val,
+				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn, 0));
   remainder.set_len (remainder_len);
@@ -2379,7 +2379,7 @@ wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  divmod_internal (0, &remainder_len, remainder_val, 
+  divmod_internal (0, &remainder_len, remainder_val,
 		   xi.val, xi.len, precision,
 		   yi.val, yi.len, yi.precision, sgn, overflow);
   remainder.set_len (remainder_len);
@@ -2419,8 +2419,8 @@ wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
-				     &remainder_len, remainder_val, 
+  quotient.set_len (divmod_internal (quotient_val,
+				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
 				     overflow));
@@ -2455,8 +2455,8 @@ wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
-				     &remainder_len, remainder_val, 
+  quotient.set_len (divmod_internal (quotient_val,
+				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
 				     overflow));
@@ -2481,7 +2481,7 @@ wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
   WIDE_INT_REF_FOR (T2) yi (y);

   unsigned int remainder_len;
-  quotient.set_len (divmod_internal (quotient_val, 
+  quotient.set_len (divmod_internal (quotient_val,
 				     &remainder_len, remainder_val,
 				     xi.val, xi.len, precision,
 				     yi.val, yi.len, yi.precision, sgn,
@@ -2518,7 +2518,7 @@ wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
 		   WI_BINARY_RESULT (T1, T2) *res)
 {
   WI_BINARY_RESULT (T1, T2) remainder;
-  WI_BINARY_RESULT (T1, T2) quotient 
+  WI_BINARY_RESULT (T1, T2) quotient
     = divmod_trunc (x, y, sgn, &remainder);
   if (remainder == 0)
     {
@@ -2553,7 +2553,7 @@ wi::lshift (const T1 &x, const T2 &y)
 	  result.set_len (1);
 	}
       else
-	result.set_len (lshift_large (val, xi.val, xi.len, 
+	result.set_len (lshift_large (val, xi.val, xi.len,
 				      precision, shift));
     }
   return result;
@@ -2839,7 +2839,7 @@ wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
 {
   STATIC_ASSERT (wi::int_traits::precision);
   T result;
-  result.set_len (shifted_mask (result.write_val (), start, width, 
+  result.set_len (shifted_mask (result.write_val (), start, width,
 				negate_p, wi::int_traits ::precision));
   return result;
-- 
2.39.5