+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (mode_size): Change from unsigned short to
+ poly_uint16_pod.
+ (mode_to_bytes): Return a poly_uint16 rather than an unsigned short.
+ (GET_MODE_SIZE): Return a constant if ONLY_FIXED_SIZE_MODES,
+ or if measurement_type is not polynomial.
+ (fixed_size_mode::includes_p): Check for constant-sized modes.
+ * genmodes.c (emit_mode_size_inline): Make mode_size_inline
+ return a poly_uint16 rather than an unsigned short.
+ (emit_mode_size): Change the type of mode_size from unsigned short
+ to poly_uint16_pod. Use ZERO_COEFFS for the initializer.
+ (emit_mode_adjustments): Cope with polynomial vector sizes.
+ * lto-streamer-in.c (lto_input_mode_table): Use bp_unpack_poly_value
+ for GET_MODE_SIZE.
+ * lto-streamer-out.c (lto_write_mode_table): Use bp_pack_poly_value
+ for GET_MODE_SIZE.
+ * auto-inc-dec.c (try_merge): Treat GET_MODE_SIZE as polynomial.
+ * builtins.c (expand_ifn_atomic_compare_exchange_into_call): Likewise.
+ * caller-save.c (setup_save_areas): Likewise.
+ (replace_reg_with_saved_mem): Likewise.
+ * calls.c (emit_library_call_value_1): Likewise.
+ * combine-stack-adj.c (combine_stack_adjustments_for_block): Likewise.
+ * combine.c (simplify_set, make_extraction, simplify_shift_const_1)
+ (gen_lowpart_for_combine): Likewise.
+ * convert.c (convert_to_integer_1): Likewise.
+ * cse.c (equiv_constant, cse_insn): Likewise.
+ * cselib.c (autoinc_split, cselib_hash_rtx): Likewise.
+ (cselib_subst_to_values): Likewise.
+ * dce.c (word_dce_process_block): Likewise.
+ * df-problems.c (df_word_lr_mark_ref): Likewise.
+ * dwarf2cfi.c (init_one_dwarf_reg_size): Likewise.
+ * dwarf2out.c (multiple_reg_loc_descriptor, mem_loc_descriptor)
+ (concat_loc_descriptor, concatn_loc_descriptor, loc_descriptor)
+ (rtl_for_decl_location): Likewise.
+ * emit-rtl.c (gen_highpart, widen_memory_access): Likewise.
+ * expmed.c (extract_bit_field_1, extract_integral_bit_field): Likewise.
+ * expr.c (emit_group_load_1, clear_storage_hints): Likewise.
+ (emit_move_complex, emit_move_multi_word, emit_push_insn): Likewise.
+ (expand_expr_real_1): Likewise.
+ * function.c (assign_parm_setup_block_p, assign_parm_setup_block)
+ (pad_below): Likewise.
+ * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise.
+ * gimple-ssa-store-merging.c (rhs_valid_for_store_merging_p): Likewise.
+ * ira.c (get_subreg_tracking_sizes): Likewise.
+ * ira-build.c (ira_create_allocno_objects): Likewise.
+ * ira-color.c (coalesced_pseudo_reg_slot_compare): Likewise.
+ (ira_sort_regnos_for_alter_reg): Likewise.
+ * ira-costs.c (record_operand_costs): Likewise.
+ * lower-subreg.c (interesting_mode_p, simplify_gen_subreg_concatn)
+ (resolve_simple_move): Likewise.
+ * lra-constraints.c (get_reload_reg, operands_match_p): Likewise.
+ (process_addr_reg, simplify_operand_subreg, curr_insn_transform)
+ (lra_constraints): Likewise.
+ (CONST_POOL_OK_P): Reject variable-sized modes.
+ * lra-spills.c (slot, assign_mem_slot, pseudo_reg_slot_compare)
+ (add_pseudo_to_slot, lra_spill): Likewise.
+ * omp-low.c (omp_clause_aligned_alignment): Likewise.
+ * optabs-query.c (get_best_extraction_insn): Likewise.
+ * optabs-tree.c (expand_vec_cond_expr_p): Likewise.
+ * optabs.c (expand_vec_perm_var, expand_vec_cond_expr): Likewise.
+ (expand_mult_highpart, valid_multiword_target_p): Likewise.
+ * recog.c (offsettable_address_addr_space_p): Likewise.
+ * regcprop.c (maybe_mode_change): Likewise.
+ * reginfo.c (choose_hard_reg_mode, record_subregs_of_mode): Likewise.
+ * regrename.c (build_def_use): Likewise.
+ * regstat.c (dump_reg_info): Likewise.
+ * reload.c (complex_word_subreg_p, push_reload, find_dummy_reload)
+ (find_reloads, find_reloads_subreg_address): Likewise.
+ * reload1.c (eliminate_regs_1): Likewise.
+ * rtlanal.c (for_each_inc_dec_find_inc_dec, rtx_cost): Likewise.
+ * simplify-rtx.c (avoid_constant_pool_reference): Likewise.
+ (simplify_binary_operation_1, simplify_subreg): Likewise.
+ * targhooks.c (default_function_arg_padding): Likewise.
+ (default_hard_regno_nregs, default_class_max_nregs): Likewise.
+ * tree-cfg.c (verify_gimple_assign_binary): Likewise.
+ (verify_gimple_assign_ternary): Likewise.
+ * tree-inline.c (estimate_move_cost): Likewise.
+ * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
+ * tree-ssa-loop-ivopts.c (add_autoinc_candidates): Likewise.
+ (get_address_cost_ainc): Likewise.
+ * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise.
+ (vect_supportable_dr_alignment): Likewise.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+ (vectorizable_reduction): Likewise.
+ * tree-vect-stmts.c (vectorizable_assignment, vectorizable_shift)
+ (vectorizable_operation, vectorizable_load): Likewise.
+ * tree.c (build_same_sized_truth_vector_type): Likewise.
+ * valtrack.c (cleanup_auto_inc_dec): Likewise.
+ * var-tracking.c (emit_note_insn_var_location): Likewise.
+ * config/arc/arc.h (ASM_OUTPUT_CASE_END): Use as_a <scalar_int_mode>.
+ (ADDR_VEC_ALIGN): Likewise.
+
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
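
[Not part of the patch: a minimal illustrative sketch of the poly_int comparison idioms that the hunks below switch to, using a hypothetical machine_mode variable.  Once GET_MODE_SIZE returns a poly_uint16, a size may depend on a runtime vector length, so plain ==/<= comparisons no longer compile for such targets; callers must say whether a property must hold for all lengths (known_*) or merely might hold (maybe_*), or prove the size is a compile-time constant.]

    /* Sketch only -- mirrors patterns used in the hunks below.  */
    static bool
    fits_in_word_p (machine_mode mode)
    {
      /* The size may be e.g. 16 + 16x for variable-length vectors.  */
      return known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD);
    }

    static unsigned int
    words_needed (machine_mode mode)
    {
      /* Callers that genuinely need a constant either assert one with
         to_constant () or test for one with is_constant (), as in the
         dwarf2out.c and lower-subreg.c hunks.  */
      unsigned int size = GET_MODE_SIZE (mode).to_constant ();
      return CEIL (size, UNITS_PER_WORD);
    }
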
inc_insn.reg_res : mem_insn.reg0;
/* The width of the mem being accessed. */
- int size = GET_MODE_SIZE (GET_MODE (mem));
+ poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
rtx_insn *last_insn = NULL;
machine_mode reg_mode = GET_MODE (inc_reg);
for (z = 4; z < 6; z++)
vec->quick_push (gimple_call_arg (call, z));
/* At present we only have BUILT_IN_ATOMIC_COMPARE_EXCHANGE_{1,2,4,8,16}. */
- unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode));
+ unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode).to_constant ());
gcc_assert (bytes_log2 < 5);
built_in_function fncode
= (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1
break;
}
if (k < 0
- && (GET_MODE_SIZE (regno_save_mode[regno][1])
- <= GET_MODE_SIZE (regno_save_mode
- [saved_reg2->hard_regno][1])))
+ && known_le (GET_MODE_SIZE (regno_save_mode[regno][1]),
+ GET_MODE_SIZE (regno_save_mode
+ [saved_reg2->hard_regno][1])))
{
saved_reg->slot
= adjust_address_nv
slot = prev_save_slots[j];
if (slot == NULL_RTX)
continue;
- if (GET_MODE_SIZE (regno_save_mode[regno][1])
- <= GET_MODE_SIZE (GET_MODE (slot))
+ if (known_le (GET_MODE_SIZE (regno_save_mode[regno][1]),
+ GET_MODE_SIZE (GET_MODE (slot)))
&& best_slot_num < 0)
best_slot_num = j;
if (GET_MODE (slot) == regno_save_mode[regno][1])
machine_mode smode = save_mode[regno];
gcc_assert (smode != VOIDmode);
if (hard_regno_nregs (regno, smode) > 1)
- smode = mode_for_size (GET_MODE_SIZE (mode) / nregs,
+ smode = mode_for_size (exact_div (GET_MODE_SIZE (mode), nregs),
GET_MODE_CLASS (mode), 0).require ();
XVECEXP (mem, 0, i) = gen_rtx_REG (smode, regno + i);
}
rtx mem_value = 0;
rtx valreg;
int pcc_struct_value = 0;
- int struct_value_size = 0;
+ poly_int64 struct_value_size = 0;
int flags;
int reg_parm_stack_space = 0;
poly_int64 needed;
end it should be padded. */
argvec[count].locate.where_pad =
BLOCK_REG_PADDING (mode, NULL_TREE,
- GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
+ known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD));
#endif
targetm.calls.function_arg_advance (args_so_far, mode, (tree) 0, true);
rtx val = argvec[argnum].value;
rtx reg = argvec[argnum].reg;
int partial = argvec[argnum].partial;
-#ifdef BLOCK_REG_PADDING
- int size = 0;
-#endif
/* Handle calls that pass values in multiple non-contiguous
locations. The PA64 has examples of this for library calls. */
{
emit_move_insn (reg, val);
#ifdef BLOCK_REG_PADDING
- size = GET_MODE_SIZE (argvec[argnum].mode);
+ poly_int64 size = GET_MODE_SIZE (argvec[argnum].mode);
/* Copied from load_register_parameters. */
/* Handle case where we have a value that needs shifting
up to the msb. eg. a QImode value and we're padding
upward on a BYTES_BIG_ENDIAN machine. */
- if (size < UNITS_PER_WORD
+ if (known_lt (size, UNITS_PER_WORD)
&& (argvec[argnum].locate.where_pad
== (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)))
{
rtx x;
- int shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT;
+ poly_int64 shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT;
/* Assigning REG here rather than a temp makes CALL_FUSAGE
report the whole reg as used. Strictly speaking, the
if (MEM_P (dest)
&& ((STACK_GROWS_DOWNWARD
? (GET_CODE (XEXP (dest, 0)) == PRE_DEC
- && last_sp_adjust
- == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest)))
+ && known_eq (last_sp_adjust,
+ GET_MODE_SIZE (GET_MODE (dest))))
: (GET_CODE (XEXP (dest, 0)) == PRE_INC
- && last_sp_adjust
- == -(HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest))))
+ && known_eq (-last_sp_adjust,
+ GET_MODE_SIZE (GET_MODE (dest)))))
|| ((STACK_GROWS_DOWNWARD
? last_sp_adjust >= 0 : last_sp_adjust <= 0)
&& GET_CODE (XEXP (dest, 0)) == PRE_MODIFY
if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
&& !OBJECT_P (SUBREG_REG (src))
- && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
- / UNITS_PER_WORD)
- == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
- + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
+ && (known_equal_after_align_up
+ (GET_MODE_SIZE (GET_MODE (src)),
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
+ UNITS_PER_WORD))
&& (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
&& ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
&& !REG_CAN_CHANGE_MODE_P (REGNO (dest),
&& ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
&& ! MEM_VOLATILE_P (inner))
{
- int offset = 0;
+ poly_int64 offset = 0;
/* The computations below will be correct if the machine is big
endian in both bits and bytes or little endian in bits and bytes.
machine_mode mode = result_mode;
machine_mode shift_mode;
scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
- unsigned int mode_words
- = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
/* We form (outer_op (code varop count) (outer_const)). */
enum rtx_code outer_op = UNKNOWN;
HOST_WIDE_INT outer_const = 0;
if (subreg_lowpart_p (varop)
&& is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
&& GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
- && (unsigned int) ((GET_MODE_SIZE (inner_mode)
- + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
- == mode_words
+ && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
+ == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
&& GET_MODE_CLASS (int_varop_mode) == MODE_INT)
{
varop = SUBREG_REG (varop);
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
machine_mode imode = GET_MODE (x);
- unsigned int osize = GET_MODE_SIZE (omode);
- unsigned int isize = GET_MODE_SIZE (imode);
rtx result;
if (omode == imode)
/* We can only support MODE being wider than a word if X is a
constant integer or has a mode the same size. */
- if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
- && ! (CONST_SCALAR_INT_P (x) || isize == osize))
+ if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
+ && ! (CONST_SCALAR_INT_P (x)
+ || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
goto fail;
/* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
if (imode == omode)
return x;
-
- isize = GET_MODE_SIZE (imode);
}
result = gen_lowpart_common (omode, x);
do \
{ \
if (GET_CODE (PATTERN (JUMPTABLE)) == ADDR_DIFF_VEC \
- && ((GET_MODE_SIZE (GET_MODE (PATTERN (JUMPTABLE))) \
+ && ((GET_MODE_SIZE (as_a <scalar_int_mode> \
+ (GET_MODE (PATTERN (JUMPTABLE)))) \
* XVECLEN (PATTERN (JUMPTABLE), 1) + 1) \
& 2)) \
arc_toggle_unalign (); \
: SImode)
#define ADDR_VEC_ALIGN(VEC_INSN) \
- (exact_log2 (GET_MODE_SIZE (GET_MODE (PATTERN (VEC_INSN)))))
+ (exact_log2 (GET_MODE_SIZE (as_a <scalar_int_mode> \
+ (GET_MODE (PATTERN (VEC_INSN))))))
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
ASM_OUTPUT_ALIGN ((FILE), ADDR_VEC_ALIGN (TABLE))
}
CASE_CONVERT:
- /* Don't introduce a "can't convert between vector values of
- different size" error. */
- if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
- && (GET_MODE_SIZE (TYPE_MODE
- (TREE_TYPE (TREE_OPERAND (expr, 0))))
- != GET_MODE_SIZE (TYPE_MODE (type))))
- break;
+ {
+ tree argtype = TREE_TYPE (TREE_OPERAND (expr, 0));
+ /* Don't introduce a "can't convert between vector values
+ of different size" error. */
+ if (TREE_CODE (argtype) == VECTOR_TYPE
+ && maybe_ne (GET_MODE_SIZE (TYPE_MODE (argtype)),
+ GET_MODE_SIZE (TYPE_MODE (type))))
+ break;
+ }
/* If truncating after truncating, might as well do all at once.
If truncating after extending, we may get rid of wasted work. */
return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
/* If we didn't and if doing so makes sense, see if we previously
assigned a constant value to the enclosing word mode SUBREG. */
- if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode)
- && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode))
+ if (known_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD)
+ && known_lt (UNITS_PER_WORD, GET_MODE_SIZE (imode)))
{
poly_int64 byte = (SUBREG_BYTE (x)
- subreg_lowpart_offset (mode, word_mode));
already entered SRC and DEST of the SET in the table. */
if (GET_CODE (dest) == SUBREG
- && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
- / UNITS_PER_WORD)
- == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
+ && (known_equal_after_align_down
+ (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1,
+ GET_MODE_SIZE (GET_MODE (dest)) - 1,
+ UNITS_PER_WORD))
&& !partial_subreg_p (dest)
&& sets[i].src_elt != 0)
{
if (memmode == VOIDmode)
return x;
- *off = GEN_INT (-GET_MODE_SIZE (memmode));
+ *off = gen_int_mode (-GET_MODE_SIZE (memmode), GET_MODE (x));
return XEXP (x, 0);
case PRE_INC:
if (memmode == VOIDmode)
return x;
- *off = GEN_INT (GET_MODE_SIZE (memmode));
+ *off = gen_int_mode (GET_MODE_SIZE (memmode), GET_MODE (x));
return XEXP (x, 0);
case PRE_MODIFY:
cselib_hash_rtx (rtx x, int create, machine_mode memmode)
{
cselib_val *e;
+ poly_int64 offset;
int i, j;
enum rtx_code code;
const char *fmt;
case PRE_INC:
/* We can't compute these without knowing the MEM mode. */
gcc_assert (memmode != VOIDmode);
- i = GET_MODE_SIZE (memmode);
+ offset = GET_MODE_SIZE (memmode);
if (code == PRE_DEC)
- i = -i;
+ offset = -offset;
/* Adjust the hash so that (mem:MEMMODE (pre_* (reg))) hashes
like (mem:MEMMODE (plus (reg) (const_int I))). */
hash += (unsigned) PLUS - (unsigned)code
+ cselib_hash_rtx (XEXP (x, 0), create, memmode)
- + cselib_hash_rtx (GEN_INT (i), create, memmode);
+ + cselib_hash_rtx (gen_int_mode (offset, GET_MODE (x)),
+ create, memmode);
return hash ? hash : 1 + (unsigned) PLUS;
case PRE_MODIFY:
struct elt_list *l;
rtx copy = x;
int i;
+ poly_int64 offset;
switch (code)
{
case PRE_DEC:
case PRE_INC:
gcc_assert (memmode != VOIDmode);
- i = GET_MODE_SIZE (memmode);
+ offset = GET_MODE_SIZE (memmode);
if (code == PRE_DEC)
- i = -i;
+ offset = -offset;
return cselib_subst_to_values (plus_constant (GET_MODE (x),
- XEXP (x, 0), i),
+ XEXP (x, 0), offset),
memmode);
case PRE_MODIFY:
df_ref use;
FOR_EACH_INSN_USE (use, insn)
if (DF_REF_REGNO (use) >= FIRST_PSEUDO_REGISTER
- && (GET_MODE_SIZE (GET_MODE (DF_REF_REAL_REG (use)))
- == 2 * UNITS_PER_WORD)
+ && known_eq (GET_MODE_SIZE (GET_MODE (DF_REF_REAL_REG (use))),
+ 2 * UNITS_PER_WORD)
&& !bitmap_bit_p (local_live, 2 * DF_REF_REGNO (use))
&& !bitmap_bit_p (local_live, 2 * DF_REF_REGNO (use) + 1))
dead_debug_add (&debug, use, DF_REF_REGNO (use));
regno = REGNO (reg);
reg_mode = GET_MODE (reg);
if (regno < FIRST_PSEUDO_REGISTER
- || GET_MODE_SIZE (reg_mode) != 2 * UNITS_PER_WORD)
+ || maybe_ne (GET_MODE_SIZE (reg_mode), 2 * UNITS_PER_WORD))
return true;
if (GET_CODE (orig_reg) == SUBREG
const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
- const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
- const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);
+ poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
+ poly_int64 regsize = GET_MODE_SIZE (regmode);
init_state->processed_regno[regno] = true;
init_state->wrote_return_column = true;
}
- if (slotoffset < 0)
+ /* ??? When is this true? Should it be a test based on DCOL instead? */
+ if (maybe_lt (slotoffset, 0))
return;
emit_move_insn (adjust_address (table, slotmode, slotoffset),
gcc_assert ((unsigned) DBX_REGISTER_NUMBER (reg) == dbx_reg_number (rtl));
nregs = REG_NREGS (rtl);
- size = GET_MODE_SIZE (GET_MODE (rtl)) / nregs;
+ /* At present we only track constant-sized pieces. */
+ if (!GET_MODE_SIZE (GET_MODE (rtl)).is_constant (&size))
+ return NULL;
+ size /= nregs;
loc_result = NULL;
while (nregs--)
gcc_assert (GET_CODE (regs) == PARALLEL);
- size = GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0)));
+ /* At present we only track constant-sized pieces. */
+ if (!GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0))).is_constant (&size))
+ return NULL;
loc_result = NULL;
for (i = 0; i < XVECLEN (regs, 0); ++i)
if (is_a <scalar_int_mode> (mode, &int_mode)
&& is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
? GET_MODE_SIZE (int_mode) <= GET_MODE_SIZE (inner_mode)
- : GET_MODE_SIZE (mode) == GET_MODE_SIZE (GET_MODE (inner)))
+ : known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (GET_MODE (inner))))
{
dw_die_ref type_die;
dw_loc_descr_ref cvt;
mem_loc_result = NULL;
break;
}
- if (GET_MODE_SIZE (mode)
- != GET_MODE_SIZE (GET_MODE (inner)))
+ if (maybe_ne (GET_MODE_SIZE (mode), GET_MODE_SIZE (GET_MODE (inner))))
cvt = new_loc_descr (dwarf_OP (DW_OP_convert), 0, 0);
else
cvt = new_loc_descr (dwarf_OP (DW_OP_reinterpret), 0, 0);
{
dw_die_ref type_die;
dw_loc_descr_ref deref;
+ HOST_WIDE_INT size;
if (dwarf_strict && dwarf_version < 5)
return NULL;
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ return NULL;
type_die
= base_type_for_mode (mode, SCALAR_INT_MODE_P (mode));
if (type_die == NULL)
return NULL;
- deref = new_loc_descr (dwarf_OP (DW_OP_deref_type),
- GET_MODE_SIZE (mode), 0);
+ deref = new_loc_descr (dwarf_OP (DW_OP_deref_type), size, 0);
deref->dw_loc_oprnd2.val_class = dw_val_class_die_ref;
deref->dw_loc_oprnd2.v.val_die_ref.die = type_die;
deref->dw_loc_oprnd2.v.val_die_ref.external = 0;
static dw_loc_descr_ref
concat_loc_descriptor (rtx x0, rtx x1, enum var_init_status initialized)
{
+ /* At present we only track constant-sized pieces. */
+ unsigned int size0, size1;
+ if (!GET_MODE_SIZE (GET_MODE (x0)).is_constant (&size0)
+ || !GET_MODE_SIZE (GET_MODE (x1)).is_constant (&size1))
+ return 0;
+
dw_loc_descr_ref cc_loc_result = NULL;
dw_loc_descr_ref x0_ref
= loc_descriptor (x0, VOIDmode, VAR_INIT_STATUS_INITIALIZED);
return 0;
cc_loc_result = x0_ref;
- add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x0)));
+ add_loc_descr_op_piece (&cc_loc_result, size0);
add_loc_descr (&cc_loc_result, x1_ref);
- add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x1)));
+ add_loc_descr_op_piece (&cc_loc_result, size1);
if (initialized == VAR_INIT_STATUS_UNINITIALIZED)
add_loc_descr (&cc_loc_result, new_loc_descr (DW_OP_GNU_uninit, 0, 0));
unsigned int i;
dw_loc_descr_ref cc_loc_result = NULL;
unsigned int n = XVECLEN (concatn, 0);
+ unsigned int size;
for (i = 0; i < n; ++i)
{
dw_loc_descr_ref ref;
rtx x = XVECEXP (concatn, 0, i);
+ /* At present we only track constant-sized pieces. */
+ if (!GET_MODE_SIZE (GET_MODE (x)).is_constant (&size))
+ return NULL;
+
ref = loc_descriptor (x, VOIDmode, VAR_INIT_STATUS_INITIALIZED);
if (ref == NULL)
return NULL;
add_loc_descr (&cc_loc_result, ref);
- add_loc_descr_op_piece (&cc_loc_result, GET_MODE_SIZE (GET_MODE (x)));
+ add_loc_descr_op_piece (&cc_loc_result, size);
}
if (cc_loc_result && initialized == VAR_INIT_STATUS_UNINITIALIZED)
rtvec par_elems = XVEC (rtl, 0);
int num_elem = GET_NUM_ELEM (par_elems);
machine_mode mode;
- int i;
+ int i, size;
/* Create the first one, so we have something to add to. */
loc_result = loc_descriptor (XEXP (RTVEC_ELT (par_elems, 0), 0),
if (loc_result == NULL)
return NULL;
mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, 0), 0));
- add_loc_descr_op_piece (&loc_result, GET_MODE_SIZE (mode));
+ /* At present we only track constant-sized pieces. */
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ return NULL;
+ add_loc_descr_op_piece (&loc_result, size);
for (i = 1; i < num_elem; i++)
{
dw_loc_descr_ref temp;
return NULL;
add_loc_descr (&loc_result, temp);
mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, i), 0));
- add_loc_descr_op_piece (&loc_result, GET_MODE_SIZE (mode));
+ /* At present we only track constant-sized pieces. */
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ return NULL;
+ add_loc_descr_op_piece (&loc_result, size);
}
}
break;
rtl = DECL_INCOMING_RTL (decl);
else if ((rtl == NULL_RTX || is_pseudo_reg (rtl))
&& SCALAR_INT_MODE_P (dmode)
- && GET_MODE_SIZE (dmode) <= GET_MODE_SIZE (pmode)
+ && known_le (GET_MODE_SIZE (dmode), GET_MODE_SIZE (pmode))
&& DECL_INCOMING_RTL (decl))
{
rtx inc = DECL_INCOMING_RTL (decl);
/* Big endian correction check. */
&& BYTES_BIG_ENDIAN
&& TYPE_MODE (TREE_TYPE (decl)) != GET_MODE (rtl)
- && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)))
- < UNITS_PER_WORD))
+ && known_lt (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))),
+ UNITS_PER_WORD))
{
machine_mode addr_mode = get_address_mode (rtl);
- int offset = (UNITS_PER_WORD
- - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))));
+ poly_int64 offset = (UNITS_PER_WORD
+ - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))));
rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)),
plus_constant (addr_mode, XEXP (rtl, 0), offset));
rtx
gen_highpart (machine_mode mode, rtx x)
{
- unsigned int msize = GET_MODE_SIZE (mode);
+ poly_uint64 msize = GET_MODE_SIZE (mode);
rtx result;
/* This case loses if X is a subreg. To catch bugs early,
complain if an invalid MODE is used even in other cases. */
- gcc_assert (msize <= UNITS_PER_WORD
- || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x)));
+ gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
+ || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));
result = simplify_gen_subreg (mode, x, GET_MODE (x),
subreg_highpart_offset (mode, GET_MODE (x)));
widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
{
rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
- unsigned int size = GET_MODE_SIZE (mode);
+ poly_uint64 size = GET_MODE_SIZE (mode);
/* If there are no changes, just return the original memory reference. */
if (new_rtx == memref)
&& !MEM_P (op0)
&& VECTOR_MODE_P (tmode)
&& known_eq (bitsize, GET_MODE_SIZE (tmode))
- && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode))
+ && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode)))
{
machine_mode new_mode = GET_MODE (op0);
if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
GET_MODE_UNIT_BITSIZE (tmode), &nunits)
|| !mode_for_vector (inner_mode, nunits).exists (&new_mode)
|| !VECTOR_MODE_P (new_mode)
- || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
+ || maybe_ne (GET_MODE_SIZE (new_mode),
+ GET_MODE_SIZE (GET_MODE (op0)))
|| GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
|| !targetm.vector_mode_supported_p (new_mode))
new_mode = VOIDmode;
new_mode = MIN_MODE_VECTOR_INT;
FOR_EACH_MODE_FROM (new_mode, new_mode)
- if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
- && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
+ if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0)))
+ && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode))
&& targetm.vector_mode_supported_p (new_mode))
break;
if (new_mode != VOIDmode)
}
else
{
- HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
+ poly_int64 size = GET_MODE_SIZE (GET_MODE (op0));
rtx mem = assign_stack_temp (GET_MODE (op0), size);
emit_move_insn (mem, op0);
op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
/* The mode must be fixed-size, since extract_bit_field_1 handles
extractions from variable-sized objects before calling this
function. */
- unsigned int target_size = GET_MODE_SIZE (GET_MODE (target));
+ unsigned int target_size
+ = GET_MODE_SIZE (GET_MODE (target)).to_constant ();
last = get_last_insn ();
for (i = 0; i < nwords; i++)
{
else if (VECTOR_MODE_P (GET_MODE (dst))
&& REG_P (src))
{
- int slen = GET_MODE_SIZE (GET_MODE (src));
+ poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
rtx mem;
mem = assign_stack_temp (GET_MODE (src), slen);
just move a zero. Otherwise, do this a piece at a time. */
if (mode != BLKmode
&& CONST_INT_P (size)
- && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode))
+ && known_eq (INTVAL (size), GET_MODE_SIZE (mode)))
{
rtx zero = CONST0_RTX (mode);
if (zero != NULL)
existing block move logic. */
if (MEM_P (x) && MEM_P (y))
{
- emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
+ emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
BLOCK_OP_NO_LIBCALL);
return get_last_insn ();
}
rtx_insn *seq;
rtx inner;
bool need_clobber;
- int i;
+ int i, mode_size;
- gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
+ /* This function can only handle cases where the number of words is
+ known at compile time. */
+ mode_size = GET_MODE_SIZE (mode).to_constant ();
+ gcc_assert (mode_size >= UNITS_PER_WORD);
/* If X is a push on the stack, do the push now and replace
X with a reference to the stack pointer. */
start_sequence ();
need_clobber = false;
- for (i = 0;
- i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
- i++)
+ for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
{
rtx xpart = operand_subword (x, i, 1, mode);
rtx ypart;
/* A value is to be stored in an insufficiently aligned
stack slot; copy via a suitably aligned slot if
necessary. */
- size = GEN_INT (GET_MODE_SIZE (mode));
+ size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
if (!MEM_P (xinner))
{
temp = assign_temp (type, 1, 1);
}
else if (partial > 0)
{
- /* Scalar partly in registers. */
-
- int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ /* Scalar partly in registers. This case is only supported
+ for fixed-width modes. */
+ int size = GET_MODE_SIZE (mode).to_constant ();
+ size /= UNITS_PER_WORD;
int i;
int not_stack;
/* # bytes of start of argument
gcc_assert (!TREE_ADDRESSABLE (exp));
if (GET_MODE (op0) == BLKmode)
- emit_block_move (new_with_op0_mode, op0,
- GEN_INT (GET_MODE_SIZE (mode)),
- (modifier == EXPAND_STACK_PARM
- ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
+ {
+ rtx size_rtx = gen_int_mode (mode_size, Pmode);
+ emit_block_move (new_with_op0_mode, op0, size_rtx,
+ (modifier == EXPAND_STACK_PARM
+ ? BLOCK_OP_CALL_PARM
+ : BLOCK_OP_NORMAL));
+ }
else
emit_move_insn (new_with_op0_mode, op0);
/* Only assign_parm_setup_block knows how to deal with register arguments
that are padded at the least significant end. */
if (REG_P (data->entry_parm)
- && GET_MODE_SIZE (data->promoted_mode) < UNITS_PER_WORD
+ && known_lt (GET_MODE_SIZE (data->promoted_mode), UNITS_PER_WORD)
&& (BLOCK_REG_PADDING (data->passed_mode, data->passed_type, 1)
== (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)))
return true;
SET_DECL_ALIGN (parm, MAX (DECL_ALIGN (parm), BITS_PER_WORD));
stack_parm = assign_stack_local (BLKmode, size_stored,
DECL_ALIGN (parm));
- if (GET_MODE_SIZE (GET_MODE (entry_parm)) == size)
+ if (known_eq (GET_MODE_SIZE (GET_MODE (entry_parm)), size))
PUT_MODE (stack_parm, GET_MODE (entry_parm));
set_mem_attributes (stack_parm, parm, 1);
}
pad_below (struct args_size *offset_ptr, machine_mode passed_mode, tree sizetree)
{
unsigned int align = PARM_BOUNDARY / BITS_PER_UNIT;
- if (passed_mode != BLKmode)
- offset_ptr->constant += -GET_MODE_SIZE (passed_mode) & (align - 1);
+ int misalign;
+ if (passed_mode != BLKmode
+ && known_misalignment (GET_MODE_SIZE (passed_mode), align, &misalign))
+ offset_ptr->constant += -misalign & (align - 1);
else
{
if (TREE_CODE (sizetree) != INTEGER_CST
#else\n\
extern __inline__ __attribute__((__always_inline__, __gnu_inline__))\n\
#endif\n\
-unsigned short\n\
+poly_uint16\n\
mode_size_inline (machine_mode mode)\n\
{\n\
- extern %sunsigned short mode_size[NUM_MACHINE_MODES];\n\
+ extern %spoly_uint16_pod mode_size[NUM_MACHINE_MODES];\n\
gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);\n\
switch (mode)\n\
{\n", adj_bytesize ? "" : "const ");
int c;
struct mode_data *m;
- print_maybe_const_decl ("%sunsigned short", "mode_size",
+ print_maybe_const_decl ("%spoly_uint16_pod", "mode_size",
"NUM_MACHINE_MODES", bytesize);
for_all_modes (c, m)
- tagged_printf ("%u", m->bytesize, m->name);
+ tagged_printf ("{ %u" ZERO_COEFFS " }", m->bytesize, m->name);
print_closer ();
}
\nvoid\
\ninit_adjust_machine_modes (void)\
\n{\
-\n size_t s ATTRIBUTE_UNUSED;");
+\n poly_uint16 ps ATTRIBUTE_UNUSED;\n\
+ size_t s ATTRIBUTE_UNUSED;");
/* Size adjustments must be propagated to all containing modes.
A size adjustment forces us to recalculate the alignment too. */
for (a = adj_bytesize; a; a = a->next)
{
- printf ("\n /* %s:%d */\n s = %s;\n",
- a->file, a->line, a->adjustment);
- printf (" mode_size[E_%smode] = s;\n", a->mode->name);
- printf (" mode_unit_size[E_%smode] = s;\n", a->mode->name);
- printf (" mode_base_align[E_%smode] = s & (~s + 1);\n",
+ printf ("\n /* %s:%d */\n", a->file, a->line);
+ switch (a->mode->cl)
+ {
+ case MODE_VECTOR_INT:
+ case MODE_VECTOR_FLOAT:
+ case MODE_VECTOR_FRACT:
+ case MODE_VECTOR_UFRACT:
+ case MODE_VECTOR_ACCUM:
+ case MODE_VECTOR_UACCUM:
+ printf (" ps = %s;\n", a->adjustment);
+ printf (" s = mode_unit_size[E_%smode];\n", a->mode->name);
+ break;
+
+ default:
+ printf (" ps = s = %s;\n", a->adjustment);
+ printf (" mode_unit_size[E_%smode] = s;\n", a->mode->name);
+ break;
+ }
+ printf (" mode_size[E_%smode] = ps;\n", a->mode->name);
+ printf (" mode_base_align[E_%smode] = known_alignment (ps);\n",
a->mode->name);
for (m = a->mode->contained; m; m = m->next_cont)
case MODE_VECTOR_UFRACT:
case MODE_VECTOR_ACCUM:
case MODE_VECTOR_UACCUM:
- printf (" mode_size[E_%smode] = %d*s;\n",
+ printf (" mode_size[E_%smode] = %d * ps;\n",
m->name, m->ncomponents);
printf (" mode_unit_size[E_%smode] = s;\n", m->name);
- printf (" mode_base_align[E_%smode] = (%d*s) & (~(%d*s)+1);\n",
- m->name, m->ncomponents, m->ncomponents);
+ printf (" mode_base_align[E_%smode]"
+ " = known_alignment (%d * ps);\n",
+ m->name, m->ncomponents);
break;
default:
&& optab_handler (sync_compare_and_swap_optab, mode) == CODE_FOR_nothing)
return false;
- if (int_size_in_bytes (etype) != GET_MODE_SIZE (mode))
+ if (maybe_ne (int_size_in_bytes (etype), GET_MODE_SIZE (mode)))
return false;
return true;
static bool
rhs_valid_for_store_merging_p (tree rhs)
{
- return native_encode_expr (rhs, NULL,
- GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0;
+ unsigned HOST_WIDE_INT size;
+ return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
+ && native_encode_expr (rhs, NULL, size) != 0);
}
/* If MEM is a memory reference usable for store merging (either as
int n = ira_reg_class_max_nregs[aclass][mode];
int i;
- if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
+ if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
n = 1;
ALLOCNO_NUM_OBJECTS (a) = n;
regno_max_ref_mode[regno1]);
mode2 = wider_subreg_mode (PSEUDO_REGNO_MODE (regno2),
regno_max_ref_mode[regno2]);
- if ((diff = GET_MODE_SIZE (mode2) - GET_MODE_SIZE (mode1)) != 0)
+ if ((diff = compare_sizes_for_sort (GET_MODE_SIZE (mode2),
+ GET_MODE_SIZE (mode1))) != 0)
return diff;
return regno1 - regno2;
}
machine_mode mode = wider_subreg_mode
(PSEUDO_REGNO_MODE (ALLOCNO_REGNO (a)),
reg_max_ref_mode[ALLOCNO_REGNO (a)]);
- fprintf (ira_dump_file, " a%dr%d(%d,%d)",
- ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
- GET_MODE_SIZE (mode));
+ fprintf (ira_dump_file, " a%dr%d(%d,",
+ ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a));
+ print_dec (GET_MODE_SIZE (mode), ira_dump_file, SIGNED);
+ fprintf (ira_dump_file, ")\n");
}
if (a == allocno)
rtx src = SET_SRC (set);
if (GET_CODE (dest) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (dest))
- == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
+ && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
dest = SUBREG_REG (dest);
if (GET_CODE (src) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (src))
- == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
+ && known_eq (GET_MODE_SIZE (GET_MODE (src)),
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
src = SUBREG_REG (src);
if (REG_P (src) && REG_P (dest)
&& find_regno_note (insn, REG_DEAD, REGNO (src))
HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start)
{
rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))];
- *outer_size = GET_MODE_SIZE (GET_MODE (x));
- *inner_size = GET_MODE_SIZE (GET_MODE (reg));
- return SUBREG_BYTE (x).is_constant (start);
+ return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size)
+ && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size)
+ && SUBREG_BYTE (x).is_constant (start));
}
/* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for
interesting_mode_p (machine_mode mode, unsigned int *bytes,
unsigned int *words)
{
- *bytes = GET_MODE_SIZE (mode);
+ if (!GET_MODE_SIZE (mode).is_constant (bytes))
+ return false;
*words = CEIL (*bytes, UNITS_PER_WORD);
return true;
}
{
rtx op2;
- if ((GET_MODE_SIZE (GET_MODE (op))
- == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+ if (known_eq (GET_MODE_SIZE (GET_MODE (op)),
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
&& known_eq (SUBREG_BYTE (op), 0))
return simplify_gen_subreg_concatn (outermode, SUBREG_REG (op),
GET_MODE (SUBREG_REG (op)), byte);
if (GET_CODE (src) == SUBREG
&& resolve_reg_p (SUBREG_REG (src))
&& (maybe_ne (SUBREG_BYTE (src), 0)
- || (GET_MODE_SIZE (orig_mode)
- != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))))
+ || maybe_ne (orig_size, GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))))
{
real_dest = dest;
dest = gen_reg_rtx (orig_mode);
if (GET_CODE (dest) == SUBREG
&& resolve_reg_p (SUBREG_REG (dest))
&& (maybe_ne (SUBREG_BYTE (dest), 0)
- || (GET_MODE_SIZE (orig_mode)
- != GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))))
+ || maybe_ne (orig_size,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))))
{
rtx reg, smove;
rtx_insn *minsn;
{
if (in_subreg_p)
continue;
- if (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (mode))
+ if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)),
+ GET_MODE_SIZE (mode)))
continue;
reg = lowpart_subreg (mode, reg, GET_MODE (reg));
if (reg == NULL_RTX || GET_CODE (reg) != SUBREG)
((MODE) != VOIDmode \
&& CONSTANT_P (X) \
&& GET_CODE (X) != HIGH \
+ && GET_MODE_SIZE (MODE).is_constant () \
&& !targetm.cannot_force_const_mem (MODE, X))
/* True if C is a non-empty register class that has too few registers
-fno-split-wide-types specified. */
if (!REG_P (reg)
|| in_class_p (reg, cl, &new_class)
- || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (ptr_mode))
+ || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode)))
loc = &SUBREG_REG (*loc);
}
a word. */
if (!(maybe_ne (GET_MODE_PRECISION (mode),
GET_MODE_PRECISION (innermode))
- && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
- && GET_MODE_SIZE (innermode) <= UNITS_PER_WORD
+ && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD)
+ && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD)
&& WORD_REGISTER_OPERATIONS)
&& (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode)
&& targetm.slow_unaligned_access (mode, MEM_ALIGN (subst)))
(ira_class_hard_regs[goal_alt[i]][0],
GET_MODE (reg), byte, mode) >= 0)))
|| (partial_subreg_p (mode, GET_MODE (reg))
- && GET_MODE_SIZE (GET_MODE (reg)) <= UNITS_PER_WORD
+ && known_le (GET_MODE_SIZE (GET_MODE (reg)),
+ UNITS_PER_WORD)
&& WORD_REGISTER_OPERATIONS)))
{
/* An OP_INOUT is required when reloading a subreg of a
/* Prevent access beyond equivalent memory for
paradoxical subregs. */
|| (MEM_P (x)
- && (GET_MODE_SIZE (lra_reg_info[i].biggest_mode)
- > GET_MODE_SIZE (GET_MODE (x))))
+ && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode),
+ GET_MODE_SIZE (GET_MODE (x))))
|| (pic_offset_table_rtx
&& ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x)
&& (targetm.preferred_reload_class
/* Maximum alignment required by all users of the slot. */
unsigned int align;
/* Maximum size required by all users of the slot. */
- HOST_WIDE_INT size;
+ poly_int64 size;
/* Memory representing the all stack slot. It can be different from
memory representing a pseudo belonging to give stack slot because
pseudo can be placed in a part of the corresponding stack slot.
{
rtx x = NULL_RTX;
machine_mode mode = GET_MODE (regno_reg_rtx[i]);
- HOST_WIDE_INT inherent_size = PSEUDO_REGNO_BYTES (i);
+ poly_int64 inherent_size = PSEUDO_REGNO_BYTES (i);
machine_mode wider_mode
= wider_subreg_mode (mode, lra_reg_info[i].biggest_mode);
- HOST_WIDE_INT total_size = GET_MODE_SIZE (wider_mode);
+ poly_int64 total_size = GET_MODE_SIZE (wider_mode);
poly_int64 adjust = 0;
lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
const int regno1 = *(const int *) v1p;
const int regno2 = *(const int *) v2p;
int diff, slot_num1, slot_num2;
- int total_size1, total_size2;
slot_num1 = pseudo_slots[regno1].slot_num;
slot_num2 = pseudo_slots[regno2].slot_num;
if ((diff = slot_num1 - slot_num2) != 0)
return (frame_pointer_needed
|| (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
- total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
- total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
- if ((diff = total_size2 - total_size1) != 0)
+ poly_int64 total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
+ poly_int64 total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
+ if ((diff = compare_sizes_for_sort (total_size2, total_size1)) != 0)
return diff;
return regno1 - regno2;
}
lra_reg_info[regno].biggest_mode);
unsigned int align = spill_slot_alignment (mode);
slots[slot_num].align = MAX (slots[slot_num].align, align);
- slots[slot_num].size = MAX (slots[slot_num].size, GET_MODE_SIZE (mode));
+ slots[slot_num].size = upper_bound (slots[slot_num].size,
+ GET_MODE_SIZE (mode));
if (slots[slot_num].regno < 0)
{
{
for (i = 0; i < slots_num; i++)
{
- fprintf (lra_dump_file, " Slot %d regnos (width = %d):", i,
- GET_MODE_SIZE (GET_MODE (slots[i].mem)));
+ fprintf (lra_dump_file, " Slot %d regnos (width = ", i);
+ print_dec (GET_MODE_SIZE (GET_MODE (slots[i].mem)),
+ lra_dump_file, SIGNED);
+ fprintf (lra_dump_file, "):");
for (curr_regno = slots[i].regno;;
curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
{
{
enum mode_class mclass
= bp_unpack_enum (&bp, mode_class, MAX_MODE_CLASS);
- unsigned int size = bp_unpack_value (&bp, 8);
+ poly_uint16 size = bp_unpack_poly_value (&bp, 16);
poly_uint16 prec = bp_unpack_poly_value (&bp, 16);
machine_mode inner = (machine_mode) bp_unpack_value (&bp, 8);
poly_uint16 nunits = bp_unpack_poly_value (&bp, 16);
pass ? mr = (machine_mode) (mr + 1)
: mr = GET_MODE_WIDER_MODE (mr).else_void ())
if (GET_MODE_CLASS (mr) != mclass
- || GET_MODE_SIZE (mr) != size
+ || maybe_ne (GET_MODE_SIZE (mr), size)
|| maybe_ne (GET_MODE_PRECISION (mr), prec)
|| (inner == m
? GET_MODE_INNER (mr) != mr
continue;
bp_pack_value (&bp, m, 8);
bp_pack_enum (&bp, mode_class, MAX_MODE_CLASS, GET_MODE_CLASS (m));
- bp_pack_value (&bp, GET_MODE_SIZE (m), 8);
+ bp_pack_poly_value (&bp, GET_MODE_SIZE (m), 16);
bp_pack_poly_value (&bp, GET_MODE_PRECISION (m), 16);
bp_pack_value (&bp, GET_MODE_INNER (m), 8);
bp_pack_poly_value (&bp, GET_MODE_NUNITS (m), 16);
typedef opt_mode<machine_mode> opt_machine_mode;
-extern CONST_MODE_SIZE unsigned short mode_size[NUM_MACHINE_MODES];
+extern CONST_MODE_SIZE poly_uint16_pod mode_size[NUM_MACHINE_MODES];
extern const poly_uint16_pod mode_precision[NUM_MACHINE_MODES];
extern const unsigned char mode_inner[NUM_MACHINE_MODES];
extern const poly_uint16_pod mode_nunits[NUM_MACHINE_MODES];
/* Return the base GET_MODE_SIZE value for MODE. */
-ALWAYS_INLINE unsigned short
+ALWAYS_INLINE poly_uint16
mode_to_bytes (machine_mode mode)
{
#if GCC_VERSION >= 4001
/* Get the size in bytes of an object of mode MODE. */
-#define GET_MODE_SIZE(MODE) (mode_to_bytes (MODE))
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_SIZE(MODE) ((unsigned short) mode_to_bytes (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_SIZE (machine_mode mode)
+{
+ return mode_to_bytes (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_SIZE (const T &mode)
+{
+ return mode_to_bytes (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_SIZE (const T &mode)
+{
+ return mode_to_bytes (mode).coeffs[0];
+}
+#endif
/* Get the size in bits of an object of mode MODE. */
/* Return true if MODE has a fixed size. */
inline bool
-fixed_size_mode::includes_p (machine_mode)
+fixed_size_mode::includes_p (machine_mode mode)
{
- return true;
+ return mode_to_bytes (mode).is_constant ();
}
/* Wrapper for mode arguments to target macros, so that if a target
tree type = lang_hooks.types.type_for_mode (mode, 1);
if (type == NULL_TREE || TYPE_MODE (type) != mode)
continue;
- unsigned int nelts = GET_MODE_SIZE (vmode) / GET_MODE_SIZE (mode);
+ poly_uint64 nelts = exact_div (GET_MODE_SIZE (vmode),
+ GET_MODE_SIZE (mode));
type = build_vector_type (type, nelts);
if (TYPE_MODE (type) != vmode)
continue;
FOR_EACH_MODE_FROM (mode_iter, mode)
{
mode = mode_iter.require ();
- if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (field_mode)
+ if (maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (field_mode))
|| TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
field_mode))
break;
TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing)
return true;
- if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
+ if (maybe_ne (GET_MODE_SIZE (value_mode), GET_MODE_SIZE (cmp_op_mode))
|| maybe_ne (GET_MODE_NUNITS (value_mode), GET_MODE_NUNITS (cmp_op_mode)))
return false;
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
enum insn_code icode;
- unsigned int i, w, u;
+ unsigned int i, u;
rtx tmp, sel_qi;
- w = GET_MODE_SIZE (mode);
u = GET_MODE_UNIT_SIZE (mode);
if (!target || GET_MODE (target) != mode)
/* Broadcast the low byte each element into each of its bytes.
The encoding has U interleaved stepped patterns, one for each
byte of an element. */
- vec_perm_builder const_sel (w, u, 3);
+ vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
for (i = 0; i < 3; ++i)
for (unsigned int j = 0; j < u; ++j)
unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
- gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
+ gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
&& known_eq (GET_MODE_NUNITS (mode),
GET_MODE_NUNITS (cmp_op_mode)));
wmode = insn_data[icode].operand[0].mode;
gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
GET_MODE_NUNITS (mode)));
- gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
+ gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
create_input_operand (&eops[1], op0, mode);
valid_multiword_target_p (rtx target)
{
machine_mode mode;
- int i;
+ int i, size;
mode = GET_MODE (target);
- for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ return false;
+ for (i = 0; i < size; i += UNITS_PER_WORD)
if (!validate_subreg (word_mode, mode, target, i))
return false;
return true;
int (*addressp) (machine_mode, rtx, addr_space_t) =
(strictp ? strict_memory_address_addr_space_p
: memory_address_addr_space_p);
- unsigned int mode_sz = GET_MODE_SIZE (mode);
+ poly_int64 mode_sz = GET_MODE_SIZE (mode);
if (CONSTANT_ADDRESS_P (y))
return 1;
Clearly that depends on the situation in which it's being used.
However, the current situation in which we test 0xffffffff is
less than ideal. Caveat user. */
- if (mode_sz == 0)
+ if (known_eq (mode_sz, 0))
mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
/* If the expression contains a constant term,
go inside a LO_SUM here, so we do so as well. */
if (GET_CODE (y) == LO_SUM
&& mode != BLKmode
- && mode_sz <= GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT)
+ && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT))
z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0),
plus_constant (address_mode, XEXP (y, 1),
mode_sz - 1));
{
int copy_nregs = hard_regno_nregs (copy_regno, copy_mode);
int use_nregs = hard_regno_nregs (copy_regno, new_mode);
- int copy_offset
- = GET_MODE_SIZE (copy_mode) / copy_nregs * (copy_nregs - use_nregs);
+ poly_uint64 bytes_per_reg;
+ if (!can_div_trunc_p (GET_MODE_SIZE (copy_mode),
+ copy_nregs, &bytes_per_reg))
+ return NULL_RTX;
+ poly_uint64 copy_offset = bytes_per_reg * (copy_nregs - use_nregs);
poly_uint64 offset
= subreg_size_lowpart_offset (GET_MODE_SIZE (new_mode) + copy_offset,
GET_MODE_SIZE (orig_mode));
/* We first look for the largest integer mode that can be validly
held in REGNO. If none, we look for the largest floating-point mode.
- If we still didn't find a valid mode, try CCmode. */
+ If we still didn't find a valid mode, try CCmode.
+ The tests use maybe_gt rather than known_gt because we want (for example)
+ N V4SFs to win over plain V4SF even though N might be 1. */
FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
+ && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
+ && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
+ && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
|| !targetm.hard_regno_call_part_clobbered (regno, mode))
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
+ && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
if (found_mode != VOIDmode)
The size of the outer mode must be ordered wrt the size of the
inner mode's registers, since otherwise we wouldn't know at
compile time how many registers the outer mode occupies. */
- poly_uint64 size = MAX (REGMODE_NATURAL_SIZE (shape.inner_mode),
- GET_MODE_SIZE (shape.outer_mode));
+ poly_uint64 size = ordered_max (REGMODE_NATURAL_SIZE (shape.inner_mode),
+ GET_MODE_SIZE (shape.outer_mode));
gcc_checking_assert (known_lt (size, GET_MODE_SIZE (shape.inner_mode)));
if (known_ge (shape.offset, size))
shape.offset -= size;
not already tracking such a reg, we won't start here,
and we must instead make sure to make the operand visible
to the machinery that tracks hard registers. */
+ machine_mode i_mode = recog_data.operand_mode[i];
+ machine_mode matches_mode = recog_data.operand_mode[matches];
if (matches >= 0
- && (GET_MODE_SIZE (recog_data.operand_mode[i])
- != GET_MODE_SIZE (recog_data.operand_mode[matches]))
+ && maybe_ne (GET_MODE_SIZE (i_mode),
+ GET_MODE_SIZE (matches_mode))
&& !verify_reg_in_set (op, &live_in_chains))
{
untracked_operands |= 1 << i;
else if (REG_N_CALLS_CROSSED (i))
fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i));
if (regno_reg_rtx[i] != NULL
- && PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD)
- fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i));
+ && maybe_ne (PSEUDO_REGNO_BYTES (i), UNITS_PER_WORD))
+ {
+ fprintf (file, "; ");
+ print_dec (PSEUDO_REGNO_BYTES (i), file, SIGNED);
+ fprintf (file, " bytes");
+ }
rclass = reg_preferred_class (i);
altclass = reg_alternate_class (i);
complex_word_subreg_p (machine_mode outer_mode, rtx reg)
{
machine_mode inner_mode = GET_MODE (reg);
- return (GET_MODE_SIZE (outer_mode) <= UNITS_PER_WORD
- && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD
- && GET_MODE_SIZE (inner_mode) / UNITS_PER_WORD != REG_NREGS (reg));
+ poly_uint64 reg_words = REG_NREGS (reg) * UNITS_PER_WORD;
+ return (known_le (GET_MODE_SIZE (outer_mode), UNITS_PER_WORD)
+ && maybe_gt (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
+ && !known_equal_after_align_up (GET_MODE_SIZE (inner_mode),
+ reg_words, UNITS_PER_WORD));
}
/* Return true if X is a SUBREG that will need reloading of its SUBREG_REG
&& REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER)
|| MEM_P (SUBREG_REG (in)))
&& (paradoxical_subreg_p (inmode, GET_MODE (SUBREG_REG (in)))
- || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ || (known_le (GET_MODE_SIZE (inmode), UNITS_PER_WORD)
&& is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (in)),
&inner_mode)
&& GET_MODE_SIZE (inner_mode) <= UNITS_PER_WORD
&& LOAD_EXTEND_OP (inner_mode) != UNKNOWN)
|| (WORD_REGISTER_OPERATIONS
&& partial_subreg_p (inmode, GET_MODE (SUBREG_REG (in)))
- && ((GET_MODE_SIZE (inmode) - 1) / UNITS_PER_WORD ==
- ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1)
- / UNITS_PER_WORD)))))
+ && (known_equal_after_align_down
+ (GET_MODE_SIZE (inmode) - 1,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1,
+ UNITS_PER_WORD)))))
|| (REG_P (SUBREG_REG (in))
&& REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
/* The case where out is nonzero
&& MEM_P (in))
/* This is supposed to happen only for paradoxical subregs made by
combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */
- gcc_assert (GET_MODE_SIZE (GET_MODE (in)) <= GET_MODE_SIZE (inmode));
+ gcc_assert (known_le (GET_MODE_SIZE (GET_MODE (in)),
+ GET_MODE_SIZE (inmode)));
inmode = GET_MODE (in);
}
&& (paradoxical_subreg_p (outmode, GET_MODE (SUBREG_REG (out)))
|| (WORD_REGISTER_OPERATIONS
&& partial_subreg_p (outmode, GET_MODE (SUBREG_REG (out)))
- && ((GET_MODE_SIZE (outmode) - 1) / UNITS_PER_WORD ==
- ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1)
- / UNITS_PER_WORD)))))
+ && (known_equal_after_align_down
+ (GET_MODE_SIZE (outmode) - 1,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1,
+ UNITS_PER_WORD)))))
|| (REG_P (SUBREG_REG (out))
&& REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
/* The case of a word mode subreg
is handled differently in the following statement. */
- && ! (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
- > UNITS_PER_WORD))
+ && ! (known_le (GET_MODE_SIZE (outmode), UNITS_PER_WORD)
+ && maybe_gt (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))),
+ UNITS_PER_WORD))
&& !targetm.hard_regno_mode_ok (subreg_regno (out), outmode))
|| (secondary_reload_class (0, rclass, outmode, out) != NO_REGS
&& (secondary_reload_class (0, rclass, GET_MODE (SUBREG_REG (out)),
outloc = &SUBREG_REG (out);
out = *outloc;
gcc_assert (WORD_REGISTER_OPERATIONS || !MEM_P (out)
- || GET_MODE_SIZE (GET_MODE (out))
- <= GET_MODE_SIZE (outmode));
+ || known_le (GET_MODE_SIZE (GET_MODE (out)),
+ GET_MODE_SIZE (outmode)));
outmode = GET_MODE (out);
}
What's going on here. */
&& (in != out
|| (GET_CODE (in) == SUBREG
- && (((GET_MODE_SIZE (GET_MODE (in)) + (UNITS_PER_WORD - 1))
- / UNITS_PER_WORD)
- == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
- + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ && (known_equal_after_align_up
+ (GET_MODE_SIZE (GET_MODE (in)),
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))),
+ UNITS_PER_WORD))))
/* Make sure the operand fits in the reg that dies. */
- && (GET_MODE_SIZE (rel_mode)
- <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0))))
+ && known_le (GET_MODE_SIZE (rel_mode),
+ GET_MODE_SIZE (GET_MODE (XEXP (note, 0))))
&& targetm.hard_regno_mode_ok (regno, inmode)
&& targetm.hard_regno_mode_ok (regno, outmode))
{
/* If operands exceed a word, we can't use either of them
unless they have the same size. */
- if (GET_MODE_SIZE (outmode) != GET_MODE_SIZE (inmode)
- && (GET_MODE_SIZE (outmode) > UNITS_PER_WORD
- || GET_MODE_SIZE (inmode) > UNITS_PER_WORD))
+ if (maybe_ne (GET_MODE_SIZE (outmode), GET_MODE_SIZE (inmode))
+ && (maybe_gt (GET_MODE_SIZE (outmode), UNITS_PER_WORD)
+ || maybe_gt (GET_MODE_SIZE (inmode), UNITS_PER_WORD)))
return 0;
/* Note that {in,out}_offset are needed only when 'in' or 'out'
if (replace
&& MEM_P (op)
&& REG_P (reg)
- && (GET_MODE_SIZE (GET_MODE (reg))
- >= GET_MODE_SIZE (GET_MODE (op)))
+ && known_ge (GET_MODE_SIZE (GET_MODE (reg)),
+ GET_MODE_SIZE (GET_MODE (op)))
&& reg_equiv_constant (REGNO (reg)) == 0)
set_unique_reg_note (emit_insn_before (gen_rtx_USE (VOIDmode, reg),
insn),
&& (paradoxical_subreg_p
(operand_mode[i], GET_MODE (operand)))))
|| BYTES_BIG_ENDIAN
- || ((GET_MODE_SIZE (operand_mode[i])
- <= UNITS_PER_WORD)
+ || (known_le (GET_MODE_SIZE (operand_mode[i]),
+ UNITS_PER_WORD)
&& (is_a <scalar_int_mode>
(GET_MODE (operand), &inner_mode))
&& (GET_MODE_SIZE (inner_mode)
if (! win && ! did_match
&& this_alternative[i] != NO_REGS
- && GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && known_le (GET_MODE_SIZE (operand_mode[i]), UNITS_PER_WORD)
&& reg_class_size [(int) preferred_class[i]] > 0
&& ! small_register_class_p (preferred_class[i]))
{
if (WORD_REGISTER_OPERATIONS
&& partial_subreg_p (outer_mode, inner_mode)
- && ((GET_MODE_SIZE (outer_mode) - 1) / UNITS_PER_WORD
- == (GET_MODE_SIZE (inner_mode) - 1) / UNITS_PER_WORD))
+ && known_equal_after_align_down (GET_MODE_SIZE (outer_mode) - 1,
+ GET_MODE_SIZE (inner_mode) - 1,
+ UNITS_PER_WORD))
return NULL;
/* Since we don't attempt to handle paradoxical subregs, we can just
if (new_rtx != SUBREG_REG (x))
{
- int x_size = GET_MODE_SIZE (GET_MODE (x));
- int new_size = GET_MODE_SIZE (GET_MODE (new_rtx));
+ poly_int64 x_size = GET_MODE_SIZE (GET_MODE (x));
+ poly_int64 new_size = GET_MODE_SIZE (GET_MODE (new_rtx));
if (MEM_P (new_rtx)
&& ((partial_subreg_p (GET_MODE (x), GET_MODE (new_rtx))
So if the number of words is the same, preserve the
subreg so that push_reload can see it. */
&& !(WORD_REGISTER_OPERATIONS
- && (x_size - 1) / UNITS_PER_WORD
- == (new_size -1 ) / UNITS_PER_WORD))
- || x_size == new_size)
+ && known_equal_after_align_down (x_size - 1,
+ new_size - 1,
+ UNITS_PER_WORD)))
+ || known_eq (x_size, new_size))
)
return adjust_address_nv (new_rtx, GET_MODE (x), SUBREG_BYTE (x));
else if (insn && GET_CODE (insn) == DEBUG_INSN)
case PRE_INC:
case POST_INC:
{
- int size = GET_MODE_SIZE (GET_MODE (mem));
+ poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
rtx r1 = XEXP (x, 0);
rtx c = gen_int_mode (size, GET_MODE (r1));
return fn (mem, x, r1, r1, c, data);
case PRE_DEC:
case POST_DEC:
{
- int size = GET_MODE_SIZE (GET_MODE (mem));
+ poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
rtx r1 = XEXP (x, 0);
rtx c = gen_int_mode (-size, GET_MODE (r1));
return fn (mem, x, r1, r1, c, data);
/* A size N times larger than UNITS_PER_WORD likely needs N times as
many insns, taking N times as long. */
- factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
if (factor == 0)
factor = 1;
/* A SET doesn't have a mode, so let's look at the SET_DEST to get
the mode for the factor. */
mode = GET_MODE (SET_DEST (x));
- factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
if (factor == 0)
factor = 1;
/* FALLTHRU */
If that fails we have no choice but to return the original memory. */
if (offset == 0 && cmode == GET_MODE (x))
return c;
- else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
+ else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
{
rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
if (tem && CONSTANT_P (tem))
&& GET_CODE (trueop0) == VEC_CONCAT)
{
rtx vec = trueop0;
- int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
+ offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
/* Try to find the element in the VEC_CONCAT. */
while (GET_MODE (vec) != mode
&& GET_CODE (vec) == VEC_CONCAT)
{
- HOST_WIDE_INT vec_size;
+ poly_int64 vec_size;
if (CONST_INT_P (XEXP (vec, 0)))
{
else
vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
- if (offset < vec_size)
+ if (known_lt (offset, vec_size))
vec = XEXP (vec, 0);
- else
+ else if (known_ge (offset, vec_size))
{
offset -= vec_size;
vec = XEXP (vec, 1);
}
+ else
+ break;
vec = avoid_constant_pool_reference (vec);
}
: GET_MODE_INNER (mode));
gcc_assert (VECTOR_MODE_P (mode));
- gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
- == GET_MODE_SIZE (mode));
+ gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
+ + GET_MODE_SIZE (op1_mode),
+ GET_MODE_SIZE (mode)));
if (VECTOR_MODE_P (op0_mode))
gcc_assert (GET_MODE_INNER (mode)
gcc_assert (GET_MODE (op) == innermode
|| GET_MODE (op) == VOIDmode);
- if (!multiple_p (byte, GET_MODE_SIZE (outermode)))
+ poly_uint64 outersize = GET_MODE_SIZE (outermode);
+ if (!multiple_p (byte, outersize))
return NULL_RTX;
- if (maybe_ge (byte, GET_MODE_SIZE (innermode)))
+ poly_uint64 innersize = GET_MODE_SIZE (innermode);
+ if (maybe_ge (byte, innersize))
return NULL_RTX;
if (outermode == innermode && known_eq (byte, 0U))
if (GET_CODE (op) == SUBREG)
{
machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
+ poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
rtx newx;
if (outermode == innermostmode
/* See whether resulting subreg will be paradoxical. */
if (!paradoxical_subreg_p (outermode, innermostmode))
{
- /* In nonparadoxical subregs we can't handle negative offsets. */
- if (maybe_lt (final_offset, 0))
- return NULL_RTX;
/* Bail out in case resulting subreg would be incorrect. */
- if (!multiple_p (final_offset, GET_MODE_SIZE (outermode))
- || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode)))
+ if (maybe_lt (final_offset, 0)
+ || maybe_ge (poly_uint64 (final_offset), innermostsize)
+ || !multiple_p (final_offset, outersize))
return NULL_RTX;
}
else
if (SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_PROMOTED_SIGN (op) >= 0
&& GET_MODE_CLASS (outermode) == MODE_INT
- && IN_RANGE (GET_MODE_SIZE (outermode),
- GET_MODE_SIZE (innermode),
- GET_MODE_SIZE (innermostmode))
+ && known_ge (outersize, innersize)
+ && known_le (outersize, innermostsize)
&& subreg_lowpart_p (newx))
{
SUBREG_PROMOTED_VAR_P (newx) = 1;
have instruction to move the whole thing. */
&& (! MEM_VOLATILE_P (op)
|| ! have_insn_for (SET, innermode))
- && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
+ && known_le (outersize, innersize))
return adjust_address_nv (op, outermode, byte);
/* Handle complex or vector values represented as CONCAT or VEC_CONCAT
if (GET_CODE (op) == CONCAT
|| GET_CODE (op) == VEC_CONCAT)
{
- unsigned int part_size;
poly_uint64 final_offset;
rtx part, res;
machine_mode part_mode = GET_MODE (XEXP (op, 0));
if (part_mode == VOIDmode)
part_mode = GET_MODE_INNER (GET_MODE (op));
- part_size = GET_MODE_SIZE (part_mode);
+ poly_uint64 part_size = GET_MODE_SIZE (part_mode);
if (known_lt (byte, part_size))
{
part = XEXP (op, 0);
else
return NULL_RTX;
- if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size))
+ if (maybe_gt (final_offset + outersize, part_size))
return NULL_RTX;
part_mode = GET_MODE (part);
size = int_size_in_bytes (type);
}
else
- size = GET_MODE_SIZE (mode);
+ /* Targets with variable-sized modes must override this hook
+ and handle variable-sized modes explicitly. */
+ size = GET_MODE_SIZE (mode).to_constant ();
if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
return PAD_DOWNWARD;
unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
{
- return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
+ /* Targets with variable-sized modes must provide their own definition
+ of this hook. */
+ return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
}
bool
return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
MACRO_MODE (mode));
#else
- return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
+ /* Targets with variable-sized modes must provide their own definition
+ of this hook. */
+ unsigned int size = GET_MODE_SIZE (mode).to_constant ();
+ return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}
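
These default hooks rely on the documented assumption rather than on new functionality: on targets without variable-sized modes every GET_MODE_SIZE result is a compile-time constant, so stripping the polynomial wrapper with to_constant () is safe, and targets that do define such modes must override the hooks instead. A minimal sketch with a hypothetical mode m (not part of the patch):

  poly_uint64 psize = GET_MODE_SIZE (m);
  /* Only valid because the size is known to be constant on this target;
     otherwise the hook must be overridden by the backend.  */
  unsigned int size = psize.to_constant ();
  unsigned int nregs = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
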
|| (!INTEGRAL_TYPE_P (lhs_type)
&& !SCALAR_FLOAT_TYPE_P (lhs_type))))
|| !useless_type_conversion_p (lhs_type, rhs2_type)
- || (GET_MODE_SIZE (element_mode (rhs2_type))
- < 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
+ || maybe_lt (GET_MODE_SIZE (element_mode (rhs2_type)),
+ 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
{
error ("type mismatch in widening sum reduction");
debug_generic_expr (lhs_type);
if (TREE_CODE (rhs1_type) != VECTOR_TYPE
|| TREE_CODE (lhs_type) != VECTOR_TYPE
|| !types_compatible_p (rhs1_type, rhs2_type)
- || (GET_MODE_SIZE (element_mode (lhs_type))
- != 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
+ || maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
+ 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
{
error ("type mismatch in vector widening multiplication");
debug_generic_expr (lhs_type);
|| (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
== INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))))
|| !types_compatible_p (rhs1_type, rhs2_type)
- || (GET_MODE_SIZE (element_mode (rhs1_type))
- != 2 * GET_MODE_SIZE (element_mode (lhs_type))))
+ || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
+ 2 * GET_MODE_SIZE (element_mode (lhs_type))))
{
error ("type mismatch in vector pack expression");
debug_generic_expr (lhs_type);
&& !SCALAR_FLOAT_TYPE_P (lhs_type))))
|| !types_compatible_p (rhs1_type, rhs2_type)
|| !useless_type_conversion_p (lhs_type, rhs3_type)
- || (GET_MODE_SIZE (element_mode (rhs3_type))
- < 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
+ || maybe_lt (GET_MODE_SIZE (element_mode (rhs3_type)),
+ 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
{
error ("type mismatch in dot product reduction");
debug_generic_expr (lhs_type);
if (TREE_CODE (type) == VECTOR_TYPE)
{
scalar_mode inner = SCALAR_TYPE_MODE (TREE_TYPE (type));
- machine_mode simd
- = targetm.vectorize.preferred_simd_mode (inner);
- int simd_mode_size = GET_MODE_SIZE (simd);
- return ((GET_MODE_SIZE (TYPE_MODE (type)) + simd_mode_size - 1)
+ machine_mode simd = targetm.vectorize.preferred_simd_mode (inner);
+ int orig_mode_size
+ = estimated_poly_value (GET_MODE_SIZE (TYPE_MODE (type)));
+ int simd_mode_size = estimated_poly_value (GET_MODE_SIZE (simd));
+ return ((orig_mode_size + simd_mode_size - 1)
/ simd_mode_size);
}
op1 = gimple_assign_rhs1 (def_stmt);
if (conv_code == ERROR_MARK)
{
- if (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (elt->value)))
- != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op1))))
+ if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (elt->value))),
+ GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op1)))))
return false;
conv_code = code;
}
= build_vector_type (build_nonstandard_integer_type (elem_size, 1),
nelts);
if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT
- || GET_MODE_SIZE (TYPE_MODE (mask_type))
- != GET_MODE_SIZE (TYPE_MODE (type)))
+ || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)),
+ GET_MODE_SIZE (TYPE_MODE (type))))
return false;
op2 = vec_perm_indices_to_tree (mask_type, indices);
if (conv_code == ERROR_MARK)
mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
if (((USE_LOAD_PRE_INCREMENT (mem_mode)
|| USE_STORE_PRE_INCREMENT (mem_mode))
- && GET_MODE_SIZE (mem_mode) == cstepi)
+ && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
|| ((USE_LOAD_PRE_DECREMENT (mem_mode)
|| USE_STORE_PRE_DECREMENT (mem_mode))
- && GET_MODE_SIZE (mem_mode) == -cstepi))
+ && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
{
enum tree_code code = MINUS_EXPR;
tree new_base;
}
if (((USE_LOAD_POST_INCREMENT (mem_mode)
|| USE_STORE_POST_INCREMENT (mem_mode))
- && GET_MODE_SIZE (mem_mode) == cstepi)
+ && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
|| ((USE_LOAD_POST_DECREMENT (mem_mode)
|| USE_STORE_POST_DECREMENT (mem_mode))
- && GET_MODE_SIZE (mem_mode) == -cstepi))
+ && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
{
add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
use->stmt);
ainc_cost_data_list[idx] = data;
}
- HOST_WIDE_INT msize = GET_MODE_SIZE (mem_mode);
+ poly_int64 msize = GET_MODE_SIZE (mem_mode);
if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step))
return comp_cost (data->costs[AINC_POST_INC], 0);
if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step))
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
+ /* At present we don't support versioning for alignment
+ with variable VF, since there's no guarantee that the
+ VF is a power of two. We could relax this if we added
+ a way of enforcing a power-of-two size. */
+ unsigned HOST_WIDE_INT size;
+ if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
+ {
+ do_versioning = false;
+ break;
+ }
+
/* The rightmost bits of an aligned address must be zeros.
Construct the mask needed for this test. For example,
GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
mask must be 15 = 0xf. */
- mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
+ mask = size - 1;
/* FORNOW: use the same mask to test all potentially unaligned
references in the loop. The vectorizer currently supports
;
else if (!loop_vinfo
|| (nested_in_vect_loop
- && (TREE_INT_CST_LOW (DR_STEP (dr))
- != GET_MODE_SIZE (TYPE_MODE (vectype)))))
+ && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
+ GET_MODE_SIZE (TYPE_MODE (vectype)))))
return dr_explicit_realign;
else
return dr_explicit_realign_optimized;
return false;
}
- if ((GET_MODE_SIZE (TYPE_MODE (vectype))
- != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
+ if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
+ GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
{
if (dump_enabled_p ())
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "op not supported by target.\n");
- if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
+ if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| !vect_worthwhile_without_simd_p (loop_vinfo, code))
return false;
|| code == VIEW_CONVERT_EXPR)
&& (!vectype_in
|| maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
- || (GET_MODE_SIZE (TYPE_MODE (vectype))
- != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
+ || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
+ GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
return false;
/* We do not handle bit-precision changes. */
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.\n");
/* Check only during analysis. */
- if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
+ if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| (!vec_stmt
&& !vect_worthwhile_without_simd_p (vinfo, code)))
return false;
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.\n");
/* Check only during analysis. */
- if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
+ if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
return false;
if (dump_enabled_p ())
nested within an outer-loop that is being vectorized. */
if (nested_in_vect_loop
- && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
+ && !multiple_p (DR_STEP_ALIGNMENT (dr),
+ GET_MODE_SIZE (TYPE_MODE (vectype))))
{
gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
compute_in_loop = true;
if (VECTOR_BOOLEAN_TYPE_P (vectype))
return vectype;
- unsigned HOST_WIDE_INT size = GET_MODE_SIZE (TYPE_MODE (vectype));
+ poly_uint64 size = GET_MODE_SIZE (TYPE_MODE (vectype));
- if (!size)
+ if (known_eq (size, 0U))
size = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), size);
case PRE_INC:
case PRE_DEC:
- gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
- return gen_rtx_PLUS (GET_MODE (x),
- cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
- gen_int_mode (code == PRE_INC
- ? GET_MODE_SIZE (mem_mode)
- : -GET_MODE_SIZE (mem_mode),
- GET_MODE (x)));
+ {
+ gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
+ poly_int64 offset = GET_MODE_SIZE (mem_mode);
+ if (code == PRE_DEC)
+ offset = -offset;
+ return gen_rtx_PLUS (GET_MODE (x),
+ cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
+ gen_int_mode (offset, GET_MODE (x)));
+ }
case POST_INC:
case POST_DEC:
{
machine_mode mode, wider_mode;
rtx loc2;
- HOST_WIDE_INT offset;
+ HOST_WIDE_INT offset, size, wider_size;
if (i == 0 && var->onepart)
{
mode = GET_MODE (var->var_part[i].cur_loc);
if (mode == VOIDmode && var->onepart)
mode = DECL_MODE (decl);
- last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode);
+ /* We only track subparts of constant-sized objects, since at present
+ there's no representation for polynomial pieces. */
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ {
+ complete = false;
+ continue;
+ }
+ last_limit = offsets[n_var_parts] + size;
/* Attempt to merge adjacent registers or memory. */
for (j = i + 1; j < var->n_var_parts; j++)
break;
if (j < var->n_var_parts
&& GET_MODE_WIDER_MODE (mode).exists (&wider_mode)
+ && GET_MODE_SIZE (wider_mode).is_constant (&wider_size)
&& var->var_part[j].cur_loc
&& mode == GET_MODE (var->var_part[j].cur_loc)
&& (REG_P (loc[n_var_parts]) || MEM_P (loc[n_var_parts]))
if ((REG_P (XEXP (loc[n_var_parts], 0))
&& rtx_equal_p (XEXP (loc[n_var_parts], 0),
XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc2, 0), 1))
- == GET_MODE_SIZE (mode))
+ && INTVAL (XEXP (XEXP (loc2, 0), 1)) == size)
|| (GET_CODE (XEXP (loc[n_var_parts], 0)) == PLUS
&& CONST_INT_P (XEXP (XEXP (loc[n_var_parts], 0), 1))
&& rtx_equal_p (XEXP (XEXP (loc[n_var_parts], 0), 0),
XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1))
- + GET_MODE_SIZE (mode)
+ && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1)) + size
== INTVAL (XEXP (XEXP (loc2, 0), 1))))
new_loc = adjust_address_nv (loc[n_var_parts],
wider_mode, 0);
{
loc[n_var_parts] = new_loc;
mode = wider_mode;
- last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode);
+ last_limit = offsets[n_var_parts] + wider_size;
i = j;
}
}
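
is_constant () is the non-asserting counterpart of to_constant (): it extracts the value only when the size has no runtime term, which is what lets the var-tracking change above skip a polynomial piece (marking the location list incomplete) instead of failing outright. A minimal sketch with a hypothetical mode m (not part of the patch):

  HOST_WIDE_INT size;
  if (GET_MODE_SIZE (m).is_constant (&size))
    {
      /* SIZE is now an ordinary byte count.  */
    }
  else
    {
      /* Variable-sized piece: punt, as the hunk above does by setting
         complete = false and continuing.  */
    }
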