This removes a few more uses of gimple_expr_type.
2021-07-16 Richard Biener <rguenther@suse.de>
* gimple-ssa-store-merging.c (verify_symbolic_number_p): Use
the type of the LHS.
(find_bswap_or_nop_1): Likewise.
(find_bswap_or_nop): Likewise.
* tree-vectorizer.h (vect_get_smallest_scalar_type): Adjust
prototype.
* tree-vect-data-refs.c (vect_get_smallest_scalar_type):
Remove unused parameters, pass in the scalar type. Fix
internal store function handling.
* tree-vect-stmts.c (vect_analyze_stmt): Remove assert.
(vect_get_vector_types_for_stmt): Move down check for
existing vector stmt after we've determined a scalar type.
Pass down the used scalar type to vect_get_smallest_scalar_type.
* tree-vect-generic.c (expand_vector_condition): Use
the type of the LHS.
(expand_vector_scalar_condition): Likewise.
(expand_vector_operations_1): Likewise.
* tree-vect-patterns.c (vect_widened_op_tree): Likewise.
(vect_recog_dot_prod_pattern): Likewise.
(vect_recog_sad_pattern): Likewise.
(vect_recog_widen_op_pattern): Likewise.
(vect_recog_widen_sum_pattern): Likewise.
(vect_recog_mixed_size_cond_pattern): Likewise.
{
tree lhs_type;
- lhs_type = gimple_expr_type (stmt);
+ lhs_type = TREE_TYPE (gimple_get_lhs (stmt));
if (TREE_CODE (lhs_type) != INTEGER_TYPE
&& TREE_CODE (lhs_type) != ENUMERAL_TYPE)
int i, type_size, old_type_size;
tree type;
- type = gimple_expr_type (stmt);
+ type = TREE_TYPE (gimple_assign_lhs (stmt));
type_size = TYPE_PRECISION (type);
if (type_size % BITS_PER_UNIT != 0)
return NULL;
gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
- tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
+ tree type_size = TYPE_SIZE_UNIT (TREE_TYPE (gimple_get_lhs (stmt)));
if (!tree_fits_uhwi_p (type_size))
return NULL;
types. */
tree
-vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
- HOST_WIDE_INT *lhs_size_unit,
- HOST_WIDE_INT *rhs_size_unit)
+vect_get_smallest_scalar_type (stmt_vec_info stmt_info, tree scalar_type)
{
- tree scalar_type = gimple_expr_type (stmt_info->stmt);
HOST_WIDE_INT lhs, rhs;
/* During the analysis phase, this function is called on arbitrary
lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
- if (assign
- && (gimple_assign_cast_p (assign)
+ if (assign)
+ {
+ scalar_type = TREE_TYPE (gimple_assign_lhs (assign));
+ if (gimple_assign_cast_p (assign)
|| gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_PLUS_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_MINUS_EXPR
- || gimple_assign_rhs_code (assign) == FLOAT_EXPR))
- {
- tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
+ || gimple_assign_rhs_code (assign) == FLOAT_EXPR)
+ {
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
- rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
- if (rhs < lhs)
- scalar_type = rhs_type;
+ rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
+ if (rhs < lhs)
+ scalar_type = rhs_type;
+ }
}
else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
{
if (gimple_call_internal_p (call))
{
internal_fn ifn = gimple_call_internal_fn (call);
- if (internal_load_fn_p (ifn) || internal_store_fn_p (ifn))
- /* gimple_expr_type already picked the type of the loaded
- or stored data. */
+ if (internal_load_fn_p (ifn))
+ /* For loads the LHS type does the trick. */
i = ~0U;
+ else if (internal_store_fn_p (ifn))
+ {
+ /* For stores use the type of the stored value. */
+ i = internal_fn_stored_value_index (ifn);
+ scalar_type = TREE_TYPE (gimple_call_arg (call, i));
+ i = ~0U;
+ }
else if (internal_fn_mask_index (ifn) == 0)
i = 1;
}
}
}
- *lhs_size_unit = lhs;
- *rhs_size_unit = rhs;
return scalar_type;
}
expand_vector_condition (gimple_stmt_iterator *gsi, bitmap dce_ssa_names)
{
gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
- tree type = gimple_expr_type (stmt);
+ tree type = TREE_TYPE (gimple_assign_lhs (stmt));
tree a = gimple_assign_rhs1 (stmt);
tree a1 = a;
tree a2 = NULL_TREE;
expand_vector_scalar_condition (gimple_stmt_iterator *gsi)
{
gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
- tree type = gimple_expr_type (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree type = TREE_TYPE (lhs);
tree compute_type = get_compute_type (COND_EXPR, mov_optab, type);
machine_mode compute_mode = TYPE_MODE (compute_type);
gcc_assert (compute_mode != BLKmode);
- tree lhs = gimple_assign_lhs (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
tree rhs3 = gimple_assign_rhs3 (stmt);
tree new_rhs;
return;
rhs1 = gimple_assign_rhs1 (stmt);
- type = gimple_expr_type (stmt);
if (rhs_class == GIMPLE_BINARY_RHS)
rhs2 = gimple_assign_rhs2 (stmt);
+ type = TREE_TYPE (lhs);
if (!VECTOR_TYPE_P (type)
|| !VECTOR_TYPE_P (TREE_TYPE (rhs1)))
return;
if (rhs_code != code && rhs_code != widened_code)
return 0;
- tree type = gimple_expr_type (assign);
+ tree type = TREE_TYPE (gimple_assign_lhs (assign));
if (!INTEGRAL_TYPE_P (type))
return 0;
&oprnd0, &oprnd1))
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_get_lhs (last_stmt));
vect_unpromoted_value unprom_mult;
oprnd0 = vect_look_through_possible_promotion (vinfo, oprnd0, &unprom_mult);
&plus_oprnd0, &plus_oprnd1))
return NULL;
- tree sum_type = gimple_expr_type (last_stmt);
+ tree sum_type = TREE_TYPE (gimple_get_lhs (last_stmt));
/* Any non-truncating sequence of conversions is OK here, since
with a successful match, the result of the ABS(U) is known to fit
/* Pattern detected. */
vect_pattern_detected (name, last_stmt);
- tree type = gimple_expr_type (last_stmt);
+ tree type = TREE_TYPE (gimple_get_lhs (last_stmt));
tree itype = type;
if (TYPE_PRECISION (type) != TYPE_PRECISION (half_type) * 2
|| TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type))
&oprnd0, &oprnd1))
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_get_lhs (last_stmt));
/* So far so good. Since last_stmt was detected as a (summation) reduction,
we know that oprnd1 is the reduction variable (defined by a loop-header
if (comp_vectype == NULL_TREE)
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_assign_lhs (last_stmt));
if (types_compatible_p (type, comp_scalar_type)
|| ((TREE_CODE (then_clause) != INTEGER_CST
|| TREE_CODE (else_clause) != INTEGER_CST)
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
- tree type = gimple_expr_type (stmt_info->stmt);
- gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
|| (call && gimple_call_lhs (call) == NULL_TREE));
"not vectorized: irregular stmt.%G", stmt);
}
- if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
- return opt_result::failure_at (stmt,
- "not vectorized: vector stmt in loop:%G",
- stmt);
-
tree vectype;
tree scalar_type = NULL_TREE;
if (group_size == 0 && STMT_VINFO_VECTYPE (stmt_info))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
}
+
+ if (scalar_type && VECTOR_MODE_P (TYPE_MODE (scalar_type)))
+ return opt_result::failure_at (stmt,
+ "not vectorized: vector stmt in loop:%G",
+ stmt);
+
*stmt_vectype_out = vectype;
/* Don't try to compute scalar types if the stmt produces a boolean
/* The number of units is set according to the smallest scalar
type (or the largest vector size, but we only support one
vector size per vectorization). */
- HOST_WIDE_INT dummy;
- scalar_type = vect_get_smallest_scalar_type (stmt_info, &dummy, &dummy);
+ scalar_type = vect_get_smallest_scalar_type (stmt_info,
+ TREE_TYPE (vectype));
if (scalar_type != TREE_TYPE (vectype))
{
if (dump_enabled_p ())
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
(vec_info *, dr_vec_info *, bool);
-extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
- HOST_WIDE_INT *);
+extern tree vect_get_smallest_scalar_type (stmt_vec_info, tree);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);